util

package
v1.1.2 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Oct 23, 2021 License: Apache-2.0 Imports: 20 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

View Source
var (
	ErrChunkFull       = errors.New("chunk full")
	ErrOutOfOrder      = errors.New("entry out of order")
	ErrInvalidSize     = errors.New("invalid size")
	ErrInvalidFlag     = errors.New("invalid flag")
	ErrInvalidChecksum = errors.New("invalid chunk checksum")
)

Errors returned by the chunk interface.

View Source
var (
	// Gzip is the gnu zip compression pool
	Gzip     = GzipPool{/* contains filtered or unexported fields */}
	Lz4_64k  = LZ4Pool{/* contains filtered or unexported fields */} // Lz4_64k is the lz4 compression pool, with 64k buffer size
	Lz4_256k = LZ4Pool{/* contains filtered or unexported fields */} // Lz4_256k uses 256k buffer
	Lz4_1M   = LZ4Pool{/* contains filtered or unexported fields */} // Lz4_1M uses 1M buffer
	Lz4_4M   = LZ4Pool{/* contains filtered or unexported fields */} // Lz4_4M uses 4M buffer

	// Snappy is the snappy compression pool
	Snappy SnappyPool
	// Noop is the no compression pool
	Noop NoopPool

	// BufReaderPool is bufio.Reader pool
	BufReaderPool = &BufioReaderPool{
		pool: sync.Pool{
			New: func() interface{} { return bufio.NewReader(nil) },
		},
	}
	// BytesBufferPool is a bytes buffer used for lines decompressed.
	// Buckets [0.5KB,1KB,2KB,4KB,8KB]
	BytesBufferPool = pool.New(1<<9, 1<<13, 2, func(size int) interface{} { return make([]byte, 0, size) })
)

Functions

func BuildObjectKey

func BuildObjectKey(tableName, uploader, dbName string) string

func CompressFile

func CompressFile(src, dest string) error

func DoParallelQueries

func DoParallelQueries(ctx context.Context, tableQuerier TableQuerier, queries []bluge_db.IndexQuery, callback segment.StoredFieldVisitor) error

DoParallelQueries executes the given queries in parallel, with a maximum of 100 queries per batch.

func GetDBNameFromObjectKey

func GetDBNameFromObjectKey(objectKey string) (string, error)

func GetFileFromStorage

func GetFileFromStorage(ctx context.Context, storageClient StorageClient, objectKey, destination string) error

GetFileFromStorage downloads a file from storage to given location.

func QueriesByTable

func QueriesByTable(queries []bluge_db.IndexQuery) map[string][]bluge_db.IndexQuery

QueriesByTable groups and returns queries by tables.

func SupportedEncoding added in v1.1.2

func SupportedEncoding() string

SupportedEncoding returns the list of supported Encoding.

Types

type BufioReaderPool added in v1.1.2

type BufioReaderPool struct {
	// contains filtered or unexported fields
}

BufioReaderPool is a bufio reader that uses sync.Pool.

func (*BufioReaderPool) Get added in v1.1.2

func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader

Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.

func (*BufioReaderPool) Put added in v1.1.2

func (bufPool *BufioReaderPool) Put(b *bufio.Reader)

Put puts the bufio.Reader back into the pool.

type Encoding added in v1.1.2

type Encoding byte

Encoding is the identifier for a chunk encoding.

const (
	EncNone Encoding = iota
	EncGZIP
	EncDumb
	EncLZ4_64k
	EncSnappy

	// Added for testing.
	EncLZ4_256k
	EncLZ4_1M
	EncLZ4_4M
)

The different available encodings. Make sure to preserve the order, as these numeric values are written to the chunks!

func ParseEncoding added in v1.1.2

func ParseEncoding(enc string) (Encoding, error)

ParseEncoding parses a chunk encoding (compression algorithm) by its name.

func (Encoding) String added in v1.1.2

func (e Encoding) String() string

type GzipPool added in v1.1.2

type GzipPool struct {
	// contains filtered or unexported fields
}

GzipPool is a gnu zip compression pool

func (*GzipPool) GetReader added in v1.1.2

func (pool *GzipPool) GetReader(src io.Reader) io.Reader

GetReader gets or creates a new CompressionReader and reset it to read from src

func (*GzipPool) GetWriter added in v1.1.2

func (pool *GzipPool) GetWriter(dst io.Writer) io.WriteCloser

GetWriter gets or creates a new CompressionWriter and reset it to write to dst

func (*GzipPool) PutReader added in v1.1.2

func (pool *GzipPool) PutReader(reader io.Reader)

PutReader places back in the pool a CompressionReader

func (*GzipPool) PutWriter added in v1.1.2

func (pool *GzipPool) PutWriter(writer io.WriteCloser)

PutWriter places back in the pool a CompressionWriter

type LZ4Pool added in v1.1.2

type LZ4Pool struct {
	// contains filtered or unexported fields
}

func (*LZ4Pool) GetReader added in v1.1.2

func (pool *LZ4Pool) GetReader(src io.Reader) io.Reader

GetReader gets or creates a new CompressionReader and reset it to read from src

func (*LZ4Pool) GetWriter added in v1.1.2

func (pool *LZ4Pool) GetWriter(dst io.Writer) io.WriteCloser

GetWriter gets or creates a new CompressionWriter and reset it to write to dst

func (*LZ4Pool) PutReader added in v1.1.2

func (pool *LZ4Pool) PutReader(reader io.Reader)

PutReader places back in the pool a CompressionReader

func (*LZ4Pool) PutWriter added in v1.1.2

func (pool *LZ4Pool) PutWriter(writer io.WriteCloser)

PutWriter places back in the pool a CompressionWriter

type NoopPool added in v1.1.2

type NoopPool struct{}

func (*NoopPool) GetReader added in v1.1.2

func (pool *NoopPool) GetReader(src io.Reader) io.Reader

GetReader gets or creates a new CompressionReader and reset it to read from src

func (*NoopPool) GetWriter added in v1.1.2

func (pool *NoopPool) GetWriter(dst io.Writer) io.WriteCloser

GetWriter gets or creates a new CompressionWriter and reset it to write to dst

func (*NoopPool) PutReader added in v1.1.2

func (pool *NoopPool) PutReader(reader io.Reader)

PutReader places back in the pool a CompressionReader

func (*NoopPool) PutWriter added in v1.1.2

func (pool *NoopPool) PutWriter(writer io.WriteCloser)

PutWriter places back in the pool a CompressionWriter

type ReaderPool added in v1.1.2

type ReaderPool interface {
	GetReader(io.Reader) io.Reader
	PutReader(io.Reader)
}

ReaderPool similar to WriterPool but for reading chunks.

type SnappyPool added in v1.1.2

type SnappyPool struct {
	// contains filtered or unexported fields
}

func (*SnappyPool) GetReader added in v1.1.2

func (pool *SnappyPool) GetReader(src io.Reader) io.Reader

GetReader gets or creates a new CompressionReader and reset it to read from src

func (*SnappyPool) GetWriter added in v1.1.2

func (pool *SnappyPool) GetWriter(dst io.Writer) io.WriteCloser

GetWriter gets or creates a new CompressionWriter and reset it to write to dst

func (*SnappyPool) PutReader added in v1.1.2

func (pool *SnappyPool) PutReader(reader io.Reader)

PutReader places back in the pool a CompressionReader

func (*SnappyPool) PutWriter added in v1.1.2

func (pool *SnappyPool) PutWriter(writer io.WriteCloser)

PutWriter places back in the pool a CompressionWriter

type StorageClient

type StorageClient interface {
	GetObject(ctx context.Context, objectKey string) (io.ReadCloser, error)
}

type TableQuerier

type TableQuerier interface {
	MultiQueries(ctx context.Context, queries []bluge_db.IndexQuery, callback segment.StoredFieldVisitor) error
}

type WriterPool added in v1.1.2

type WriterPool interface {
	GetWriter(io.Writer) io.WriteCloser
	PutWriter(io.WriteCloser)
}

WriterPool is a pool of io.Writer This is used by every chunk to avoid unnecessary allocations.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL