package v1

Documentation

Constants

const (
	FileMode       = 0644
	BloomFileName  = "bloom"
	SeriesFileName = "series"
)
const (
	DefaultSchemaVersion = V1
)
const (
	MaxRuneLen = 4
)
const (

	// Add new versions below
	V1 byte = iota
)

Variables

var (

	// Pool of crc32 hash
	Crc32HashPool = ChecksumPool{
		Pool: sync.Pool{
			New: func() interface{} {
				return crc32.New(castagnoliTable)
			},
		},
	}

	// 4KB -> 128MB
	BlockPool = BytePool{
				// contains filtered or unexported fields
	}
)
var (
	DefaultBlockOptions = NewBlockOptions(0, 4, 1, 50<<20) // EncNone, 50MB
)
var DefaultMaxPageSize = 64 << 20 // 64MB

NB(chaudum): Some block pages are way bigger than others (400MiB and bigger), and loading multiple pages into memory in parallel can cause the gateways to OOM. Figure out a decent default maximum page size that we can process.

var ErrPageTooLarge = errors.Errorf("bloom page too large")
var (
	// FullBounds is the bounds that covers the entire fingerprint space
	FullBounds = NewBounds(0, model.Fingerprint(math.MaxUint64))
)
var MatchAll = matchAllTest{}

Functions

func Collect

func Collect[T any](itr Iterator[T]) ([]T, error)

Collect collects an iterator into a slice. It uses CollectInto with a new slice.
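A minimal sketch, assuming the package is imported as v1 (e.g. from github.com/grafana/loki/pkg/storage/bloom/v1):

itr := v1.NewSliceIter([]int{1, 2, 3})
xs, err := v1.Collect[int](itr)
if err != nil {
	// handle iteration error
}
fmt.Println(xs) // [1 2 3]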

func CollectInto

func CollectInto[T any](itr Iterator[T], into []T) ([]T, error)

CollectInto collects the elements of an iterator into a provided slice, which is then returned.

func EqualIterators

func EqualIterators[T any](t *testing.T, test func(a, b T), expected, actual Iterator[T])

func ExtractTestableLineFilters

func ExtractTestableLineFilters(expr syntax.Expr) []syntax.LineFilterExpr

ExtractTestableLineFilters extracts all line filters from an expression that can be tested against a bloom filter. This will skip any line filters after a line format expression. A line format expression might add content that the query later matches against, which can't be tested with a bloom filter. E.g. For {app="fake"} |= "foo" | line_format "thisNewTextShouldMatch" |= "thisNewTextShouldMatch" this function will return only the line filter for "foo" since the line filter for "thisNewTextShouldMatch" wouldn't match against the bloom filter but should match against the query.
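An illustrative sketch of the example above, assuming the LogQL parser from github.com/grafana/loki/pkg/logql/syntax:

expr, err := syntax.ParseExpr(`{app="fake"} |= "foo" | line_format "bar" |= "bar"`)
if err != nil {
	// handle parse error
}
filters := v1.ExtractTestableLineFilters(expr)
// filters contains only the |= "foo" filter; the |= "bar" filter after
// line_format is skipped because it cannot be tested against a bloom.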

func Identity

func Identity[A any](a A) A

general helper, in this case created for DedupeIter[T,T]

func MakeBlock

func MakeBlock(t testing.TB, nth int, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (*Block, []SeriesWithBloom, [][][]byte)

func PointerSlice

func PointerSlice[T any](xs []T) []*T

func TarGz

func TarGz(dst io.Writer, reader BlockReader) error

func UnTarGz

func UnTarGz(dst string, r io.Reader) error
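A sketch of archiving a block directory and unpacking it elsewhere; blockDir, archivePath, and destDir are hypothetical paths:

reader := v1.NewDirectoryBlockReader(blockDir) // implements BlockReader
f, err := os.Create(archivePath)
if err != nil {
	// handle error
}
defer f.Close()
if err := v1.TarGz(f, reader); err != nil {
	// handle archive error
}

// later, unpack the archive into another directory
src, err := os.Open(archivePath)
if err != nil {
	// handle error
}
defer src.Close()
if err := v1.UnTarGz(destDir, src); err != nil {
	// handle unpack error
}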

Types

type Block

type Block struct {
	// contains filtered or unexported fields
}

func NewBlock

func NewBlock(reader BlockReader, metrics *Metrics) *Block

func (*Block) LoadHeaders

func (b *Block) LoadHeaders() error

func (*Block) Metadata

func (b *Block) Metadata() (BlockMetadata, error)

func (*Block) Reader

func (b *Block) Reader() BlockReader

func (*Block) Schema

func (b *Block) Schema() (Schema, error)

type BlockBuilder

type BlockBuilder struct {
	// contains filtered or unexported fields
}

func NewBlockBuilder

func NewBlockBuilder(opts BlockOptions, writer BlockWriter) (*BlockBuilder, error)

func (*BlockBuilder) AddSeries

func (b *BlockBuilder) AddSeries(series SeriesWithBloom) (bool, error)

AddSeries adds a series to the block. It returns true if, after adding the series, the block is full.

func (*BlockBuilder) BuildFrom

func (b *BlockBuilder) BuildFrom(itr Iterator[SeriesWithBloom]) (uint32, error)
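A sketch of building a block on disk, assuming series is a []v1.SeriesWithBloom sorted by fingerprint and dir is a writable directory:

writer := v1.NewDirectoryBlockWriter(dir)
builder, err := v1.NewBlockBuilder(v1.DefaultBlockOptions, writer)
if err != nil {
	// handle error
}
checksum, err := builder.BuildFrom(v1.NewSliceIter(series))
if err != nil {
	// handle build error
}
_ = checksum // checksum of the finished block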

func (*BlockBuilder) Close

func (b *BlockBuilder) Close() (uint32, error)

type BlockIndex

type BlockIndex struct {
	// contains filtered or unexported fields
}

Block index is a set of series pages along with the headers for each page

func (*BlockIndex) DecodeHeaders

func (b *BlockIndex) DecodeHeaders(r io.ReadSeeker) (uint32, error)

func (*BlockIndex) NewSeriesPageDecoder

func (b *BlockIndex) NewSeriesPageDecoder(r io.ReadSeeker, header SeriesPageHeaderWithOffset, metrics *Metrics) (res *SeriesPageDecoder, err error)

decompress page and return an iterator over the bytes

type BlockMetadata

type BlockMetadata struct {
	Options  BlockOptions
	Series   SeriesHeader
	Checksum uint32
}

type BlockOptions

type BlockOptions struct {
	// Schema determines the Schema of the block and cannot be changed
	// without recreating the block from underlying data
	Schema Schema

	// target size in bytes (decompressed)
	// of each page type
	SeriesPageSize, BloomPageSize, BlockSize uint64
}

func NewBlockOptions

func NewBlockOptions(enc chunkenc.Encoding, NGramLength, NGramSkip, MaxBlockSizeBytes uint64) BlockOptions

func NewBlockOptionsFromSchema

func NewBlockOptionsFromSchema(s Schema) BlockOptions

func (*BlockOptions) DecodeFrom

func (b *BlockOptions) DecodeFrom(r io.ReadSeeker) error

func (BlockOptions) Encode

func (b BlockOptions) Encode(enc *encoding.Encbuf)

func (BlockOptions) Len

func (b BlockOptions) Len() int

type BlockQuerier

type BlockQuerier struct {
	// contains filtered or unexported fields
}

func NewBlockQuerier

func NewBlockQuerier(b *Block, noCapture bool, maxPageSize int) *BlockQuerier

NewBlockQuerier returns a new BlockQuerier for the given block. WARNING: If noCapture is true, the underlying byte slice of the bloom page will be returned to the pool for efficiency. This can only safely be used when the underlying bloom bytes don't escape the decoder, i.e. when loading blooms for querying (bloom-gw) but not for writing (bloom-compactor).

func (*BlockQuerier) At

func (bq *BlockQuerier) At() *SeriesWithBloom

func (*BlockQuerier) CheckChunksForSeries

func (bq *BlockQuerier) CheckChunksForSeries(fp model.Fingerprint, chks ChunkRefs, searches [][]byte) (ChunkRefs, error)

CheckChunksForSeries checks if the given chunks pass a set of searches in the given bloom block. It returns the list of chunks which will need to be downloaded for a query based on the initial list passed as the `chks` argument. Chunks will be removed from the result set if they are indexed in the bloom and fail to pass all the searches.
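A sketch of the read path, assuming dir holds a block and fp, chks, and searches are the series fingerprint, its candidate chunks, and the raw search tokens:

block := v1.NewBlock(v1.NewDirectoryBlockReader(dir), v1.NewMetrics(nil)) // nil registerer: metrics not registered
// noCapture=true returns bloom pages to the pool; safe here because the
// blooms do not outlive the decoder on the read path
querier := v1.NewBlockQuerier(block, true, v1.DefaultMaxPageSize)
remaining, err := querier.CheckChunksForSeries(fp, chks, searches)
if err != nil {
	// handle error
}
// remaining is the subset of chks that still must be downloaded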

func (*BlockQuerier) Err

func (bq *BlockQuerier) Err() error

func (*BlockQuerier) Fuse

func (bq *BlockQuerier) Fuse(inputs []PeekingIterator[Request], logger log.Logger) *FusedQuerier

Fuse combines multiple requests into a single loop iteration over the data set and returns the corresponding outputs. TODO(owen-d): better async control

func (*BlockQuerier) Metadata

func (bq *BlockQuerier) Metadata() (BlockMetadata, error)

func (*BlockQuerier) Next

func (bq *BlockQuerier) Next() bool

func (*BlockQuerier) Reset

func (bq *BlockQuerier) Reset() error

func (*BlockQuerier) Schema

func (bq *BlockQuerier) Schema() (Schema, error)

func (*BlockQuerier) Seek

func (bq *BlockQuerier) Seek(fp model.Fingerprint) error

type BlockReader

type BlockReader interface {
	Index() (io.ReadSeeker, error)
	Blooms() (io.ReadSeeker, error)
	TarEntries() (Iterator[TarEntry], error)
}

type BlockWriter

type BlockWriter interface {
	Index() (io.WriteCloser, error)
	Blooms() (io.WriteCloser, error)
	Size() (int, error) // byte size of accumulated index & blooms
}

type Bloom

type Bloom struct {
	filter.ScalableBloomFilter
}

func (*Bloom) Decode

func (b *Bloom) Decode(dec *encoding.Decbuf) error

func (*Bloom) DecodeCopy

func (b *Bloom) DecodeCopy(dec *encoding.Decbuf) error

func (*Bloom) Encode

func (b *Bloom) Encode(enc *encoding.Encbuf) error

type BloomBlock

type BloomBlock struct {
	// contains filtered or unexported fields
}

func NewBloomBlock

func NewBloomBlock(encoding chunkenc.Encoding) BloomBlock

func (*BloomBlock) BloomPageDecoder

func (b *BloomBlock) BloomPageDecoder(r io.ReadSeeker, pageIdx int, maxPageSize int, metrics *Metrics) (res *BloomPageDecoder, err error)

func (*BloomBlock) DecodeHeaders

func (b *BloomBlock) DecodeHeaders(r io.ReadSeeker) (uint32, error)

type BloomBlockBuilder

type BloomBlockBuilder struct {
	// contains filtered or unexported fields
}

func NewBloomBlockBuilder

func NewBloomBlockBuilder(opts BlockOptions, writer io.WriteCloser) *BloomBlockBuilder

func (*BloomBlockBuilder) Append

func (b *BloomBlockBuilder) Append(series SeriesWithBloom) (BloomOffset, error)

func (*BloomBlockBuilder) Close

func (b *BloomBlockBuilder) Close() (uint32, error)

func (*BloomBlockBuilder) WriteSchema

func (b *BloomBlockBuilder) WriteSchema() error

type BloomOffset

type BloomOffset struct {
	Page       int // page number in bloom block
	ByteOffset int // offset to beginning of bloom within page
}

func (*BloomOffset) Decode

func (o *BloomOffset) Decode(dec *encoding.Decbuf, previousOffset BloomOffset) error

func (*BloomOffset) Encode

func (o *BloomOffset) Encode(enc *encoding.Encbuf, previousOffset BloomOffset)

type BloomPageDecoder

type BloomPageDecoder struct {
	// contains filtered or unexported fields
}

Decoder is a seekable, reset-able iterator. TODO(owen-d): use buffer pools. The reason we don't currently do this is that the `data` slice currently escapes the decoder via the returned bloom, so we can't know when it's safe to return it to the pool. This happens via `data ([]byte) -> dec (*encoding.Decbuf) -> bloom (Bloom)`, where the final Bloom holds a reference to the data slice. We could optimize this by encoding the mode (read, write) into our structs and doing copy-on-write shenanigans, but I'm avoiding this for now.

func LazyDecodeBloomPage

func LazyDecodeBloomPage(r io.Reader, pool chunkenc.ReaderPool, page BloomPageHeader) (*BloomPageDecoder, error)

func LazyDecodeBloomPageNoCompression

func LazyDecodeBloomPageNoCompression(r io.Reader, page BloomPageHeader) (*BloomPageDecoder, error)

shortcut to skip allocations when we know the page is not compressed

func NewBloomPageDecoder

func NewBloomPageDecoder(data []byte) *BloomPageDecoder

func (*BloomPageDecoder) At

func (d *BloomPageDecoder) At() *Bloom

func (*BloomPageDecoder) Err

func (d *BloomPageDecoder) Err() error

func (*BloomPageDecoder) Next

func (d *BloomPageDecoder) Next() bool

func (*BloomPageDecoder) Relinquish

func (d *BloomPageDecoder) Relinquish()

Relinquish returns the underlying byte slice to the pool for efficiency. It's intended to be used as a perf optimization. This can only safely be used when the underlying bloom bytes don't escape the decoder: on reads in the bloom-gw but not in the bloom-compactor

func (*BloomPageDecoder) Reset

func (d *BloomPageDecoder) Reset()

func (*BloomPageDecoder) Seek

func (d *BloomPageDecoder) Seek(offset int)

type BloomPageHeader

type BloomPageHeader struct {
	N, Offset, Len, DecompressedLen int
}

func (*BloomPageHeader) Decode

func (h *BloomPageHeader) Decode(dec *encoding.Decbuf) error

func (*BloomPageHeader) Encode

func (h *BloomPageHeader) Encode(enc *encoding.Encbuf)

type BloomQuerier

type BloomQuerier interface {
	Seek(BloomOffset) (*Bloom, error)
}

type BloomTest

type BloomTest interface {
	Matches(bloom filter.Checker) bool
	MatchesWithPrefixBuf(bloom filter.Checker, buf []byte, prefixLen int) bool
}

func FiltersToBloomTest

func FiltersToBloomTest(b NGramBuilder, filters ...syntax.LineFilterExpr) BloomTest

FiltersToBloomTest converts a list of line filters to a BloomTest. Note that all the line filters should be testable against a bloom filter; use ExtractTestableLineFilters to extract testable line filters from an expression. TODO(owen-d): limit the number of bloom lookups run; an arbitrarily high number can overconsume CPU and is a DoS vector. TODO(owen-d): use a for loop, not recursion, to protect the callstack
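Continuing the parsing sketch above, the extracted filters can be combined with an NGramTokenizer (which satisfies NGramBuilder) to test a bloom; bloom is assumed to implement filter.Checker:

filters := v1.ExtractTestableLineFilters(expr)
test := v1.FiltersToBloomTest(v1.NewNGramTokenizer(4, 0), filters...)
if test.Matches(bloom) {
	// the series may contain matches and its chunks must be checked
}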

type BloomTests

type BloomTests []BloomTest

func (BloomTests) Matches

func (b BloomTests) Matches(bloom filter.Checker) bool

func (BloomTests) MatchesWithPrefixBuf

func (b BloomTests) MatchesWithPrefixBuf(bloom filter.Checker, buf []byte, prefixLen int) bool

type BloomTokenizer

type BloomTokenizer struct {
	// contains filtered or unexported fields
}

BloomTokenizer is a utility that converts either Loki chunks or individual lines into tokens. These tokens are n-grams, representing adjacent letters, that are used to populate a bloom filter. https://en.wikipedia.org/wiki/Bloom_filter Bloom filters are utilized for faster lookups of log lines.

func NewBloomTokenizer

func NewBloomTokenizer(nGramLen, nGramSkip int, metrics *Metrics) *BloomTokenizer

NewBloomTokenizer returns a new instance of the Bloom Tokenizer. Warning: the tokens returned use the same byte slice to reduce allocations. This has three consequences: 1) The token slices generated must not be mutated externally. 2) The token slice must not be used after the next call to `Tokens()`, as it will repopulate the slice. 3) This is not thread safe.

func (*BloomTokenizer) N

func (bt *BloomTokenizer) N() uint64

func (*BloomTokenizer) Populate

func (bt *BloomTokenizer) Populate(swb *SeriesWithBloom, chks Iterator[ChunkRefWithIter]) (int, error)

Populate adds the tokens from the given chunks to the given seriesWithBloom.

func (*BloomTokenizer) SkipFactor

func (bt *BloomTokenizer) SkipFactor() uint64

type BoundedIter

type BoundedIter[V any] struct {
	Iterator[V]
	// contains filtered or unexported fields
}

unused, but illustrative

func NewBoundedIter

func NewBoundedIter[V any](itr Iterator[V], cmp func(V) BoundsCheck) *BoundedIter[V]

func (*BoundedIter[V]) Next

func (bi *BoundedIter[V]) Next() bool

type BoundsCheck

type BoundsCheck uint8
const (
	Before BoundsCheck = iota
	Overlap
	After
)

type BytePool

type BytePool struct {
	// contains filtered or unexported fields
}

func (*BytePool) Get

func (p *BytePool) Get(size int) []byte

func (*BytePool) Put

func (p *BytePool) Put(b []byte)

type ByteReader

type ByteReader struct {
	// contains filtered or unexported fields
}

In memory reader

func NewByteReader

func NewByteReader(index, blooms *bytes.Buffer) *ByteReader

func (*ByteReader) Blooms

func (r *ByteReader) Blooms() (io.ReadSeeker, error)

func (*ByteReader) Index

func (r *ByteReader) Index() (io.ReadSeeker, error)

func (*ByteReader) TarEntries

func (r *ByteReader) TarEntries() (Iterator[TarEntry], error)

type CancellableIter

type CancellableIter[T any] struct {
	Iterator[T]
	// contains filtered or unexported fields
}

func NewCancelableIter

func NewCancelableIter[T any](ctx context.Context, itr Iterator[T]) *CancellableIter[T]

func (*CancellableIter[T]) Err

func (cii *CancellableIter[T]) Err() error

func (*CancellableIter[T]) Next

func (cii *CancellableIter[T]) Next() bool

type ChecksumPool

type ChecksumPool struct {
	sync.Pool
}

func (*ChecksumPool) Get

func (p *ChecksumPool) Get() hash.Hash32

func (*ChecksumPool) Put

func (p *ChecksumPool) Put(h hash.Hash32)

type ChunkRef

type ChunkRef logproto.ShortRef

func (*ChunkRef) Decode

func (r *ChunkRef) Decode(dec *encoding.Decbuf, previousEnd model.Time) (model.Time, error)

func (*ChunkRef) Encode

func (r *ChunkRef) Encode(enc *encoding.Encbuf, previousEnd model.Time) model.Time

func (*ChunkRef) Less

func (r *ChunkRef) Less(other ChunkRef) bool

type ChunkRefWithIter

type ChunkRefWithIter struct {
	Ref ChunkRef
	Itr iter.EntryIterator
}

ChunkRefWithIter is a wrapper around a ChunkRef and an EntryIterator.

type ChunkRefs

type ChunkRefs []ChunkRef

func (ChunkRefs) Compare

func (refs ChunkRefs) Compare(others ChunkRefs, populateInclusive bool) (exclusive ChunkRefs, inclusive ChunkRefs)

Compare returns two sets of chunk refs; both inputs must be sorted: 1) the chunk refs which are in the original set but not in the other set, and 2) the chunk refs which are in both sets. The `populateInclusive` argument allows skipping population of the inclusive set if it is not needed. TODO(owen-d): can be improved to use binary search when one list is significantly larger than the other

func (ChunkRefs) Len

func (refs ChunkRefs) Len() int

func (ChunkRefs) Less

func (refs ChunkRefs) Less(i, j int) bool

func (ChunkRefs) Swap

func (refs ChunkRefs) Swap(i, j int)

func (ChunkRefs) Unless

func (refs ChunkRefs) Unless(others []ChunkRef) ChunkRefs

Unless returns the chunk refs in this set that are not in the other set. Both must be sorted.

type CloseIter

type CloseIter[T io.Closer] struct {
	Iterator[T]
}

func NewCloseableIterator

func NewCloseableIterator[T io.Closer](itr Iterator[T]) *CloseIter[T]

func (*CloseIter[T]) Close

func (i *CloseIter[T]) Close() error

type CloseableIterator

type CloseableIterator[T any] interface {
	Iterator[T]
	Close() error
}

type CloseableResettableIterator

type CloseableResettableIterator[T any] interface {
	CloseableIterator[T]
	ResettableIterator[T]
}

type CounterIter

type CounterIter[T any] struct {
	Iterator[T] // the underlying iterator
	// contains filtered or unexported fields
}

func NewCounterIter

func NewCounterIter[T any](itr Iterator[T]) *CounterIter[T]

func (*CounterIter[T]) Count

func (it *CounterIter[T]) Count() int

func (*CounterIter[T]) Next

func (it *CounterIter[T]) Next() bool

type CounterIterator

type CounterIterator[T any] interface {
	Iterator[T]
	Count() int
}

type DedupeIter

type DedupeIter[A, B any] struct {
	// contains filtered or unexported fields
}

DedupeIter is a deduplicating iterator which creates an Iterator[B] from a sequence of Iterator[A].

func NewDedupingIter

func NewDedupingIter[A, B any](
	eq func(A, B) bool,
	from func(A) B,
	merge func(A, B) B,
	itr PeekingIterator[A],
) *DedupeIter[A, B]
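A minimal sketch deduplicating adjacent equal elements, using Identity as the A-to-B conversion:

itr := v1.NewPeekingIter(v1.NewSliceIter([]int{1, 1, 2, 2, 2, 3}))
dedupe := v1.NewDedupingIter[int, int](
	func(a, b int) bool { return a == b }, // eq: next element matches the accumulated one
	v1.Identity[int],                      // from: seed the accumulator
	func(_, b int) int { return b },       // merge: keep the accumulated value
	itr,
)
for dedupe.Next() {
	fmt.Println(dedupe.At()) // 1, 2, 3
}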

func (*DedupeIter[A, B]) At

func (it *DedupeIter[A, B]) At() B

func (*DedupeIter[A, B]) Err

func (it *DedupeIter[A, B]) Err() error

func (*DedupeIter[A, B]) Next

func (it *DedupeIter[A, B]) Next() bool

type DirectoryBlockReader

type DirectoryBlockReader struct {
	// contains filtered or unexported fields
}

File reader

func NewDirectoryBlockReader

func NewDirectoryBlockReader(dir string) *DirectoryBlockReader

func (*DirectoryBlockReader) Blooms

func (r *DirectoryBlockReader) Blooms() (io.ReadSeeker, error)

func (*DirectoryBlockReader) Index

func (r *DirectoryBlockReader) Index() (io.ReadSeeker, error)

func (*DirectoryBlockReader) Init

func (r *DirectoryBlockReader) Init() error

func (*DirectoryBlockReader) TarEntries

func (r *DirectoryBlockReader) TarEntries() (Iterator[TarEntry], error)

type DirectoryBlockWriter

type DirectoryBlockWriter struct {
	// contains filtered or unexported fields
}

Directory based impl

func NewDirectoryBlockWriter

func NewDirectoryBlockWriter(dir string) *DirectoryBlockWriter

func (*DirectoryBlockWriter) Blooms

func (b *DirectoryBlockWriter) Blooms() (io.WriteCloser, error)

func (*DirectoryBlockWriter) Index

func (b *DirectoryBlockWriter) Index() (io.WriteCloser, error)

func (*DirectoryBlockWriter) Init

func (b *DirectoryBlockWriter) Init() error

func (*DirectoryBlockWriter) Size

func (b *DirectoryBlockWriter) Size() (int, error)

type EmptyIter

type EmptyIter[T any] struct {
	// contains filtered or unexported fields
}

func NewEmptyIter

func NewEmptyIter[T any]() *EmptyIter[T]

func (*EmptyIter[T]) At

func (it *EmptyIter[T]) At() T

func (*EmptyIter[T]) Err

func (it *EmptyIter[T]) Err() error

func (*EmptyIter[T]) Next

func (it *EmptyIter[T]) Next() bool

func (*EmptyIter[T]) Reset

func (it *EmptyIter[T]) Reset()

noop

type FilterIter

type FilterIter[T any] struct {
	Iterator[T]
	// contains filtered or unexported fields
}

func NewFilterIter

func NewFilterIter[T any](it Iterator[T], p Predicate[T]) *FilterIter[T]

func (*FilterIter[T]) Next

func (i *FilterIter[T]) Next() bool

type FingerprintBounds

type FingerprintBounds struct {
	Min, Max model.Fingerprint
}

func BoundsFromProto

func BoundsFromProto(pb logproto.FPBounds) FingerprintBounds

func NewBounds

func NewBounds(min, max model.Fingerprint) FingerprintBounds

func ParseBoundsFromAddr

func ParseBoundsFromAddr(s string) (FingerprintBounds, error)

ParseBoundsFromAddr parses a fingerprint bounds from a string

func ParseBoundsFromParts

func ParseBoundsFromParts(a, b string) (FingerprintBounds, error)

ParseBoundsFromParts parses a fingerprint bounds from already-separated strings.

func (FingerprintBounds) Bounds

Bounds returns the inclusive bounds [from,through]

func (FingerprintBounds) Cmp

Cmp returns the fingerprint's position relative to the bounds

func (FingerprintBounds) Equal

func (b FingerprintBounds) Equal(target FingerprintBounds) bool

Returns whether the fingerprint bounds is equal to the target bounds

func (FingerprintBounds) GetFromThrough

func (b FingerprintBounds) GetFromThrough() (model.Fingerprint, model.Fingerprint)

GetFromThrough implements TSDB's FingerprintFilter interface. NB(owen-d): it adjusts the result to return `[from,through)` instead of the `[from,through]` which the fingerprint bounds struct tracks.

func (FingerprintBounds) Hash

func (b FingerprintBounds) Hash(h hash.Hash32) error

func (FingerprintBounds) Intersection

func (b FingerprintBounds) Intersection(target FingerprintBounds) *FingerprintBounds

Intersection returns the intersection of the two bounds

func (FingerprintBounds) Less

func (FingerprintBounds) Match

Match implements TSDB's FingerprintFilter interface

func (FingerprintBounds) Overlaps

func (b FingerprintBounds) Overlaps(target FingerprintBounds) bool

Overlaps returns whether the bounds (partially) overlap with the target bounds

func (FingerprintBounds) Range

func (b FingerprintBounds) Range() uint64

Range returns the number of fingerprints in the bounds

func (FingerprintBounds) Slice

Slice returns a new fingerprint bounds clipped to the target bounds or nil if there is no overlap

func (FingerprintBounds) String

func (b FingerprintBounds) String() string

String returns the string representation of the fingerprint bounds for use in content-addressable storage. TODO(owen-d): incorporate this into the schema so we can change it, similar to `{,Parse}ExternalKey`

func (FingerprintBounds) Union

func (b FingerprintBounds) Union(target FingerprintBounds) (res []FingerprintBounds)

Union returns the union of the two bounds

func (FingerprintBounds) Unless

func (b FingerprintBounds) Unless(target FingerprintBounds) (res []FingerprintBounds)

Unless returns the subspace of itself which does not intersect with the target bounds

func (FingerprintBounds) Within

func (b FingerprintBounds) Within(target FingerprintBounds) bool

Within returns whether the fingerprint is fully within the target bounds
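An illustrative sketch of the bounds arithmetic (the values are arbitrary):

a := v1.NewBounds(0, 0x00ff)
b := v1.NewBounds(0x0080, 0xffff)

a.Overlaps(b)     // true: the ranges share [0x0080, 0x00ff]
a.Within(b)       // false: a starts below b.Min
a.Intersection(b) // [0x0080, 0x00ff]
a.Union(b)        // [[0, 0xffff]]: contiguous, so a single bounds
a.Unless(b)       // [[0, 0x007f]]: the part of a outside b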

type FusedQuerier

type FusedQuerier struct {
	// contains filtered or unexported fields
}

func NewFusedQuerier

func NewFusedQuerier(bq *BlockQuerier, inputs []PeekingIterator[Request], logger log.Logger) *FusedQuerier

func (*FusedQuerier) Run

func (fq *FusedQuerier) Run() error

type HeapIterator

type HeapIterator[T any] struct {
	// contains filtered or unexported fields
}

HeapIterator is a heap implementation of BlockQuerier backed by multiple blocks. It is used to merge multiple blocks into a single ordered querier. NB(owen-d): it uses a custom heap implementation because Pop() only returns a single value of the top-most iterator, rather than the iterator itself.

func NewHeapIterForSeriesWithBloom

func NewHeapIterForSeriesWithBloom(queriers ...PeekingIterator[*SeriesWithBloom]) *HeapIterator[*SeriesWithBloom]

func NewHeapIterator

func NewHeapIterator[T any](less func(T, T) bool, itrs ...PeekingIterator[T]) *HeapIterator[T]
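A sketch merging two sorted iterators into one ordered stream:

a := v1.NewPeekingIter(v1.NewSliceIter([]int{1, 3, 5}))
b := v1.NewPeekingIter(v1.NewSliceIter([]int{2, 4, 6}))
merged := v1.NewHeapIterator(func(x, y int) bool { return x < y }, a, b)
for merged.Next() {
	fmt.Print(merged.At(), " ") // 1 2 3 4 5 6
}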

func (*HeapIterator[T]) At

func (mbq *HeapIterator[T]) At() T

func (*HeapIterator[T]) Err

func (mbq *HeapIterator[T]) Err() error

TODO(owen-d): don't swallow this error

func (HeapIterator[T]) Len

func (mbq HeapIterator[T]) Len() int

func (*HeapIterator[T]) Less

func (mbq *HeapIterator[T]) Less(i, j int) bool

func (*HeapIterator[T]) Next

func (mbq *HeapIterator[T]) Next() (ok bool)

func (*HeapIterator[T]) Swap

func (mbq *HeapIterator[T]) Swap(a, b int)

type IndexBuilder

type IndexBuilder struct {
	// contains filtered or unexported fields
}

func NewIndexBuilder

func NewIndexBuilder(opts BlockOptions, writer io.WriteCloser) *IndexBuilder

func (*IndexBuilder) Append

func (b *IndexBuilder) Append(series SeriesWithOffset) error

func (*IndexBuilder) Close

func (b *IndexBuilder) Close() (uint32, error)

func (*IndexBuilder) WriteOpts

func (b *IndexBuilder) WriteOpts() error

type IndexedValue

type IndexedValue[T any] struct {
	// contains filtered or unexported fields
}

func (IndexedValue[T]) Index

func (iv IndexedValue[T]) Index() int

func (IndexedValue[T]) Value

func (iv IndexedValue[T]) Value() T

type IterWithIndex

type IterWithIndex[T any] struct {
	Iterator[T]
	// contains filtered or unexported fields
}

func (*IterWithIndex[T]) At

func (it *IterWithIndex[T]) At() IndexedValue[T]

type Iterator

type Iterator[T any] interface {
	Next() bool
	Err() error
	At() T
}

func NewIterWithIndex

func NewIterWithIndex[T any](iter Iterator[T], idx int) Iterator[IndexedValue[T]]

type LazyBloomIter

type LazyBloomIter struct {
	// contains filtered or unexported fields
}

func NewLazyBloomIter

func NewLazyBloomIter(b *Block, pool bool, maxSize int) *LazyBloomIter

NewLazyBloomIter returns a new lazy bloom iterator. If pool is true, the underlying byte slice of the bloom page will be returned to the pool for efficiency. This can only safely be used when the underlying bloom bytes don't escape the decoder.

func (*LazyBloomIter) At

func (it *LazyBloomIter) At() *Bloom

func (*LazyBloomIter) Err

func (it *LazyBloomIter) Err() error

func (*LazyBloomIter) Next

func (it *LazyBloomIter) Next() bool

func (*LazyBloomIter) Seek

func (it *LazyBloomIter) Seek(offset BloomOffset)

type LazySeriesIter

type LazySeriesIter struct {
	// contains filtered or unexported fields
}

func NewLazySeriesIter

func NewLazySeriesIter(b *Block) *LazySeriesIter

Decodes series pages one at a time and iterates through them

func (*LazySeriesIter) At

func (it *LazySeriesIter) At() *SeriesWithOffset

func (*LazySeriesIter) Err

func (it *LazySeriesIter) Err() error

func (*LazySeriesIter) Next

func (it *LazySeriesIter) Next() bool

func (*LazySeriesIter) Seek

func (it *LazySeriesIter) Seek(fp model.Fingerprint) error

Seek returns an iterator over the pages where the first fingerprint is >= fp

type MapIter

type MapIter[A any, B any] struct {
	Iterator[A]
	// contains filtered or unexported fields
}

func NewMapIter

func NewMapIter[A any, B any](src Iterator[A], f func(A) B) *MapIter[A, B]

func (*MapIter[A, B]) At

func (it *MapIter[A, B]) At() B

type MemoryBlockWriter

type MemoryBlockWriter struct {
	// contains filtered or unexported fields
}

in memory impl

func NewMemoryBlockWriter

func NewMemoryBlockWriter(index, blooms *bytes.Buffer) MemoryBlockWriter

func (MemoryBlockWriter) Blooms

func (b MemoryBlockWriter) Blooms() (io.WriteCloser, error)

func (MemoryBlockWriter) Index

func (b MemoryBlockWriter) Index() (io.WriteCloser, error)

func (MemoryBlockWriter) Size

func (b MemoryBlockWriter) Size() (int, error)

type MergeBuilder

type MergeBuilder struct {
	// contains filtered or unexported fields
}

Simplistic implementation of a merge builder that builds a single block from a list of blocks and a store of series.

func NewMergeBuilder

func NewMergeBuilder(
	blocks Iterator[*SeriesWithBloom],
	store Iterator[*Series],
	populate func(*Series, *Bloom) (int, error),
	metrics *Metrics,
) *MergeBuilder

NewMergeBuilder is a specific builder which does the following:

  1. merges multiple blocks into a single ordered querier; when two blocks have the same series, it will prefer the one with the most chunks already indexed
  2. iterates through the store, adding chunks to the relevant blooms via the `populate` argument

func (*MergeBuilder) Build

func (mb *MergeBuilder) Build(builder *BlockBuilder) (checksum uint32, totalBytes int, err error)

type Metrics

type Metrics struct {
	// contains filtered or unexported fields
}

func NewMetrics

func NewMetrics(r prometheus.Registerer) *Metrics

type MultiFingerprintBounds

type MultiFingerprintBounds []FingerprintBounds

func MultiBoundsFromProto

func MultiBoundsFromProto(pb []logproto.FPBounds) MultiFingerprintBounds

Unsafe cast to avoid allocation. This _requires_ that the underlying types are the same, which is checked by the compiler above.

func (MultiFingerprintBounds) Union

type NGramBuilder

type NGramBuilder interface {
	Tokens(line string) Iterator[[]byte]
	N() int
	SkipFactor() int
}

NGramBuilder is an interface for tokenizing strings into ngrams. Extracting this interface allows us to test the bloom filter without having to use the actual tokenizer. TODO: This should be moved to tokenizer.go

type NGramTokenIter

type NGramTokenIter struct {
	// contains filtered or unexported fields
}

func (*NGramTokenIter) At

func (t *NGramTokenIter) At() []byte

func (*NGramTokenIter) Err

func (t *NGramTokenIter) Err() error

func (*NGramTokenIter) Next

func (t *NGramTokenIter) Next() bool

type NGramTokenizer

type NGramTokenizer struct {
	// contains filtered or unexported fields
}

Iterable variants (more performant, less space)

func NewNGramTokenizer

func NewNGramTokenizer(n, skip int) *NGramTokenizer

N-Grams (https://en.wikipedia.org/wiki/N-gram) are a series of 'n' adjacent characters in a string. These will be utilized for the bloom filters to allow for fuzzy searching.

func (*NGramTokenizer) N

func (t *NGramTokenizer) N() int

func (*NGramTokenizer) SkipFactor

func (t *NGramTokenizer) SkipFactor() int

func (*NGramTokenizer) Tokens

func (t *NGramTokenizer) Tokens(line string) Iterator[[]byte]

Tokens implements the NGramBuilder interface. The Tokens iterator uses shared buffers for performance. The []byte returned by At() is not safe for use after subsequent calls to Next().
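A sketch that copies each token out of the shared buffer before the next Next() call:

tok := v1.NewNGramTokenizer(4, 0)
itr := tok.Tokens("hello")
for itr.Next() {
	token := append([]byte(nil), itr.At()...) // copy: the buffer is reused
	fmt.Println(string(token))                // "hell", then "ello"
}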

type NoopCloser

type NoopCloser struct {
	io.Writer
}

func NewNoopCloser

func NewNoopCloser(w io.Writer) NoopCloser

func (NoopCloser) Close

func (n NoopCloser) Close() error

type Ord

type Ord byte
const (
	Less Ord = iota
	Eq
	Greater
)

type Orderable

type Orderable[T any] interface {
	// Return the caller's position relative to the target
	Compare(T) Ord
}

type OrderedImpl

type OrderedImpl[T any] struct {
	// contains filtered or unexported fields
}

func NewOrderable

func NewOrderable[T any](val T, cmp func(T, T) Ord) OrderedImpl[T]

NewOrderable is a convenience method for dynamically creating an Orderable implementation for a type by passing in a value and a comparison function. This is useful for types that are not under our control, such as built-in types, and for reducing boilerplate in testware/etc. Hot-path code should use a statically defined Orderable implementation for performance.

func (OrderedImpl[T]) Compare

func (o OrderedImpl[T]) Compare(other OrderedImpl[T]) Ord

func (OrderedImpl[T]) Unwrap

func (o OrderedImpl[T]) Unwrap() T

type Output

type Output struct {
	Fp       model.Fingerprint
	Removals ChunkRefs
}

Output represents a chunk that failed to pass all searches and must be downloaded

type PageWriter

type PageWriter struct {
	// contains filtered or unexported fields
}

func NewPageWriter

func NewPageWriter(targetSize int) PageWriter

func (*PageWriter) Add

func (w *PageWriter) Add(item []byte) (offset int)

func (*PageWriter) Count

func (w *PageWriter) Count() int

func (*PageWriter) Reset

func (w *PageWriter) Reset()

func (*PageWriter) SpaceFor

func (w *PageWriter) SpaceFor(numBytes int) bool

type PeekCloseIter

type PeekCloseIter[T any] struct {
	*PeekIter[T]
	// contains filtered or unexported fields
}

func NewPeekCloseIter

func NewPeekCloseIter[T any](itr CloseableIterator[T]) *PeekCloseIter[T]

func (*PeekCloseIter[T]) Close

func (it *PeekCloseIter[T]) Close() error

type PeekIter

type PeekIter[T any] struct {
	// contains filtered or unexported fields
}

func NewPeekingIter

func NewPeekingIter[T any](itr Iterator[T]) *PeekIter[T]

func (*PeekIter[T]) At

func (it *PeekIter[T]) At() T

func (*PeekIter[T]) Err

func (it *PeekIter[T]) Err() error

func (*PeekIter[T]) Next

func (it *PeekIter[T]) Next() bool

func (*PeekIter[T]) Peek

func (it *PeekIter[T]) Peek() (T, bool)

type PeekingCloseableIterator

type PeekingCloseableIterator[T any] interface {
	PeekingIterator[T]
	CloseableIterator[T]
}

type PeekingIterator

type PeekingIterator[T any] interface {
	Peek() (T, bool)
	Iterator[T]
}

func NewSliceIterWithIndex

func NewSliceIterWithIndex[T any](xs []T, idx int) PeekingIterator[IndexedValue[T]]

type Predicate

type Predicate[T any] func(T) bool

type PrefixedTokenIter

type PrefixedTokenIter struct {
	Iterator[[]byte]
	// contains filtered or unexported fields
}

func NewPrefixedTokenIter

func NewPrefixedTokenIter(buf []byte, prefixLn int, iter Iterator[[]byte]) *PrefixedTokenIter

func (*PrefixedTokenIter) At

func (t *PrefixedTokenIter) At() []byte

type Request

type Request struct {
	Fp       model.Fingerprint
	Chks     ChunkRefs
	Search   BloomTest
	Response chan<- Output
}

type ResettableIterator

type ResettableIterator[T any] interface {
	Reset() error
	Iterator[T]
}

type Schema

type Schema struct {
	// contains filtered or unexported fields
}

func (Schema) Compatible

func (s Schema) Compatible(other Schema) bool

func (*Schema) CompressorPool

func (s *Schema) CompressorPool() chunkenc.WriterPool

func (*Schema) Decode

func (s *Schema) Decode(dec *encoding.Decbuf) error

func (*Schema) DecodeFrom

func (s *Schema) DecodeFrom(r io.ReadSeeker) error

func (*Schema) DecompressorPool

func (s *Schema) DecompressorPool() chunkenc.ReaderPool

func (*Schema) Encode

func (s *Schema) Encode(enc *encoding.Encbuf)

func (Schema) Len

func (s Schema) Len() int

byte length

func (Schema) NGramLen

func (s Schema) NGramLen() int

func (Schema) NGramSkip

func (s Schema) NGramSkip() int

func (Schema) String

func (s Schema) String() string

type SeekIter

type SeekIter[K, V any] interface {
	Seek(K) error
	Iterator[V]
}

type Series

type Series struct {
	Fingerprint model.Fingerprint
	Chunks      ChunkRefs
}

type SeriesHeader

type SeriesHeader struct {
	NumSeries         int
	Bounds            FingerprintBounds
	FromTs, ThroughTs model.Time
}

func (*SeriesHeader) Decode

func (h *SeriesHeader) Decode(dec *encoding.Decbuf) error

func (*SeriesHeader) Encode

func (h *SeriesHeader) Encode(enc *encoding.Encbuf)

type SeriesIterator

type SeriesIterator interface {
	Iterator[*SeriesWithOffset]
	Reset()
}

type SeriesPageDecoder

type SeriesPageDecoder struct {
	// contains filtered or unexported fields
}

can decode a series page one item at a time, useful when we don't need to iterate an entire page

func (*SeriesPageDecoder) At

func (d *SeriesPageDecoder) At() (res *SeriesWithOffset)

func (*SeriesPageDecoder) Err

func (d *SeriesPageDecoder) Err() error

func (*SeriesPageDecoder) Next

func (d *SeriesPageDecoder) Next() bool

func (*SeriesPageDecoder) Reset

func (d *SeriesPageDecoder) Reset()

func (*SeriesPageDecoder) Seek

func (d *SeriesPageDecoder) Seek(fp model.Fingerprint)

type SeriesPageHeaderWithOffset

type SeriesPageHeaderWithOffset struct {
	Offset, Len, DecompressedLen int
	SeriesHeader
}

Header for a series page

func (*SeriesPageHeaderWithOffset) Decode

func (*SeriesPageHeaderWithOffset) Encode

func (h *SeriesPageHeaderWithOffset) Encode(enc *encoding.Encbuf)

type SeriesWithBloom

type SeriesWithBloom struct {
	Series *Series
	Bloom  *Bloom
}

func MkBasicSeriesWithBlooms

func MkBasicSeriesWithBlooms(nSeries, _ int, fromFp, throughFp model.Fingerprint, fromTs, throughTs model.Time) (seriesList []SeriesWithBloom, keysList [][][]byte)

type SeriesWithOffset

type SeriesWithOffset struct {
	Offset BloomOffset
	Series
}

func (*SeriesWithOffset) Decode

func (s *SeriesWithOffset) Decode(dec *encoding.Decbuf, previousFp model.Fingerprint, previousOffset BloomOffset) (model.Fingerprint, BloomOffset, error)

func (*SeriesWithOffset) Encode

func (s *SeriesWithOffset) Encode(
	enc *encoding.Encbuf,
	previousFp model.Fingerprint,
	previousOffset BloomOffset,
) (model.Fingerprint, BloomOffset)

type SliceIter

type SliceIter[T any] struct {
	// contains filtered or unexported fields
}

func NewSliceIter

func NewSliceIter[T any](xs []T) *SliceIter[T]

func (*SliceIter[T]) At

func (it *SliceIter[T]) At() T

func (*SliceIter[T]) Err

func (it *SliceIter[T]) Err() error

func (*SliceIter[T]) Len

func (it *SliceIter[T]) Len() int

func (*SliceIter[T]) Next

func (it *SliceIter[T]) Next() bool

type SliceIterWithIndex

type SliceIterWithIndex[T any] struct {
	// contains filtered or unexported fields
}

func (*SliceIterWithIndex[T]) At

func (it *SliceIterWithIndex[T]) At() IndexedValue[T]

func (*SliceIterWithIndex[T]) Err

func (it *SliceIterWithIndex[T]) Err() error

func (*SliceIterWithIndex[T]) Next

func (it *SliceIterWithIndex[T]) Next() bool

func (*SliceIterWithIndex[T]) Peek

func (it *SliceIterWithIndex[T]) Peek() (IndexedValue[T], bool)

type TarEntry

type TarEntry struct {
	Name string
	Size int64
	Body io.ReadSeeker
}

type UnlessIterator

type UnlessIterator[T Orderable[T]] struct {
	// contains filtered or unexported fields
}

func NewUnlessIterator

func NewUnlessIterator[T Orderable[T]](a, b Iterator[T]) *UnlessIterator[T]

Iterators _must_ be sorted. Defers to underlying `PeekingIterator` implementation for both iterators if they implement it.
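A sketch computing a set difference over two sorted iterators, wrapping ints with NewOrderable:

cmp := func(x, y int) v1.Ord {
	switch {
	case x < y:
		return v1.Less
	case x > y:
		return v1.Greater
	default:
		return v1.Eq
	}
}
wrap := func(xs ...int) (out []v1.OrderedImpl[int]) {
	for _, x := range xs {
		out = append(out, v1.NewOrderable(x, cmp))
	}
	return out
}
a := v1.NewSliceIter(wrap(1, 2, 3, 4))
b := v1.NewSliceIter(wrap(2, 4))
itr := v1.NewUnlessIterator[v1.OrderedImpl[int]](a, b)
for itr.Next() {
	fmt.Println(itr.At().Unwrap()) // 1, then 3
}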

func (*UnlessIterator[T]) At

func (it *UnlessIterator[T]) At() T

func (*UnlessIterator[T]) Err

func (it *UnlessIterator[T]) Err() error

func (*UnlessIterator[T]) Next

func (it *UnlessIterator[T]) Next() bool
