sparseindex

package
v1.2.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Feb 29, 2024 License: Apache-2.0 Imports: 29 Imported by: 0

Documentation

Index

Constants

View Source
const (
	FullTextIdxColumnCnt = 1

	FullTextIndex         = "fullText"
	BloomFilterFileSuffix = ".idx"
	BloomFilterFilePrefix = "bloomfilter_" // bloomfilter_${columnName}.idx
)
View Source
const Continuous = "continuous"
View Source
const Empty = "empty"

Variables

View Source
var ConsiderOnlyBeTrue = NewMark(false, true)
View Source
var (
	// InitIndexFragmentFixedSize means that each fragment is fixed in size except the last fragment.
	InitIndexFragmentFixedSize = true
)
View Source
var NEGATIVE_INFINITY = &FieldRef{row: math.MinInt64}
View Source
var POSITIVE_INFINITY = &FieldRef{row: math.MaxInt64}
View Source
var SKFileReaderOnce sync.Once

Functions

func GetBloomFilterFilePath added in v1.2.0

func GetBloomFilterFilePath(dir, msName, fieldName string) string

func GetFullTextAttachFilePath added in v1.2.0

func GetFullTextAttachFilePath(dir, msName, dataFilePath string) string

func GetFullTextDetachFilePath added in v1.2.0

func GetFullTextDetachFilePath(dir, msName string) string

func GetLocalBloomFilterBlockCnts added in v1.2.0

func GetLocalBloomFilterBlockCnts(dir, msName, lockPath string, recSchema record.Schemas, skipIndex *SkipIndex,
	fullTextIdx bool) int64

GetLocalBloomFilterBlockCnts gets the block count of one local bloomFilter column; if it does not exist, 0 is returned.

func RegistrySKFileReaderCreator added in v1.2.0

func RegistrySKFileReaderCreator(name string, creator SKFileReaderCreator) bool

RegistrySKFileReaderCreator is used to register the SKFileReaderCreator.

Types

type BloomFilterFullTextIndexReader added in v1.2.0

type BloomFilterFullTextIndexReader struct {
	// contains filtered or unexported fields
}

func NewBloomFilterFullTextIndexReader added in v1.2.0

func NewBloomFilterFullTextIndexReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (*BloomFilterFullTextIndexReader, error)

func (*BloomFilterFullTextIndexReader) Close added in v1.2.0

func (*BloomFilterFullTextIndexReader) MayBeInFragment added in v1.2.0

func (r *BloomFilterFullTextIndexReader) MayBeInFragment(fragId uint32) (bool, error)

func (*BloomFilterFullTextIndexReader) ReInit added in v1.2.0

func (r *BloomFilterFullTextIndexReader) ReInit(file interface{}) (err error)

type BloomFilterFullTextReaderCreator added in v1.2.0

type BloomFilterFullTextReaderCreator struct {
}

func (*BloomFilterFullTextReaderCreator) CreateSKFileReader added in v1.2.0

func (index *BloomFilterFullTextReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)

type BloomFilterIndexReader added in v1.2.0

type BloomFilterIndexReader struct {
	// contains filtered or unexported fields
}

func NewBloomFilterIndexReader added in v1.2.0

func NewBloomFilterIndexReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (*BloomFilterIndexReader, error)

func (*BloomFilterIndexReader) Close added in v1.2.0

func (r *BloomFilterIndexReader) Close() error

func (*BloomFilterIndexReader) MayBeInFragment added in v1.2.0

func (r *BloomFilterIndexReader) MayBeInFragment(fragId uint32) (bool, error)

func (*BloomFilterIndexReader) ReInit added in v1.2.0

func (r *BloomFilterIndexReader) ReInit(file interface{}) (err error)

type BloomFilterReaderCreator added in v1.2.0

type BloomFilterReaderCreator struct {
}

func (*BloomFilterReaderCreator) CreateSKFileReader added in v1.2.0

func (index *BloomFilterReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)

type BloomFilterWriter added in v1.2.0

type BloomFilterWriter struct {
	// contains filtered or unexported fields
}

func NewBloomFilterWriter added in v1.2.0

func NewBloomFilterWriter(dir, msName, dataFilePath, lockPath string) *BloomFilterWriter

func (*BloomFilterWriter) Close added in v1.2.0

func (b *BloomFilterWriter) Close() error

func (*BloomFilterWriter) CreateAttachSkipIndex added in v1.2.0

func (b *BloomFilterWriter) CreateAttachSkipIndex(schemaIdx, rowsPerSegment []int, writeRec *record.Record) error

func (*BloomFilterWriter) CreateDetachSkipIndex added in v1.2.0

func (b *BloomFilterWriter) CreateDetachSkipIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int,
	dataBuf [][]byte) ([][]byte, []string)

func (*BloomFilterWriter) GenBloomFilterData added in v1.2.0

func (b *BloomFilterWriter) GenBloomFilterData(src *record.ColVal, rowsPerSegment []int, refType int) []byte

func (*BloomFilterWriter) Open added in v1.2.0

func (b *BloomFilterWriter) Open() error

type ColumnRef

type ColumnRef struct {
	// contains filtered or unexported fields
}

func NewColumnRef

func NewColumnRef(name string, dataType int, column *record.ColVal) *ColumnRef

type FieldRef

type FieldRef struct {
	// contains filtered or unexported fields
}

func NewFieldRef

func NewFieldRef(cols []*ColumnRef, column int, row int) *FieldRef

func (*FieldRef) Equals

func (f *FieldRef) Equals(rhs *FieldRef) bool

func (*FieldRef) IsNegativeInfinity

func (f *FieldRef) IsNegativeInfinity() bool

func (*FieldRef) IsNull

func (f *FieldRef) IsNull() bool

func (*FieldRef) IsPositiveInfinity

func (f *FieldRef) IsPositiveInfinity() bool

func (*FieldRef) Less

func (f *FieldRef) Less(rhs *FieldRef) bool

func (*FieldRef) Set

func (f *FieldRef) Set(cols []*ColumnRef, column, row int)

func (*FieldRef) SetNegativeInfinity

func (f *FieldRef) SetNegativeInfinity()

func (*FieldRef) SetPositiveInfinity

func (f *FieldRef) SetPositiveInfinity()

type FullTextIdxWriter added in v1.2.0

type FullTextIdxWriter struct {
	// contains filtered or unexported fields
}

func NewFullTextIdxWriter added in v1.2.0

func NewFullTextIdxWriter(dir, msName, dataFilePath, lockPath string) *FullTextIdxWriter

func (*FullTextIdxWriter) Close added in v1.2.0

func (f *FullTextIdxWriter) Close() error

func (*FullTextIdxWriter) CreateAttachSkipIndex added in v1.2.0

func (f *FullTextIdxWriter) CreateAttachSkipIndex(schemaIdx, rowsPerSegment []int, writeRec *record.Record) error

func (*FullTextIdxWriter) CreateDetachSkipIndex added in v1.2.0

func (f *FullTextIdxWriter) CreateDetachSkipIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)

func (*FullTextIdxWriter) Open added in v1.2.0

func (f *FullTextIdxWriter) Open() error

type FunctionBase

type FunctionBase struct {
}

type IndexProperty

type IndexProperty struct {
	RowsNumPerFragment  int
	CoarseIndexFragment int
	MinRowsForSeek      int
}

func NewIndexProperty

func NewIndexProperty(rowsNumPerFragment, coarseIndexFragment, minRowsForSeek int) *IndexProperty

type KeyCondition

type KeyCondition interface {
	HavePrimaryKey() bool
	GetMaxKeyIndex() int
	IsFirstPrimaryKey() bool
	CanDoBinarySearch() bool
	MayBeInRange(usedKeySize int, indexLeft []*FieldRef, indexRight []*FieldRef, dataTypes []int) (bool, error)
	CheckInRange(rgs []*Range, dataTypes []int) (Mark, error)
	AlwaysInRange() (bool, error)
}

type KeyConditionImpl

type KeyConditionImpl struct {
	// contains filtered or unexported fields
}

func NewKeyCondition

func NewKeyCondition(timeCondition, condition influxql.Expr, pkSchema record.Schemas) (*KeyConditionImpl, error)

func (*KeyConditionImpl) AlwaysInRange added in v1.2.0

func (kc *KeyConditionImpl) AlwaysInRange() (bool, error)

AlwaysInRange checks whether the index cannot be used, pruning in advance to improve efficiency.

func (*KeyConditionImpl) CanDoBinarySearch

func (kc *KeyConditionImpl) CanDoBinarySearch() bool

func (*KeyConditionImpl) CheckInRange

func (kc *KeyConditionImpl) CheckInRange(
	rgs []*Range,
	dataTypes []int,
) (Mark, error)

CheckInRange checks whether the condition and its negation are feasible in the direct product of single-column ranges specified by the hyper-rectangle.

func (*KeyConditionImpl) GetMaxKeyIndex

func (kc *KeyConditionImpl) GetMaxKeyIndex() int

func (*KeyConditionImpl) GetRPN

func (kc *KeyConditionImpl) GetRPN() []*RPNElement

func (*KeyConditionImpl) HavePrimaryKey

func (kc *KeyConditionImpl) HavePrimaryKey() bool

func (*KeyConditionImpl) IsFirstPrimaryKey

func (kc *KeyConditionImpl) IsFirstPrimaryKey() bool

func (*KeyConditionImpl) MayBeInRange

func (kc *KeyConditionImpl) MayBeInRange(
	usedKeySize int,
	leftKeys []*FieldRef,
	rightKeys []*FieldRef,
	dataTypes []int,
) (bool, error)

MayBeInRange is used to check whether the condition is likely to be in the target range.

func (*KeyConditionImpl) SetRPN

func (kc *KeyConditionImpl) SetRPN(rpn []*RPNElement)

type Mark

type Mark struct {
	// contains filtered or unexported fields
}

Mark represents special constants used to implement KeyCondition. When used as an initial_mask argument in the KeyCondition.CheckInRange methods, they effectively prevent calculation of the discarded Mark component, as it is already set to true.

func NewMark

func NewMark(canBeTrue, canBeFalse bool) Mark

func (Mark) And

func (m Mark) And(mask Mark) Mark

func (Mark) Not

func (m Mark) Not() Mark

func (Mark) Or

func (m Mark) Or(mask Mark) Mark

type MinMaxIndexReader added in v1.2.0

type MinMaxIndexReader struct {

	// read the data of the index according to the file and index fields.
	ReadFunc func(file interface{}, rec *record.Record, isCache bool) (*record.Record, error)
	// contains filtered or unexported fields
}

func NewMinMaxIndexReader added in v1.2.0

func NewMinMaxIndexReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (*MinMaxIndexReader, error)

func (*MinMaxIndexReader) Close added in v1.2.0

func (r *MinMaxIndexReader) Close() error

func (*MinMaxIndexReader) MayBeInFragment added in v1.2.0

func (r *MinMaxIndexReader) MayBeInFragment(fragId uint32) (bool, error)

func (*MinMaxIndexReader) ReInit added in v1.2.0

func (r *MinMaxIndexReader) ReInit(file interface{}) (err error)

type MinMaxReaderCreator added in v1.2.0

type MinMaxReaderCreator struct {
}

func (*MinMaxReaderCreator) CreateSKFileReader added in v1.2.0

func (creator *MinMaxReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)

type MinMaxWriter added in v1.2.0

type MinMaxWriter struct {
	// contains filtered or unexported fields
}

func NewMinMaxWriter added in v1.2.0

func NewMinMaxWriter(dir, msName, dataFilePath, lockPath string) *MinMaxWriter

func (*MinMaxWriter) Close added in v1.2.0

func (m *MinMaxWriter) Close() error

func (*MinMaxWriter) CreateAttachSkipIndex added in v1.2.0

func (m *MinMaxWriter) CreateAttachSkipIndex(schemaIdx, rowsPerSegment []int, writeRec *record.Record) error

func (*MinMaxWriter) CreateDetachSkipIndex added in v1.2.0

func (m *MinMaxWriter) CreateDetachSkipIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)

func (*MinMaxWriter) Open added in v1.2.0

func (m *MinMaxWriter) Open() error

type OBSFilterPath added in v1.2.0

type OBSFilterPath struct {
	// contains filtered or unexported fields
}

func NewOBSFilterPath added in v1.2.0

func NewOBSFilterPath(localPath, remotePath string, option *obs.ObsOptions) *OBSFilterPath

func (*OBSFilterPath) LocalPath added in v1.2.0

func (o *OBSFilterPath) LocalPath() string

func (*OBSFilterPath) Name added in v1.2.0

func (o *OBSFilterPath) Name() string

func (*OBSFilterPath) Option added in v1.2.0

func (o *OBSFilterPath) Option() *obs.ObsOptions

func (*OBSFilterPath) RemotePath added in v1.2.0

func (o *OBSFilterPath) RemotePath() string

type PKIndexReader added in v1.2.0

type PKIndexReader interface {
	Scan(pkFile string,
		pkRec *record.Record,
		pkMark fragment.IndexFragment,
		keyCondition KeyCondition,
	) (fragment.FragmentRanges, error)
	Close() error
}

type PKIndexReaderImpl added in v1.2.0

type PKIndexReaderImpl struct {
	// contains filtered or unexported fields
}

func NewPKIndexReader added in v1.2.0

func NewPKIndexReader(rowsNumPerFragment int, coarseIndexFragment int, minRowsForSeek int) *PKIndexReaderImpl

func (*PKIndexReaderImpl) Close added in v1.2.0

func (r *PKIndexReaderImpl) Close() error

func (*PKIndexReaderImpl) Scan added in v1.2.0

func (r *PKIndexReaderImpl) Scan(
	pkFile string,
	pkRec *record.Record,
	pkMark fragment.IndexFragment,
	keyCondition KeyCondition,
) (fragment.FragmentRanges, error)

Scan is used to filter fragment ranges based on the primary key in the condition, and it determines whether to do a binary search or an exclusion search according to the sequence of keys in the primary key. The following example illustrates the usage of Scan.

  1. origin record: x -> [1, 2, 1, 2, 1, 2, 2, 1] y -> [1, 1, 3, 4, 2, 2, 3, 4]

  2. sorted record(sorted by x, y): x -> [1, 1, 1, 1, 2, 2, 2, 2] y -> [1, 2, 3, 4, 1, 2, 3, 4]

  3. primary index record(fragment size is 2): x -> [1, 1, 2, 2, 2] y -> [1, 3, 1, 3, 4] fragment index -> [0, 1, 2, 3]

  4. key condition: x > 1 and y < 3

  5. scan results: fragment range -> [1, 3)

type PKIndexWriter added in v1.2.0

type PKIndexWriter interface {
	Build(srcRec *record.Record,
		pkSchema record.Schemas,
		rowsNumPerFragment []int,
		tcLocation int8,
		fixRowsPerSegment int,
	) (
		*record.Record, fragment.IndexFragment, error,
	)
	Close() error
}

type PKIndexWriterImpl added in v1.2.0

type PKIndexWriterImpl struct {
}

func NewPKIndexWriter added in v1.2.0

func NewPKIndexWriter() *PKIndexWriterImpl

func (*PKIndexWriterImpl) Build added in v1.2.0

func (w *PKIndexWriterImpl) Build(
	srcRec *record.Record,
	pkSchema record.Schemas,
	rowsNumPerFragment []int,
	tcLocation int8,
	fixRowsPerSegment int,
) (
	*record.Record,
	fragment.IndexFragment,
	error,
)

Build generates sparse primary index based on sorted data to be flushed to disks.

func (*PKIndexWriterImpl) Close added in v1.2.0

func (w *PKIndexWriterImpl) Close() error

type RPNElement

type RPNElement struct {
	// contains filtered or unexported fields
}

RPNElement represents an element of a Reverse Polish notation (RPN) expression. RPN is a method for conveying mathematical expressions without the use of separators such as brackets and parentheses. In this notation, the operators follow their operands, hence removing the need for brackets to define evaluation priority. More details: https://en.wikipedia.org/wiki/Reverse_Polish_notation.

func NewRPNElement added in v1.2.0

func NewRPNElement(op rpn.Op) *RPNElement

type Range

type Range struct {
	// contains filtered or unexported fields
}

Range represents a range with open or closed ends, possibly unbounded.

func NewRange

func NewRange(left, right *FieldRef, li, ri bool) *Range

type SKCondition added in v1.2.0

type SKCondition interface {
	IsExist(blockId int64, reader rpn.SKBaseReader) (bool, error)
}

func NewSKCondition added in v1.2.0

func NewSKCondition(rpnExpr *rpn.RPNExpr, schema record.Schemas) (SKCondition, error)

type SKConditionImpl added in v1.2.0

type SKConditionImpl struct {
	// contains filtered or unexported fields
}

func (*SKConditionImpl) IsExist added in v1.2.0

func (c *SKConditionImpl) IsExist(blockId int64, reader rpn.SKBaseReader) (bool, error)

type SKFileReader added in v1.2.0

type SKFileReader interface {
	// MayBeInFragment determines whether a fragment in a file meets the query condition.
	MayBeInFragment(fragId uint32) (bool, error)
	// ReInit is used so that a SKFileReader can be reused among multiple files.
	ReInit(file interface{}) error
	// Close is used to close the SKFileReader
	Close() error
}

SKFileReader acts as an executor of skip index data reading that corresponds to the index field in the query.

type SKFileReaderCreator added in v1.2.0

type SKFileReaderCreator interface {
	CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
}

SKFileReaderCreator is used to abstract SKFileReader implementation of multiple skip indexes in factory mode.

type SKFileReaderCreatorFactory added in v1.2.0

type SKFileReaderCreatorFactory struct {
	// contains filtered or unexported fields
}
var SKFileReaderInstance *SKFileReaderCreatorFactory

func GetSKFileReaderFactoryInstance added in v1.2.0

func GetSKFileReaderFactoryInstance() *SKFileReaderCreatorFactory

func NewSKFileReaderCreatorFactory added in v1.2.0

func NewSKFileReaderCreatorFactory() *SKFileReaderCreatorFactory

func (*SKFileReaderCreatorFactory) Add added in v1.2.0

func (*SKFileReaderCreatorFactory) Find added in v1.2.0

type SKIndexReader added in v1.2.0

type SKIndexReader interface {
	// CreateSKFileReaders generates SKFileReaders for each index field based on the skip index information and condition
	// which is used to quickly determine whether a fragment meets the condition.
	CreateSKFileReaders(option hybridqp.Options, mstInfo *influxql.Measurement, isCache bool) ([]SKFileReader, error)
	// Scan is used to filter fragment ranges based on the secondary key in the condition.
	Scan(reader SKFileReader, rgs fragment.FragmentRanges) (fragment.FragmentRanges, error)
	// Close is used to close the SKIndexReader
	Close() error
}

SKIndexReader serves as a skip index read interface.

type SKIndexReaderImpl added in v1.2.0

type SKIndexReaderImpl struct {
	// contains filtered or unexported fields
}

func NewSKIndexReader added in v1.2.0

func NewSKIndexReader(rowsNumPerFragment int, coarseIndexFragment int, minRowsForSeek int) *SKIndexReaderImpl

func (*SKIndexReaderImpl) Close added in v1.2.0

func (r *SKIndexReaderImpl) Close() error

func (*SKIndexReaderImpl) CreateSKFileReaders added in v1.2.0

func (r *SKIndexReaderImpl) CreateSKFileReaders(option hybridqp.Options, mstInfo *influxql.Measurement, isCache bool) ([]SKFileReader, error)

func (*SKIndexReaderImpl) Scan added in v1.2.0

type SetIndexReader added in v1.2.0

type SetIndexReader struct {
	// contains filtered or unexported fields
}

func NewSetIndexReader added in v1.2.0

func NewSetIndexReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (*SetIndexReader, error)

func (*SetIndexReader) Close added in v1.2.0

func (r *SetIndexReader) Close() error

func (*SetIndexReader) MayBeInFragment added in v1.2.0

func (r *SetIndexReader) MayBeInFragment(fragId uint32) (bool, error)

func (*SetIndexReader) ReInit added in v1.2.0

func (r *SetIndexReader) ReInit(file interface{}) (err error)

type SetReaderCreator added in v1.2.0

type SetReaderCreator struct {
}

func (*SetReaderCreator) CreateSKFileReader added in v1.2.0

func (index *SetReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)

type SetWriter added in v1.2.0

type SetWriter struct {
	// contains filtered or unexported fields
}

func NewSetWriter added in v1.2.0

func NewSetWriter(dir, msName, dataFilePath, lockPath string) *SetWriter

func (*SetWriter) Close added in v1.2.0

func (s *SetWriter) Close() error

func (*SetWriter) CreateAttachSkipIndex added in v1.2.0

func (s *SetWriter) CreateAttachSkipIndex(schemaIdx, rowsPerSegment []int, writeRec *record.Record) error

func (*SetWriter) CreateDetachSkipIndex added in v1.2.0

func (s *SetWriter) CreateDetachSkipIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)

func (*SetWriter) Open added in v1.2.0

func (s *SetWriter) Open() error

type SkInfo added in v1.2.0

type SkInfo struct {
	// contains filtered or unexported fields
}

type SkipIndex added in v1.2.0

type SkipIndex struct {
	// contains filtered or unexported fields
}

func NewSkipIndex added in v1.2.0

func NewSkipIndex() *SkipIndex

func (*SkipIndex) GenSchemaIdxes added in v1.2.0

func (s *SkipIndex) GenSchemaIdxes(schema record.Schemas, indexRelation influxql.IndexRelation)

func (*SkipIndex) GetBfIdx added in v1.2.0

func (s *SkipIndex) GetBfIdx() int

GetBfIdx gets the index within schemaIdxes; if the bloom filter index exists, bfIdx >= 0.

func (*SkipIndex) GetFullTextIdx added in v1.2.0

func (s *SkipIndex) GetFullTextIdx() int

GetFullTextIdx gets the index within schemaIdxes; if the full-text index exists, fullTextIdx >= 0.

func (*SkipIndex) GetSchemaIdxes added in v1.2.0

func (s *SkipIndex) GetSchemaIdxes() [][]int

func (*SkipIndex) GetSkipIndexWriters added in v1.2.0

func (s *SkipIndex) GetSkipIndexWriters() []SkipIndexWriter

func (*SkipIndex) NewSkipIndexWriters added in v1.2.0

func (s *SkipIndex) NewSkipIndexWriters(dir, msName, dataFilePath, lockPath string, indexRelation influxql.IndexRelation)

type SkipIndexWriter added in v1.2.0

type SkipIndexWriter interface {
	Open() error
	Close() error
	CreateAttachSkipIndex(schemaIdx, rowsPerSegment []int, writeRec *record.Record) error
	CreateDetachSkipIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)
}

func NewSkipIndexWriter added in v1.2.0

func NewSkipIndexWriter(dir, msName, dataFilePath, lockPath, indexType string) SkipIndexWriter

type TsspFile added in v1.2.0

type TsspFile interface {
	Name() string
	Path() string
}

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL