Documentation ¶
Index ¶
- Constants
- Variables
- func GetBloomFilterFilePath(dir, msName, fieldName string) string
- func GetFullTextAttachFilePath(dir, msName, dataFilePath string) string
- func GetFullTextDetachFilePath(dir, msName string) string
- func GetLocalBloomFilterBlockCnts(dir, msName, lockPath string, recSchema record.Schemas, skipIndex *SkipIndex, ...) int64
- func RegistrySKFileReaderCreator(name string, creator SKFileReaderCreator) bool
- type BloomFilterFullTextIndexReader
- type BloomFilterFullTextReaderCreator
- type BloomFilterIndexReader
- type BloomFilterReaderCreator
- type BloomFilterWriter
- func (b *BloomFilterWriter) Close() error
- func (b *BloomFilterWriter) CreateAttachSkipIndex(schemaIdx, rowsPerSegment []int, writeRec *record.Record) error
- func (b *BloomFilterWriter) CreateDetachSkipIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)
- func (b *BloomFilterWriter) GenBloomFilterData(src *record.ColVal, rowsPerSegment []int, refType int) []byte
- func (b *BloomFilterWriter) Open() error
- type ColumnRef
- type FieldRef
- func (f *FieldRef) Equals(rhs *FieldRef) bool
- func (f *FieldRef) IsNegativeInfinity() bool
- func (f *FieldRef) IsNull() bool
- func (f *FieldRef) IsPositiveInfinity() bool
- func (f *FieldRef) Less(rhs *FieldRef) bool
- func (f *FieldRef) Set(cols []*ColumnRef, column, row int)
- func (f *FieldRef) SetNegativeInfinity()
- func (f *FieldRef) SetPositiveInfinity()
- type FullTextIdxWriter
- func (f *FullTextIdxWriter) Close() error
- func (f *FullTextIdxWriter) CreateAttachSkipIndex(schemaIdx, rowsPerSegment []int, writeRec *record.Record) error
- func (f *FullTextIdxWriter) CreateDetachSkipIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)
- func (f *FullTextIdxWriter) Open() error
- type FunctionBase
- type IndexProperty
- type KeyCondition
- type KeyConditionImpl
- func (kc *KeyConditionImpl) AlwaysInRange() (bool, error)
- func (kc *KeyConditionImpl) CanDoBinarySearch() bool
- func (kc *KeyConditionImpl) CheckInRange(rgs []*Range, dataTypes []int) (Mark, error)
- func (kc *KeyConditionImpl) GetMaxKeyIndex() int
- func (kc *KeyConditionImpl) GetRPN() []*RPNElement
- func (kc *KeyConditionImpl) HavePrimaryKey() bool
- func (kc *KeyConditionImpl) IsFirstPrimaryKey() bool
- func (kc *KeyConditionImpl) MayBeInRange(usedKeySize int, leftKeys []*FieldRef, rightKeys []*FieldRef, dataTypes []int) (bool, error)
- func (kc *KeyConditionImpl) SetRPN(rpn []*RPNElement)
- type Mark
- type MinMaxIndexReader
- type MinMaxReaderCreator
- type MinMaxWriter
- func (m *MinMaxWriter) Close() error
- func (m *MinMaxWriter) CreateAttachSkipIndex(schemaIdx, rowsPerSegment []int, writeRec *record.Record) error
- func (m *MinMaxWriter) CreateDetachSkipIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)
- func (m *MinMaxWriter) Open() error
- type OBSFilterPath
- type PKIndexReader
- type PKIndexReaderImpl
- type PKIndexWriter
- type PKIndexWriterImpl
- type RPNElement
- type Range
- type SKCondition
- type SKConditionImpl
- type SKFileReader
- type SKFileReaderCreator
- type SKFileReaderCreatorFactory
- type SKIndexReader
- type SKIndexReaderImpl
- type SetIndexReader
- type SetReaderCreator
- type SetWriter
- func (s *SetWriter) Close() error
- func (s *SetWriter) CreateAttachSkipIndex(schemaIdx, rowsPerSegment []int, writeRec *record.Record) error
- func (s *SetWriter) CreateDetachSkipIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string)
- func (s *SetWriter) Open() error
- type SkInfo
- type SkipIndex
- func (s *SkipIndex) GenSchemaIdxes(schema record.Schemas, indexRelation influxql.IndexRelation)
- func (s *SkipIndex) GetBfIdx() int
- func (s *SkipIndex) GetFullTextIdx() int
- func (s *SkipIndex) GetSchemaIdxes() [][]int
- func (s *SkipIndex) GetSkipIndexWriters() []SkipIndexWriter
- func (s *SkipIndex) NewSkipIndexWriters(dir, msName, dataFilePath, lockPath string, ...)
- type SkipIndexWriter
- type TsspFile
Constants ¶
const ( FullTextIdxColumnCnt = 1 FullTextIndex = "fullText" BloomFilterFileSuffix = ".idx" BloomFilterFilePrefix = "bloomfilter_" // bloomfilter_${columnName}.idx )
const Continuous = "continuous"
const Empty = "empty"
Variables ¶
var ConsiderOnlyBeTrue = NewMark(false, true)
var ( // InitIndexFragmentFixedSize means that each fragment is fixed in size except the last fragment. InitIndexFragmentFixedSize = true )
var NEGATIVE_INFINITY = &FieldRef{row: math.MinInt64}
var POSITIVE_INFINITY = &FieldRef{row: math.MaxInt64}
var SKFileReaderOnce sync.Once
Functions ¶
func GetBloomFilterFilePath ¶ added in v1.2.0
func GetFullTextAttachFilePath ¶ added in v1.2.0
func GetFullTextDetachFilePath ¶ added in v1.2.0
func GetLocalBloomFilterBlockCnts ¶ added in v1.2.0
func GetLocalBloomFilterBlockCnts(dir, msName, lockPath string, recSchema record.Schemas, skipIndex *SkipIndex, fullTextIdx bool) int64
GetLocalBloomFilterBlockCnts gets the block count of one local bloom filter column; if it does not exist, it returns 0.
func RegistrySKFileReaderCreator ¶ added in v1.2.0
func RegistrySKFileReaderCreator(name string, creator SKFileReaderCreator) bool
RegistrySKFileReaderCreator is used to register the SKFileReaderCreator.
Types ¶
type BloomFilterFullTextIndexReader ¶ added in v1.2.0
type BloomFilterFullTextIndexReader struct {
// contains filtered or unexported fields
}
func NewBloomFilterFullTextIndexReader ¶ added in v1.2.0
func (*BloomFilterFullTextIndexReader) Close ¶ added in v1.2.0
func (r *BloomFilterFullTextIndexReader) Close() error
func (*BloomFilterFullTextIndexReader) MayBeInFragment ¶ added in v1.2.0
func (r *BloomFilterFullTextIndexReader) MayBeInFragment(fragId uint32) (bool, error)
func (*BloomFilterFullTextIndexReader) ReInit ¶ added in v1.2.0
func (r *BloomFilterFullTextIndexReader) ReInit(file interface{}) (err error)
type BloomFilterFullTextReaderCreator ¶ added in v1.2.0
type BloomFilterFullTextReaderCreator struct { }
func (*BloomFilterFullTextReaderCreator) CreateSKFileReader ¶ added in v1.2.0
func (index *BloomFilterFullTextReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
type BloomFilterIndexReader ¶ added in v1.2.0
type BloomFilterIndexReader struct {
// contains filtered or unexported fields
}
func NewBloomFilterIndexReader ¶ added in v1.2.0
func (*BloomFilterIndexReader) Close ¶ added in v1.2.0
func (r *BloomFilterIndexReader) Close() error
func (*BloomFilterIndexReader) MayBeInFragment ¶ added in v1.2.0
func (r *BloomFilterIndexReader) MayBeInFragment(fragId uint32) (bool, error)
func (*BloomFilterIndexReader) ReInit ¶ added in v1.2.0
func (r *BloomFilterIndexReader) ReInit(file interface{}) (err error)
type BloomFilterReaderCreator ¶ added in v1.2.0
type BloomFilterReaderCreator struct { }
func (*BloomFilterReaderCreator) CreateSKFileReader ¶ added in v1.2.0
func (index *BloomFilterReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
type BloomFilterWriter ¶ added in v1.2.0
type BloomFilterWriter struct {
// contains filtered or unexported fields
}
func NewBloomFilterWriter ¶ added in v1.2.0
func NewBloomFilterWriter(dir, msName, dataFilePath, lockPath string) *BloomFilterWriter
func (*BloomFilterWriter) Close ¶ added in v1.2.0
func (b *BloomFilterWriter) Close() error
func (*BloomFilterWriter) CreateAttachSkipIndex ¶ added in v1.2.0
func (b *BloomFilterWriter) CreateAttachSkipIndex(schemaIdx, rowsPerSegment []int, writeRec *record.Record) error
func (*BloomFilterWriter) CreateDetachSkipIndex ¶ added in v1.2.0
func (*BloomFilterWriter) GenBloomFilterData ¶ added in v1.2.0
func (*BloomFilterWriter) Open ¶ added in v1.2.0
func (b *BloomFilterWriter) Open() error
type FieldRef ¶
type FieldRef struct {
// contains filtered or unexported fields
}
func (*FieldRef) IsNegativeInfinity ¶
func (*FieldRef) IsPositiveInfinity ¶
func (*FieldRef) SetNegativeInfinity ¶
func (f *FieldRef) SetNegativeInfinity()
func (*FieldRef) SetPositiveInfinity ¶
func (f *FieldRef) SetPositiveInfinity()
type FullTextIdxWriter ¶ added in v1.2.0
type FullTextIdxWriter struct {
// contains filtered or unexported fields
}
func NewFullTextIdxWriter ¶ added in v1.2.0
func NewFullTextIdxWriter(dir, msName, dataFilePath, lockPath string) *FullTextIdxWriter
func (*FullTextIdxWriter) Close ¶ added in v1.2.0
func (f *FullTextIdxWriter) Close() error
func (*FullTextIdxWriter) CreateAttachSkipIndex ¶ added in v1.2.0
func (f *FullTextIdxWriter) CreateAttachSkipIndex(schemaIdx, rowsPerSegment []int, writeRec *record.Record) error
func (*FullTextIdxWriter) CreateDetachSkipIndex ¶ added in v1.2.0
func (*FullTextIdxWriter) Open ¶ added in v1.2.0
func (f *FullTextIdxWriter) Open() error
type FunctionBase ¶
type FunctionBase struct { }
type IndexProperty ¶
func NewIndexProperty ¶
func NewIndexProperty(rowsNumPerFragment, coarseIndexFragment, minRowsForSeek int) *IndexProperty
type KeyCondition ¶
type KeyCondition interface { HavePrimaryKey() bool GetMaxKeyIndex() int IsFirstPrimaryKey() bool CanDoBinarySearch() bool MayBeInRange(usedKeySize int, indexLeft []*FieldRef, indexRight []*FieldRef, dataTypes []int) (bool, error) CheckInRange(rgs []*Range, dataTypes []int) (Mark, error) AlwaysInRange() (bool, error) }
type KeyConditionImpl ¶
type KeyConditionImpl struct {
// contains filtered or unexported fields
}
func NewKeyCondition ¶
func (*KeyConditionImpl) AlwaysInRange ¶ added in v1.2.0
func (kc *KeyConditionImpl) AlwaysInRange() (bool, error)
AlwaysInRange checks whether the index cannot be used at all, allowing pruning in advance to improve efficiency.
func (*KeyConditionImpl) CanDoBinarySearch ¶
func (kc *KeyConditionImpl) CanDoBinarySearch() bool
func (*KeyConditionImpl) CheckInRange ¶
func (kc *KeyConditionImpl) CheckInRange( rgs []*Range, dataTypes []int, ) (Mark, error)
CheckInRange checks whether the condition and its negation are feasible in the direct product of single-column ranges specified by the hyper-rectangle.
func (*KeyConditionImpl) GetMaxKeyIndex ¶
func (kc *KeyConditionImpl) GetMaxKeyIndex() int
func (*KeyConditionImpl) GetRPN ¶
func (kc *KeyConditionImpl) GetRPN() []*RPNElement
func (*KeyConditionImpl) HavePrimaryKey ¶
func (kc *KeyConditionImpl) HavePrimaryKey() bool
func (*KeyConditionImpl) IsFirstPrimaryKey ¶
func (kc *KeyConditionImpl) IsFirstPrimaryKey() bool
func (*KeyConditionImpl) MayBeInRange ¶
func (kc *KeyConditionImpl) MayBeInRange( usedKeySize int, leftKeys []*FieldRef, rightKeys []*FieldRef, dataTypes []int, ) (bool, error)
MayBeInRange is used to check whether the condition is likely to be in the target range.
func (*KeyConditionImpl) SetRPN ¶
func (kc *KeyConditionImpl) SetRPN(rpn []*RPNElement)
type Mark ¶
type Mark struct {
// contains filtered or unexported fields
}
Mark represents the special constants used to implement KeyCondition. When used as an initial_mask argument in KeyCondition.CheckInRange methods, they effectively prevent calculation of the discarded Mark component, as it is already set to true.
type MinMaxIndexReader ¶ added in v1.2.0
type MinMaxIndexReader struct { // read the data of the index according to the file and index fields. ReadFunc func(file interface{}, rec *record.Record, isCache bool) (*record.Record, error) // contains filtered or unexported fields }
func NewMinMaxIndexReader ¶ added in v1.2.0
func (*MinMaxIndexReader) Close ¶ added in v1.2.0
func (r *MinMaxIndexReader) Close() error
func (*MinMaxIndexReader) MayBeInFragment ¶ added in v1.2.0
func (r *MinMaxIndexReader) MayBeInFragment(fragId uint32) (bool, error)
func (*MinMaxIndexReader) ReInit ¶ added in v1.2.0
func (r *MinMaxIndexReader) ReInit(file interface{}) (err error)
type MinMaxReaderCreator ¶ added in v1.2.0
type MinMaxReaderCreator struct { }
func (*MinMaxReaderCreator) CreateSKFileReader ¶ added in v1.2.0
func (creator *MinMaxReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
type MinMaxWriter ¶ added in v1.2.0
type MinMaxWriter struct {
// contains filtered or unexported fields
}
func NewMinMaxWriter ¶ added in v1.2.0
func NewMinMaxWriter(dir, msName, dataFilePath, lockPath string) *MinMaxWriter
func (*MinMaxWriter) Close ¶ added in v1.2.0
func (m *MinMaxWriter) Close() error
func (*MinMaxWriter) CreateAttachSkipIndex ¶ added in v1.2.0
func (m *MinMaxWriter) CreateAttachSkipIndex(schemaIdx, rowsPerSegment []int, writeRec *record.Record) error
func (*MinMaxWriter) CreateDetachSkipIndex ¶ added in v1.2.0
func (*MinMaxWriter) Open ¶ added in v1.2.0
func (m *MinMaxWriter) Open() error
type OBSFilterPath ¶ added in v1.2.0
type OBSFilterPath struct {
// contains filtered or unexported fields
}
func NewOBSFilterPath ¶ added in v1.2.0
func NewOBSFilterPath(localPath, remotePath string, option *obs.ObsOptions) *OBSFilterPath
func (*OBSFilterPath) LocalPath ¶ added in v1.2.0
func (o *OBSFilterPath) LocalPath() string
func (*OBSFilterPath) Name ¶ added in v1.2.0
func (o *OBSFilterPath) Name() string
func (*OBSFilterPath) Option ¶ added in v1.2.0
func (o *OBSFilterPath) Option() *obs.ObsOptions
func (*OBSFilterPath) RemotePath ¶ added in v1.2.0
func (o *OBSFilterPath) RemotePath() string
type PKIndexReader ¶ added in v1.2.0
type PKIndexReader interface { Scan(pkFile string, pkRec *record.Record, pkMark fragment.IndexFragment, keyCondition KeyCondition, ) (fragment.FragmentRanges, error) Close() error }
type PKIndexReaderImpl ¶ added in v1.2.0
type PKIndexReaderImpl struct {
// contains filtered or unexported fields
}
func NewPKIndexReader ¶ added in v1.2.0
func NewPKIndexReader(rowsNumPerFragment int, coarseIndexFragment int, minRowsForSeek int) *PKIndexReaderImpl
func (*PKIndexReaderImpl) Close ¶ added in v1.2.0
func (r *PKIndexReaderImpl) Close() error
func (*PKIndexReaderImpl) Scan ¶ added in v1.2.0
func (r *PKIndexReaderImpl) Scan( pkFile string, pkRec *record.Record, pkMark fragment.IndexFragment, keyCondition KeyCondition, ) (fragment.FragmentRanges, error)
Scan is used to filter fragment ranges based on the primary key in the condition, and it determines whether to do binary search or exclusion search according to the sequence of keys in the primary key. A specific example below illustrates the usage of Scan.
origin record: x -> [1, 2, 1, 2, 1, 2, 2, 1] y -> [1, 1, 3, 4, 2, 2, 3, 4]
sorted record(sorted by x, y): x -> [1, 1, 1, 1, 2, 2, 2, 2] y -> [1, 2, 3, 4, 1, 2, 3, 4]
primary index record(fragment size is 2): x -> [1, 1, 2, 2, 2] y -> [1, 3, 1, 3, 4] fragment index -> [0, 1, 2, 3]
key condition: x > 1 and y < 3
scan results: fragment range -> [1, 3)
type PKIndexWriter ¶ added in v1.2.0
type PKIndexWriterImpl ¶ added in v1.2.0
type PKIndexWriterImpl struct { }
func NewPKIndexWriter ¶ added in v1.2.0
func NewPKIndexWriter() *PKIndexWriterImpl
func (*PKIndexWriterImpl) Build ¶ added in v1.2.0
func (w *PKIndexWriterImpl) Build( srcRec *record.Record, pkSchema record.Schemas, rowsNumPerFragment []int, tcLocation int8, fixRowsPerSegment int, ) ( *record.Record, fragment.IndexFragment, error, )
Build generates sparse primary index based on sorted data to be flushed to disks.
func (*PKIndexWriterImpl) Close ¶ added in v1.2.0
func (w *PKIndexWriterImpl) Close() error
type RPNElement ¶
type RPNElement struct {
// contains filtered or unexported fields
}
RPNElement represents an element of a Reverse Polish Notation (RPN) expression. RPN is a method for conveying mathematical expressions without the use of separators such as brackets and parentheses. In this notation, the operators follow their operands, hence removing the need for brackets to define evaluation priority. More details: https://en.wikipedia.org/wiki/Reverse_Polish_notation.
func NewRPNElement ¶ added in v1.2.0
func NewRPNElement(op rpn.Op) *RPNElement
type Range ¶
type Range struct {
// contains filtered or unexported fields
}
Range represents a range with open or closed ends, possibly unbounded.
type SKCondition ¶ added in v1.2.0
type SKCondition interface {
IsExist(blockId int64, reader rpn.SKBaseReader) (bool, error)
}
func NewSKCondition ¶ added in v1.2.0
type SKConditionImpl ¶ added in v1.2.0
type SKConditionImpl struct {
// contains filtered or unexported fields
}
func (*SKConditionImpl) IsExist ¶ added in v1.2.0
func (c *SKConditionImpl) IsExist(blockId int64, reader rpn.SKBaseReader) (bool, error)
type SKFileReader ¶ added in v1.2.0
type SKFileReader interface { // MayBeInFragment determines whether a fragment in a file meets the query condition. MayBeInFragment(fragId uint32) (bool, error) // ReInit is used so that a SKFileReader can be reused among multiple files. ReInit(file interface{}) error // Close is used to close the SKFileReader Close() error }
SKFileReader is an executor of skip index data reading that corresponds to the index field in the query.
type SKFileReaderCreator ¶ added in v1.2.0
type SKFileReaderCreator interface {
CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
}
SKFileReaderCreator is used to abstract the SKFileReader implementations of multiple skip indexes in factory mode.
type SKFileReaderCreatorFactory ¶ added in v1.2.0
type SKFileReaderCreatorFactory struct {
// contains filtered or unexported fields
}
var SKFileReaderInstance *SKFileReaderCreatorFactory
func GetSKFileReaderFactoryInstance ¶ added in v1.2.0
func GetSKFileReaderFactoryInstance() *SKFileReaderCreatorFactory
func NewSKFileReaderCreatorFactory ¶ added in v1.2.0
func NewSKFileReaderCreatorFactory() *SKFileReaderCreatorFactory
func (*SKFileReaderCreatorFactory) Add ¶ added in v1.2.0
func (s *SKFileReaderCreatorFactory) Add(name string, creator SKFileReaderCreator)
func (*SKFileReaderCreatorFactory) Find ¶ added in v1.2.0
func (s *SKFileReaderCreatorFactory) Find(name string) (SKFileReaderCreator, bool)
type SKIndexReader ¶ added in v1.2.0
type SKIndexReader interface { // CreateSKFileReaders generates SKFileReaders for each index field based on the skip index information and condition // which is used to quickly determine whether a fragment meets the condition. CreateSKFileReaders(option hybridqp.Options, mstInfo *influxql.Measurement, isCache bool) ([]SKFileReader, error) // Scan is used to filter fragment ranges based on the secondary key in the condition. Scan(reader SKFileReader, rgs fragment.FragmentRanges) (fragment.FragmentRanges, error) // Close is used to close the SKIndexReader Close() error }
SKIndexReader is a skip index read interface.
type SKIndexReaderImpl ¶ added in v1.2.0
type SKIndexReaderImpl struct {
// contains filtered or unexported fields
}
func NewSKIndexReader ¶ added in v1.2.0
func NewSKIndexReader(rowsNumPerFragment int, coarseIndexFragment int, minRowsForSeek int) *SKIndexReaderImpl
func (*SKIndexReaderImpl) Close ¶ added in v1.2.0
func (r *SKIndexReaderImpl) Close() error
func (*SKIndexReaderImpl) CreateSKFileReaders ¶ added in v1.2.0
func (r *SKIndexReaderImpl) CreateSKFileReaders(option hybridqp.Options, mstInfo *influxql.Measurement, isCache bool) ([]SKFileReader, error)
func (*SKIndexReaderImpl) Scan ¶ added in v1.2.0
func (r *SKIndexReaderImpl) Scan( reader SKFileReader, rgs fragment.FragmentRanges, ) (fragment.FragmentRanges, error)
type SetIndexReader ¶ added in v1.2.0
type SetIndexReader struct {
// contains filtered or unexported fields
}
func NewSetIndexReader ¶ added in v1.2.0
func (*SetIndexReader) Close ¶ added in v1.2.0
func (r *SetIndexReader) Close() error
func (*SetIndexReader) MayBeInFragment ¶ added in v1.2.0
func (r *SetIndexReader) MayBeInFragment(fragId uint32) (bool, error)
func (*SetIndexReader) ReInit ¶ added in v1.2.0
func (r *SetIndexReader) ReInit(file interface{}) (err error)
type SetReaderCreator ¶ added in v1.2.0
type SetReaderCreator struct { }
func (*SetReaderCreator) CreateSKFileReader ¶ added in v1.2.0
func (index *SetReaderCreator) CreateSKFileReader(rpnExpr *rpn.RPNExpr, schema record.Schemas, option hybridqp.Options, isCache bool) (SKFileReader, error)
type SetWriter ¶ added in v1.2.0
type SetWriter struct {
// contains filtered or unexported fields
}
func NewSetWriter ¶ added in v1.2.0
func (*SetWriter) CreateAttachSkipIndex ¶ added in v1.2.0
func (*SetWriter) CreateDetachSkipIndex ¶ added in v1.2.0
type SkipIndex ¶ added in v1.2.0
type SkipIndex struct {
// contains filtered or unexported fields
}
func NewSkipIndex ¶ added in v1.2.0
func NewSkipIndex() *SkipIndex
func (*SkipIndex) GenSchemaIdxes ¶ added in v1.2.0
func (s *SkipIndex) GenSchemaIdxes(schema record.Schemas, indexRelation influxql.IndexRelation)
func (*SkipIndex) GetBfIdx ¶ added in v1.2.0
GetBfIdx gets the idx in schemaIdxes; if the bloom filter index exists, bfIdx >= 0.
func (*SkipIndex) GetFullTextIdx ¶ added in v1.2.0
GetFullTextIdx gets the idx in schemaIdxes; if the full-text index exists, fullTextIdx >= 0.
func (*SkipIndex) GetSchemaIdxes ¶ added in v1.2.0
func (*SkipIndex) GetSkipIndexWriters ¶ added in v1.2.0
func (s *SkipIndex) GetSkipIndexWriters() []SkipIndexWriter
func (*SkipIndex) NewSkipIndexWriters ¶ added in v1.2.0
func (s *SkipIndex) NewSkipIndexWriters(dir, msName, dataFilePath, lockPath string, indexRelation influxql.IndexRelation)
type SkipIndexWriter ¶ added in v1.2.0
type SkipIndexWriter interface { Open() error Close() error CreateAttachSkipIndex(schemaIdx, rowsPerSegment []int, writeRec *record.Record) error CreateDetachSkipIndex(writeRec *record.Record, schemaIdx, rowsPerSegment []int, dataBuf [][]byte) ([][]byte, []string) }
func NewSkipIndexWriter ¶ added in v1.2.0
func NewSkipIndexWriter(dir, msName, dataFilePath, lockPath, indexType string) SkipIndexWriter