immutable

package
v1.2.0 Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Feb 29, 2024 License: Apache-2.0 Imports: 67 Imported by: 0

Documentation

Overview

Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

Index

Constants

View Source
const (
	ChunkMetaSize int64 = 128 * 1024 * 1024
	ChunkMetaTTL        = 60 * time.Minute
)
View Source
const (
	DefaultMaxRowsPerSegment4TsStore  = 1000
	DefaultMaxRowsPerSegment4ColStore = colstore.RowsNumPerFragment // should be the same as RowsNumPerFragment@colstore
	DefaultMaxSegmentLimit4ColStore   = 256 * 1024
	DefaultMaxChunkMetaItemSize       = 256 * 1024
	DefaultMaxChunkMetaItemCount      = 512
	CompressModMaxChunkMetaItemCount  = 16

	NonStreamingCompact               = 2
	StreamingCompact                  = 1
	AutoCompact                       = 0
	DefaultExpectedSegmentSize uint32 = 1024 * 1024
)
View Source
const (
	QueryMetaCacheTTL            = 10 * time.Minute
	QueryMetaDataCacheSize int64 = 50 * 1024 * 1024 * int64(MetaIndexLen+128*int(unsafe.Sizeof(fragment.FragmentRange{})))
)
View Source
const (
	MetaIndexLimitNum         = 16
	MetaIndexHeaderSize int64 = 16
	MetaIndexItemSize         = int64(util.Int64SizeBytes*3 + util.Uint64SizeBytes + util.Uint32SizeBytes)
)
View Source
const (
	PKMetaLimitNum   = 16
	PKMetaPrefixSize = util.Uint64SizeBytes*2 + util.Uint32SizeBytes*2
)
View Source
const (
	PKMetaInfoLength int64 = 12
	PkMetaHeaderSize       = int64(util.Uint32SizeBytes * 2)
)
View Source
const (
	PRELOAD = iota
	LOAD
)
View Source
const (
	SwapperCompressNone   = 0
	SwapperCompressSnappy = 1
	SwapperCompressZSTD   = 2
)
View Source
const (
	MergeFirstAvgSize = 10 * 1024 * 1024
	MergeFirstDstSize = 10 * 1024 * 1024
	MergeFirstRatio   = 0.5
)
View Source
const (
	MinMaxTimeLen        = int(unsafe.Sizeof(SegmentRange{}))
	MetaIndexLen         = int(unsafe.Sizeof(MetaIndex{}))
	DetachedMetaIndexLen = int(unsafe.Sizeof(MetaIndex{}) - 4) // the count field is not used
)
View Source
const (
	BLOOMFILTER_SIZE         = 8
	SERIESKEY_STATISTIC_SIZE = 24
	COMPRESSION_RATIO        = 2
)
View Source
const (
	IndexOfTimeStoreFlag         = 0
	IndexOfChunkMetaCompressFlag = 1

	TimeStoreFlag = 1
)
View Source
const (
	ChunkMetaCompressNone   = 0
	ChunkMetaCompressSnappy = 1
)
View Source
const (
	DownSampleLogDir = "downsample_log"

	TsspDirName        = "tssp"
	ColumnStoreDirName = obs.ColumnStoreDirName
	CountBinFile       = "count.txt"
)
View Source
const (
	DataFile        = "segment.bin"
	ChunkMetaFile   = "segment.meta"
	MetaIndexFile   = "segment.idx"
	PrimaryKeyFile  = "primary.idx"
	PrimaryMetaFile = "primary.meta"
)
View Source
const (
	BatchReaderRecordNum = 8
)
View Source
const (
	CompactLevels = 7
)
View Source
const (
	FD_OUTSIDE uint32 = 0x00001
)
View Source
const (
	MetaIndexSegmentNum = 16
)
View Source
const PKDataLimitNum = 16

Variables

View Source
var (
	ErrCompStopped        = errors.New("compact stopped")
	ErrDownSampleStopped  = errors.New("downSample stopped")
	ErrDroppingMst        = errors.New("measurement is dropped")
	LevelCompactRule      = []uint16{0, 1, 0, 2, 0, 3, 0, 1, 2, 3, 0, 4, 0, 5, 0, 1, 2, 6}
	LevelCompactRuleForCs = []uint16{0, 1, 0, 1, 0, 1} // columnStore currently only performs level 0 and level 1 compaction, but the full functionality is available
	LeveLMinGroupFiles    = [CompactLevels]int{8, 4, 4, 4, 4, 4, 2}

	EnableMergeOutOfOrder       = true
	MaxNumOfFileToMerge         = 256
	MaxSizeOfFileToMerge  int64 = 512 * 1024 * 1024 // 512MB

)
View Source
var (
	SegmentLen       = (Segment{}).bytes()
	ColumnMetaLenMin = (ColumnMeta{}).bytes(1)
	ChunkMetaMinLen  = (&ChunkMeta{}).minBytes()
)
View Source
var ErrDirtyLog = errors.New("incomplete log file")
View Source
var PrefixDataPath string

Functions

func AggregateData

func AggregateData(newRec, baseRec *record.Record, ops []*comm.CallOption) bool

func CacheDataInMemory

func CacheDataInMemory() bool

func CacheMetaInMemory

func CacheMetaInMemory() bool

func CanEncodeOneRowMode added in v1.2.0

func CanEncodeOneRowMode(col *record.ColVal) bool

func CompactRecovery

func CompactRecovery(path string, group *CompactGroup)

func CreateTSSPFileReader

func CreateTSSPFileReader(size int64, fd fileops.File, trailer *Trailer, tb *TableData, ver uint64, tmp bool, lockPath *string) (*tsspFileReader, error)

func DecodeColumnHeader added in v1.2.0

func DecodeColumnHeader(col *record.ColVal, data []byte, colType uint8) ([]byte, []byte, error)

func DecodeColumnOfOneValue added in v1.2.0

func DecodeColumnOfOneValue(data []byte, col *record.ColVal, typ uint8)

func EncodeColumnHeader added in v1.2.0

func EncodeColumnHeader(col *record.ColVal, dst []byte, typ uint8) []byte

func EstimateBufferSize

func EstimateBufferSize(recSize int, rows int) int

func FileOperation added in v1.0.0

func FileOperation(f TSSPFile, op func())

func FillNilCol added in v1.1.0

func FillNilCol(col *record.ColVal, size int, ref *record.Field)

func FilterByField

func FilterByField(rec *record.Record, filterRec *record.Record, filterOption *BaseFilterOptions, con influxql.Expr, rowFilters *[]clv.RowFilter,
	tags *influx.PointTags, filterBitmap *bitmap.FilterBitmap, colAux **ColAux) *record.Record

func FilterByFieldFuncs added in v1.1.0

func FilterByFieldFuncs(rec, filterRec *record.Record, filterOption *BaseFilterOptions, filterBitmap *bitmap.FilterBitmap) *record.Record

func FilterByOpts added in v1.0.1

func FilterByOpts(rec *record.Record, opt *FilterOptions) *record.Record

func FilterByTime

func FilterByTime(rec *record.Record, tr util.TimeRange) *record.Record

func FilterByTimeDescend

func FilterByTimeDescend(rec *record.Record, tr util.TimeRange) *record.Record

func GenFixRowsPerSegment added in v1.2.0

func GenFixRowsPerSegment(data *record.Record, rowNumPerSegment int) []int

func GenLogFileName added in v1.0.0

func GenLogFileName(logSeq *uint64) string

func GenRecByReserveIds added in v1.2.0

func GenRecByReserveIds(rec, filterRec *record.Record, rowNumber []int, redIdxMap map[int]struct{}) *record.Record

func GetBloomFilterBuf added in v1.2.0

func GetBloomFilterBuf() *bloomFilter

func GetChunkMetaCompressMode added in v1.2.0

func GetChunkMetaCompressMode() uint8

func GetDir added in v1.1.0

func GetDir(engineType config.EngineType, path string) string

func GetMaxRowsPerSegment4TsStore added in v1.1.0

func GetMaxRowsPerSegment4TsStore() int

func GetMergeFlag4TsStore added in v1.1.0

func GetMergeFlag4TsStore() int32

func GetMetaIndexChunkCount added in v1.2.0

func GetMetaIndexChunkCount(obsOptions *obs.ObsOptions, dataPath string) (int64, error)

func GetMetaIndexOffsetAndLengthByChunkId added in v1.2.0

func GetMetaIndexOffsetAndLengthByChunkId(chunkId int64) (offset, length int64)

func GetPKMetaOffsetLengthByChunkId added in v1.2.0

func GetPKMetaOffsetLengthByChunkId(pkMetaInfo *colstore.DetachedPKMetaInfo, chunkId int) (offset, length int64)

func GetPrefixDataPath added in v1.2.0

func GetPrefixDataPath() string

func GetSortKeyColVal added in v1.1.1

func GetSortKeyColVal(fi *FileIterator, sortKey []record.Field, ctx *ReadContext, tcDuration time.Duration, segPosition int, compactWithBlock bool) ([]*record.ColVal, []record.SortItem, error)

func GetTmpFileSuffix added in v1.1.0

func GetTmpFileSuffix() string

func Init

func Init()

func InitDecFunctions

func InitDecFunctions()

func InitQueryFileCache added in v1.1.0

func InitQueryFileCache(cap uint32, enable bool)

func InitWriterPool added in v1.1.0

func InitWriterPool(size int)

func IsFlushToFinalFile added in v1.2.0

func IsFlushToFinalFile(totalSegmentCnt, flushToFinalFileLimit uint64) bool

func IsInterfaceNil

func IsInterfaceNil(value interface{}) bool

func IsTempleFile

func IsTempleFile(name string) bool

func LeftBound added in v1.2.0

func LeftBound(nums []uint32, target uint32, left int) int

func MergeRecovery

func MergeRecovery(path string, name string, ctx *mergeContext)

func MergeTimes added in v1.1.0

func MergeTimes(a []int64, b []int64, dst []int64) []int64

func NewCsImmTableImpl added in v1.2.0

func NewCsImmTableImpl() *csImmTableImpl

func NewLastMergeTime added in v1.0.0

func NewLastMergeTime() *lastMergeTime

func NewMemReaderEvictCtx

func NewMemReaderEvictCtx() *memReaderEvictCtx

func NewMergeContext added in v1.0.0

func NewMergeContext(mst string) *mergeContext

func NewMergePerformer added in v1.0.0

func NewMergePerformer(ur *UnorderedReader, stat *statistics.MergeStatItem) *mergePerformer

func NewObsWriter added in v1.2.0

func NewObsWriter(path, fileName string, obsOpts *obs.ObsOptions) (*obsWriter, error)

func NewObsWriterByFd added in v1.2.0

func NewObsWriterByFd(fd fileops.File, obsOpts *obs.ObsOptions) (*obsWriter, error)

func NewTSSPFileReader

func NewTSSPFileReader(name string, lockPath *string) (*tsspFileReader, error)

func NewTsImmTable added in v1.2.0

func NewTsImmTable() *tsImmTableImpl

func NonStreamingCompaction

func NonStreamingCompaction(fi FilesInfo) bool

func OpenObsFile added in v1.2.0

func OpenObsFile(path, fileName string, obsOpts *obs.ObsOptions) (fileops.File, error)

func PreAggOnlyOneRow added in v1.2.0

func PreAggOnlyOneRow(buf []byte) bool

func PutBloomFilterBuf added in v1.2.0

func PutBloomFilterBuf(key *bloomFilter)

func PutChunkMeta added in v1.2.0

func PutChunkMeta(filePath string, chunkMeta *ChunkMeta)

func PutDetachedSegmentTask added in v1.2.0

func PutDetachedSegmentTask(queryID string, meta IndexFrags)

func PutIDTimePairs

func PutIDTimePairs(pair *IdTimePairs)

func ReleaseColumnBuilder

func ReleaseColumnBuilder(b PreAggBuilder)

func ReleaseMsBuilder added in v1.0.0

func ReleaseMsBuilder(msb *MsBuilder)

func RemoveTsspSuffix added in v1.1.0

func RemoveTsspSuffix(dataPath string) string

func RenameIndexFiles added in v1.2.0

func RenameIndexFiles(fname string, indexList []string) error

func RenameTmpFiles

func RenameTmpFiles(newFiles []TSSPFile) error

func RenameTmpFilesWithPKIndex added in v1.1.0

func RenameTmpFilesWithPKIndex(newFiles []TSSPFile, indexList []string) error

func RenameTmpFullTextIdxFile added in v1.2.0

func RenameTmpFullTextIdxFile(msb *MsBuilder) error

func ResetAggregateData

func ResetAggregateData(newRec *record.Record, ops []*comm.CallOption)

func SetCacheDataBlock

func SetCacheDataBlock(en bool)

func SetCacheMetaData

func SetCacheMetaData(en bool)

func SetChunkMetaCompressMode added in v1.2.0

func SetChunkMetaCompressMode(mode int)

func SetCompactLimit

func SetCompactLimit(bytesPerSec int64, burstLimit int64)

func SetCompactionEnabled added in v1.2.0

func SetCompactionEnabled(compactionEnabled bool)

func SetDetachedFlushEnabled added in v1.2.0

func SetDetachedFlushEnabled(detachFlushEnabled bool)

func SetFragmentsNumPerFlush added in v1.1.1

func SetFragmentsNumPerFlush(fragmentsNumPerFlush int)

func SetImmTableMaxMemoryPercentage

func SetImmTableMaxMemoryPercentage(sysTotalMem, percentage int)

func SetIndexCompressMode added in v1.2.0

func SetIndexCompressMode(mode int)

func SetMaxCompactor

func SetMaxCompactor(n int)

func SetMaxFullCompactor

func SetMaxFullCompactor(n int)

func SetMaxRowsPerSegment4TsStore added in v1.1.0

func SetMaxRowsPerSegment4TsStore(maxRowsPerSegmentLimit int)

func SetMaxSegmentLimit4TsStore added in v1.1.0

func SetMaxSegmentLimit4TsStore(limit int)

func SetMergeFlag4TsStore added in v1.1.0

func SetMergeFlag4TsStore(v int32)

func SetPrefixDataPath added in v1.2.0

func SetPrefixDataPath(dataPath string)

func SetSnapshotLimit

func SetSnapshotLimit(bytesPerSec int64, burstLimit int64)

func SetSnapshotTblNum added in v1.1.0

func SetSnapshotTblNum(snapshotTblNum int)

func SnapshotLimit

func SnapshotLimit() bool

func SumFilesSize added in v1.0.0

func SumFilesSize(files []TSSPFile) int64

func TimeSorted added in v1.2.0

func TimeSorted(sortKeys []string) bool

func UnrefFiles

func UnrefFiles(files ...TSSPFile)

func UpdateChunkMetaFunc added in v1.2.0

func UpdateChunkMetaFunc(_, _ cache.Entry) bool

func UpdateDetachedMetaDataCache added in v1.2.0

func UpdateDetachedMetaDataCache(old, new cache.Entry) bool

func WriteIntoFile added in v1.1.1

func WriteIntoFile(msb *MsBuilder, tmp bool, withPKIndex bool, skipIndex *influxql.IndexRelation) error

Types

type AccumulateMetaIndex added in v1.2.0

type AccumulateMetaIndex struct {
	// contains filtered or unexported fields
}

func (*AccumulateMetaIndex) GetBlockId added in v1.2.0

func (a *AccumulateMetaIndex) GetBlockId() uint64

func (*AccumulateMetaIndex) SetAccumulateMetaIndex added in v1.2.0

func (a *AccumulateMetaIndex) SetAccumulateMetaIndex(pkDataOffset uint32, blockId uint64, dataOffset, offset int64)

type BaseFilterOptions added in v1.1.0

type BaseFilterOptions struct {
	FiltersMap    influxql.FilterMapValuer
	RedIdxMap     map[int]struct{} // redundant columns, which are not required after filtering.
	FieldsIdx     []int            // field index in schema
	FilterTags    []string         // filter tag name
	CondFunctions *binaryfilterfunc.ConditionImpl
}

type BloomFilterIterator added in v1.2.0

type BloomFilterIterator struct {
	// contains filtered or unexported fields
}

func NewBloomFilterIterator added in v1.2.0

func NewBloomFilterIterator(f *FragmentIterators, oldFiles []TSSPFile, bfCols []string) (*BloomFilterIterator, error)

func (*BloomFilterIterator) AppendFileIdx added in v1.2.0

func (bfi *BloomFilterIterator) AppendFileIdx(fileIdx int)

func (*BloomFilterIterator) Write added in v1.2.0

func (bfi *BloomFilterIterator) Write(toLocal bool) error

type BooleanPreAgg

type BooleanPreAgg struct {
	// contains filtered or unexported fields
}

func NewBooleanPreAgg

func NewBooleanPreAgg() *BooleanPreAgg

type BufferReader added in v1.0.0

type BufferReader struct {
	// contains filtered or unexported fields
}

func NewBufferReader added in v1.0.0

func NewBufferReader(maxSize uint32) *BufferReader

func (*BufferReader) Read added in v1.0.0

func (br *BufferReader) Read(offset int64, size uint32) ([]byte, error)

func (*BufferReader) Reset added in v1.0.0

func (br *BufferReader) Reset(r TSSPFile)

type ChunkDataBuilder

type ChunkDataBuilder struct {
	// contains filtered or unexported fields
}

func NewChunkDataBuilder

func NewChunkDataBuilder(maxRowsPerSegment, maxSegmentLimit int) *ChunkDataBuilder

func (*ChunkDataBuilder) EncodeTime

func (b *ChunkDataBuilder) EncodeTime(offset int64, timeSorted bool) error

type ChunkIterator

type ChunkIterator struct {
	*FileIterator
	// contains filtered or unexported fields
}

func NewChunkIterator

func NewChunkIterator(r *FileIterator) *ChunkIterator

func (*ChunkIterator) Close

func (c *ChunkIterator) Close()

func (*ChunkIterator) GetRecord added in v1.0.0

func (c *ChunkIterator) GetRecord() *record.Record

func (*ChunkIterator) GetSeriesID added in v1.0.0

func (c *ChunkIterator) GetSeriesID() uint64

func (*ChunkIterator) Next

func (c *ChunkIterator) Next() bool

func (*ChunkIterator) WithLog

func (c *ChunkIterator) WithLog(log *Log.Logger)

type ChunkIterators

type ChunkIterators struct {
	// contains filtered or unexported fields
}

func (*ChunkIterators) Close

func (c *ChunkIterators) Close()

func (*ChunkIterators) Len

func (c *ChunkIterators) Len() int

func (*ChunkIterators) Less

func (c *ChunkIterators) Less(i, j int) bool

func (*ChunkIterators) Next

func (c *ChunkIterators) Next() (uint64, *record.Record, error)

func (*ChunkIterators) Pop

func (c *ChunkIterators) Pop() interface{}

func (*ChunkIterators) Push

func (c *ChunkIterators) Push(v interface{})

func (*ChunkIterators) Swap

func (c *ChunkIterators) Swap(i, j int)

func (*ChunkIterators) WithLog

func (c *ChunkIterators) WithLog(log *Log.Logger)

type ChunkMeta

type ChunkMeta struct {
	// contains filtered or unexported fields
}

func GetChunkMeta added in v1.2.0

func GetChunkMeta(filePath string) (*ChunkMeta, bool)

func (*ChunkMeta) AllocColMeta added in v1.1.0

func (m *ChunkMeta) AllocColMeta(ref *record.Field) *ColumnMeta

func (*ChunkMeta) Clone added in v1.0.0

func (m *ChunkMeta) Clone() *ChunkMeta

func (*ChunkMeta) DelEmptyColMeta added in v1.1.0

func (m *ChunkMeta) DelEmptyColMeta()

func (*ChunkMeta) GetColMeta added in v1.0.0

func (m *ChunkMeta) GetColMeta() []ColumnMeta

func (*ChunkMeta) GetSid

func (m *ChunkMeta) GetSid() (sid uint64)

func (*ChunkMeta) Len added in v1.0.0

func (m *ChunkMeta) Len() int

func (*ChunkMeta) Less added in v1.0.0

func (m *ChunkMeta) Less(i, j int) bool

func (*ChunkMeta) MinMaxTime

func (m *ChunkMeta) MinMaxTime() (min int64, max int64)

func (*ChunkMeta) Rows

func (m *ChunkMeta) Rows(ab PreAggBuilder) int

func (*ChunkMeta) SegmentCount added in v1.0.0

func (m *ChunkMeta) SegmentCount() int

func (*ChunkMeta) Size

func (m *ChunkMeta) Size() int

func (*ChunkMeta) Swap added in v1.0.0

func (m *ChunkMeta) Swap(i, j int)

func (*ChunkMeta) TimeMeta

func (m *ChunkMeta) TimeMeta() *ColumnMeta

func (*ChunkMeta) UnmarshalWithColumns added in v1.2.0

func (m *ChunkMeta) UnmarshalWithColumns(src []byte, columns []string) ([]byte, error)

type ChunkMetaContext added in v1.2.0

type ChunkMetaContext struct {
	// contains filtered or unexported fields
}

func NewChunkMetaContext added in v1.2.0

func NewChunkMetaContext(schema record.Schemas) *ChunkMetaContext

func (*ChunkMetaContext) Instance added in v1.2.0

func (ctx *ChunkMetaContext) Instance() pool.Object

func (*ChunkMetaContext) MemSize added in v1.2.0

func (ctx *ChunkMetaContext) MemSize() int

func (*ChunkMetaContext) Release added in v1.2.0

func (ctx *ChunkMetaContext) Release()

type ChunkMetaEntry added in v1.2.0

type ChunkMetaEntry struct {
	// contains filtered or unexported fields
}

func NewChunkMetaEntry added in v1.2.0

func NewChunkMetaEntry(filePath string) *ChunkMetaEntry

func (*ChunkMetaEntry) GetKey added in v1.2.0

func (e *ChunkMetaEntry) GetKey() string

func (*ChunkMetaEntry) GetTime added in v1.2.0

func (e *ChunkMetaEntry) GetTime() time.Time

func (*ChunkMetaEntry) GetValue added in v1.2.0

func (e *ChunkMetaEntry) GetValue() interface{}

func (*ChunkMetaEntry) SetTime added in v1.2.0

func (e *ChunkMetaEntry) SetTime(time time.Time)

func (*ChunkMetaEntry) SetValue added in v1.2.0

func (e *ChunkMetaEntry) SetValue(value interface{})

func (*ChunkMetaEntry) Size added in v1.2.0

func (e *ChunkMetaEntry) Size() int64

type ColAux added in v1.2.0

type ColAux struct {
	// contains filtered or unexported fields
}

func NewColAux added in v1.2.0

func NewColAux(rec *record.Record, filterOption *BaseFilterOptions) *ColAux

type ColumnBuilder

type ColumnBuilder struct {
	// contains filtered or unexported fields
}

func NewColumnBuilder

func NewColumnBuilder() *ColumnBuilder

func (*ColumnBuilder) BuildPreAgg added in v1.0.0

func (b *ColumnBuilder) BuildPreAgg()

func (*ColumnBuilder) EncodeColumn

func (b *ColumnBuilder) EncodeColumn(ref record.Field, col *record.ColVal, timeCols []record.ColVal, segRowsLimit int, dataOffset int64) ([]byte, error)

func (*ColumnBuilder) EncodeColumnBySize added in v1.2.0

func (b *ColumnBuilder) EncodeColumnBySize(ref record.Field, col *record.ColVal, timeCols []record.ColVal, rowPerSegment []int, dataOffset int64) ([]byte, error)

func (*ColumnBuilder) SetEncodeMode added in v1.2.0

func (b *ColumnBuilder) SetEncodeMode(detached bool)

type ColumnIterator added in v1.0.0

type ColumnIterator struct {
	// contains filtered or unexported fields
}

func NewColumnIterator added in v1.0.0

func NewColumnIterator(fi *FileIterator) *ColumnIterator

func (*ColumnIterator) Close added in v1.0.0

func (itr *ColumnIterator) Close()

func (*ColumnIterator) Error added in v1.0.0

func (itr *ColumnIterator) Error() error

func (*ColumnIterator) IncrChunkUsed added in v1.0.0

func (itr *ColumnIterator) IncrChunkUsed()

func (*ColumnIterator) NextChunkMeta added in v1.0.0

func (itr *ColumnIterator) NextChunkMeta() bool

func (*ColumnIterator) NextColumn added in v1.0.0

func (itr *ColumnIterator) NextColumn(colIdx int) (*record.Field, bool)

func (*ColumnIterator) PutCol added in v1.0.0

func (itr *ColumnIterator) PutCol(col *record.ColVal)

func (*ColumnIterator) Run added in v1.0.0

type ColumnIteratorPerformer added in v1.0.0

type ColumnIteratorPerformer interface {
	Handle(col *record.ColVal, times []int64, lastSeg bool) error
	HasSeries(uint64) bool
	ColumnChanged(*record.Field) error
	SeriesChanged(uint64, []int64) error
	WriteOriginal(fi *FileIterator) error
	Finish() error
}

type ColumnMeta

type ColumnMeta struct {
	// contains filtered or unexported fields
}

func (*ColumnMeta) Clone added in v1.0.0

func (m *ColumnMeta) Clone() ColumnMeta

func (*ColumnMeta) Equal added in v1.1.0

func (m *ColumnMeta) Equal(name string, ty int) bool

func (*ColumnMeta) GetPreAgg added in v1.2.0

func (m *ColumnMeta) GetPreAgg() []byte

func (*ColumnMeta) GetSegment added in v1.2.0

func (m *ColumnMeta) GetSegment(i int) (int64, uint32)

func (*ColumnMeta) IsTime added in v1.1.0

func (m *ColumnMeta) IsTime() bool

func (*ColumnMeta) Name added in v1.1.0

func (m *ColumnMeta) Name() string

func (*ColumnMeta) RowCount

func (m *ColumnMeta) RowCount(ref *record.Field, decs *ReadContext) (int64, error)

func (*ColumnMeta) Type added in v1.2.0

func (m *ColumnMeta) Type() uint8

type ColumnReader

type ColumnReader interface {
	ReadDataBlock(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
	ReadMetaBlock(metaIdx int, id uint64, offset int64, size uint32, count uint32, dst *pool.Buffer, ioPriority int) ([]byte, error)
	UnrefCachePage(cachePage *readcache.CachePage)
}

type CompactGroup

type CompactGroup struct {
	// contains filtered or unexported fields
}

func NewCompactGroup

func NewCompactGroup(name string, toLevle uint16, count int) *CompactGroup

type CompactedFileInfo

type CompactedFileInfo struct {
	Name    string // measurement name with version
	IsOrder bool
	OldFile []string
	NewFile []string
}

type Config

type Config struct {
	SnapshotTblNum       int
	FragmentsNumPerFlush int
	// contains filtered or unexported fields
}

func GetColStoreConfig added in v1.1.0

func GetColStoreConfig() *Config

func GetTsStoreConfig added in v1.1.0

func GetTsStoreConfig() *Config

func NewColumnStoreConfig added in v1.1.1

func NewColumnStoreConfig() *Config

func NewTsStoreConfig added in v1.1.0

func NewTsStoreConfig() *Config

func (*Config) GetCompactionEnabled added in v1.2.0

func (c *Config) GetCompactionEnabled() bool

func (*Config) GetDetachedFlushEnabled added in v1.2.0

func (c *Config) GetDetachedFlushEnabled() bool

func (*Config) GetMaxRowsPerSegment added in v1.1.0

func (c *Config) GetMaxRowsPerSegment() int

func (*Config) GetMaxSegmentLimit added in v1.1.0

func (c *Config) GetMaxSegmentLimit() int

func (*Config) SetExpectedSegmentSize added in v1.2.0

func (c *Config) SetExpectedSegmentSize(n uint32)

func (*Config) SetFilesLimit

func (c *Config) SetFilesLimit(n int64)

func (*Config) SetMaxRowsPerSegment

func (c *Config) SetMaxRowsPerSegment(maxRowsPerSegmentLimit int)

func (*Config) SetMaxSegmentLimit

func (c *Config) SetMaxSegmentLimit(n int)

type CsChunkDataImp added in v1.2.0

type CsChunkDataImp struct {
	// contains filtered or unexported fields
}

func (*CsChunkDataImp) EncodeChunk added in v1.2.0

func (c *CsChunkDataImp) EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, timeSorted bool) ([]byte, error)

func (*CsChunkDataImp) EncodeChunkForCompaction added in v1.2.0

func (c *CsChunkDataImp) EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, accumulateRowsIndex []int, timeSorted bool) ([]byte, error)

func (*CsChunkDataImp) SetAccumulateRowsIndex added in v1.2.0

func (c *CsChunkDataImp) SetAccumulateRowsIndex(accumulateRowsIndex []int)

func (*CsChunkDataImp) SetDetachedInfo added in v1.2.0

func (c *CsChunkDataImp) SetDetachedInfo(writeDetached bool)

type DetachedChunkMetaReader added in v1.2.0

type DetachedChunkMetaReader struct {
	// contains filtered or unexported fields
}

func NewDetachedChunkMetaReader added in v1.2.0

func NewDetachedChunkMetaReader(path string, obsOpts *obs.ObsOptions) (*DetachedChunkMetaReader, error)

func (*DetachedChunkMetaReader) ReadChunkMeta added in v1.2.0

func (reader *DetachedChunkMetaReader) ReadChunkMeta(offset, length []int64) ([]*ChunkMeta, error)

type DetachedMetaDataReader added in v1.2.0

type DetachedMetaDataReader struct {
	// contains filtered or unexported fields
}

func NewDetachedMetaDataReader added in v1.2.0

func NewDetachedMetaDataReader(path string, obsOpts *obs.ObsOptions, isSort bool) (*DetachedMetaDataReader, error)

func (*DetachedMetaDataReader) InitReadBatch added in v1.2.0

func (reader *DetachedMetaDataReader) InitReadBatch(s []*SegmentMeta, schema record.Schemas)

func (*DetachedMetaDataReader) ReadBatch added in v1.2.0

func (reader *DetachedMetaDataReader) ReadBatch(dst *record.Record, decs *ReadContext) (*record.Record, error)

type DetachedMetaIndexReader added in v1.2.0

type DetachedMetaIndexReader struct {
	// contains filtered or unexported fields
}

func NewDetachedMetaIndexReader added in v1.2.0

func NewDetachedMetaIndexReader(path string, obsOpts *obs.ObsOptions) (*DetachedMetaIndexReader, error)

func (*DetachedMetaIndexReader) ReadMetaIndex added in v1.2.0

func (reader *DetachedMetaIndexReader) ReadMetaIndex(offset, length []int64) ([]*MetaIndex, error)

type DetachedPKDataReader added in v1.2.0

type DetachedPKDataReader struct {
	// contains filtered or unexported fields
}

func NewDetachedPKDataReader added in v1.2.0

func NewDetachedPKDataReader(path string, opts *obs.ObsOptions) (*DetachedPKDataReader, error)

func (*DetachedPKDataReader) Read added in v1.2.0

func (reader *DetachedPKDataReader) Read(offset, length []int64, metas []*colstore.DetachedPKMeta) ([]*colstore.DetachedPKData, error)

func (*DetachedPKDataReader) SetPkMetaInfo added in v1.2.0

func (reader *DetachedPKDataReader) SetPkMetaInfo(metaInfo *colstore.DetachedPKMetaInfo)

type DetachedPKMetaInfoReader added in v1.2.0

type DetachedPKMetaInfoReader struct {
	// contains filtered or unexported fields
}

func NewDetachedPKMetaInfoReader added in v1.2.0

func NewDetachedPKMetaInfoReader(path string, opts *obs.ObsOptions) (*DetachedPKMetaInfoReader, error)

func (*DetachedPKMetaInfoReader) Read added in v1.2.0

type DetachedPKMetaReader added in v1.2.0

type DetachedPKMetaReader struct {
	// contains filtered or unexported fields
}

func NewDetachedPKMetaReader added in v1.2.0

func NewDetachedPKMetaReader(path string, opts *obs.ObsOptions) (*DetachedPKMetaReader, error)

func (*DetachedPKMetaReader) Read added in v1.2.0

func (reader *DetachedPKMetaReader) Read(offset, length []int64) ([]*colstore.DetachedPKMeta, error)

type DetachedSegmentEntry added in v1.2.0

type DetachedSegmentEntry struct {
	// contains filtered or unexported fields
}

func NewSegmentMetaDataEntry added in v1.2.0

func NewSegmentMetaDataEntry(segmentID string) *DetachedSegmentEntry

func (*DetachedSegmentEntry) GetKey added in v1.2.0

func (e *DetachedSegmentEntry) GetKey() string

func (*DetachedSegmentEntry) GetTime added in v1.2.0

func (e *DetachedSegmentEntry) GetTime() time.Time

func (*DetachedSegmentEntry) GetValue added in v1.2.0

func (e *DetachedSegmentEntry) GetValue() interface{}

func (*DetachedSegmentEntry) SetTime added in v1.2.0

func (e *DetachedSegmentEntry) SetTime(time time.Time)

func (*DetachedSegmentEntry) SetValue added in v1.2.0

func (e *DetachedSegmentEntry) SetValue(value interface{})

func (*DetachedSegmentEntry) Size added in v1.2.0

func (e *DetachedSegmentEntry) Size() int64

type EncodeChunkData added in v1.2.0

type EncodeChunkData interface {
	EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, timeSorted bool) ([]byte, error)
	EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, accumulateRowsIndex []int, timeSorted bool) ([]byte, error)
	SetAccumulateRowsIndex(rowsPerSegment []int)
	SetDetachedInfo(writeDetached bool)
}

type EncodeColumnMode added in v1.2.0

type EncodeColumnMode interface {
	// contains filtered or unexported methods
}

type FileIterator

type FileIterator struct {
	// contains filtered or unexported fields
}

func NewFileIterator

func NewFileIterator(r TSSPFile, log *Log.Logger) *FileIterator

func (*FileIterator) Close

func (itr *FileIterator) Close()

func (*FileIterator) GetCurtChunkMeta added in v1.0.0

func (itr *FileIterator) GetCurtChunkMeta() *ChunkMeta

func (*FileIterator) NextChunkMeta

func (itr *FileIterator) NextChunkMeta() bool

func (*FileIterator) ReadData added in v1.2.0

func (itr *FileIterator) ReadData(offset int64, size uint32) ([]byte, error)

func (*FileIterator) WithLog

func (itr *FileIterator) WithLog(log *Log.Logger)

type FileIterators

type FileIterators []*FileIterator

func (FileIterators) AverageRows

func (i FileIterators) AverageRows() int

func (FileIterators) Close

func (i FileIterators) Close()

func (FileIterators) MaxChunkRows

func (i FileIterators) MaxChunkRows() int

func (FileIterators) MaxColumns

func (i FileIterators) MaxColumns() int

type FileReader added in v1.1.0

type FileReader interface {
	Open() error
	Close() error
	ReadData(cm *ChunkMeta, segment int, dst *record.Record, ctx *ReadContext, ioPriority int) (*record.Record, error)
	Ref()
	Unref() int64
	MetaIndexAt(idx int) (*MetaIndex, error)
	MetaIndex(id uint64, tr util.TimeRange) (int, *MetaIndex, error)
	ChunkMeta(id uint64, offset int64, size, itemCount uint32, metaIdx int, ctx *ChunkMetaContext, ioPriority int) (*ChunkMeta, error)

	ReadMetaBlock(metaIdx int, id uint64, offset int64, size uint32, count uint32, buf *pool.Buffer, ioPriority int) ([]byte, error)
	ReadDataBlock(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)
	Read(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, error)
	ReadChunkMetaData(metaIdx int, m *MetaIndex, dst []ChunkMeta, ioPriority int) ([]ChunkMeta, error)
	LoadIdTimes(isOrder bool, p *IdTimePairs) error

	Stat() *Trailer
	MinMaxSeriesID() (min, max uint64, err error)
	MinMaxTime() (min, max int64, err error)
	Contains(id uint64, tm util.TimeRange) bool
	ContainsTime(tm util.TimeRange) bool
	ContainsId(id uint64) bool
	Name() string
	FileName() string
	Rename(newName string) error
	RenameOnObs(obsName string) error
	FileSize() int64
	InMemSize() int64
	Version() uint64
	FreeMemory() int64
	FreeFileHandle() error
	LoadIntoMemory() error
	LoadComponents() error
	AverageChunkRows() int
	MaxChunkRows() int
	GetFileReaderRef() int64
}

type FileReaderContext added in v1.2.0

type FileReaderContext struct {
	// contains filtered or unexported fields
}

func NewFileReaderContext added in v1.2.0

func NewFileReaderContext(tr util.TimeRange, schemas record.Schemas, decs *ReadContext, filterOpts *FilterOptions, filterBitmap *bitmap.FilterBitmap, isOrder bool) *FileReaderContext

type FileSwapper added in v1.2.0

type FileSwapper struct {
	// contains filtered or unexported fields
}

func NewFileSwapper added in v1.2.0

func NewFileSwapper(file string, lock string, limitCompact bool, compressMode int) (*FileSwapper, error)

func (*FileSwapper) CopyTo added in v1.2.0

func (s *FileSwapper) CopyTo(to io.Writer, buf []byte) (int64, error)

func (*FileSwapper) MustClose added in v1.2.0

func (s *FileSwapper) MustClose()

func (*FileSwapper) SetWriter added in v1.2.0

func (s *FileSwapper) SetWriter(w io.WriteCloser)

func (*FileSwapper) Write added in v1.2.0

func (s *FileSwapper) Write(b []byte) (int, error)

type FilesInfo

type FilesInfo struct {
	// contains filtered or unexported fields
}

type FilterOptions

type FilterOptions struct {
	// contains filtered or unexported fields
}

func NewFilterOpts

func NewFilterOpts(cond influxql.Expr, filterOption *BaseFilterOptions, tags *influx.PointTags, rowFilters *[]clv.RowFilter) *FilterOptions

func (*FilterOptions) GetCond added in v1.1.0

func (fo *FilterOptions) GetCond() influxql.Expr

func (*FilterOptions) SetCondFuncs added in v1.1.0

func (fo *FilterOptions) SetCondFuncs(filterOption *BaseFilterOptions)

type FirstLastReader added in v1.1.0

type FirstLastReader struct {
	// contains filtered or unexported fields
}

func (*FirstLastReader) Init added in v1.1.0

func (r *FirstLastReader) Init(cm *ChunkMeta, cr ColumnReader, ref *record.Field, dst *record.Record, first bool) *FirstLastReader

func (*FirstLastReader) Read added in v1.1.0

func (r *FirstLastReader) Read(ctx *ReadContext, copied bool, ioPriority int) error

func (*FirstLastReader) Release added in v1.1.0

func (r *FirstLastReader) Release()

type FloatPreAgg

type FloatPreAgg struct {
	// contains filtered or unexported fields
}

FloatPreAgg If you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.

func NewFloatPreAgg

func NewFloatPreAgg() *FloatPreAgg

type FragmentIterator added in v1.2.0

type FragmentIterator interface {
	// contains filtered or unexported methods
}

type FragmentIterators added in v1.1.1

type FragmentIterators struct {
	SortKeyFileds []record.Field

	TableData

	Conf *Config

	PkRec             []*record.Record
	RecordResult      *record.Record
	TimeClusterResult *record.Record
	// contains filtered or unexported fields
}

func (*FragmentIterators) Close added in v1.1.1

func (f *FragmentIterators) Close()

func (*FragmentIterators) CompareWithBreakPoint added in v1.1.1

func (f *FragmentIterators) CompareWithBreakPoint(curPos, breakPos int) bool

func (*FragmentIterators) IsEmpty added in v1.2.0

func (f *FragmentIterators) IsEmpty() bool

func (*FragmentIterators) Len added in v1.1.1

func (f *FragmentIterators) Len() int

func (*FragmentIterators) Less added in v1.1.1

func (f *FragmentIterators) Less(i, j int) bool

func (*FragmentIterators) Pop added in v1.1.1

func (f *FragmentIterators) Pop() interface{}

func (*FragmentIterators) Push added in v1.1.1

func (f *FragmentIterators) Push(v interface{})

func (*FragmentIterators) Swap added in v1.1.1

func (f *FragmentIterators) Swap(i, j int)

func (*FragmentIterators) WithLog added in v1.1.1

func (f *FragmentIterators) WithLog(log *Log.Logger)

type FragmentIteratorsPool added in v1.1.1

type FragmentIteratorsPool struct {
	// contains filtered or unexported fields
}

func NewFragmentIteratorsPool added in v1.1.1

func NewFragmentIteratorsPool(n int) *FragmentIteratorsPool

type IdTimePairs

type IdTimePairs struct {
	Name string
	Ids  []uint64
	Tms  []int64
	Rows []int64
}

func GetIDTimePairs

func GetIDTimePairs(name string) *IdTimePairs

func (*IdTimePairs) Add

func (p *IdTimePairs) Add(id uint64, tm int64)

func (*IdTimePairs) AddRowCounts

func (p *IdTimePairs) AddRowCounts(rowCounts int64)

func (*IdTimePairs) Len

func (p *IdTimePairs) Len() int

func (*IdTimePairs) Marshal

func (p *IdTimePairs) Marshal(encTimes bool, dst []byte, ctx *encoding.CoderContext) []byte

func (*IdTimePairs) Reset

func (p *IdTimePairs) Reset(name string)

func (*IdTimePairs) Unmarshal

func (p *IdTimePairs) Unmarshal(decTimes bool, src []byte) ([]byte, error)

type ImmTable added in v1.1.0

type ImmTable interface {
	GetEngineType() config.EngineType
	GetCompactionType(name string) config.CompactionType

	NewFileIterators(m *MmsTables, group *CompactGroup) (FilesInfo, error)
	AddTSSPFiles(m *MmsTables, name string, isOrder bool, files ...TSSPFile)
	AddBothTSSPFiles(flushed *bool, m *MmsTables, name string, orderFiles []TSSPFile, unorderFiles []TSSPFile)
	LevelPlan(m *MmsTables, level uint16) []*CompactGroup
	SetMstInfo(name string, mstInfo *meta.MeasurementInfo)
	GetMstInfo(name string) (*meta.MeasurementInfo, bool)
	UpdateAccumulateMetaIndexInfo(name string, index *AccumulateMetaIndex)
	// contains filtered or unexported methods
}

type IndexCompressWriter added in v1.2.0

type IndexCompressWriter struct {
	// contains filtered or unexported fields
}

func (*IndexCompressWriter) BlockSize added in v1.2.0

func (w *IndexCompressWriter) BlockSize() int

func (*IndexCompressWriter) Close added in v1.2.0

func (w *IndexCompressWriter) Close() error

func (*IndexCompressWriter) CopyTo added in v1.2.0

func (w *IndexCompressWriter) CopyTo(to io.Writer) (int, error)

func (*IndexCompressWriter) GetWriter added in v1.2.0

func (w *IndexCompressWriter) GetWriter() *bufio.Writer

func (*IndexCompressWriter) Init added in v1.2.0

func (w *IndexCompressWriter) Init(name string, lock *string, cacheMeta bool, limitCompact bool)

func (*IndexCompressWriter) MetaDataBlocks added in v1.2.0

func (w *IndexCompressWriter) MetaDataBlocks(dst [][]byte) [][]byte

func (*IndexCompressWriter) Size added in v1.2.0

func (w *IndexCompressWriter) Size() int

func (*IndexCompressWriter) SwitchMetaBuffer added in v1.2.0

func (w *IndexCompressWriter) SwitchMetaBuffer() (int, error)

func (*IndexCompressWriter) Write added in v1.2.0

func (w *IndexCompressWriter) Write(p []byte) (int, error)

type IndexFrags added in v1.2.0

type IndexFrags interface {
	BasePath() string
	FragCount() int64
	IndexCount() int
	Indexes() interface{}
	AppendIndexes(...interface{})
	FragRanges() []fragment.FragmentRanges
	AppendFragRanges(...fragment.FragmentRanges)
	AddFragCount(int64)
	SetErr(error)
	GetErr() error
	Size() int
}

func GetDetachedSegmentTask added in v1.2.0

func GetDetachedSegmentTask(queryID string) (IndexFrags, bool)

type IndexWriter added in v1.2.0

type IndexWriter interface {
	Init(name string, lock *string, cacheMeta bool, limitCompact bool)
	Write(p []byte) (int, error)
	Size() int
	BlockSize() int
	CopyTo(to io.Writer) (int, error)
	SwitchMetaBuffer() (int, error)
	MetaDataBlocks(dst [][]byte) [][]byte

	Close() error
	// contains filtered or unexported methods
}

func NewPKIndexWriter added in v1.2.0

func NewPKIndexWriter(indexName string, cacheMeta bool, limitCompact bool, lockPath *string) IndexWriter

type IntegerPreAgg

type IntegerPreAgg struct {
	// contains filtered or unexported fields
}

IntegerPreAgg If you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.

func NewIntegerPreAgg

func NewIntegerPreAgg() *IntegerPreAgg

type IteratorByBlock added in v1.2.0

type IteratorByBlock struct {
	// contains filtered or unexported fields
}

IteratorByBlock for single mst

func NewIteratorByBlock added in v1.2.0

func NewIteratorByBlock(f *FragmentIterators, conf *Config, group FilesInfo, accumulateMetaIndex *AccumulateMetaIndex) *IteratorByBlock

func (*IteratorByBlock) Flush added in v1.2.0

func (ib *IteratorByBlock) Flush(pkSchema record.Schemas, readFinal bool, tbStore *MmsTables) error

func (*IteratorByBlock) WriteDetachedMeta added in v1.2.0

func (ib *IteratorByBlock) WriteDetachedMeta(pkSchema record.Schemas) error

type IteratorByRow added in v1.2.0

type IteratorByRow struct {
	// contains filtered or unexported fields
}

func NewIteratorByRow added in v1.2.0

func NewIteratorByRow(f *FragmentIterators, conf *Config) *IteratorByRow

func (*IteratorByRow) Flush added in v1.2.0

func (ir *IteratorByRow) Flush(tbStore *MmsTables, pkSchema record.Schemas, final bool) error

func (*IteratorByRow) GetBreakPoint added in v1.2.0

func (ir *IteratorByRow) GetBreakPoint()

func (*IteratorByRow) NextWithBreakPoint added in v1.2.0

func (ir *IteratorByRow) NextWithBreakPoint()

type Location

type Location struct {
	// contains filtered or unexported fields
}

func NewLocation

func NewLocation(r TSSPFile, ctx *ReadContext) *Location

func (*Location) AscendingDone added in v1.1.0

func (l *Location) AscendingDone()

func (*Location) Contains

func (l *Location) Contains(sid uint64, tr util.TimeRange, ctx *ChunkMetaContext) (bool, error)

func (*Location) DescendingDone added in v1.1.0

func (l *Location) DescendingDone()

func (*Location) GetChunkMeta

func (l *Location) GetChunkMeta() *ChunkMeta

func (*Location) ReadData

func (l *Location) ReadData(filterOpts *FilterOptions, dst *record.Record, filterDst *record.Record) (*record.Record, error)

func (*Location) ResetMeta

func (l *Location) ResetMeta()

func (*Location) SetChunkMeta added in v1.2.0

func (l *Location) SetChunkMeta(chunkMeta *ChunkMeta)

func (*Location) SetFragmentRanges added in v1.1.0

func (l *Location) SetFragmentRanges(frs []*fragment.FragmentRange)

type LocationCursor

type LocationCursor struct {
	// contains filtered or unexported fields
}

func NewLocationCursor

func NewLocationCursor(n int) *LocationCursor

func (*LocationCursor) AddFilterRecPool added in v1.1.0

func (l *LocationCursor) AddFilterRecPool(pool *record.CircularRecordPool)

func (*LocationCursor) AddLocation

func (l *LocationCursor) AddLocation(loc *Location)

func (*LocationCursor) AddRef

func (l *LocationCursor) AddRef()

func (*LocationCursor) Close added in v1.1.0

func (l *LocationCursor) Close()

func (*LocationCursor) FragmentCount added in v1.1.0

func (l *LocationCursor) FragmentCount() int

func (*LocationCursor) Len

func (l *LocationCursor) Len() int

func (*LocationCursor) Less

func (l *LocationCursor) Less(i, j int) bool

func (*LocationCursor) ReadData

func (l *LocationCursor) ReadData(filterOpts *FilterOptions, dst *record.Record, filterBitmap *bitmap.FilterBitmap,
	unnestOperator logstore.UnnestOperator) (*record.Record, error)

func (*LocationCursor) ReadMeta

func (l *LocationCursor) ReadMeta(filterOpts *FilterOptions, dst *record.Record, filterBitmap *bitmap.FilterBitmap) (*record.Record, error)

func (*LocationCursor) ReadOutOfOrderMeta

func (l *LocationCursor) ReadOutOfOrderMeta(filterOpts *FilterOptions, dst *record.Record) (*record.Record, error)

func (*LocationCursor) Reverse

func (l *LocationCursor) Reverse()

func (*LocationCursor) RowCount added in v1.1.0

func (l *LocationCursor) RowCount() int

func (*LocationCursor) Swap

func (l *LocationCursor) Swap(i, j int)

func (*LocationCursor) Unref

func (l *LocationCursor) Unref()

type MeasurementInProcess added in v1.2.0

type MeasurementInProcess struct {
	// contains filtered or unexported fields
}

func NewMeasurementInProcess added in v1.2.0

func NewMeasurementInProcess() *MeasurementInProcess

func (*MeasurementInProcess) Add added in v1.2.0

func (m *MeasurementInProcess) Add(name string) bool

func (*MeasurementInProcess) Del added in v1.2.0

func (m *MeasurementInProcess) Del(name string)

type MemBlock

type MemBlock struct {
	// contains filtered or unexported fields
}

func (*MemBlock) AppendDataBlock

func (mb *MemBlock) AppendDataBlock(srcData []byte)

func (*MemBlock) CopyBlocks

func (mb *MemBlock) CopyBlocks(src MemoryReader)

func (*MemBlock) DataBlocks

func (mb *MemBlock) DataBlocks() [][]byte

func (*MemBlock) DataInMemory

func (mb *MemBlock) DataInMemory() bool

func (*MemBlock) FreeMemory

func (mb *MemBlock) FreeMemory() int64

func (*MemBlock) LoadIntoMemory

func (mb *MemBlock) LoadIntoMemory(dr fileops.BasicFileReader, tr *Trailer, metaIndexItems []MetaIndex) error

func (*MemBlock) MetaBlocks

func (mb *MemBlock) MetaBlocks() [][]byte

func (*MemBlock) MetaInMemory

func (mb *MemBlock) MetaInMemory() bool

func (*MemBlock) ReadChunkMetaBlock

func (mb *MemBlock) ReadChunkMetaBlock(metaIdx int, sid uint64, count uint32) []byte

func (*MemBlock) ReadDataBlock

func (mb *MemBlock) ReadDataBlock(offset int64, size uint32, dstPtr *[]byte) ([]byte, error)

func (*MemBlock) ReserveDataBlock

func (mb *MemBlock) ReserveDataBlock(n int)

func (*MemBlock) ReserveMetaBlock

func (mb *MemBlock) ReserveMetaBlock(n int)

func (*MemBlock) Reset

func (mb *MemBlock) Reset()

func (*MemBlock) SetMetaBlocks

func (mb *MemBlock) SetMetaBlocks(blocks [][]byte)

func (*MemBlock) Size

func (mb *MemBlock) Size() int64

type MemoryReader

type MemoryReader interface {
	AppendDataBlock(srcData []byte)
	ReadChunkMetaBlock(metaIdx int, sid uint64, count uint32) []byte
	ReadDataBlock(offset int64, size uint32, dstPtr *[]byte) ([]byte, error)
	CopyBlocks(src MemoryReader)
	LoadIntoMemory(dr fileops.BasicFileReader, tr *Trailer, metaIndexItems []MetaIndex) error
	FreeMemory() int64
	DataInMemory() bool
	MetaInMemory() bool
	ReserveMetaBlock(n int)
	ReserveDataBlock(n int)
	DataBlocks() [][]byte
	MetaBlocks() [][]byte
	SetMetaBlocks(blocks [][]byte)
	Size() int64
	Reset()
}

func NewMemReader

func NewMemReader() MemoryReader

func NewMemoryReader

func NewMemoryReader(blkSize int) MemoryReader

type MergeColPool added in v1.1.0

type MergeColPool struct {
	// contains filtered or unexported fields
}

func (*MergeColPool) Get added in v1.1.0

func (p *MergeColPool) Get() *record.ColVal

func (*MergeColPool) Put added in v1.1.0

func (p *MergeColPool) Put(col *record.ColVal)

type MetaIndex

type MetaIndex struct {
	// contains filtered or unexported fields
}

MetaIndex If you change the order of the elements in the structure, remember to modify marshal() and unmarshal() as well.

func (*MetaIndex) IsExist added in v1.2.0

func (m *MetaIndex) IsExist(tr util.TimeRange) bool

type MmsIdTime

type MmsIdTime struct {
	// contains filtered or unexported fields
}

func NewMmsIdTime

func NewMmsIdTime(sc *SeriesCounter) *MmsIdTime

type MmsReaders

type MmsReaders struct {
	Orders      TableReaders
	OutOfOrders TableReaders
}

type MmsTables

type MmsTables struct {
	Order      map[string]*TSSPFiles        // {"cpu_0001": *TSSPFiles}
	OutOfOrder map[string]*TSSPFiles        // {"cpu_0001": *TSSPFiles}
	CSFiles    map[string]*TSSPFiles        // {"cpu_0001": *TSSPFiles} tsspFiles for columnStore
	PKFiles    map[string]*colstore.PKFiles // {"cpu_0001": *PKFiles} PKFiles for columnStore

	ImmTable ImmTable

	Conf *Config
	// contains filtered or unexported fields
}

func NewTableStore

func NewTableStore(dir string, lock *string, tier *uint64, compactRecovery bool, config *Config) *MmsTables

func (*MmsTables) AddBothTSSPFiles added in v1.2.0

func (m *MmsTables) AddBothTSSPFiles(flushed *bool, name string, orderFiles []TSSPFile, unorderFiles []TSSPFile)

func (*MmsTables) AddPKFile added in v1.1.0

func (m *MmsTables) AddPKFile(mstName string, file string, rec *record.Record, mark fragment.IndexFragment, tcLocation int8)

func (*MmsTables) AddRowCountsBySid

func (m *MmsTables) AddRowCountsBySid(measurement string, sid uint64, rowCounts int64)

func (*MmsTables) AddTSSPFiles

func (m *MmsTables) AddTSSPFiles(name string, isOrder bool, files ...TSSPFile)

Not currently used for the tsEngine.

func (*MmsTables) AddTable

func (m *MmsTables) AddTable(mb *MsBuilder, isOrder bool, tmp bool)

func (*MmsTables) Close

func (m *MmsTables) Close() error

func (*MmsTables) CompactDone

func (m *MmsTables) CompactDone(files []string)

func (*MmsTables) CompactionDisable

func (m *MmsTables) CompactionDisable()

func (*MmsTables) CompactionEnable

func (m *MmsTables) CompactionEnable()

func (*MmsTables) CompactionEnabled

func (m *MmsTables) CompactionEnabled() bool

func (*MmsTables) DisableCompAndMerge added in v1.0.0

func (m *MmsTables) DisableCompAndMerge()

func (*MmsTables) DropMeasurement

func (m *MmsTables) DropMeasurement(_ context.Context, name string) error

func (*MmsTables) EnableCompAndMerge added in v1.0.0

func (m *MmsTables) EnableCompAndMerge()

func (*MmsTables) File

func (m *MmsTables) File(mstName string, fileName string, isOrder bool) TSSPFile

func (*MmsTables) FreeAllMemReader

func (m *MmsTables) FreeAllMemReader()

func (*MmsTables) FreeSequencer added in v1.0.0

func (m *MmsTables) FreeSequencer() bool

func (*MmsTables) FullCompact

func (m *MmsTables) FullCompact(shid uint64) error

func (*MmsTables) FullyCompacted added in v1.2.0

func (m *MmsTables) FullyCompacted() bool

func (*MmsTables) GetBothFilesRef added in v1.0.0

func (m *MmsTables) GetBothFilesRef(measurement string, hasTimeFilter bool, tr util.TimeRange, flushed *bool) ([]TSSPFile, []TSSPFile, bool)

func (*MmsTables) GetCSFiles added in v1.1.0

func (m *MmsTables) GetCSFiles(name string) (files *TSSPFiles, ok bool)

func (*MmsTables) GetFileSeq added in v1.0.0

func (m *MmsTables) GetFileSeq() uint64

func (*MmsTables) GetLastFlushTimeBySid

func (m *MmsTables) GetLastFlushTimeBySid(measurement string, sid uint64) int64

func (*MmsTables) GetMstFileStat

func (m *MmsTables) GetMstFileStat() *statistics.FileStat

func (*MmsTables) GetMstInfo added in v1.2.0

func (m *MmsTables) GetMstInfo(name string) (*meta.MeasurementInfo, bool)

func (*MmsTables) GetOutOfOrderFileNum

func (m *MmsTables) GetOutOfOrderFileNum() int

func (*MmsTables) GetPKFile added in v1.1.0

func (m *MmsTables) GetPKFile(mstName string, file string) (pkInfo *colstore.PKInfo, ok bool)

func (*MmsTables) GetRowCountsBySid

func (m *MmsTables) GetRowCountsBySid(measurement string, sid uint64) (int64, error)

func (*MmsTables) GetTSSPFiles added in v1.0.0

func (m *MmsTables) GetTSSPFiles(name string, isOrder bool) (files *TSSPFiles, ok bool)

func (*MmsTables) IsOutOfOrderFilesExist added in v1.0.0

func (m *MmsTables) IsOutOfOrderFilesExist() bool

func (*MmsTables) LevelCompact

func (m *MmsTables) LevelCompact(level uint16, shid uint64) error

func (*MmsTables) Listen added in v1.0.0

func (m *MmsTables) Listen(signal chan struct{}, onClose func())

func (*MmsTables) MergeDisable

func (m *MmsTables) MergeDisable()

func (*MmsTables) MergeEnable

func (m *MmsTables) MergeEnable()

func (*MmsTables) MergeEnabled

func (m *MmsTables) MergeEnabled() bool

func (*MmsTables) MergeOutOfOrder

func (m *MmsTables) MergeOutOfOrder(shId uint64, force bool) error

func (*MmsTables) NewChunkIterators

func (m *MmsTables) NewChunkIterators(group FilesInfo) *ChunkIterators

func (*MmsTables) NewStreamIterators

func (m *MmsTables) NewStreamIterators(group FilesInfo) *StreamIterators

func (*MmsTables) NewStreamWriteFile added in v1.0.0

func (m *MmsTables) NewStreamWriteFile(mst string) *StreamWriteFile

func (*MmsTables) NextSequence

func (m *MmsTables) NextSequence() uint64

func (*MmsTables) Open

func (m *MmsTables) Open() (int64, error)

func (*MmsTables) ReloadSequencer added in v1.1.0

func (m *MmsTables) ReloadSequencer(seq *Sequencer, async bool)

func (*MmsTables) ReplaceDownSampleFiles added in v1.0.0

func (m *MmsTables) ReplaceDownSampleFiles(mstNames []string, originFiles [][]TSSPFile, newFiles [][]TSSPFile, isOrder bool, callBack func()) (err error)

func (*MmsTables) ReplaceFiles

func (m *MmsTables) ReplaceFiles(name string, oldFiles, newFiles []TSSPFile, isOrder bool) (err error)

func (*MmsTables) ReplacePKFile added in v1.1.1

func (m *MmsTables) ReplacePKFile(mstName string, file string, rec *record.Record, mark fragment.IndexFragment, oldIndexFiles []string) error

func (*MmsTables) Sequencer

func (m *MmsTables) Sequencer() *Sequencer

func (*MmsTables) SeriesTotal added in v1.1.0

func (m *MmsTables) SeriesTotal() uint64

func (*MmsTables) SetAccumulateMetaIndex added in v1.2.0

func (m *MmsTables) SetAccumulateMetaIndex(name string, aMetaIndex *AccumulateMetaIndex)

func (*MmsTables) SetAddFunc added in v1.0.0

func (m *MmsTables) SetAddFunc(addFunc func(int64))

func (*MmsTables) SetImmTableType added in v1.1.0

func (m *MmsTables) SetImmTableType(engineType config.EngineType)

func (*MmsTables) SetLockPath added in v1.2.0

func (m *MmsTables) SetLockPath(lock *string)

func (*MmsTables) SetMstInfo added in v1.1.1

func (m *MmsTables) SetMstInfo(name string, mstInfo *meta.MeasurementInfo)

func (*MmsTables) SetOpId added in v1.0.0

func (m *MmsTables) SetOpId(shardId uint64, opId uint64)

func (*MmsTables) SetTier added in v1.2.0

func (m *MmsTables) SetTier(tier uint64)

func (*MmsTables) Tier

func (m *MmsTables) Tier() uint64

func (*MmsTables) Wait

func (m *MmsTables) Wait()

type MsBuilder

type MsBuilder struct {
	Path string
	TableData
	Conf *Config

	MaxIds int

	Files    []TSSPFile
	FileName TSSPFileName

	EncodeChunkDataImp EncodeChunkData
	// contains filtered or unexported fields
}

func NewDetachedMsBuilder added in v1.2.0

func NewDetachedMsBuilder(dir, name string, lockPath *string, conf *Config, idCount int, fileName TSSPFileName,
	tier uint64, sequencer *Sequencer, estimateSize int, engineType config.EngineType, obsOpt *obs.ObsOptions, bfCols []string, fullTextIdx bool) (*MsBuilder, error)

func NewMsBuilder added in v1.1.0

func NewMsBuilder(dir, name string, lockPath *string, conf *Config, idCount int, fileName TSSPFileName,
	tier uint64, sequencer *Sequencer, estimateSize int, engineType config.EngineType) *MsBuilder

func (*MsBuilder) BloomFilterNeedDetached added in v1.2.0

func (b *MsBuilder) BloomFilterNeedDetached(filterDetachedWriteTimes int) bool

func (*MsBuilder) FileVersion

func (b *MsBuilder) FileVersion() uint64

func (*MsBuilder) Flush

func (b *MsBuilder) Flush() error

func (*MsBuilder) GetChunkBuilder added in v1.2.0

func (b *MsBuilder) GetChunkBuilder() *ChunkDataBuilder

func (*MsBuilder) GetFullTextIdx added in v1.2.0

func (b *MsBuilder) GetFullTextIdx() bool

func (*MsBuilder) GetLocalBfCount added in v1.2.0

func (b *MsBuilder) GetLocalBfCount() int64

func (*MsBuilder) GetPKInfoNum added in v1.1.0

func (b *MsBuilder) GetPKInfoNum() int

func (*MsBuilder) GetPKMark added in v1.1.0

func (b *MsBuilder) GetPKMark(i int) fragment.IndexFragment

func (*MsBuilder) GetPKRecord added in v1.1.0

func (b *MsBuilder) GetPKRecord(i int) *record.Record

func (*MsBuilder) GetSkipIndex added in v1.2.0

func (b *MsBuilder) GetSkipIndex() *sparseindex.SkipIndex

func (*MsBuilder) MaxRowsPerSegment

func (b *MsBuilder) MaxRowsPerSegment() int

func (*MsBuilder) Name

func (b *MsBuilder) Name() string

func (*MsBuilder) NewPKIndexWriter added in v1.1.0

func (b *MsBuilder) NewPKIndexWriter()

func (*MsBuilder) NewSkipIndex added in v1.2.0

func (b *MsBuilder) NewSkipIndex(schema record.Schemas, indexRelation influxql.IndexRelation)

func (*MsBuilder) NewTSSPFile

func (b *MsBuilder) NewTSSPFile(tmp bool) (TSSPFile, error)

func (*MsBuilder) Reset

func (b *MsBuilder) Reset()

func (*MsBuilder) SetEncodeChunkDataImp added in v1.2.0

func (b *MsBuilder) SetEncodeChunkDataImp(engineType config.EngineType)

func (*MsBuilder) SetFullTextIdx added in v1.2.0

func (b *MsBuilder) SetFullTextIdx(fullTextIdx bool)

func (*MsBuilder) SetLocalBfCount added in v1.2.0

func (b *MsBuilder) SetLocalBfCount(count int64)

func (*MsBuilder) SetTCLocation added in v1.1.1

func (b *MsBuilder) SetTCLocation(tcLocation int8)

func (*MsBuilder) SetTimeSorted added in v1.2.0

func (b *MsBuilder) SetTimeSorted(timeSorted bool)

func (*MsBuilder) Size

func (b *MsBuilder) Size() int64

func (*MsBuilder) StoreTimes added in v1.1.0

func (b *MsBuilder) StoreTimes()

func (*MsBuilder) SwitchChunkMeta added in v1.2.0

func (b *MsBuilder) SwitchChunkMeta() error

func (*MsBuilder) WithLog

func (b *MsBuilder) WithLog(log *logger.Logger)

func (*MsBuilder) WriteChunkMeta added in v1.2.0

func (b *MsBuilder) WriteChunkMeta(cm *ChunkMeta) (int, error)

func (*MsBuilder) WriteData

func (b *MsBuilder) WriteData(id uint64, data *record.Record) error

func (*MsBuilder) WriteDetached added in v1.2.0

func (b *MsBuilder) WriteDetached(id uint64, data *record.Record, pkSchema record.Schemas, firstFlush bool,
	accumulateMetaIndex *AccumulateMetaIndex) error

func (*MsBuilder) WriteDetachedMetaAndIndex added in v1.2.0

func (b *MsBuilder) WriteDetachedMetaAndIndex(writeRec *record.Record, pkSchema record.Schemas, firstFlush bool,
	accumulateMetaIndex *AccumulateMetaIndex, rowsPerSegment []int, fixRowsPerSegment int) error

func (*MsBuilder) WriteDetachedSkipIndex added in v1.2.0

func (b *MsBuilder) WriteDetachedSkipIndex(writeRec *record.Record, rowsPerSegment []int) error

func (*MsBuilder) WriteRecord

func (b *MsBuilder) WriteRecord(id uint64, data *record.Record, nextFile func(fn TSSPFileName) (seq uint64, lv uint16, merge uint16, ext uint16)) (*MsBuilder, error)

func (*MsBuilder) WriteRecordByCol added in v1.2.0

func (b *MsBuilder) WriteRecordByCol(id uint64, data *record.Record, schema record.Schemas, skipIndexRelation *influxql.IndexRelation,
	nextFile func(fn TSSPFileName) (seq uint64, lv uint16, merge uint16, ext uint16)) (*MsBuilder, error)

type Offset added in v1.0.0

type Offset struct {
	// contains filtered or unexported fields
}

type PageCacheReader added in v1.2.0

type PageCacheReader struct {
	// contains filtered or unexported fields
}

func NewPageCacheReader added in v1.2.0

func NewPageCacheReader(t *Trailer, r *tsspFileReader) *PageCacheReader

func (*PageCacheReader) GetCachePageIdsAndOffsets added in v1.2.0

func (pcr *PageCacheReader) GetCachePageIdsAndOffsets(start int64, size uint32) ([]int64, []int64, error)

Gets all cache page IDs and offsets covering the bytes from start to start + size.

func (*PageCacheReader) GetMaxPageIdAndOffset added in v1.2.0

func (pcr *PageCacheReader) GetMaxPageIdAndOffset() (int64, int64)

func (*PageCacheReader) Init added in v1.2.0

func (pcr *PageCacheReader) Init()

func (*PageCacheReader) Read added in v1.2.0

func (pcr *PageCacheReader) Read(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)

func (*PageCacheReader) ReadFixPageSize added in v1.2.0

func (pcr *PageCacheReader) ReadFixPageSize(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)

Reads the file bytes of the corresponding pages.

func (*PageCacheReader) ReadSinglePage added in v1.2.0

func (pcr *PageCacheReader) ReadSinglePage(cacheKey string, pageOffset int64, pageSize int64, buf *[]byte, ioPriority int) (*readcache.CachePage, []byte, error)

func (*PageCacheReader) ReadVariablePageSize added in v1.2.0

func (pcr *PageCacheReader) ReadVariablePageSize(offset int64, size uint32, buf *[]byte, ioPriority int) ([]byte, *readcache.CachePage, error)

type PreAggBuilder

type PreAggBuilder interface {
	// contains filtered or unexported methods
}

type PreAggBuilders

type PreAggBuilders struct {
	// contains filtered or unexported fields
}

func (*PreAggBuilders) FloatBuilder added in v1.2.0

func (b *PreAggBuilders) FloatBuilder() *FloatPreAgg

func (*PreAggBuilders) IntegerBuilder added in v1.2.0

func (b *PreAggBuilders) IntegerBuilder() *IntegerPreAgg

func (*PreAggBuilders) Release

func (b *PreAggBuilders) Release()

type QueryfileCache added in v1.1.0

type QueryfileCache struct {
	// contains filtered or unexported fields
}

func GetQueryfileCache added in v1.1.0

func GetQueryfileCache() *QueryfileCache

func NewQueryfileCache added in v1.1.0

func NewQueryfileCache(cap uint32) *QueryfileCache

func (*QueryfileCache) Get added in v1.1.0

func (qfc *QueryfileCache) Get()

func (*QueryfileCache) GetCap added in v1.1.0

func (qfc *QueryfileCache) GetCap() uint32

func (*QueryfileCache) Put added in v1.1.0

func (qfc *QueryfileCache) Put(f TSSPFile)

type ReadContext

type ReadContext struct {
	Ascending bool
	// contains filtered or unexported fields
}

func NewReadContext

func NewReadContext(ascending bool) *ReadContext

func (*ReadContext) GetCoder added in v1.1.0

func (d *ReadContext) GetCoder() *encoding.CoderContext

func (*ReadContext) GetOps

func (d *ReadContext) GetOps() []*comm.CallOption

func (*ReadContext) GetReadBuff added in v1.1.0

func (d *ReadContext) GetReadBuff() []byte

func (*ReadContext) InitPreAggBuilder

func (d *ReadContext) InitPreAggBuilder()

func (*ReadContext) MatchPreAgg

func (d *ReadContext) MatchPreAgg() bool

func (*ReadContext) Release

func (d *ReadContext) Release()

func (*ReadContext) Reset

func (d *ReadContext) Reset()

func (*ReadContext) Set

func (d *ReadContext) Set(ascending bool, tr util.TimeRange, onlyFirstOrLast bool, ops []*comm.CallOption)

func (*ReadContext) SetClosedSignal added in v1.2.0

func (d *ReadContext) SetClosedSignal(s *int32)

func (*ReadContext) SetOps

func (d *ReadContext) SetOps(c []*comm.CallOption)

func (*ReadContext) SetSpan added in v1.1.0

func (d *ReadContext) SetSpan(readSpan, filterSpan *tracing.Span)

func (*ReadContext) SetTr

func (d *ReadContext) SetTr(tr util.TimeRange)

type Segment

type Segment struct {
	// contains filtered or unexported fields
}

Segment offset/size/minT/maxT

type SegmentMeta added in v1.2.0

type SegmentMeta struct {
	// contains filtered or unexported fields
}

func NewSegmentMeta added in v1.2.0

func NewSegmentMeta(id int, c *ChunkMeta) *SegmentMeta

func (*SegmentMeta) GetMaxTime added in v1.2.0

func (s *SegmentMeta) GetMaxTime() int64

func (*SegmentMeta) GetMinTime added in v1.2.0

func (s *SegmentMeta) GetMinTime() int64

type SegmentRange

type SegmentRange [2]int64 // min/max

type SegmentReader added in v1.0.0

type SegmentReader struct {
	// contains filtered or unexported fields
}

func NewSegmentReader added in v1.0.0

func NewSegmentReader(fi *FileIterator) *SegmentReader

func (*SegmentReader) Read added in v1.0.0

func (sr *SegmentReader) Read(seg Segment, ref *record.Field, col *record.ColVal) error

type SegmentTask added in v1.2.0

type SegmentTask struct {
	// contains filtered or unexported fields
}

type Sequencer

type Sequencer struct {
	// contains filtered or unexported fields
}

func NewSequencer

func NewSequencer() *Sequencer

func (*Sequencer) AddRowCounts

func (s *Sequencer) AddRowCounts(mn string, id uint64, rowCounts int64)

func (*Sequencer) BatchUpdateCheckTime

func (s *Sequencer) BatchUpdateCheckTime(p *IdTimePairs, incrRows bool)

func (*Sequencer) DelMmsIdTime added in v1.1.0

func (s *Sequencer) DelMmsIdTime(name string)

func (*Sequencer) Get

func (s *Sequencer) Get(mn string, id uint64) (lastFlushTime, rowCnt int64)

func (*Sequencer) IsLoading added in v1.0.0

func (s *Sequencer) IsLoading() bool

func (*Sequencer) ResetMmsIdTime added in v1.1.0

func (s *Sequencer) ResetMmsIdTime()

func (*Sequencer) SeriesTotal added in v1.1.0

func (s *Sequencer) SeriesTotal() uint64

func (*Sequencer) SetStat added in v1.1.0

func (s *Sequencer) SetStat(free, loading bool)

func (*Sequencer) SetToInLoading added in v1.1.0

func (s *Sequencer) SetToInLoading() bool

func (*Sequencer) UnRef added in v1.1.0

func (s *Sequencer) UnRef()

type SeriesCounter added in v1.1.0

type SeriesCounter struct {
	// contains filtered or unexported fields
}

func (*SeriesCounter) DecrN added in v1.1.0

func (sc *SeriesCounter) DecrN(n uint64)

func (*SeriesCounter) Get added in v1.1.0

func (sc *SeriesCounter) Get() uint64

func (*SeriesCounter) Incr added in v1.1.0

func (sc *SeriesCounter) Incr()

func (*SeriesCounter) Reset added in v1.1.0

func (sc *SeriesCounter) Reset()

type SortKeyIterator added in v1.1.1

type SortKeyIterator struct {
	*FileIterator
	// contains filtered or unexported fields
}

func NewSortKeyIterator added in v1.1.1

func NewSortKeyIterator(fi *FileIterator, sortKeyFields []record.Field, ctx *ReadContext, schema record.Schemas, tcDuration time.Duration, compactWithBlock bool, fileIdx int) (*SortKeyIterator, error)

func (*SortKeyIterator) GetNewRecord added in v1.1.1

func (s *SortKeyIterator) GetNewRecord(tcDuration time.Duration, compactWithBlock bool) error

func (*SortKeyIterator) NextSingleFragment added in v1.1.1

func (s *SortKeyIterator) NextSingleFragment(tbStore *MmsTables, impl *IteratorByRow, pkSchema record.Schemas) (*record.Record, error)

type StreamIterator

type StreamIterator struct {
	*FileIterator
	// contains filtered or unexported fields
}

func NewStreamStreamIterator

func NewStreamStreamIterator(fi *FileIterator) *StreamIterator

type StreamIterators

type StreamIterators struct {
	TableData

	Conf *Config
	// contains filtered or unexported fields
}

func (*StreamIterators) Close

func (c *StreamIterators) Close()

func (*StreamIterators) FileVersion

func (c *StreamIterators) FileVersion() uint64

func (*StreamIterators) Flush

func (c *StreamIterators) Flush() error

func (*StreamIterators) Init

func (c *StreamIterators) Init(id uint64, chunkDataOffset int64, schema record.Schemas)

func (*StreamIterators) Len

func (c *StreamIterators) Len() int

func (*StreamIterators) Less

func (c *StreamIterators) Less(i, j int) bool

func (*StreamIterators) ListenCloseSignal added in v1.2.0

func (c *StreamIterators) ListenCloseSignal(finish chan struct{})

func (*StreamIterators) NewFile

func (c *StreamIterators) NewFile(addFileExt bool) error

func (*StreamIterators) NewTSSPFile

func (c *StreamIterators) NewTSSPFile(tmp bool) (TSSPFile, error)

func (*StreamIterators) Pop

func (c *StreamIterators) Pop() interface{}

func (*StreamIterators) Push

func (c *StreamIterators) Push(v interface{})

func (*StreamIterators) SetWriter added in v1.2.0

func (c *StreamIterators) SetWriter(w fileops.FileWriter)

func (*StreamIterators) Size

func (c *StreamIterators) Size() int64

func (*StreamIterators) Swap

func (c *StreamIterators) Swap(i, j int)

func (*StreamIterators) SwitchChunkMeta added in v1.2.0

func (c *StreamIterators) SwitchChunkMeta() error

func (*StreamIterators) WithLog

func (c *StreamIterators) WithLog(log *Log.Logger)

func (*StreamIterators) WriteChunkMeta added in v1.2.0

func (c *StreamIterators) WriteChunkMeta(cm *ChunkMeta) (int, error)

type StreamIteratorsPool

type StreamIteratorsPool struct {
	// contains filtered or unexported fields
}

func NewStreamIteratorsPool

func NewStreamIteratorsPool(n int) *StreamIteratorsPool

type StreamWriteFile added in v1.0.0

type StreamWriteFile struct {
	TableData

	Conf *Config
	// contains filtered or unexported fields
}

func NewWriteScanFile added in v1.0.0

func NewWriteScanFile(mst string, m *MmsTables, file TSSPFile, schema record.Schemas) (*StreamWriteFile, error)

func (*StreamWriteFile) AppendColumn added in v1.0.0

func (c *StreamWriteFile) AppendColumn(ref *record.Field) error

func (*StreamWriteFile) ChangeColumn added in v1.0.0

func (c *StreamWriteFile) ChangeColumn(ref record.Field) error

func (*StreamWriteFile) ChangeSid added in v1.0.0

func (c *StreamWriteFile) ChangeSid(sid uint64)

func (*StreamWriteFile) Close added in v1.0.0

func (c *StreamWriteFile) Close(isError bool)

func (*StreamWriteFile) Flush added in v1.0.0

func (c *StreamWriteFile) Flush() error

func (*StreamWriteFile) GetTSSPFile added in v1.0.0

func (c *StreamWriteFile) GetTSSPFile() TSSPFile

func (*StreamWriteFile) Init added in v1.0.0

func (c *StreamWriteFile) Init(id uint64, chunkDataOffset int64, schema record.Schemas)

func (*StreamWriteFile) InitFile added in v1.0.0

func (c *StreamWriteFile) InitFile(seq uint64) error

func (*StreamWriteFile) InitMergedFile added in v1.0.0

func (c *StreamWriteFile) InitMergedFile(f TSSPFile) error

func (*StreamWriteFile) NewFile added in v1.0.0

func (c *StreamWriteFile) NewFile(addFileExt bool) error

func (*StreamWriteFile) NewTSSPFile added in v1.0.0

func (c *StreamWriteFile) NewTSSPFile(tmp bool) (TSSPFile, error)

func (*StreamWriteFile) SetValidate added in v1.1.0

func (c *StreamWriteFile) SetValidate(en bool)

func (*StreamWriteFile) Size added in v1.0.0

func (c *StreamWriteFile) Size() int64

func (*StreamWriteFile) SortColumns added in v1.2.0

func (c *StreamWriteFile) SortColumns()

func (*StreamWriteFile) SwitchChunkMeta added in v1.2.0

func (c *StreamWriteFile) SwitchChunkMeta() error

func (*StreamWriteFile) WriteChunkMeta added in v1.2.0

func (c *StreamWriteFile) WriteChunkMeta(cm *ChunkMeta) (int, error)

func (*StreamWriteFile) WriteCurrentMeta added in v1.0.0

func (c *StreamWriteFile) WriteCurrentMeta() error

func (*StreamWriteFile) WriteData added in v1.0.0

func (c *StreamWriteFile) WriteData(id uint64, ref record.Field, col record.ColVal, timeCol *record.ColVal) error

func (*StreamWriteFile) WriteFile added in v1.0.0

func (c *StreamWriteFile) WriteFile() error

func (*StreamWriteFile) WriteMeta added in v1.0.0

func (c *StreamWriteFile) WriteMeta(cm *ChunkMeta) error

type StringPreAgg

type StringPreAgg struct {
	// contains filtered or unexported fields
}

StringPreAgg holds string pre-aggregation state. If you change the order of the fields in this structure, remember to update marshal() and unmarshal() accordingly.

func NewStringPreAgg

func NewStringPreAgg() *StringPreAgg

type TSSPFile

type TSSPFile interface {
	Path() string
	Name() string
	FileName() TSSPFileName
	LevelAndSequence() (uint16, uint64)
	FileNameMerge() uint16
	FileNameExtend() uint16
	IsOrder() bool
	Ref()
	Unref()
	RefFileReader()
	UnrefFileReader()
	Stop()
	Inuse() bool
	MetaIndexAt(idx int) (*MetaIndex, error)
	MetaIndex(id uint64, tr util.TimeRange) (int, *MetaIndex, error)
	ChunkMeta(id uint64, offset int64, size, itemCount uint32, metaIdx int, ctx *ChunkMetaContext, ioPriority int) (*ChunkMeta, error)
	ReadAt(cm *ChunkMeta, segment int, dst *record.Record, decs *ReadContext, ioPriority int) (*record.Record, error)
	ReadData(offset int64, size uint32, dst *[]byte, ioPriority int) ([]byte, error)
	ReadChunkMetaData(metaIdx int, m *MetaIndex, dst []ChunkMeta, ioPriority int) ([]ChunkMeta, error)

	FileStat() *Trailer
	// FileSize get the size of the disk occupied by file
	FileSize() int64
	// InMemSize get the size of the memory occupied by file
	InMemSize() int64
	Contains(id uint64) (bool, error)
	ContainsByTime(tr util.TimeRange) (bool, error)
	ContainsValue(id uint64, tr util.TimeRange) (bool, error)
	MinMaxTime() (int64, int64, error)

	Open() error
	Close() error
	LoadIntoMemory() error
	LoadComponents() error
	LoadIdTimes(p *IdTimePairs) error
	Rename(newName string) error
	Remove() error
	FreeMemory(evictLock bool) int64
	FreeFileHandle() error
	Version() uint64
	AverageChunkRows() int
	MaxChunkRows() int
	MetaIndexItemNum() int64
	AddToEvictList(level uint16)
	RemoveFromEvictList(level uint16)
	GetFileReaderRef() int64
	RenameOnObs(obsName string) error
}

func OpenTSSPFile

func OpenTSSPFile(name string, lockPath *string, isOrder bool, cacheData bool) (TSSPFile, error)

type TSSPFileAttachedReader added in v1.2.0

type TSSPFileAttachedReader struct {
	// contains filtered or unexported fields
}

func NewTSSPFileAttachedReader added in v1.2.0

func NewTSSPFileAttachedReader(files []TSSPFile, fragRanges []fragment.FragmentRanges, ctx *FileReaderContext, schema hybridqp.Options,
	unnest *influxql.Unnest) (*TSSPFileAttachedReader, error)

func (*TSSPFileAttachedReader) Close added in v1.2.0

func (t *TSSPFileAttachedReader) Close() error

func (*TSSPFileAttachedReader) EndSpan added in v1.2.0

func (t *TSSPFileAttachedReader) EndSpan()

func (*TSSPFileAttachedReader) GetSchema added in v1.2.0

func (t *TSSPFileAttachedReader) GetSchema() record.Schemas

func (*TSSPFileAttachedReader) Name added in v1.2.0

func (t *TSSPFileAttachedReader) Name() string

func (*TSSPFileAttachedReader) Next added in v1.2.0

func (*TSSPFileAttachedReader) NextAggData added in v1.2.0

func (t *TSSPFileAttachedReader) NextAggData() (*record.Record, *comm.FileInfo, error)

func (*TSSPFileAttachedReader) ResetBy added in v1.2.0

func (t *TSSPFileAttachedReader) ResetBy(files []TSSPFile, fragRanges []fragment.FragmentRanges) error

func (*TSSPFileAttachedReader) SetOps added in v1.2.0

func (t *TSSPFileAttachedReader) SetOps(ops []*comm.CallOption)

func (*TSSPFileAttachedReader) SinkPlan added in v1.2.0

func (t *TSSPFileAttachedReader) SinkPlan(plan hybridqp.QueryNode)

func (*TSSPFileAttachedReader) StartSpan added in v1.2.0

func (t *TSSPFileAttachedReader) StartSpan(span *tracing.Span)

type TSSPFileDetachedReader added in v1.2.0

type TSSPFileDetachedReader struct {
	// contains filtered or unexported fields
}

func NewTSSPFileDetachedReader added in v1.2.0

func NewTSSPFileDetachedReader(metaIndex []*MetaIndex, blocks [][]int, ctx *FileReaderContext, path *sparseindex.OBSFilterPath, unnest *influxql.Unnest,
	isSort bool, options hybridqp.Options) (*TSSPFileDetachedReader, error)

func (*TSSPFileDetachedReader) Close added in v1.2.0

func (t *TSSPFileDetachedReader) Close() error

func (*TSSPFileDetachedReader) EndSpan added in v1.2.0

func (t *TSSPFileDetachedReader) EndSpan()

func (*TSSPFileDetachedReader) GetSchema added in v1.2.0

func (t *TSSPFileDetachedReader) GetSchema() record.Schemas

func (*TSSPFileDetachedReader) Name added in v1.2.0

func (t *TSSPFileDetachedReader) Name() string

func (*TSSPFileDetachedReader) Next added in v1.2.0

func (*TSSPFileDetachedReader) NextAggData added in v1.2.0

func (t *TSSPFileDetachedReader) NextAggData() (*record.Record, *comm.FileInfo, error)

func (*TSSPFileDetachedReader) ResetBy added in v1.2.0

func (t *TSSPFileDetachedReader) ResetBy(metaIndex []*MetaIndex, blocks [][]int, ctx *FileReaderContext)

func (*TSSPFileDetachedReader) SetOps added in v1.2.0

func (t *TSSPFileDetachedReader) SetOps(ops []*comm.CallOption)

func (*TSSPFileDetachedReader) SinkPlan added in v1.2.0

func (t *TSSPFileDetachedReader) SinkPlan(plan hybridqp.QueryNode)

func (*TSSPFileDetachedReader) StartSpan added in v1.2.0

func (t *TSSPFileDetachedReader) StartSpan(span *tracing.Span)

type TSSPFileName

type TSSPFileName struct {
	// contains filtered or unexported fields
}

func NewTSSPFileName

func NewTSSPFileName(seq uint64, level, merge, extent uint16, order bool, lockPath *string) TSSPFileName

func (*TSSPFileName) Equal

func (n *TSSPFileName) Equal(other *TSSPFileName) bool

func (*TSSPFileName) ParseFileName

func (n *TSSPFileName) ParseFileName(name string) error

func (*TSSPFileName) Path

func (n *TSSPFileName) Path(dir string, tmp bool) string

func (*TSSPFileName) SetExtend

func (n *TSSPFileName) SetExtend(extend uint16)

func (*TSSPFileName) SetLevel

func (n *TSSPFileName) SetLevel(l uint16)

func (*TSSPFileName) SetMerge

func (n *TSSPFileName) SetMerge(merge uint16)

func (*TSSPFileName) SetOrder

func (n *TSSPFileName) SetOrder(v bool)

func (*TSSPFileName) SetSeq

func (n *TSSPFileName) SetSeq(seq uint64)

func (*TSSPFileName) String

func (n *TSSPFileName) String() string

func (*TSSPFileName) TmpPath

func (n *TSSPFileName) TmpPath(dir string) string

type TSSPFiles

type TSSPFiles struct {
	// contains filtered or unexported fields
}

func NewTSSPFiles

func NewTSSPFiles() *TSSPFiles

func (*TSSPFiles) Append

func (f *TSSPFiles) Append(file ...TSSPFile)

func (*TSSPFiles) Files

func (f *TSSPFiles) Files() []TSSPFile

func (*TSSPFiles) Len

func (f *TSSPFiles) Len() int

func (*TSSPFiles) Less

func (f *TSSPFiles) Less(i, j int) bool

func (*TSSPFiles) RLock added in v1.2.0

func (f *TSSPFiles) RLock()

func (*TSSPFiles) RUnlock added in v1.2.0

func (f *TSSPFiles) RUnlock()

func (*TSSPFiles) StopFiles

func (f *TSSPFiles) StopFiles()

func (*TSSPFiles) Swap

func (f *TSSPFiles) Swap(i, j int)

type TableData

type TableData struct {
	// contains filtered or unexported fields
}

type TableReaders

type TableReaders []TSSPFile

func (TableReaders) Len

func (tables TableReaders) Len() int

func (TableReaders) Less

func (tables TableReaders) Less(i, j int) bool

func (TableReaders) Swap

func (tables TableReaders) Swap(i, j int)

type TableStat

type TableStat struct {
	// contains filtered or unexported fields
}

type TableStoreGC

type TableStoreGC struct {
	// contains filtered or unexported fields
}

func (*TableStoreGC) Add

func (sgc *TableStoreGC) Add(free bool, files ...TSSPFile)

func (*TableStoreGC) GC

func (sgc *TableStoreGC) GC()

type TablesGC

type TablesGC interface {
	Add(free bool, files ...TSSPFile)
	GC()
}

func NewTableStoreGC

func NewTableStoreGC() TablesGC

type TablesStore

type TablesStore interface {
	SetOpId(shardId uint64, opId uint64)
	Open() (int64, error)
	Close() error
	AddTable(ms *MsBuilder, isOrder bool, tmp bool)
	AddTSSPFiles(name string, isOrder bool, f ...TSSPFile)
	AddBothTSSPFiles(flushed *bool, name string, orderFiles []TSSPFile, unorderFiles []TSSPFile)
	AddPKFile(name, file string, rec *record.Record, mark fragment.IndexFragment, tcLocation int8)
	GetPKFile(mstName string, file string) (pkInfo *colstore.PKInfo, ok bool)
	FreeAllMemReader()
	ReplaceFiles(name string, oldFiles, newFiles []TSSPFile, isOrder bool) error
	GetBothFilesRef(measurement string, hasTimeFilter bool, tr util.TimeRange, flushed *bool) ([]TSSPFile, []TSSPFile, bool)
	ReplaceDownSampleFiles(mstNames []string, originFiles [][]TSSPFile, newFiles [][]TSSPFile, isOrder bool, callBack func()) error
	NextSequence() uint64
	Sequencer() *Sequencer
	GetTSSPFiles(mm string, isOrder bool) (*TSSPFiles, bool)
	GetCSFiles(mm string) (*TSSPFiles, bool)
	Tier() uint64
	SetTier(tier uint64)
	File(name string, namePath string, isOrder bool) TSSPFile
	CompactDone(seq []string)
	CompactionEnable()
	CompactionDisable()
	MergeEnable()
	MergeDisable()
	CompactionEnabled() bool
	MergeEnabled() bool
	IsOutOfOrderFilesExist() bool
	MergeOutOfOrder(shId uint64, force bool) error
	LevelCompact(level uint16, shid uint64) error
	FullCompact(shid uint64) error
	SetAddFunc(addFunc func(int64))
	GetLastFlushTimeBySid(measurement string, sid uint64) int64
	GetRowCountsBySid(measurement string, sid uint64) (int64, error)
	AddRowCountsBySid(measurement string, sid uint64, rowCounts int64)
	GetOutOfOrderFileNum() int
	GetMstFileStat() *stats.FileStat
	DropMeasurement(ctx context.Context, name string) error
	GetFileSeq() uint64
	DisableCompAndMerge()
	EnableCompAndMerge()
	FreeSequencer() bool
	SetImmTableType(engineType config.EngineType)
	SetMstInfo(name string, mstInfo *meta.MeasurementInfo)
	SetAccumulateMetaIndex(name string, aMetaIndex *AccumulateMetaIndex)
	GetMstInfo(name string) (*meta.MeasurementInfo, bool)
	SeriesTotal() uint64
	SetLockPath(lock *string)
	FullyCompacted() bool
}

type TimePreAgg

type TimePreAgg struct {
	// contains filtered or unexported fields
}

func NewTimePreAgg

func NewTimePreAgg() *TimePreAgg

type Trailer

type Trailer struct {
	TableStat
	// contains filtered or unexported fields
}

func (*Trailer) ContainsId

func (t *Trailer) ContainsId(id uint64) bool

func (*Trailer) ContainsTime

func (t *Trailer) ContainsTime(tm util.TimeRange) bool

func (*Trailer) DataSize added in v1.2.0

func (t *Trailer) DataSize() int64

func (*Trailer) EqualData added in v1.1.0

func (t *Trailer) EqualData(idx int, v byte) bool

func (*Trailer) GetData added in v1.2.0

func (t *Trailer) GetData(idx int, def uint8) uint8

func (*Trailer) IndexSize added in v1.2.0

func (t *Trailer) IndexSize() int64

func (*Trailer) MetaIndexItemNum added in v1.0.0

func (t *Trailer) MetaIndexItemNum() int64

func (*Trailer) MetaIndexSize added in v1.2.0

func (t *Trailer) MetaIndexSize() int64

func (*Trailer) SetChunkMetaCompressFlag added in v1.2.0

func (t *Trailer) SetChunkMetaCompressFlag()

func (*Trailer) SetData added in v1.1.0

func (t *Trailer) SetData(idx int, v byte)

type TsChunkDataImp added in v1.2.0

type TsChunkDataImp struct {
}

func (*TsChunkDataImp) EncodeChunk added in v1.2.0

func (t *TsChunkDataImp) EncodeChunk(b *ChunkDataBuilder, id uint64, offset int64, rec *record.Record, dst []byte, timeSorted bool) ([]byte, error)

func (*TsChunkDataImp) EncodeChunkForCompaction added in v1.2.0

func (t *TsChunkDataImp) EncodeChunkForCompaction(b *ChunkDataBuilder, offset int64, rec *record.Record, dst []byte, accumulateRowsIndex []int, timeSorted bool) ([]byte, error)

func (*TsChunkDataImp) SetAccumulateRowsIndex added in v1.2.0

func (t *TsChunkDataImp) SetAccumulateRowsIndex(accumulateRowsIndex []int)

func (*TsChunkDataImp) SetDetachedInfo added in v1.2.0

func (t *TsChunkDataImp) SetDetachedInfo(writeDetached bool)

type UnnestMatch added in v1.2.0

type UnnestMatch interface {
	Get([][]byte) [][]byte
}

type UnorderedColumnReader added in v1.0.0

type UnorderedColumnReader struct {
	// contains filtered or unexported fields
}

func (*UnorderedColumnReader) Read added in v1.0.0

func (r *UnorderedColumnReader) Read(ref *record.Field, maxTime int64) (*record.ColVal, []int64, error)

Read reads all unordered data whose time is earlier than maxTime.

func (*UnorderedColumnReader) ReadSchema added in v1.0.0

func (r *UnorderedColumnReader) ReadSchema(res map[string]record.Field, maxTime int64)

type UnorderedReader added in v1.0.0

type UnorderedReader struct {
	// contains filtered or unexported fields
}

func NewUnorderedReader added in v1.0.0

func NewUnorderedReader(log *logger.Logger) *UnorderedReader

func (*UnorderedReader) AddFiles added in v1.0.0

func (r *UnorderedReader) AddFiles(files []TSSPFile)

func (*UnorderedReader) AllocNilCol added in v1.1.0

func (r *UnorderedReader) AllocNilCol(size int, ref *record.Field) *record.ColVal

func (*UnorderedReader) Close added in v1.0.0

func (r *UnorderedReader) Close()

func (*UnorderedReader) HasSeries added in v1.1.0

func (r *UnorderedReader) HasSeries(sid uint64) bool

func (*UnorderedReader) InitTimes added in v1.0.1

func (r *UnorderedReader) InitTimes(sid uint64, maxTime int64) error

InitTimes initializes the time column of unordered data.

func (*UnorderedReader) Read added in v1.0.0

func (r *UnorderedReader) Read(sid uint64, ref *record.Field, maxTime int64) (*record.ColVal, []int64, error)

Read reads data based on the series ID, column, and time range

func (*UnorderedReader) ReadAllTimes added in v1.0.1

func (r *UnorderedReader) ReadAllTimes() []int64

func (*UnorderedReader) ReadRemain added in v1.0.0

func (r *UnorderedReader) ReadRemain(sid uint64, cb remainCallback) error

ReadRemain reads all remaining data that is smaller than the current series ID in the unordered data

func (*UnorderedReader) ReadSeriesSchemas added in v1.0.0

func (r *UnorderedReader) ReadSeriesSchemas(sid uint64, maxTime int64) record.Schemas

func (*UnorderedReader) ReadTimes added in v1.0.0

func (r *UnorderedReader) ReadTimes(ref *record.Field, maxTime int64) []int64

type UnorderedReaderContext added in v1.1.0

type UnorderedReaderContext struct {
	// contains filtered or unexported fields
}

Directories

Path Synopsis

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL