Documentation ¶
Index ¶
- Constants
- func GetSegAvg(runningSegStat *structs.SegStats, currSegStat *structs.SegStats) (*utils.NumTypeEnclosure, error)
- func GetSegCardinality(runningSegStat *structs.SegStats, currSegStat *structs.SegStats) (*utils.NumTypeEnclosure, error)
- func GetSegCount(runningSegStat *structs.SegStats, currSegStat *structs.SegStats) (*utils.NumTypeEnclosure, error)
- func GetSegMax(runningSegStat *structs.SegStats, currSegStat *structs.SegStats) (*utils.NumTypeEnclosure, error)
- func GetSegMin(runningSegStat *structs.SegStats, currSegStat *structs.SegStats) (*utils.NumTypeEnclosure, error)
- func GetSegRange(runningSegStat *structs.SegStats, currSegStat *structs.SegStats) (*utils.NumTypeEnclosure, error)
- func GetSegSum(runningSegStat *structs.SegStats, currSegStat *structs.SegStats) (*utils.NumTypeEnclosure, error)
- func ReadAllTimestampsForBlock(blks map[uint16]*structs.BlockMetadataHolder, segKey string, ...) (map[uint16][]uint64, error)
- func ReadSegStats(segkey string, qid uint64) (map[string]*structs.SegStats, error)
- func ReturnTimeBuffers(og map[uint16][]uint64)
- type AgileTreeReader
- func (str *AgileTreeReader) ApplyGroupByJit(grpColNames []string, internalMops []*structs.MeasureAggregator, ...) error
- func (str *AgileTreeReader) CanUseAgileTree(grpReq *structs.GroupByRequest) (bool, error)
- func (str *AgileTreeReader) Close() error
- func (str *AgileTreeReader) GetBuckets() map[string]struct{}
- func (str *AgileTreeReader) ReadTreeMeta() error
- func (str *AgileTreeReader) SetBucketLimit(bucketLimit uint64)
- func (str *AgileTreeReader) SetBuckets(buckets map[string]struct{})
- type ColumnInfo
- type MultiColSegmentReader
- func (mcsr *MultiColSegmentReader) ApplySearchToExpressionFilterDictCsg(qValDte *utils.DtypeEnclosure, fop utils.FilterOperator, isRegexSearch bool, ...) (bool, error)
- func (mcsr *MultiColSegmentReader) ApplySearchToMatchFilterDictCsg(match *structs.MatchFilter, bsh *structs.BlockSearchHelper, cname string) (bool, error)
- func (mcsr *MultiColSegmentReader) ExtractValueFromColumnFile(col string, blockNum uint16, recordNum uint16, qid uint64) (*utils.CValueEnclosure, error)
- func (mcsr *MultiColSegmentReader) GetAllTimeStampsForBlock(blockNum uint16) ([]uint64, error)
- func (mcsr *MultiColSegmentReader) GetDictEncCvalsFromColFile(results map[uint16]map[string]interface{}, col string, blockNum uint16, ...) bool
- func (mcsr *MultiColSegmentReader) GetTimeStampForRecord(blockNum uint16, recordNum uint16, qid uint64) (uint64, error)
- func (mcsr *MultiColSegmentReader) IncrementColumnUsage(colName string)
- func (mcsr *MultiColSegmentReader) IsBlkDictEncoded(cname string, blkNum uint16) (bool, error)
- func (mcsr *MultiColSegmentReader) IsColPresent(cname string) bool
- func (mcsr *MultiColSegmentReader) ReadRawRecordFromColumnFile(col string, blockNum uint16, recordNum uint16, qid uint64) ([]byte, error)
- func (mcsr *MultiColSegmentReader) ReorderColumnUsage()
- type RollupReader
- func (rur *RollupReader) Close()
- func (rur *RollupReader) GetDayRollups() (map[uint16]map[uint64]*writer.RolledRecs, error)
- func (rur *RollupReader) GetHourRollups() (map[uint16]map[uint64]*writer.RolledRecs, error)
- func (rur *RollupReader) GetMinRollups() (map[uint16]map[uint64]*writer.RolledRecs, error)
- type SegmentFileReader
- func (sfr *SegmentFileReader) ApplySearchToExpressionFilterDictCsg(qValDte *utils.DtypeEnclosure, fop utils.FilterOperator, isRegexSearch bool, ...) (bool, error)
- func (sfr *SegmentFileReader) ApplySearchToMatchFilterDictCsg(match *structs.MatchFilter, bsh *structs.BlockSearchHelper) (bool, error)
- func (sfr *SegmentFileReader) Close() error
- func (sfr *SegmentFileReader) GetDictEncCvalsFromColFile(results map[uint16]map[string]interface{}, blockNum uint16, ...) bool
- func (sfr *SegmentFileReader) IsBlkDictEncoded(blockNum uint16) (bool, error)
- func (sfr *SegmentFileReader) ReadRecordFromBlock(blockNum uint16, recordNum uint16) ([]byte, error)
- type SharedMultiColReaders
- type StarTreeMetadata
- type TimeRangeReader
Constants ¶
const MAX_NODE_PTRS = 80_000
Variables ¶
This section is empty.
Functions ¶
func GetSegCardinality ¶
func GetSegCount ¶
func GetSegRange ¶
func ReadAllTimestampsForBlock ¶
func ReadAllTimestampsForBlock(blks map[uint16]*structs.BlockMetadataHolder, segKey string, blockSummaries []*structs.BlockSummary, parallelism int64) (map[uint16][]uint64, error)
When the caller of this function is done with the returned map, they should call ReturnTimeBuffers() on it to return the buffers to rawTimestampsBufferPool.
func ReadSegStats ¶
func ReturnTimeBuffers ¶
Types ¶
type AgileTreeReader ¶
type AgileTreeReader struct {
// contains filtered or unexported fields
}
func InitNewAgileTreeReader ¶
func InitNewAgileTreeReader(segKey string, qid uint64) (*AgileTreeReader, error)
returns a new AgileTreeReader and any errors encountered. The returned AgileTreeReader must call .Close() when finished using it to close the fd
func (*AgileTreeReader) ApplyGroupByJit ¶
func (str *AgileTreeReader) ApplyGroupByJit(grpColNames []string, internalMops []*structs.MeasureAggregator, blkResults *blockresults.BlockResults, qid uint64, agileTreeBuf []byte) error
applies groupby results and returns requested measure operations. First applies the first groupby column; for all returned nodes, applies the second and so on until no more groupby columns exist
func (*AgileTreeReader) CanUseAgileTree ¶
func (str *AgileTreeReader) CanUseAgileTree(grpReq *structs.GroupByRequest) (bool, error)
parameters:
grpColNames: Names of GroupByColNames
mColNames: Names of MeasureColumns
returns:
bool: if grp and mcol are present and the query is fully answerable by AgileTree
error: error if any
If any colname, either in grp or measure, is not present, this will return false.
func (*AgileTreeReader) Close ¶
func (str *AgileTreeReader) Close() error
func (*AgileTreeReader) GetBuckets ¶
func (str *AgileTreeReader) GetBuckets() map[string]struct{}
func (*AgileTreeReader) ReadTreeMeta ¶
func (str *AgileTreeReader) ReadTreeMeta() error
parameters:
none
returns:
err
func (*AgileTreeReader) SetBucketLimit ¶
func (str *AgileTreeReader) SetBucketLimit(bucketLimit uint64)
func (*AgileTreeReader) SetBuckets ¶
func (str *AgileTreeReader) SetBuckets(buckets map[string]struct{})
type ColumnInfo ¶
type ColumnInfo struct {
ColumnName string
// contains filtered or unexported fields
}
type MultiColSegmentReader ¶
type MultiColSegmentReader struct {
AllColums []*ColumnInfo
// contains filtered or unexported fields
}
Defines holder struct and functions to construct & manage SegmentFileReaders across multiple columns
func (*MultiColSegmentReader) ApplySearchToExpressionFilterDictCsg ¶
func (mcsr *MultiColSegmentReader) ApplySearchToExpressionFilterDictCsg(qValDte *utils.DtypeEnclosure, fop utils.FilterOperator, isRegexSearch bool, bsh *structs.BlockSearchHelper, cname string) (bool, error)
func (*MultiColSegmentReader) ApplySearchToMatchFilterDictCsg ¶
func (mcsr *MultiColSegmentReader) ApplySearchToMatchFilterDictCsg(match *structs.MatchFilter, bsh *structs.BlockSearchHelper, cname string) (bool, error)
func (*MultiColSegmentReader) ExtractValueFromColumnFile ¶
func (mcsr *MultiColSegmentReader) ExtractValueFromColumnFile(col string, blockNum uint16, recordNum uint16, qid uint64) (*utils.CValueEnclosure, error)
Reads the request value and converts it to a *utils.CValueEnclosure
func (*MultiColSegmentReader) GetAllTimeStampsForBlock ¶
func (mcsr *MultiColSegmentReader) GetAllTimeStampsForBlock(blockNum uint16) ([]uint64, error)
func (*MultiColSegmentReader) GetDictEncCvalsFromColFile ¶
func (mcsr *MultiColSegmentReader) GetDictEncCvalsFromColFile(results map[uint16]map[string]interface{}, col string, blockNum uint16, orderedRecNums []uint16, qid uint64) bool
parameters:
results: map of recNum -> colName -> colValue to be filled in.
col: columnName
blockNum: blocknum to search for
rnMap: map of recordNumbers for which to find the colValue for the given colname
returns:
bool: if we are able to find the requested column in dict encoding
func (*MultiColSegmentReader) GetTimeStampForRecord ¶
func (*MultiColSegmentReader) IncrementColumnUsage ¶
func (mcsr *MultiColSegmentReader) IncrementColumnUsage(colName string)
func (*MultiColSegmentReader) IsBlkDictEncoded ¶
func (mcsr *MultiColSegmentReader) IsBlkDictEncoded(cname string, blkNum uint16) (bool, error)
func (*MultiColSegmentReader) IsColPresent ¶
func (mcsr *MultiColSegmentReader) IsColPresent(cname string) bool
func (*MultiColSegmentReader) ReadRawRecordFromColumnFile ¶
func (mcsr *MultiColSegmentReader) ReadRawRecordFromColumnFile(col string, blockNum uint16, recordNum uint16, qid uint64) ([]byte, error)
Reads the raw value and returns the []byte in TLV format (type-[length]-value encoding)
func (*MultiColSegmentReader) ReorderColumnUsage ¶
func (mcsr *MultiColSegmentReader) ReorderColumnUsage()
reorders mcsr.AllColumns to be ordered on usage
type RollupReader ¶
type RollupReader struct {
// contains filtered or unexported fields
}
func InitNewRollupReader ¶
func InitNewRollupReader(segKey string, tsKey string, qid uint64) (*RollupReader, error)
func (*RollupReader) Close ¶
func (rur *RollupReader) Close()
func (*RollupReader) GetDayRollups ¶
func (rur *RollupReader) GetDayRollups() (map[uint16]map[uint64]*writer.RolledRecs, error)
func (*RollupReader) GetHourRollups ¶
func (rur *RollupReader) GetHourRollups() (map[uint16]map[uint64]*writer.RolledRecs, error)
func (*RollupReader) GetMinRollups ¶
func (rur *RollupReader) GetMinRollups() (map[uint16]map[uint64]*writer.RolledRecs, error)
type SegmentFileReader ¶
type SegmentFileReader struct {
ColName string // column name this file references
// contains filtered or unexported fields
}
func InitNewSegFileReader ¶
func InitNewSegFileReader(fd *os.File, colName string, blockMetadata map[uint16]*structs.BlockMetadataHolder, qid uint64, blockSummaries []*structs.BlockSummary) (*SegmentFileReader, error)
returns a new SegmentFileReader and any errors encountered. The returned SegmentFileReader must call .Close() when finished using it to close the fd
func (*SegmentFileReader) ApplySearchToExpressionFilterDictCsg ¶
func (sfr *SegmentFileReader) ApplySearchToExpressionFilterDictCsg(qValDte *utils.DtypeEnclosure, fop utils.FilterOperator, isRegexSearch bool, bsh *structs.BlockSearchHelper) (bool, error)
parameters:
DtypeEnclosure: input qVal
FilterOperator: filter operator
isRegexSearch: whether this is a regex search
dictData: mapping of dict keywords --> raw recNums buf slice
returns:
bool: if there is a match
err: any error encountered
func (*SegmentFileReader) ApplySearchToMatchFilterDictCsg ¶
func (sfr *SegmentFileReader) ApplySearchToMatchFilterDictCsg(match *structs.MatchFilter, bsh *structs.BlockSearchHelper) (bool, error)
parameters:
matchFilter: input query mf
dictData: mapping of dict keywords --> raw recNums buf slice
returns:
bool: if there is a match
err: any error encountered
func (*SegmentFileReader) Close ¶
func (sfr *SegmentFileReader) Close() error
func (*SegmentFileReader) GetDictEncCvalsFromColFile ¶
func (*SegmentFileReader) IsBlkDictEncoded ¶
func (sfr *SegmentFileReader) IsBlkDictEncoded(blockNum uint16) (bool, error)
func (*SegmentFileReader) ReadRecordFromBlock ¶
func (sfr *SegmentFileReader) ReadRecordFromBlock(blockNum uint16, recordNum uint16) ([]byte, error)
returns the raw bytes of the blockNum:recordNum combination in the current segfile. Optimized for subsequent calls having the same blockNum. Returns: encodedVal, error
type SharedMultiColReaders ¶
type SharedMultiColReaders struct {
// contains filtered or unexported fields
}
Defines holder struct and functions to construct & manage SegmentFileReaders across multiple columns
func InitSharedMultiColumnReaders ¶
func InitSharedMultiColumnReaders(segKey string, colNames map[string]bool, blockMetadata map[uint16]*structs.BlockMetadataHolder, blockSummaries []*structs.BlockSummary, numReaders int, qid uint64) (*SharedMultiColReaders, error)
Initializes N MultiColumnSegmentReaders, each of which shares the same file descriptor.
Only columns that exist will be loaded; it is not guaranteed that all columns in colNames are loaded. It is up to the caller to close the open FDs using .Close()
func (*SharedMultiColReaders) Close ¶
func (scr *SharedMultiColReaders) Close()
Returns all buffers to the pools, closes all FDs shared across multi readers, and updates global semaphore
type StarTreeMetadata ¶
type StarTreeMetadata struct {
// contains filtered or unexported fields
}
type TimeRangeReader ¶
type TimeRangeReader struct {
// contains filtered or unexported fields
}
func InitNewTimeReader ¶
func InitNewTimeReader(segKey string, tsKey string, blockMetadata map[uint16]*structs.BlockMetadataHolder, blkRecCount map[uint16]uint16, qid uint64) (*TimeRangeReader, error)
returns a new TimeRangeReader and any errors encountered. The caller is responsible for calling TimeRangeReader.Close() when finished using it to close the fd
func InitNewTimeReaderFromBlockSummaries ¶
func InitNewTimeReaderFromBlockSummaries(segKey string, tsKey string, blockMetadata map[uint16]*structs.BlockMetadataHolder, blockSummaries []*structs.BlockSummary, qid uint64) (*TimeRangeReader, error)
func InitNewTimeReaderWithFD ¶
func (*TimeRangeReader) Close ¶
func (trr *TimeRangeReader) Close() error
func (*TimeRangeReader) GetAllTimeStampsForBlock ¶
func (trr *TimeRangeReader) GetAllTimeStampsForBlock(blockNum uint16) ([]uint64, error)
func (*TimeRangeReader) GetTimeStampForRecord ¶
func (trr *TimeRangeReader) GetTimeStampForRecord(blockNum uint16, recordNum uint16, qid uint64) (uint64, error)
highly optimized for subsequent calls to handle the same blockNum