logstore

package
v1.2.0 (not the latest version of this module)
Published: Feb 29, 2024 License: Apache-2.0 Imports: 22 Imported by: 0

Documentation

Overview

Copyright 2023 Huawei Cloud Computing Technologies Co., Ltd.

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.


Index

Constants

View Source
const (
	RecordPoolNum                 = 3
	BLOCK_LOG_NAME                = "segment.blg"
	BLOCK_LOG_CHECK_N_BYTES       = 4 + 4
	TAGS_RECORD_PREFIX_N_BYTES    = 4
	FLAG_TAGS                     = 0
	FLAG_CONTENT                  = 1
	FLAG_LENGTH_MASK              = 0xffffff
	FLAG_HEAD_SHIFT               = 30
	RECORD_DATA_MAX_N_BYTES       = 1024 * 1024
	CONTENT_RECORD_PREFIX_N_BYTES = 4 + 4 + 8 + 8
	CONTENT_RECORD_MAX_N_BYTES    = CONTENT_RECORD_PREFIX_N_BYTES + RECORD_DATA_MAX_N_BYTES
	RECORD_MAX_N_BYTES            = CONTENT_RECORD_MAX_N_BYTES
	BLOCK_FLUSH_TRIGGER_N_BYTES   = 1024 * 1024
	FLAG_BLOCK_CHECK              = 2
	BLOCK_PREFIX_N_BYTES          = 8 + 4
)
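
These flag constants appear to describe a packed record header word. The sketch below shows one plausible reading, in which the record type occupies the bits above FLAG_HEAD_SHIFT and the low 24 bits (FLAG_LENGTH_MASK) carry the length; this layout is an assumption, not something this page documents.

// packHead and unpackHead are illustrative only; the real on-disk layout of
// segment.blg records is not specified here.
func packHead(flag, length uint32) uint32 {
	return flag<<FLAG_HEAD_SHIFT | (length & FLAG_LENGTH_MASK)
}

func unpackHead(head uint32) (flag, length uint32) {
	return head >> FLAG_HEAD_SHIFT, head & FLAG_LENGTH_MASK
}
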
View Source
const (
	CONTENT_NAME                = "segment.cnt"
	MAX_BLOCK_LOG_STORE_N_BYTES = BLOCK_FLUSH_TRIGGER_N_BYTES + (RECORD_MAX_N_BYTES * 2)
)
View Source
const (
	TAG_FIELD                 = "tag"
	CONTENT_FIELD             = "content"
	CircularRecordNum         = 6 // the reader caches 4; hash aggregation and index scan each cache 1
	ContentFilterDurationSpan = "content_filter_duration"
)
View Source
const (
	META_DATA_N_BYTES  int32 = 8 + 8 + 8 + 8 + 8 + 4 + 4
	META_STORE_N_BYTES int32 = META_DATA_N_BYTES + 4
	META_DATA_SIZE     int32 = META_STORE_N_BYTES + 4
)
View Source
const (
	META_NAME            = "segment.meta"
	MAX_SCAN_META_COUNT  = 16 * 1024
	MAX_SCAN_META_Length = MAX_SCAN_META_COUNT * META_STORE_N_BYTES
)
View Source
const (
	QueryMetaCacheTTL            = 10 * time.Minute
	QueryMetaDataCacheSize int64 = 50 * 1024 * 1024 * int64(META_DATA_SIZE)
)
View Source
const (
	ReaderContentSpan         = "reader_content_num_span"
	ReaderReadContentSizeSpan = "reader_content_read_size_span"
	ReaderReadContentDuration = "reader_content_read_duration"
	MetaReadNumSpan           = "meta_read_num_span"
	MetaReadDurationSpan      = "meta_read_duration"
)

Variables

Functions

func GetMetaLen

func GetMetaLen(option *obs.ObsOptions, path string) (int64, error)

func PutMetaData

func PutMetaData(queryID string, meta []*MetaData)

func UpdateMetaData

func UpdateMetaData(old, new cache.Entry) bool
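
A minimal sketch of how these package-level helpers might be combined with GetMetaData (documented under type MetaData below) to cache per-query metadata; the query ID and call order are assumptions, and eviction presumably follows QueryMetaCacheTTL and QueryMetaDataCacheSize declared above:

func cacheMetaForQuery(queryID string, metas []*MetaData) []*MetaData {
	// Store the metadata under the query ID, then read it back from the cache.
	PutMetaData(queryID, metas)
	if cached, ok := GetMetaData(queryID); ok {
		return cached
	}
	return metas
}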

Types

type BlockLogReader

type BlockLogReader struct {
	// contains filtered or unexported fields
}

func NewBlockLogReader

func NewBlockLogReader(object *obs.ObsOptions, isStat bool, path string) (*BlockLogReader, error)

func (*BlockLogReader) Close

func (c *BlockLogReader) Close()

func (*BlockLogReader) Next

func (s *BlockLogReader) Next() ([][][]byte, int64, int, int64, error)
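
A read-loop sketch for BlockLogReader. The names given to Next's numeric return values and the end-of-log condition are assumptions, and the obs.ObsOptions is expected to come from the caller's configuration:

func drainBlockLog(opt *obs.ObsOptions, path string) error {
	r, err := NewBlockLogReader(opt, false /* isStat */, path)
	if err != nil {
		return err
	}
	defer r.Close()
	for {
		// blockID, rows, offset are guesses; this page does not name the returns.
		data, blockID, rows, offset, err := r.Next()
		if err != nil {
			return err
		}
		if data == nil { // assumption: a nil batch signals the end of segment.blg
			return nil
		}
		_, _, _ = blockID, rows, offset
		// consume data ...
	}
}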

type ContentReader

type ContentReader struct {
	// contains filtered or unexported fields
}

func NewContentReader

func NewContentReader(option *obs.ObsOptions, path string, offset []int64, length []int64, schema record.Schemas, readBlockLog bool, maxBlockID int64, isStat bool) (*ContentReader, error)

func (*ContentReader) Close

func (s *ContentReader) Close()

func (*ContentReader) Next

func (s *ContentReader) Next() ([][][]byte, int64, error)

func (*ContentReader) StartSpan

func (s *ContentReader) StartSpan(span *tracing.Span)

type DataReader

type DataReader struct {
	// contains filtered or unexported fields
}

func NewDataReader

func NewDataReader(option *obs.ObsOptions, path string, version uint32, tr util.TimeRange, opt hybridqp.Options, offset []int64, length []int64,
	recordSchema record.Schemas, isFirstIter bool, unnest *influxql.Unnest, maxBlockID int64) (*DataReader, error)

func (*DataReader) Close

func (s *DataReader) Close()

func (*DataReader) Next

func (s *DataReader) Next() (*record.Record, error)

func (*DataReader) StartSpan

func (s *DataReader) StartSpan(span *tracing.Span)

func (*DataReader) UnnestNext

func (s *DataReader) UnnestNext() (*record.Record, error)
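
A consumption sketch for DataReader that takes an already-constructed reader (NewDataReader's option, time-range, and unnest arguments are built elsewhere); treating a nil record as end-of-data is an assumption:

func consume(r *DataReader, unnest bool) error {
	defer r.Close()
	for {
		var rec *record.Record
		var err error
		if unnest {
			rec, err = r.UnnestNext()
		} else {
			rec, err = r.Next()
		}
		if err != nil {
			return err
		}
		if rec == nil { // assumption: a nil record means the reader is exhausted
			return nil
		}
		// process rec ...
	}
}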

type IndexReader

type IndexReader struct {
	// contains filtered or unexported fields
}

func NewIndexReader

func NewIndexReader(option *obs.ObsOptions, version uint32, path string, tr util.TimeRange, opt hybridqp.Options) (*IndexReader, error)

func (*IndexReader) Close

func (s *IndexReader) Close()

func (*IndexReader) Get

func (s *IndexReader) Get() ([]*MetaData, error)

func (*IndexReader) GetMaxBlockId

func (s *IndexReader) GetMaxBlockId() int64

func (*IndexReader) GetMinMaxTime

func (s *IndexReader) GetMinMaxTime() (int64, int64)

func (*IndexReader) StartSpan

func (s *IndexReader) StartSpan(span *tracing.Span)
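
An IndexReader sketch that fetches the matching metadata together with the block and time bounds; construction of obs.ObsOptions, util.TimeRange, and hybridqp.Options is assumed to happen in the caller:

func scanIndex(opt *obs.ObsOptions, version uint32, path string,
	tr util.TimeRange, qopt hybridqp.Options) ([]*MetaData, error) {
	ir, err := NewIndexReader(opt, version, path, tr, qopt)
	if err != nil {
		return nil, err
	}
	defer ir.Close()
	metas, err := ir.Get()
	if err != nil {
		return nil, err
	}
	minT, maxT := ir.GetMinMaxTime()
	maxBlock := ir.GetMaxBlockId()
	_, _, _ = minT, maxT, maxBlock
	return metas, nil
}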

type MatchAllOperator

type MatchAllOperator struct {
	// contains filtered or unexported fields
}

func (*MatchAllOperator) Compute

func (r *MatchAllOperator) Compute(rec *record.Record)

type MetaControl

type MetaControl interface {
	Push(MetaDataInfo)
	Pop() (MetaDataInfo, bool)
	IsEmpty() bool
}

func NewMetaControl

func NewMetaControl(isQueue bool, count int) MetaControl
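
Judging by the MetaQueue and MetaStack implementations listed below, NewMetaControl returns either a FIFO or a LIFO container depending on isQueue. The sketch uses a small hypothetical MetaDataInfo implementation:

// spanInfo is a hypothetical MetaDataInfo used only for this example.
type spanInfo struct{ min, max int64 }

func (s spanInfo) GetMinTime() int64 { return s.min }
func (s spanInfo) GetMaxTime() int64 { return s.max }

func drainControl() {
	ctl := NewMetaControl(true, 2) // true: queue (FIFO); false presumably yields a stack (LIFO)
	ctl.Push(spanInfo{min: 1, max: 10})
	ctl.Push(spanInfo{min: 11, max: 20})
	for !ctl.IsEmpty() {
		info, ok := ctl.Pop()
		if !ok {
			break
		}
		_, _ = info.GetMinTime(), info.GetMaxTime()
	}
}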

type MetaData

type MetaData struct {
	// contains filtered or unexported fields
}

func GetMetaData

func GetMetaData(queryID string) ([]*MetaData, bool)

func NewMetaData

func NewMetaData(blockIndex int64, data []byte, offset int32) (*MetaData, error)

func (*MetaData) GetBlockIndex

func (m *MetaData) GetBlockIndex() int64

func (*MetaData) GetContentBlockLength

func (m *MetaData) GetContentBlockLength() int32

func (*MetaData) GetContentBlockOffset

func (m *MetaData) GetContentBlockOffset() int64

func (*MetaData) GetMaxTime

func (m *MetaData) GetMaxTime() int64

func (*MetaData) GetMinTime

func (m *MetaData) GetMinTime() int64

type MetaDataInfo

type MetaDataInfo interface {
	GetMinTime() int64
	GetMaxTime() int64
}

type MetaDatas

type MetaDatas []*MetaData

func (MetaDatas) Len

func (a MetaDatas) Len() int

func (MetaDatas) Less

func (a MetaDatas) Less(i, j int) bool

func (MetaDatas) Swap

func (a MetaDatas) Swap(i, j int)
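
MetaDatas implements sort.Interface, so a slice of *MetaData can be ordered with the standard library; the sort key (minimum time or block index) is not stated on this page:

func sortMetas(metas []*MetaData) {
	// requires "sort" from the standard library
	sort.Sort(MetaDatas(metas))
}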

type MetaQueue

type MetaQueue []MetaDataInfo

func (*MetaQueue) IsEmpty

func (q *MetaQueue) IsEmpty() bool

func (*MetaQueue) Pop

func (q *MetaQueue) Pop() (MetaDataInfo, bool)

func (*MetaQueue) Push

func (q *MetaQueue) Push(v MetaDataInfo)

type MetaReader

type MetaReader interface {
	Close()
	// contains filtered or unexported methods
}

func NewMetaReader

func NewMetaReader(option *obs.ObsOptions, path string, offset int64, length int64, tr util.TimeRange, isCache bool, isStat bool) (MetaReader, error)

TODO: implement a cache reader.

type MetaStack

type MetaStack []MetaDataInfo

func (*MetaStack) IsEmpty

func (s *MetaStack) IsEmpty() bool

func (*MetaStack) Pop

func (s *MetaStack) Pop() (MetaDataInfo, bool)

func (*MetaStack) Push

func (s *MetaStack) Push(value MetaDataInfo)

type MetaStorageReader

type MetaStorageReader struct {
	// contains filtered or unexported fields
}

func NewMetaStorageReader

func NewMetaStorageReader(option *obs.ObsOptions, path string, offset int64, length int64, tr util.TimeRange, isStat bool) (*MetaStorageReader, error)

func (*MetaStorageReader) Close

func (m *MetaStorageReader) Close()

type RegexpMatchAll

type RegexpMatchAll struct {
	// contains filtered or unexported fields
}

func (*RegexpMatchAll) Get

func (r *RegexpMatchAll) Get(data [][]byte) [][]byte

type SegmentMetaDataEntry

type SegmentMetaDataEntry struct {
	// contains filtered or unexported fields
}

func NewSegmentMetaDataEntry

func NewSegmentMetaDataEntry(segmentID string) *SegmentMetaDataEntry

func (*SegmentMetaDataEntry) GetKey

func (e *SegmentMetaDataEntry) GetKey() string

func (*SegmentMetaDataEntry) GetTime

func (e *SegmentMetaDataEntry) GetTime() time.Time

func (*SegmentMetaDataEntry) GetValue

func (e *SegmentMetaDataEntry) GetValue() interface{}

func (*SegmentMetaDataEntry) SetTime

func (e *SegmentMetaDataEntry) SetTime(time time.Time)

func (*SegmentMetaDataEntry) SetValue

func (e *SegmentMetaDataEntry) SetValue(value interface{})

func (*SegmentMetaDataEntry) Size

func (e *SegmentMetaDataEntry) Size() int64
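
SegmentMetaDataEntry looks like a cache.Entry implementation keyed by segment ID. A construction sketch, where storing []*MetaData as the value is an assumption:

func newSegmentEntry(segmentID string, metas []*MetaData) *SegmentMetaDataEntry {
	e := NewSegmentMetaDataEntry(segmentID)
	e.SetValue(metas)     // assumption: the cached value is the segment's metadata
	e.SetTime(time.Now()) // requires "time" from the standard library
	return e
}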

type SegmentReader

type SegmentReader struct {
	// contains filtered or unexported fields
}

func NewSegmentReader

func NewSegmentReader(option *obs.ObsOptions, path string, version uint32, tr util.TimeRange, opt hybridqp.Options, schema record.Schemas, isAscending bool) (*SegmentReader, error)

func (*SegmentReader) Close

func (s *SegmentReader) Close()

func (*SegmentReader) GetRowCount

func (s *SegmentReader) GetRowCount() int64

func (*SegmentReader) Next

func (s *SegmentReader) Next() ([][][]byte, int64, error)

func (*SegmentReader) SetTr

func (s *SegmentReader) SetTr(tr util.TimeRange)

func (*SegmentReader) StartSpan

func (s *SegmentReader) StartSpan(span *tracing.Span)

func (*SegmentReader) UpdateTr

func (s *SegmentReader) UpdateTr(time int64)
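
A SegmentReader loop sketch; as with the other readers, treating a nil batch as end-of-segment is an assumption, and the constructor arguments come from the caller:

func readSegment(opt *obs.ObsOptions, path string, version uint32, tr util.TimeRange,
	qopt hybridqp.Options, schema record.Schemas) (int64, error) {
	sr, err := NewSegmentReader(opt, path, version, tr, qopt, schema, true /* ascending */)
	if err != nil {
		return 0, err
	}
	defer sr.Close()
	for {
		data, blockID, err := sr.Next()
		if err != nil {
			return 0, err
		}
		if data == nil { // assumption: a nil batch means no more blocks in the time range
			break
		}
		_ = blockID
	}
	return sr.GetRowCount(), nil
}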

type UnnestMatch

type UnnestMatch interface {
	Get([][]byte) [][]byte
}

func GetUnnestFunc

func GetUnnestFunc(unnest *influxql.Unnest, schema record.Schemas) (UnnestMatch, error)

func NewRegexpMatchAll

func NewRegexpMatchAll(unnest *influxql.Unnest, schema record.Schemas) UnnestMatch

type UnnestMatchAll

type UnnestMatchAll struct {
	// contains filtered or unexported fields
}

func NewUnnestMatchAll

func NewUnnestMatchAll(unnest *influxql.Unnest) (*UnnestMatchAll, error)

func (*UnnestMatchAll) Get

func (r *UnnestMatchAll) Get(s string) map[string]string

type UnnestOperator

type UnnestOperator interface {
	Compute(rec *record.Record)
}

func GetUnnestFuncOperator

func GetUnnestFuncOperator(unnest *influxql.Unnest, schema record.Schemas) (UnnestOperator, error)

func NewMatchAllOperator

func NewMatchAllOperator(unnest *influxql.Unnest, schemas record.Schemas) UnnestOperator
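
Finally, a sketch tying the unnest helpers together: GetUnnestFuncOperator selects an UnnestOperator (for example MatchAllOperator) for the given unnest expression, and Compute is applied to each record; that Compute rewrites the record in place is an assumption:

func applyUnnest(unnest *influxql.Unnest, schema record.Schemas, rec *record.Record) error {
	op, err := GetUnnestFuncOperator(unnest, schema)
	if err != nil {
		return err
	}
	op.Compute(rec) // assumption: Compute mutates rec with the extracted fields
	return nil
}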
