package storage (v2.6.2)
Published: Aug 12, 2022 License: AGPL-3.0 Imports: 56 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func IsBlockOverlapping

func IsBlockOverlapping(b chunkenc.Block, with *LazyChunk, direction logproto.Direction) bool

func NewBucketClient

func NewBucketClient(storageConfig Config) (index.BucketClient, error)

NewBucketClient makes a new bucket client based on the configuration.
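
A minimal sketch of calling this from code. Import paths in this and the following sketches are assumptions based on the Loki v2.6.2 module layout (for example "github.com/grafana/loki/pkg/storage" for this package and "github.com/grafana/loki/pkg/storage/config" for config), and cfg is assumed to have been populated via YAML or RegisterFlags:

func newBucketClient(cfg storage.Config) error {
	// Selects the bucket implementation based on the configuration (see doc above).
	bucket, err := storage.NewBucketClient(cfg)
	if err != nil {
		return err
	}
	_ = bucket // index.BucketClient, per the signature above
	return nil
}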

func NewChunkClient

func NewChunkClient(name string, cfg Config, schemaCfg config.SchemaConfig, clientMetrics ClientMetrics, registerer prometheus.Registerer) (client.Client, error)

NewChunkClient makes a new client.Client of the desired type.
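
A hedged sketch; the store name "gcs" is only an example (in Loki it typically corresponds to the object store named in the active schema period), and prometheus is github.com/prometheus/client_golang/prometheus:

func newChunkClient(cfg storage.Config, schemaCfg config.SchemaConfig) error {
	cm := storage.NewClientMetrics()
	// "gcs" is an example name, not a recommendation.
	cc, err := storage.NewChunkClient("gcs", cfg, schemaCfg, cm, prometheus.DefaultRegisterer)
	if err != nil {
		return err
	}
	_ = cc // client.Client, per the signature above
	return nil
}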

func NewIndexClient

func NewIndexClient(name string, cfg Config, schemaCfg config.SchemaConfig, limits StoreLimits, cm ClientMetrics, ownsTenantFn downloads.IndexGatewayOwnsTenant, registerer prometheus.Registerer) (index.Client, error)

NewIndexClient makes a new index client of the desired type.
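
A sketch under stated assumptions: "boltdb-shipper" is only an example index type, limits is assumed to come from Loki's runtime limits, and passing nil for ownsTenantFn is an assumption for deployments that do not use the index gateway ring:

func newIndexClient(cfg storage.Config, schemaCfg config.SchemaConfig, limits storage.StoreLimits) error {
	cm := storage.NewClientMetrics()
	// nil ownsTenantFn is assumed to be acceptable here; verify against the
	// downloads package before relying on it.
	idx, err := storage.NewIndexClient("boltdb-shipper", cfg, schemaCfg, limits, cm, nil, prometheus.DefaultRegisterer)
	if err != nil {
		return err
	}
	_ = idx // index.Client, per the signature above
	return nil
}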

func NewObjectClient

func NewObjectClient(name string, cfg Config, clientMetrics ClientMetrics) (client.ObjectClient, error)

NewObjectClient makes a new client.ObjectClient of the desired type.
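
Another short sketch; "filesystem" is just an example name, and client refers to the chunk client package from the signature (import path assumed to be "github.com/grafana/loki/pkg/storage/chunk/client"):

func newObjectClient(cfg storage.Config, cm storage.ClientMetrics) (client.ObjectClient, error) {
	// Other store names select other backend sections of cfg.
	return storage.NewObjectClient("filesystem", cfg, cm)
}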

func NewTableClient

func NewTableClient(name string, cfg Config, cm ClientMetrics, registerer prometheus.Registerer) (index.TableClient, error)

NewTableClient makes a new table client based on the configuration.
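
In the same spirit, a sketch with an example index type name; index refers to the package from the signature (import path assumed):

func newTableClient(cfg storage.Config, cm storage.ClientMetrics, reg prometheus.Registerer) (index.TableClient, error) {
	// "boltdb-shipper" is an example index type name only.
	return storage.NewTableClient("boltdb-shipper", cfg, cm, reg)
}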

func ResetBoltDBIndexClientWithShipper

func ResetBoltDBIndexClientWithShipper()

ResetBoltDBIndexClientWithShipper resets the boltdb-shipper index client singleton. MUST ONLY BE USED IN TESTS.
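
A hedged sketch of how a test might use this; the test name and body are illustrative only:

func TestWithBoltDBShipper(t *testing.T) {
	// Ensure later tests rebuild the shared boltdb-shipper index client.
	t.Cleanup(storage.ResetBoltDBIndexClientWithShipper)

	// ... exercise code that builds index clients via NewIndexClient ...
}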

Types

type AsyncStore

type AsyncStore struct {
	stores.Store
	// contains filtered or unexported fields
}

AsyncStore queries both the ingesters and the chunk store, then deduplicates and combines the results. It should be used with an async store such as boltdb-shipper. AsyncStore is meant to be used only in queriers or other services, never in ingesters; otherwise an ingester would end up repeatedly querying other ingesters.

func NewAsyncStore

func NewAsyncStore(cfg AsyncStoreCfg, store stores.Store, scfg config.SchemaConfig) *AsyncStore

func (*AsyncStore) GetChunkRefs

func (a *AsyncStore) GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error)

func (*AsyncStore) Stats

func (a *AsyncStore) Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*stats.Stats, error)

type AsyncStoreCfg

type AsyncStoreCfg struct {
	IngesterQuerier IngesterQuerier
	// QueryIngestersWithin defines the maximum lookback beyond which ingesters are not queried for chunk IDs.
	QueryIngestersWithin time.Duration
}
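
As a sketch, a querier-side component could wrap its chunk store like this; the three-hour lookback is an arbitrary example and stores is the package from the NewAsyncStore signature (import path assumed):

func newQuerierStore(chunkStore stores.Store, iq storage.IngesterQuerier, scfg config.SchemaConfig) *storage.AsyncStore {
	return storage.NewAsyncStore(storage.AsyncStoreCfg{
		IngesterQuerier:      iq,
		QueryIngestersWithin: 3 * time.Hour, // example value only
	}, chunkStore, scfg)
}

GetChunkRefs and Stats on the returned AsyncStore then merge results from the ingesters and the underlying store, as described above.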

type ChunkMetrics

type ChunkMetrics struct {
	// contains filtered or unexported fields
}

func NewChunkMetrics

func NewChunkMetrics(r prometheus.Registerer, maxBatchSize int) *ChunkMetrics

type ClientMetrics

type ClientMetrics struct {
	AzureMetrics azure.BlobStorageMetrics
}

func NewClientMetrics

func NewClientMetrics() ClientMetrics

func (*ClientMetrics) Unregister

func (c *ClientMetrics) Unregister()
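
A small sketch of one plausible lifecycle: create the metrics once, share them across the client constructors above, and unregister them on teardown (for example in tests that rebuild stores against the same registry):

func withClientMetrics() {
	cm := storage.NewClientMetrics()
	defer cm.Unregister()

	// pass cm to NewChunkClient, NewObjectClient, NewStore, ... as shown above
}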

type Config

type Config struct {
	AWSStorageConfig       aws.StorageConfig         `yaml:"aws"`
	AzureStorageConfig     azure.BlobStorageConfig   `yaml:"azure"`
	BOSStorageConfig       baidubce.BOSStorageConfig `yaml:"bos"`
	GCPStorageConfig       gcp.Config                `yaml:"bigtable"`
	GCSConfig              gcp.GCSConfig             `yaml:"gcs"`
	CassandraStorageConfig cassandra.Config          `yaml:"cassandra"`
	BoltDBConfig           local.BoltDBConfig        `yaml:"boltdb"`
	FSConfig               local.FSConfig            `yaml:"filesystem"`
	Swift                  openstack.SwiftConfig     `yaml:"swift"`
	GrpcConfig             grpc.Config               `yaml:"grpc_store"`
	Hedging                hedging.Config            `yaml:"hedging"`

	IndexCacheValidity time.Duration `yaml:"index_cache_validity"`

	IndexQueriesCacheConfig  cache.Config `yaml:"index_queries_cache_config"`
	DisableBroadIndexQueries bool         `yaml:"disable_broad_index_queries"`
	MaxParallelGetChunk      int          `yaml:"max_parallel_get_chunk"`

	MaxChunkBatchSize   int                 `yaml:"max_chunk_batch_size"`
	BoltDBShipperConfig shipper.Config      `yaml:"boltdb_shipper"`
	TSDBShipperConfig   indexshipper.Config `yaml:"tsdb_shipper"`

	// Config for using AsyncStore when using async index stores like `boltdb-shipper`.
	// It is required for getting chunk ids of recently flushed chunks from the ingesters.
	EnableAsyncStore bool          `yaml:"-"`
	AsyncStoreConfig AsyncStoreCfg `yaml:"-"`
}

Config chooses which storage client to use.

func (*Config) RegisterFlags

func (cfg *Config) RegisterFlags(f *flag.FlagSet)

RegisterFlags adds the flags required to configure this Config to the given FlagSet.

func (*Config) Validate

func (cfg *Config) Validate() error

Validate validates the config and returns an error on failure.
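
A sketch of wiring Config through the standard flag package and validating it; the flag-set name is arbitrary:

func loadStorageConfig(args []string) (storage.Config, error) {
	var cfg storage.Config
	fs := flag.NewFlagSet("storage", flag.ContinueOnError)
	cfg.RegisterFlags(fs)
	if err := fs.Parse(args); err != nil {
		return storage.Config{}, err
	}
	return cfg, cfg.Validate()
}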

type IngesterQuerier

type IngesterQuerier interface {
	GetChunkIDs(ctx context.Context, from, through model.Time, matchers ...*labels.Matcher) ([]string, error)
	Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*stats.Stats, error)
}

type LazyChunk

type LazyChunk struct {
	Chunk   chunk.Chunk
	IsValid bool
	Fetcher *fetcher.Fetcher
	// contains filtered or unexported fields
}

LazyChunk loads the chunk when it is accessed.

func (*LazyChunk) IsOverlapping

func (c *LazyChunk) IsOverlapping(with *LazyChunk, direction logproto.Direction) bool
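
For illustration only, assuming two LazyChunks have already been obtained from a query:

func overlapsForBackwardQuery(a, b *storage.LazyChunk) bool {
	// logproto.BACKWARD selects newest-first ordering.
	return a.IsOverlapping(b, logproto.BACKWARD)
}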

func (*LazyChunk) Iterator

func (c *LazyChunk) Iterator(
	ctx context.Context,
	from, through time.Time,
	direction logproto.Direction,
	pipeline log.StreamPipeline,
	nextChunk *LazyChunk,
) (iter.EntryIterator, error)

Iterator returns an entry iterator. If a next chunk is passed, the returned iterator caches the entries of blocks that overlap with it, so that when they are reused for ordering across batches the data is not decompressed again.

func (*LazyChunk) SampleIterator

func (c *LazyChunk) SampleIterator(
	ctx context.Context,
	from, through time.Time,
	extractor log.StreamSampleExtractor,
	nextChunk *LazyChunk,
) (iter.SampleIterator, error)

SampleIterator returns a sample iterator. If a next chunk is passed, the returned iterator caches the entries of blocks that overlap with it, so that when they are reused for ordering across batches the data is not decompressed again.

type Store

type Store interface {
	stores.Store
	SelectSamples(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error)
	SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error)
	Series(ctx context.Context, req logql.SelectLogParams) ([]logproto.SeriesIdentifier, error)
	GetSchemaConfigs() []config.PeriodConfig
	SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer)
}

Store is the Loki chunk store to retrieve and save chunks.

func NewStore

func NewStore(cfg Config, storeCfg config.ChunkStoreConfig, schemaCfg config.SchemaConfig,
	limits StoreLimits, clientMetrics ClientMetrics, registerer prometheus.Registerer, logger log.Logger,
) (Store, error)

NewStore creates a new Loki Store using the supplied configuration.
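
A hedged sketch of standing up the store: limits is assumed to be satisfied by Loki's runtime limits, the logger is assumed to be a github.com/go-kit/log logger, and ClientMetrics comes from NewClientMetrics above:

func newLokiStore(
	cfg storage.Config,
	storeCfg config.ChunkStoreConfig,
	schemaCfg config.SchemaConfig,
	limits storage.StoreLimits,
) (storage.Store, error) {
	cm := storage.NewClientMetrics()
	return storage.NewStore(cfg, storeCfg, schemaCfg, limits, cm, prometheus.DefaultRegisterer, log.NewNopLogger())
}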

type StoreLimits

type StoreLimits interface {
	downloads.Limits
	CardinalityLimit(userID string) int
	MaxChunksPerQueryFromStore(userID string) int
	MaxQueryLength(userID string) time.Duration
}

StoreLimits provides access to the limits that are specific to store queries.
