backend

package
v1.5.0 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Aug 17, 2022 License: AGPL-3.0 Imports: 12 Imported by: 1

Documentation

Index

Constants

View Source
const (
	// MetaName is the object name of a block's meta file.
	MetaName          = "meta.json"
	// CompactedMetaName is the object name of a compacted block's meta file.
	CompactedMetaName = "meta.compacted.json"
	// TenantIndexName is the object name of the per-tenant block index
	// (a gzipped JSON file; see TenantIndex).
	TenantIndexName   = "index.json.gz"
	// ClusterSeedFileName is the file name for the cluster seed file.
	ClusterSeedFileName = "tempo_cluster_seed.json"
)

Variables

View Source
// Sentinel errors returned by backend implementations. Compare with errors.Is.
var (
	// ErrDoesNotExist indicates the requested object does not exist in the backend.
	ErrDoesNotExist  = fmt.Errorf("does not exist")
	// ErrEmptyTenantID indicates an operation was attempted with an empty tenant ID.
	ErrEmptyTenantID = fmt.Errorf("empty tenant id")
	// ErrEmptyBlockID indicates an operation was attempted with an empty block ID.
	ErrEmptyBlockID  = fmt.Errorf("empty block id")
	// ErrBadSeedFile indicates the cluster seed file could not be used.
	ErrBadSeedFile   = fmt.Errorf("bad seed file")
)

SupportedEncoding is a slice of all supported encodings

Functions

func CompactedMetaFileName added in v1.1.0

func CompactedMetaFileName(blockID uuid.UUID, tenantID string) string

CompactedMetaFileName returns the object name for the compacted block meta given a block ID and tenant ID.

func MetaFileName added in v1.1.0

func MetaFileName(blockID uuid.UUID, tenantID string) string

MetaFileName returns the object name for the block meta given a block ID and tenant ID.

func ObjectFileName added in v1.1.0

func ObjectFileName(keypath KeyPath, name string) string

ObjectFileName returns a unique identifier for an object in object storage given its name and keypath

func RootPath added in v1.1.0

func RootPath(blockID uuid.UUID, tenantID string) string

RootPath returns the root path for a block given a block ID and tenant ID.

func SupportedEncodingString added in v0.6.0

func SupportedEncodingString() string

SupportedEncodingString returns the list of supported Encoding.

Types

type AllReader added in v0.7.0

// AllReader is an interface that supports both io.Reader and io.ReaderAt
// methods, allowing an object to be streamed sequentially or read at
// arbitrary offsets.
type AllReader interface {
	io.Reader
	io.ReaderAt
}

AllReader is an interface that supports both io.Reader and io.ReaderAt methods

type AppendTracker

// AppendTracker is an opaque handle usable by the backend to track a
// long-running append operation. Pass nil to Append to start a new job.
type AppendTracker interface{}

AppendTracker is an empty interface usable by the backend to track a long running append operation

type BlockMeta added in v0.5.0

// BlockMeta describes a single tempodb block: its identity, object-ID range,
// timing, size, and encoding details. It is persisted per block as meta.json
// (see MetaName).
type BlockMeta struct {
	Version         string    `json:"format"`          // Version indicates the block format version. This includes specifics of how the indexes and data is stored
	BlockID         uuid.UUID `json:"blockID"`         // Unique block id
	MinID           []byte    `json:"minID"`           // Minimum object id stored in this block
	MaxID           []byte    `json:"maxID"`           // Maximum object id stored in this block
	TenantID        string    `json:"tenantID"`        // ID of tenant to which this block belongs
	StartTime       time.Time `json:"startTime"`       // Roughly matches when the first obj was written to this block. Used to determine block age for different purposes (caching, etc)
	EndTime         time.Time `json:"endTime"`         // Currently mostly meaningless but roughly matches to the time the last obj was written to this block
	TotalObjects    int       `json:"totalObjects"`    // Total objects in this block
	Size            uint64    `json:"size"`            // Total size in bytes of the data object
	CompactionLevel uint8     `json:"compactionLevel"` // Kind of the number of times this block has been compacted
	Encoding        Encoding  `json:"encoding"`        // Encoding/compression format
	IndexPageSize   uint32    `json:"indexPageSize"`   // Size of each index page in bytes
	TotalRecords    uint32    `json:"totalRecords"`    // Total Records stored in the index file
	DataEncoding    string    `json:"dataEncoding"`    // DataEncoding is a string provided externally, but tracked by tempodb that indicates the way the bytes are encoded
	BloomShardCount uint16    `json:"bloomShards"`     // Number of bloom filter shards
	FooterSize      uint32    `json:"footerSize"`      // Size of data file footer (parquet)
}

func NewBlockMeta added in v0.5.0

func NewBlockMeta(tenantID string, blockID uuid.UUID, version string, encoding Encoding, dataEncoding string) *BlockMeta

func (*BlockMeta) ObjectAdded added in v0.5.0

func (b *BlockMeta) ObjectAdded(id []byte, start uint32, end uint32)

ObjectAdded updates the block meta appropriately based on information about an added record

start/end are unix epoch seconds

type CompactedBlockMeta added in v0.5.0

// CompactedBlockMeta extends BlockMeta with the time at which the block
// was compacted. It is persisted per compacted block (see CompactedMetaName).
type CompactedBlockMeta struct {
	BlockMeta

	// CompactedTime records when the block was marked compacted.
	CompactedTime time.Time `json:"compactedTime"`
}

type Compactor

// Compactor is a collection of methods to interact with compacted elements
// of a tempodb block.
type Compactor interface {
	// MarkBlockCompacted marks a block as compacted. Call this after a block has been successfully compacted to a new block
	MarkBlockCompacted(blockID uuid.UUID, tenantID string) error
	// ClearBlock removes a block from the backend
	ClearBlock(blockID uuid.UUID, tenantID string) error
	// CompactedBlockMeta returns the compacted blockmeta given a block and tenant id
	CompactedBlockMeta(blockID uuid.UUID, tenantID string) (*CompactedBlockMeta, error)
}

Compactor is a collection of methods to interact with compacted elements of a tempodb block

type ContextReader added in v0.7.0

// ContextReader is an io.ReaderAt-style interface that passes context. It is
// used to simplify access to backend objects and abstract away the name/meta
// and other details so that the data can be accessed directly and simply.
type ContextReader interface {
	// ReadAt reads len(p) bytes at offset off from the underlying object.
	ReadAt(ctx context.Context, p []byte, off int64) (int, error)
	// ReadAll reads the entire underlying object.
	ReadAll(ctx context.Context) ([]byte, error)

	// Reader returns an io.Reader over the underlying object. May not be supported by all implementations
	Reader() (io.Reader, error)
}

ContextReader is an io.ReaderAt interface that passes context. It is used to simplify access to backend objects and abstract away the name/meta and other details so that the data can be accessed directly and simply

func NewContextReader added in v0.7.0

func NewContextReader(meta *BlockMeta, name string, r Reader, shouldCache bool) ContextReader

NewContextReader creates a ReaderAt for the given BlockMeta

func NewContextReaderWithAllReader added in v0.7.0

func NewContextReaderWithAllReader(r AllReader) ContextReader

NewContextReaderWithAllReader wraps a normal ReaderAt and drops the context

type Encoding added in v0.6.0

// Encoding is the identifier for a chunk encoding (compression algorithm).
type Encoding byte

Encoding is the identifier for a chunk encoding.

// The different available encodings. Make sure to preserve the order, as
// these numeric values are written to the chunks!
const (
	EncNone Encoding = iota // no compression
	EncGZIP
	EncLZ4_64k
	EncLZ4_256k
	EncLZ4_1M
	EncLZ4_4M
	EncSnappy
	EncZstd
	EncS2
)

The different available encodings. Make sure to preserve the order, as these numeric values are written to the chunks!

func ParseEncoding added in v0.6.0

func ParseEncoding(enc string) (Encoding, error)

ParseEncoding parses a chunk encoding (compression algorithm) by its name.

func (Encoding) MarshalJSON added in v0.6.0

func (e Encoding) MarshalJSON() ([]byte, error)

MarshalJSON implements the marshaler interface of the json pkg.

func (Encoding) MarshalYAML added in v0.6.0

func (e Encoding) MarshalYAML() (interface{}, error)

MarshalYAML implements the Marshaler interface of the yaml pkg

func (Encoding) String added in v0.6.0

func (e Encoding) String() string

func (*Encoding) UnmarshalJSON added in v0.6.0

func (e *Encoding) UnmarshalJSON(b []byte) error

UnmarshalJSON implements the Unmarshaler interface of the json pkg.

func (*Encoding) UnmarshalYAML added in v0.6.0

func (e *Encoding) UnmarshalYAML(unmarshal func(interface{}) error) error

UnmarshalYAML implements the Unmarshaler interface of the yaml pkg.

type KeyPath added in v1.1.0

// KeyPath is an ordered set of strings that govern where data is
// read/written from the backend.
type KeyPath []string

KeyPath is an ordered set of strings that govern where data is read/written from the backend

func KeyPathForBlock added in v1.1.0

func KeyPathForBlock(blockID uuid.UUID, tenantID string) KeyPath

KeyPathForBlock returns a correctly ordered keypath given a block ID and tenant ID.

type MockCompactor added in v1.1.0

// MockCompactor is a mock implementation of the Compactor interface for
// use in tests.
type MockCompactor struct {
	// BlockMetaFn, when set, supplies the result of CompactedBlockMeta.
	BlockMetaFn func(blockID uuid.UUID, tenantID string) (*CompactedBlockMeta, error)
}

MockCompactor is a mock implementation of the Compactor interface for use in tests.

func (*MockCompactor) ClearBlock added in v1.1.0

func (c *MockCompactor) ClearBlock(blockID uuid.UUID, tenantID string) error

func (*MockCompactor) CompactedBlockMeta added in v1.1.0

func (c *MockCompactor) CompactedBlockMeta(blockID uuid.UUID, tenantID string) (*CompactedBlockMeta, error)

func (*MockCompactor) MarkBlockCompacted added in v1.1.0

func (c *MockCompactor) MarkBlockCompacted(blockID uuid.UUID, tenantID string) error

type MockRawReader added in v1.1.0

// MockRawReader is a mock implementation of the RawReader interface for
// use in tests. Canned fields supply fixed results; Fn fields, when set,
// override them — TODO(review): confirm precedence against the implementation.
type MockRawReader struct {
	L      []string // canned List result
	ListFn func(ctx context.Context, keypath KeyPath) ([]string, error)
	R      []byte // read
	Range  []byte // ReadRange
	ReadFn func(ctx context.Context, name string, keypath KeyPath, shouldCache bool) (io.ReadCloser, int64, error)
}

MockRawReader is a mock implementation of the RawReader interface for use in tests.

func (*MockRawReader) List added in v1.1.0

func (m *MockRawReader) List(ctx context.Context, keypath KeyPath) ([]string, error)

func (*MockRawReader) Read added in v1.1.0

func (m *MockRawReader) Read(ctx context.Context, name string, keypath KeyPath, shouldCache bool) (io.ReadCloser, int64, error)

func (*MockRawReader) ReadRange added in v1.1.0

func (m *MockRawReader) ReadRange(ctx context.Context, name string, keypath KeyPath, offset uint64, buffer []byte, _ bool) error

func (*MockRawReader) Shutdown added in v1.1.0

func (m *MockRawReader) Shutdown()

type MockRawWriter added in v1.1.0

// MockRawWriter is a mock implementation of the RawWriter interface for
// use in tests.
type MockRawWriter struct {
	// contains filtered or unexported fields
}

MockRawWriter is a mock implementation of the RawWriter interface for use in tests.

func (*MockRawWriter) Append added in v1.1.0

func (m *MockRawWriter) Append(ctx context.Context, name string, keypath KeyPath, tracker AppendTracker, buffer []byte) (AppendTracker, error)

func (*MockRawWriter) CloseAppend added in v1.1.0

func (m *MockRawWriter) CloseAppend(ctx context.Context, tracker AppendTracker) error

func (*MockRawWriter) Write added in v1.1.0

func (m *MockRawWriter) Write(ctx context.Context, name string, keypath KeyPath, data io.Reader, size int64, shouldCache bool) error

type MockReader added in v1.1.0

// MockReader is a mock implementation of the Reader interface for use in
// tests. Canned fields supply fixed results; Fn fields, when set, override
// them — TODO(review): confirm precedence against the implementation.
type MockReader struct {
	T             []string    // canned Tenants result
	B             []uuid.UUID // blocks
	BlockFn       func(ctx context.Context, tenantID string) ([]uuid.UUID, error)
	M             *BlockMeta // meta
	BlockMetaFn   func(ctx context.Context, blockID uuid.UUID, tenantID string) (*BlockMeta, error)
	TenantIndexFn func(ctx context.Context, tenantID string) (*TenantIndex, error)
	R             []byte // read
	Range         []byte // ReadRange
	ReadFn        func(name string, blockID uuid.UUID, tenantID string) ([]byte, error)
}

MockReader is a mock implementation of the Reader interface for use in tests.

func (*MockReader) BlockMeta added in v1.1.0

func (m *MockReader) BlockMeta(ctx context.Context, blockID uuid.UUID, tenantID string) (*BlockMeta, error)

func (*MockReader) Blocks added in v1.1.0

func (m *MockReader) Blocks(ctx context.Context, tenantID string) ([]uuid.UUID, error)

func (*MockReader) Read added in v1.1.0

func (m *MockReader) Read(ctx context.Context, name string, blockID uuid.UUID, tenantID string, shouldCache bool) ([]byte, error)

func (*MockReader) ReadRange added in v1.1.0

func (m *MockReader) ReadRange(ctx context.Context, name string, blockID uuid.UUID, tenantID string, offset uint64, buffer []byte, _ bool) error

func (*MockReader) Shutdown added in v1.1.0

func (m *MockReader) Shutdown()

func (*MockReader) StreamReader added in v1.1.0

func (m *MockReader) StreamReader(ctx context.Context, name string, blockID uuid.UUID, tenantID string) (io.ReadCloser, int64, error)

func (*MockReader) TenantIndex added in v1.1.0

func (m *MockReader) TenantIndex(ctx context.Context, tenantID string) (*TenantIndex, error)

func (*MockReader) Tenants added in v1.1.0

func (m *MockReader) Tenants(ctx context.Context) ([]string, error)

type MockWriter added in v1.1.0

// MockWriter is a mock implementation of the Writer interface for use in
// tests. It records written metas in memory keyed by tenant ID.
type MockWriter struct {
	// IndexMeta collects metas passed to WriteTenantIndex, per tenant — TODO(review): confirm against implementation.
	IndexMeta          map[string][]*BlockMeta
	// IndexCompactedMeta collects compacted metas passed to WriteTenantIndex, per tenant — TODO(review): confirm.
	IndexCompactedMeta map[string][]*CompactedBlockMeta
}

MockWriter is a mock implementation of the Writer interface for use in tests.

func (*MockWriter) Append added in v1.1.0

func (m *MockWriter) Append(ctx context.Context, name string, blockID uuid.UUID, tenantID string, tracker AppendTracker, buffer []byte) (AppendTracker, error)

func (*MockWriter) CloseAppend added in v1.1.0

func (m *MockWriter) CloseAppend(ctx context.Context, tracker AppendTracker) error

func (*MockWriter) StreamWriter added in v1.1.0

func (m *MockWriter) StreamWriter(ctx context.Context, name string, blockID uuid.UUID, tenantID string, data io.Reader, size int64) error

func (*MockWriter) Write added in v1.1.0

func (m *MockWriter) Write(ctx context.Context, name string, blockID uuid.UUID, tenantID string, buffer []byte, shouldCache bool) error

func (*MockWriter) WriteBlockMeta added in v1.1.0

func (m *MockWriter) WriteBlockMeta(ctx context.Context, meta *BlockMeta) error

func (*MockWriter) WriteTenantIndex added in v1.1.0

func (m *MockWriter) WriteTenantIndex(ctx context.Context, tenantID string, meta []*BlockMeta, compactedMeta []*CompactedBlockMeta) error

type RawReader added in v1.1.0

// RawReader is a collection of methods to read data from tempodb backends
// addressed by raw KeyPaths rather than block/tenant IDs.
type RawReader interface {
	// List returns all objects one level beneath the provided keypath
	List(ctx context.Context, keypath KeyPath) ([]string, error)
	// Read is for streaming entire objects from the backend.  There will be an attempt to retrieve this from cache if shouldCache is true.
	Read(ctx context.Context, name string, keyPath KeyPath, shouldCache bool) (io.ReadCloser, int64, error)
	// ReadRange is for reading parts of large objects from the backend.
	// There will be an attempt to retrieve this from cache if shouldCache is true. Cache key will be tenantID:blockID:offset:bufferLength
	ReadRange(ctx context.Context, name string, keypath KeyPath, offset uint64, buffer []byte, shouldCache bool) error
	// Shutdown must be called when the Reader is finished and cleans up any associated resources.
	Shutdown()
}

RawReader is a collection of methods to read data from tempodb backends

type RawWriter added in v1.1.0

// RawWriter is a collection of methods to write data to tempodb backends
// addressed by raw KeyPaths rather than block/tenant IDs.
type RawWriter interface {
	// Write is for in memory data. shouldCache specifies whether or not caching should be attempted.
	Write(ctx context.Context, name string, keypath KeyPath, data io.Reader, size int64, shouldCache bool) error
	// Append starts or continues an Append job. Pass nil to AppendTracker to start a job.
	Append(ctx context.Context, name string, keypath KeyPath, tracker AppendTracker, buffer []byte) (AppendTracker, error)
	// CloseAppend closes any resources associated with the AppendTracker.
	CloseAppend(ctx context.Context, tracker AppendTracker) error
}

RawWriter is a collection of methods to write data to tempodb backends

type Reader

// Reader is a collection of methods to read data from tempodb backends,
// addressed by block and tenant ID.
type Reader interface {
	// Read is for reading entire objects from the backend.  There will be an attempt to retrieve this from cache if shouldCache is true.
	Read(ctx context.Context, name string, blockID uuid.UUID, tenantID string, shouldCache bool) ([]byte, error)
	// StreamReader is for streaming entire objects from the backend.  It is expected this will _not_ be cached.
	StreamReader(ctx context.Context, name string, blockID uuid.UUID, tenantID string) (io.ReadCloser, int64, error)
	// ReadRange is for reading parts of large objects from the backend.
	// There will be an attempt to retrieve this from cache if shouldCache is true. Cache key will be tenantID:blockID:offset:bufferLength
	ReadRange(ctx context.Context, name string, blockID uuid.UUID, tenantID string, offset uint64, buffer []byte, shouldCache bool) error
	// Tenants returns a list of all tenants in a backend
	Tenants(ctx context.Context) ([]string, error)
	// Blocks returns a list of block UUIDs given a tenant
	Blocks(ctx context.Context, tenantID string) ([]uuid.UUID, error)
	// BlockMeta returns the blockmeta given a block and tenant id
	BlockMeta(ctx context.Context, blockID uuid.UUID, tenantID string) (*BlockMeta, error)
	// TenantIndex returns lists of all metas given a tenant
	TenantIndex(ctx context.Context, tenantID string) (*TenantIndex, error)
	// Shutdown must be called when the Reader is finished and cleans up any associated resources.
	Shutdown()
}

Reader is a collection of methods to read data from tempodb backends

func NewReader added in v1.1.0

func NewReader(r RawReader) Reader

NewReader returns an object that implements Reader and bridges to a RawReader

type TenantIndex added in v1.1.0

// TenantIndex holds a list of all metas and compacted metas for a given
// tenant. It is stored in the backend per tenant as a gzipped JSON file
// (see TenantIndexName).
type TenantIndex struct {
	// CreatedAt records when this index was built.
	CreatedAt     time.Time             `json:"created_at"`
	// Meta lists the metas of all live (non-compacted) blocks.
	Meta          []*BlockMeta          `json:"meta"`
	// CompactedMeta lists the metas of all compacted blocks.
	CompactedMeta []*CompactedBlockMeta `json:"compacted"`
}

TenantIndex holds a list of all metas and compacted metas for a given tenant. It is stored in the backend per tenant as a gzipped JSON file (see TenantIndexName, "index.json.gz").

type Writer

// Writer is a collection of methods to write data to tempodb backends,
// addressed by block and tenant ID.
type Writer interface {
	// Write is for in memory data. shouldCache specifies whether or not caching should be attempted.
	Write(ctx context.Context, name string, blockID uuid.UUID, tenantID string, buffer []byte, shouldCache bool) error
	// StreamWriter is for larger data payloads streamed through an io.Reader.  It is expected this will _not_ be cached.
	StreamWriter(ctx context.Context, name string, blockID uuid.UUID, tenantID string, data io.Reader, size int64) error
	// WriteBlockMeta writes a block meta to its blocks
	WriteBlockMeta(ctx context.Context, meta *BlockMeta) error
	// Append starts or continues an Append job. Pass nil to AppendTracker to start a job.
	Append(ctx context.Context, name string, blockID uuid.UUID, tenantID string, tracker AppendTracker, buffer []byte) (AppendTracker, error)
	// CloseAppend closes any resources associated with the AppendTracker.
	CloseAppend(ctx context.Context, tracker AppendTracker) error
	// WriteTenantIndex writes the two meta slices as a tenant index
	WriteTenantIndex(ctx context.Context, tenantID string, meta []*BlockMeta, compactedMeta []*CompactedBlockMeta) error
}

Writer is a collection of methods to write data to tempodb backends

func NewWriter added in v1.1.0

func NewWriter(w RawWriter) Writer

NewWriter returns an object that implements Writer and bridges to a RawWriter

Directories

Path Synopsis

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL