backend

package
v0.0.0-...-bfa2730 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Apr 18, 2024 License: AGPL-3.0 Imports: 11 Imported by: 0

Documentation

Index

Constants

View Source
const (
	// MetaName is the object name of a block's meta file.
	MetaName          = "meta.json"
	// CompactedMetaName is the object name of a compacted block's meta file.
	CompactedMetaName = "meta.compacted.json"
	// TenantIndexName is the object name of the gzipped per-tenant block index.
	TenantIndexName   = "index.json.gz"
	// ClusterSeedFileName File name for the cluster seed file.
	ClusterSeedFileName = "deep_cluster_seed.json"
)

Variables

View Source
// Sentinel errors for backend lookups; compare with errors.Is.
var (
	// ErrDoesNotExist indicates the requested object does not exist in the backend.
	ErrDoesNotExist  = fmt.Errorf("does not exist")
	// ErrEmptyTenantID indicates an empty tenant id was supplied.
	ErrEmptyTenantID = fmt.Errorf("empty tenant id")
	// ErrEmptyBlockID indicates an empty block id was supplied.
	ErrEmptyBlockID  = fmt.Errorf("empty block id")
	// ErrBadSeedFile indicates the cluster seed file could not be used.
	ErrBadSeedFile   = fmt.Errorf("bad seed file")
)

SupportedEncoding is a slice of all supported encodings.

Functions

func CompactedMetaFileName

func CompactedMetaFileName(blockID uuid.UUID, tenantID string) string

CompactedMetaFileName returns the object name for the compacted block meta given a block id and tenantID

func MetaFileName

func MetaFileName(blockID uuid.UUID, tenantID string) string

MetaFileName returns the object name for the block meta given a block id and tenantID

func ObjectFileName

func ObjectFileName(keypath KeyPath, name string) string

ObjectFileName returns a unique identifier for an object in object storage given its name and keypath

func RootPath

func RootPath(blockID uuid.UUID, tenantID string) string

RootPath returns the root path for a block given a block id and tenantID

func SupportedEncodingString

func SupportedEncodingString() string

SupportedEncodingString returns the list of supported Encoding.

Types

type AppendTracker

type AppendTracker interface{}

AppendTracker is an empty interface usable by the backend to track a long-running append operation

type BlockMeta

// BlockMeta describes a single deepdb block: its identity (BlockID, TenantID),
// object id and time ranges, size, compaction level, encoding, and index
// statistics. It is serialized as JSON to the block's meta file.
type BlockMeta struct {
	Version         string    `json:"format"`          // Version indicates the block format version. This includes specifics of how the indexes and data is stored
	BlockID         uuid.UUID `json:"blockID"`         // Unique block id
	MinID           []byte    `json:"minID"`           // Minimum object id stored in this block
	MaxID           []byte    `json:"maxID"`           // Maximum object id stored in this block
	TenantID        string    `json:"tenantID"`        // ID of tenant to which this block belongs
	StartTime       time.Time `json:"startTime"`       // Roughly matches when the first obj was written to this block. Used to determine block age for different purposes (caching, etc)
	EndTime         time.Time `json:"endTime"`         // Currently mostly meaningless but roughly matches to the time the last obj was written to this block
	TotalObjects    int       `json:"totalObjects"`    // Total objects in this block
	Size            uint64    `json:"size"`            // Total size in bytes of the data object
	CompactionLevel uint8     `json:"compactionLevel"` // Kind of the number of times this block has been compacted
	Encoding        Encoding  `json:"encoding"`        // Encoding/compression format
	IndexPageSize   uint32    `json:"indexPageSize"`   // Size of each index page in bytes
	TotalRecords    uint32    `json:"totalRecords"`    // Total Records stored in the index file
	DataEncoding    string    `json:"dataEncoding"`    // DataEncoding is a string provided externally, but tracked by deepdb that indicates the way the bytes are encoded
	BloomShardCount uint16    `json:"bloomShards"`     // Number of bloom filter shards
	FooterSize      uint32    `json:"footerSize"`      // Size of data file footer (parquet)
}

func NewBlockMeta

func NewBlockMeta(tenantID string, blockID uuid.UUID, version string, encoding Encoding, dataEncoding string) *BlockMeta

func (*BlockMeta) ObjectAdded

func (b *BlockMeta) ObjectAdded(id []byte, start uint32)

ObjectAdded updates the block meta appropriately based on information about an added record. start/end are unix epoch seconds.

type CompactedBlockMeta

// CompactedBlockMeta is the meta for a block that has been compacted. It
// embeds the original BlockMeta and additionally records when the block was
// marked compacted.
type CompactedBlockMeta struct {
	BlockMeta

	// CompactedTime is the time the block was marked as compacted.
	CompactedTime time.Time `json:"compactedTime"`
}

type Compactor

// Compactor is a collection of methods to interact with compacted elements
// of a deepdb block.
type Compactor interface {
	// MarkBlockCompacted marks a block as compacted. Call this after a block has been successfully compacted to a new block
	MarkBlockCompacted(blockID uuid.UUID, tenantID string) error
	// ClearBlock removes a block from the backend
	ClearBlock(blockID uuid.UUID, tenantID string) error
	// CompactedBlockMeta returns the compacted block meta given a block and tenant id
	CompactedBlockMeta(blockID uuid.UUID, tenantID string) (*CompactedBlockMeta, error)
}

Compactor is a collection of methods to interact with compacted elements of a deepdb block

type Encoding

// Encoding is the identifier for a chunk encoding (compression algorithm).
type Encoding byte

Encoding is the identifier for a chunk encoding.

// The different available encodings. Make sure to preserve the order, as
// these numeric values are written to the chunks!
const (
	EncNone Encoding = iota // no compression
	EncGZIP                 // gzip
	EncLZ4_64k              // LZ4 variants; block sizes per name — confirm against implementation
	EncLZ4_256k
	EncLZ4_1M
	EncLZ4_4M
	EncSnappy // snappy
	EncZstd   // zstandard
	EncS2     // s2
)

The different available encodings. Make sure to preserve the order, as these numeric values are written to the chunks!

func ParseEncoding

func ParseEncoding(enc string) (Encoding, error)

ParseEncoding parses a chunk encoding (compression algorithm) by its name.

func (Encoding) MarshalJSON

func (e Encoding) MarshalJSON() ([]byte, error)

MarshalJSON implements the marshaler interface of the json pkg.

func (Encoding) MarshalYAML

func (e Encoding) MarshalYAML() (interface{}, error)

MarshalYAML implements the Marshaler interface of the yaml pkg

func (Encoding) String

func (e Encoding) String() string

func (*Encoding) UnmarshalJSON

func (e *Encoding) UnmarshalJSON(b []byte) error

UnmarshalJSON implements the Unmarshaler interface of the json pkg.

func (*Encoding) UnmarshalYAML

func (e *Encoding) UnmarshalYAML(unmarshal func(interface{}) error) error

UnmarshalYAML implements the Unmarshaler interface of the yaml pkg.

type KeyPath

// KeyPath is an ordered set of strings that govern where data is
// read/written from the backend.
type KeyPath []string

KeyPath is an ordered set of strings that govern where data is read/written from the backend

func KeyPathForBlock

func KeyPathForBlock(blockID uuid.UUID, tenantID string) KeyPath

KeyPathForBlock returns a correctly ordered keypath given a block id and tenantID

type MockCompactor

// MockCompactor is an in-memory test double for the Compactor interface.
type MockCompactor struct {
	// BlockMetaFn, when set, presumably backs CompactedBlockMeta — confirm against implementation.
	BlockMetaFn func(blockID uuid.UUID, tenantID string) (*CompactedBlockMeta, error)
}

MockCompactor is a mock implementation of the Compactor interface for use in tests.

func (*MockCompactor) ClearBlock

func (c *MockCompactor) ClearBlock(blockID uuid.UUID, tenantID string) error

func (*MockCompactor) CompactedBlockMeta

func (c *MockCompactor) CompactedBlockMeta(blockID uuid.UUID, tenantID string) (*CompactedBlockMeta, error)

func (*MockCompactor) MarkBlockCompacted

func (c *MockCompactor) MarkBlockCompacted(blockID uuid.UUID, tenantID string) error

type MockRawReader

// MockRawReader is an in-memory test double for the RawReader interface.
// The canned-value fields (L, R, Range) and the Fn hooks presumably back the
// corresponding interface methods — confirm against the implementation.
type MockRawReader struct {
	L      []string // canned result for List
	ListFn func(ctx context.Context, keypath KeyPath) ([]string, error)
	R      []byte // read
	Range  []byte // ReadRange
	ReadFn func(ctx context.Context, name string, keypath KeyPath, shouldCache bool) (io.ReadCloser, int64, error)
}

MockRawReader is a mock implementation of the RawReader interface for use in tests.

func (*MockRawReader) List

func (m *MockRawReader) List(ctx context.Context, keypath KeyPath) ([]string, error)

func (*MockRawReader) Read

func (m *MockRawReader) Read(ctx context.Context, name string, keypath KeyPath, shouldCache bool) (io.ReadCloser, int64, error)

func (*MockRawReader) ReadRange

func (m *MockRawReader) ReadRange(ctx context.Context, name string, keypath KeyPath, offset uint64, buffer []byte, _ bool) error

func (*MockRawReader) Shutdown

func (m *MockRawReader) Shutdown()

type MockRawWriter

// MockRawWriter is an in-memory test double for the RawWriter interface.
type MockRawWriter struct {
	// contains filtered or unexported fields
}

MockRawWriter is a mock implementation of the RawWriter interface for use in tests.

func (*MockRawWriter) Append

func (m *MockRawWriter) Append(ctx context.Context, name string, keypath KeyPath, tracker AppendTracker, buffer []byte) (AppendTracker, error)

func (*MockRawWriter) CloseAppend

func (m *MockRawWriter) CloseAppend(ctx context.Context, tracker AppendTracker) error

func (*MockRawWriter) Write

func (m *MockRawWriter) Write(ctx context.Context, name string, keypath KeyPath, data io.Reader, size int64, shouldCache bool) error

type MockReader

// MockReader is an in-memory test double for the Reader interface. The
// canned-value fields (T, B, M, R, Range) and the Fn hooks presumably back
// the corresponding interface methods — confirm against the implementation.
type MockReader struct {
	T             []string    // canned result for Tenants
	B             []uuid.UUID // blocks
	BlockFn       func(ctx context.Context, tenantID string) ([]uuid.UUID, error)
	M             *BlockMeta // meta
	BlockMetaFn   func(ctx context.Context, blockID uuid.UUID, tenantID string) (*BlockMeta, error)
	TenantIndexFn func(ctx context.Context, tenantID string) (*TenantIndex, error)
	R             []byte // read
	Range         []byte // ReadRange
	ReadFn        func(name string, blockID uuid.UUID, tenantID string) ([]byte, error)
}

MockReader is a mock implementation of the Reader interface for use in tests.

func (*MockReader) BlockMeta

func (m *MockReader) BlockMeta(ctx context.Context, blockID uuid.UUID, tenantID string) (*BlockMeta, error)

func (*MockReader) Blocks

func (m *MockReader) Blocks(ctx context.Context, tenantID string) ([]uuid.UUID, error)

func (*MockReader) Read

func (m *MockReader) Read(ctx context.Context, name string, blockID uuid.UUID, tenantID string, shouldCache bool) ([]byte, error)

func (*MockReader) ReadRange

func (m *MockReader) ReadRange(ctx context.Context, name string, blockID uuid.UUID, tenantID string, offset uint64, buffer []byte, _ bool) error

func (*MockReader) ReadTracepointBlock

func (m *MockReader) ReadTracepointBlock(ctx context.Context, name string) (io.ReadCloser, int64, error)

func (*MockReader) Shutdown

func (m *MockReader) Shutdown()

func (*MockReader) StreamReader

func (m *MockReader) StreamReader(ctx context.Context, name string, blockID uuid.UUID, tenantID string) (io.ReadCloser, int64, error)

func (*MockReader) TenantIndex

func (m *MockReader) TenantIndex(ctx context.Context, tenantID string) (*TenantIndex, error)

func (*MockReader) Tenants

func (m *MockReader) Tenants(ctx context.Context) ([]string, error)

type MockWriter

// MockWriter is an in-memory test double for the Writer interface. It records
// written metas per tenant — presumably populated by WriteTenantIndex /
// WriteBlockMeta; confirm against the implementation.
type MockWriter struct {
	IndexMeta          map[string][]*BlockMeta          // per-tenant block metas
	IndexCompactedMeta map[string][]*CompactedBlockMeta // per-tenant compacted block metas
}

MockWriter is a mock implementation of the Writer interface for use in tests.

func (*MockWriter) Append

func (m *MockWriter) Append(ctx context.Context, name string, blockID uuid.UUID, tenantID string, tracker AppendTracker, buffer []byte) (AppendTracker, error)

func (*MockWriter) CloseAppend

func (m *MockWriter) CloseAppend(ctx context.Context, tracker AppendTracker) error

func (*MockWriter) StreamWriter

func (m *MockWriter) StreamWriter(ctx context.Context, name string, blockID uuid.UUID, tenantID string, data io.Reader, size int64) error

func (*MockWriter) Write

func (m *MockWriter) Write(ctx context.Context, name string, blockID uuid.UUID, tenantID string, buffer []byte, shouldCache bool) error

func (*MockWriter) WriteBlockMeta

func (m *MockWriter) WriteBlockMeta(ctx context.Context, meta *BlockMeta) error

func (*MockWriter) WriteTenantIndex

func (m *MockWriter) WriteTenantIndex(ctx context.Context, tenantID string, meta []*BlockMeta, compactedMeta []*CompactedBlockMeta) error

func (*MockWriter) WriteTracepointBlock

func (m *MockWriter) WriteTracepointBlock(ctx context.Context, name string, data *bytes.Reader, size int64) error

type RawReader

// RawReader is a collection of methods to read data from deepdb backends.
type RawReader interface {
	// List returns all objects one level beneath the provided keypath
	List(ctx context.Context, keypath KeyPath) ([]string, error)
	// Read is for streaming entire objects from the backend.  There will be an attempt to retrieve this from cache if shouldCache is true.
	Read(ctx context.Context, name string, keyPath KeyPath, shouldCache bool) (io.ReadCloser, int64, error)
	// ReadRange is for reading parts of large objects from the backend.
	// There will be an attempt to retrieve this from cache if shouldCache is true. Cache key will be tenantID:blockID:offset:bufferLength
	ReadRange(ctx context.Context, name string, keypath KeyPath, offset uint64, buffer []byte, shouldCache bool) error
	// Shutdown must be called when the Reader is finished and cleans up any associated resources.
	Shutdown()
}

RawReader is a collection of methods to read data from deepdb backends

type RawWriter

// RawWriter is a collection of methods to write data to deepdb backends.
type RawWriter interface {
	// Write is for in memory data. shouldCache specifies whether caching should be attempted.
	Write(ctx context.Context, name string, keypath KeyPath, data io.Reader, size int64, shouldCache bool) error
	// Append starts or continues an Append job. Pass nil to AppendTracker to start a job.
	Append(ctx context.Context, name string, keypath KeyPath, tracker AppendTracker, buffer []byte) (AppendTracker, error)
	// CloseAppend closes any resources associated with the AppendTracker.
	CloseAppend(ctx context.Context, tracker AppendTracker) error
}

RawWriter is a collection of methods to write data to deepdb backends

type Reader

// Reader is a collection of methods to read data from deepdb backends.
type Reader interface {
	// Read is for reading entire objects from the backend. There will be an attempt to retrieve this
	// from cache if shouldCache is true.
	Read(ctx context.Context, name string, blockID uuid.UUID, tenantID string, shouldCache bool) ([]byte, error)
	// StreamReader is for streaming entire objects from the backend.  It is expected this will _not_ be cached.
	StreamReader(ctx context.Context, name string, blockID uuid.UUID, tenantID string) (io.ReadCloser, int64, error)
	// ReadRange is for reading parts of large objects from the backend.
	// There will be an attempt to retrieve this from cache if shouldCache is true. Cache key will be tenantID:blockID:offset:bufferLength
	ReadRange(ctx context.Context, name string, blockID uuid.UUID, tenantID string, offset uint64, buffer []byte, shouldCache bool) error
	// Tenants returns a list of all tenants in a backend
	Tenants(ctx context.Context) ([]string, error)
	// Blocks returns a list of block UUIDs given a tenant
	Blocks(ctx context.Context, tenantID string) ([]uuid.UUID, error)
	// BlockMeta returns the block meta given a block and tenant id
	BlockMeta(ctx context.Context, blockID uuid.UUID, tenantID string) (*BlockMeta, error)
	// TenantIndex returns lists of all metas given a tenant
	TenantIndex(ctx context.Context, tenantID string) (*TenantIndex, error)
	// Shutdown must be called when the Reader is finished and cleans up any associated resources.
	Shutdown()
	// ReadTracepointBlock reads the tracepoint block for the given tenantID
	ReadTracepointBlock(ctx context.Context, tenantID string) (io.ReadCloser, int64, error)
}

Reader is a collection of methods to read data from deepdb backends

func NewReader

func NewReader(r RawReader) Reader

NewReader returns an object that implements Reader and bridges to a RawReader

type TenantIndex

// TenantIndex holds a list of all metas and compacted metas for a given
// tenant. It is stored as a gzipped JSON file under the tenant's path
// (see TenantIndexName, "index.json.gz").
type TenantIndex struct {
	CreatedAt     time.Time             `json:"created_at"` // when this index snapshot was created
	Meta          []*BlockMeta          `json:"meta"`       // metas of live (non-compacted) blocks
	CompactedMeta []*CompactedBlockMeta `json:"compacted"`  // metas of compacted blocks
}

TenantIndex holds a list of all metas and compacted metas for a given tenant. It is stored in /&lt;tenantID&gt;/index.json.gz (see TenantIndexName) as a gzipped JSON file.

type Writer

// Writer is a collection of methods to write data to deepdb backends.
type Writer interface {
	// Write is for in memory data. shouldCache specifies whether caching should be attempted.
	Write(ctx context.Context, name string, blockID uuid.UUID, tenantID string, buffer []byte, shouldCache bool) error
	// StreamWriter is for larger data payloads streamed through an io.Reader.  It is expected this will _not_ be cached.
	StreamWriter(ctx context.Context, name string, blockID uuid.UUID, tenantID string, data io.Reader, size int64) error
	// WriteBlockMeta writes a block meta to its blocks
	WriteBlockMeta(ctx context.Context, meta *BlockMeta) error
	// Append starts or continues an Append job. Pass nil to AppendTracker to start a job.
	Append(ctx context.Context, name string, blockID uuid.UUID, tenantID string, tracker AppendTracker, buffer []byte) (AppendTracker, error)
	// CloseAppend closes any resources associated with the AppendTracker
	CloseAppend(ctx context.Context, tracker AppendTracker) error
	// WriteTenantIndex writes the two meta slices as a tenant index
	WriteTenantIndex(ctx context.Context, tenantID string, meta []*BlockMeta, compactedMeta []*CompactedBlockMeta) error
	// WriteTracepointBlock writes the tracepoint block for the given tenantID
	WriteTracepointBlock(ctx context.Context, tenantID string, data *bytes.Reader, size int64) error
}

Writer is a collection of methods to write data to deepdb backends

func NewWriter

func NewWriter(w RawWriter) Writer

NewWriter returns an object that implements Writer and bridges to a RawWriter

Directories

Path Synopsis

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL