model

package
v0.0.0-...-6ee8545

Published: Feb 7, 2023 · License: Apache-2.0

Documentation

Index

Constants

const (
	MetricNameLabelName = "__name__"
	SchemaNameLabelName = "__schema__"
	ColumnNameLabelName = "__column__"
)
const EmptyExemplarValues = ""
const (
	InvalidSeriesEpoch = -1
)

Variables

var (
	MinTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC()
	MaxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC()
)
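
MinTime and MaxTime bound the timestamps the package will accept. A minimal sketch, not from the source, of clamping an incoming sample timestamp to that range; the package is assumed to be imported as model, and sampleTs is a hypothetical Prometheus timestamp in milliseconds:

t := time.UnixMilli(sampleTs).UTC()
if t.Before(model.MinTime) {
	t = model.MinTime
} else if t.After(model.MaxTime) {
	t = model.MaxTime
}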

Functions

func RegisterCustomPgTypes

func RegisterCustomPgTypes(ctx context.Context, conn *pgx.Conn) error

RegisterCustomPgTypes registers the custom types specified in the `oidSql` query into the connection's `pgtype.Map`. The types are cached to avoid querying the database every time a connection is created.
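
A hedged sketch of wiring this into connection setup with pgx v5, assuming the package is imported as model; the connection string is a placeholder:

cfg, err := pgxpool.ParseConfig("postgres://localhost:5432/db")
if err != nil {
	log.Fatal(err)
}
// Register the custom types on every new connection in the pool.
cfg.AfterConnect = func(ctx context.Context, conn *pgx.Conn) error {
	return model.RegisterCustomPgTypes(ctx, conn)
}
pool, err := pgxpool.NewWithConfig(context.Background(), cfg)
if err != nil {
	log.Fatal(err)
}
defer pool.Close()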

func TimestamptzToMs

func TimestamptzToMs(t pgtype.Timestamptz) int64
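
An illustrative conversion of a scanned pgtype.Timestamptz to Unix milliseconds, the unit used throughout this package; the result shown assumes a plain millisecond conversion:

ts := pgtype.Timestamptz{Time: time.Unix(1675771200, 0).UTC(), Valid: true}
ms := model.TimestamptzToMs(ts) // 1675771200000, assuming a plain ms conversion
_ = ms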

func UnRegisterCustomPgTypes

func UnRegisterCustomPgTypes(cfg pgconn.Config)

UnRegisterCustomPgTypes deletes the cached types for the given connection. This is useful for post-test cleanup.

Types

type ArrayOfLabelArray

type ArrayOfLabelArray = pgtype.FlatArray[LabelArray]

func SliceToArrayOfLabelArray

func SliceToArrayOfLabelArray(src [][]int32) ArrayOfLabelArray

type Batch

type Batch struct {
	// contains filtered or unexported fields
}

Batch is an iterator over a collection of Insertables that returns data in the format expected for the data table row.

func NewBatch

func NewBatch() Batch

NewBatch returns a new batch that can hold samples and exemplars.
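
A hedged sketch of a Batch round trip; insertables is a hypothetical []Insertable prepared elsewhere, and the package is assumed to be imported as model. Batch implements sort.Interface (Len, Less, Swap), so it can be sorted before flushing:

b := model.NewBatch()
b.AppendSlice(insertables)
sort.Sort(&b) // order the rows before writing them out
numSamples, numExemplars := b.Count()
fmt.Println(numSamples, numExemplars, b.CountSeries())
b.Reset() // clear and reuse the batch for the next round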

func (*Batch) Absorb

func (t *Batch) Absorb(other Batch)

func (*Batch) AppendSlice

func (t *Batch) AppendSlice(s []Insertable)

func (*Batch) Count

func (t *Batch) Count() (numSamples, numExemplars int)

func (*Batch) CountSeries

func (t *Batch) CountSeries() int

func (*Batch) Data

func (t *Batch) Data() []Insertable

func (*Batch) Len

func (t *Batch) Len() int

func (*Batch) Less

func (t *Batch) Less(i, j int) bool

func (*Batch) Reset

func (t *Batch) Reset()

func (*Batch) Swap

func (t *Batch) Swap(i, j int)

func (*Batch) Visitor

func (t *Batch) Visitor() *batchVisitor

type Copy

type Copy struct {
	Table pgx.Identifier
	Data  [][]interface{}
}

type Data

type Data struct {
	Rows         map[string][]Insertable
	ReceivedTime time.Time
}

Data wraps incoming rows with the time they were received. It is used to warn, based on time, if the rate of incoming samples versus outgoing samples is too low.

type Dispatcher

type Dispatcher interface {
	InsertTs(ctx context.Context, rows Data) (uint64, error)
	InsertMetadata(context.Context, []Metadata) (uint64, error)
	CompleteMetricCreation(context.Context) error
	Close()
}

Dispatcher is responsible for inserting labels, series, and data into the storage.
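
A minimal sketch of handing rows to a Dispatcher, assuming disp is some Dispatcher implementation, insertables is a hypothetical []Insertable, and Data.Rows is keyed by metric name:

rows := model.Data{
	Rows:         map[string][]model.Insertable{"http_requests_total": insertables},
	ReceivedTime: time.Now(),
}
n, err := disp.InsertTs(context.Background(), rows)
if err != nil {
	log.Fatal(err)
}
log.Printf("inserted %d rows", n)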

type ExemplarData

type ExemplarData struct {
	Labels labels.Labels `json:"labels"`
	Value  float64       `json:"value"`
	Ts     int64         `json:"timestamp"` // This is int64 in Prometheus, but we do this to avoid later conversions to decimal.
}

ExemplarData is additional information associated with a time series.

type ExemplarQueryResult

type ExemplarQueryResult struct {
	SeriesLabels labels.Labels  `json:"seriesLabels"`
	Exemplars    []ExemplarData `json:"exemplars"`
}

type ExemplarsIterator

type ExemplarsIterator interface {
	Iterator
	// Value returns the current exemplar's label array, timestamp, and value.
	Value() (labels []prompb.Label, timestamp int64, value float64)
}

ExemplarsIterator iterates over exemplars.

type Insertable

type Insertable interface {
	// Series returns a reference to the series that the insertable belongs to.
	Series() *Series
	// Count returns the number of data points in the current insertable.
	Count() int
	// MaxTs returns the max timestamp among the datapoints in the insertable.
	// In most cases, this will be the timestamp from the last sample, since
	// Prometheus dispatches data in sorted order of time.
	MaxTs() int64
	// Iterator returns an iterator that iterates over underlying datapoints.
	Iterator() Iterator
	// Type returns type of underlying insertable.
	Type() InsertableType
	// IsOfType returns true if the provided type matches with the underlying insertable datatype.
	IsOfType(InsertableType) bool
}

func NewPromSamples

func NewPromSamples(series *Series, sampleSet []prompb.Sample) Insertable
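
A hedged sketch of wrapping prompb samples and walking the datapoints, where series is a *Series built with NewSeries; the assertion to SamplesIterator (see below) assumes the iterator returned for samples implements it:

ins := model.NewPromSamples(series, []prompb.Sample{
	{Timestamp: 1675771200000, Value: 0.42},
})
if it, ok := ins.Iterator().(model.SamplesIterator); ok {
	for it.HasNext() {
		ts, v := it.Value()
		fmt.Println(ts, v)
	}
}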

type InsertableExemplar

type InsertableExemplar interface {
	Insertable
	AllExemplarLabelKeys() []string
	OrderExemplarLabels(index map[string]int) (positionExists bool)
}

func NewPromExemplars

func NewPromExemplars(series *Series, exemplarSet []prompb.Exemplar) InsertableExemplar

type InsertableType

type InsertableType uint8
const (
	Sample InsertableType = iota
	Exemplar
)

type Iterator

type Iterator interface {
	// HasNext returns true if there is any datapoint that is yet to be read.
	HasNext() bool
}

Iterator iterates over datapoints.

type LabelArray

type LabelArray = pgtype.FlatArray[pgtype.Int4]

type LabelList

type LabelList struct {
	// contains filtered or unexported fields
}

func NewLabelList

func NewLabelList(size int) *LabelList

func (*LabelList) Add

func (ls *LabelList) Add(name string, value string) error
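
A small illustrative build of a sorted LabelList; the label names are placeholders:

ls := model.NewLabelList(2)
if err := ls.Add("__name__", "http_requests_total"); err != nil {
	log.Fatal(err)
}
if err := ls.Add("job", "api"); err != nil {
	log.Fatal(err)
}
sort.Sort(ls) // *LabelList implements sort.Interface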

func (*LabelList) Get

Get returns the addresses of the names and values slices after updating the array dimensions.

func (*LabelList) Len

func (ls *LabelList) Len() int

func (LabelList) Less

func (ls LabelList) Less(i, j int) bool

func (*LabelList) Swap

func (ls *LabelList) Swap(i, j int)

type Metadata

type Metadata struct {
	MetricFamily string `json:"metric,omitempty"`
	Unit         string `json:"unit"`
	Type         string `json:"type"`
	Help         string `json:"help"`
}

type MetricInfo

type MetricInfo struct {
	MetricID                            int64
	TableSchema, TableName, SeriesTable string
}

MetricInfo contains all the database specific metric data.

func (MetricInfo) Len

func (v MetricInfo) Len() int

Len returns the memory size of MetricInfo in bytes.

type MockBatch

type MockBatch struct {
	// contains filtered or unexported fields
}

MockBatch mocks a batch of queries. Batch queries bundle multiple queries together to avoid unnecessary network round trips.

func (*MockBatch) Len

func (b *MockBatch) Len() int

func (*MockBatch) Queue

func (b *MockBatch) Queue(query string, arguments ...any) *pgx.QueuedQuery

type MockBatchResult

type MockBatchResult struct {
	// contains filtered or unexported fields
}

func (*MockBatchResult) Close

func (m *MockBatchResult) Close() error

Close closes the batch operation. This must be called before the underlying connection can be used again. Any error that occurred during a batch operation may have made it impossible to resynchronize the connection with the server. In this case the underlying connection will have been closed.

func (*MockBatchResult) Exec

func (m *MockBatchResult) Exec() (pgconn.CommandTag, error)

Exec reads the results from the next query in the batch as if the query has been sent with Conn.Exec.

func (*MockBatchResult) Query

func (m *MockBatchResult) Query() (pgx.Rows, error)

Query reads the results from the next query in the batch as if the query has been sent with Conn.Query.

func (*MockBatchResult) QueryRow

func (m *MockBatchResult) QueryRow() pgx.Row

QueryRow reads the results from the next query in the batch as if the query has been sent with Conn.QueryRow.

type MockInserter

type MockInserter struct {
	InsertedSeries  map[string]SeriesID
	InsertedData    []map[string][]Insertable
	InsertSeriesErr error
	InsertDataErr   error
}

func (*MockInserter) Close

func (m *MockInserter) Close()

func (*MockInserter) CompleteMetricCreation

func (m *MockInserter) CompleteMetricCreation(context.Context) error

func (*MockInserter) InsertMetadata

func (m *MockInserter) InsertMetadata(_ context.Context, metadata []Metadata) (uint64, error)

func (*MockInserter) InsertNewData

func (m *MockInserter) InsertNewData(data Data) (uint64, error)

func (*MockInserter) InsertTs

func (m *MockInserter) InsertTs(_ context.Context, data Data) (uint64, error)

type MockMetricCache

type MockMetricCache struct {
	MetricCache  map[string]MetricInfo
	GetMetricErr error
	SetMetricErr error
}

func (*MockMetricCache) Cap

func (m *MockMetricCache) Cap() int

func (*MockMetricCache) Evictions

func (m *MockMetricCache) Evictions() uint64

func (*MockMetricCache) Get

func (m *MockMetricCache) Get(schema, metric string, isExemplar bool) (MetricInfo, error)

func (*MockMetricCache) Len

func (m *MockMetricCache) Len() int

func (*MockMetricCache) Set

func (m *MockMetricCache) Set(schema, metric string, mInfo MetricInfo, isExemplar bool) error

type MockRows

type MockRows struct {
	// contains filtered or unexported fields
}

func (*MockRows) Close

func (m *MockRows) Close()

Close closes the rows, making the connection ready for use again. It is safe to call Close after rows is already closed.

func (*MockRows) CommandTag

func (m *MockRows) CommandTag() pgconn.CommandTag

CommandTag returns the command tag from this query. It is only available after Rows is closed.

func (*MockRows) Conn

func (m *MockRows) Conn() *pgx.Conn

Conn returns the underlying *Conn on which the query was executed. This may return nil if Rows did not come from a *Conn (e.g. if it was created by RowsFromResultReader).

func (*MockRows) Err

func (m *MockRows) Err() error

Err returns any error that occurred while reading.

func (*MockRows) FieldDescriptions

func (m *MockRows) FieldDescriptions() []pgconn.FieldDescription

func (*MockRows) Next

func (m *MockRows) Next() bool

Next prepares the next row for reading. It returns true if there is another row and false if no more rows are available. It automatically closes rows when all rows are read.

func (*MockRows) RawValues

func (m *MockRows) RawValues() [][]byte

RawValues returns the unparsed bytes of the row values. The returned [][]byte is only valid until the next Next call or the Rows is closed. However, the underlying byte data is safe to retain a reference to and mutate.

func (*MockRows) Scan

func (m *MockRows) Scan(dest ...interface{}) error

Scan reads the values from the current row into dest values positionally. dest can include pointers to core types, values implementing the Scanner interface, []byte, and nil. []byte will skip the decoding process and directly copy the raw bytes received from PostgreSQL. nil will skip the value entirely.

func (*MockRows) Values

func (m *MockRows) Values() ([]interface{}, error)

Values returns the decoded row values.

type MockTx

type MockTx struct {
	// contains filtered or unexported fields
}

func (*MockTx) Begin

func (t *MockTx) Begin(ctx context.Context) (pgx.Tx, error)

func (*MockTx) BeginFunc

func (t *MockTx) BeginFunc(ctx context.Context, f func(pgx.Tx) error) (err error)

func (*MockTx) Commit

func (t *MockTx) Commit(ctx context.Context) error

func (*MockTx) Conn

func (t *MockTx) Conn() *pgx.Conn

func (*MockTx) CopyFrom

func (t *MockTx) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error)

func (*MockTx) Exec

func (t *MockTx) Exec(ctx context.Context, sql string, arguments ...interface{}) (commandTag pgconn.CommandTag, err error)

func (*MockTx) LargeObjects

func (t *MockTx) LargeObjects() pgx.LargeObjects

func (*MockTx) Prepare

func (t *MockTx) Prepare(ctx context.Context, name, sql string) (*pgconn.StatementDescription, error)

func (*MockTx) Query

func (t *MockTx) Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error)

func (*MockTx) QueryRow

func (t *MockTx) QueryRow(ctx context.Context, sql string, args ...interface{}) pgx.Row

func (*MockTx) Rollback

func (t *MockTx) Rollback(ctx context.Context) error

func (*MockTx) SendBatch

func (t *MockTx) SendBatch(ctx context.Context, b *pgx.Batch) pgx.BatchResults

type PromExemplars

type PromExemplars struct {
	// contains filtered or unexported fields
}

func (*PromExemplars) AllExemplarLabelKeys

func (t *PromExemplars) AllExemplarLabelKeys() []string

func (*PromExemplars) Count

func (t *PromExemplars) Count() int

func (*PromExemplars) IsOfType

func (t *PromExemplars) IsOfType(typ InsertableType) bool

func (*PromExemplars) Iterator

func (t *PromExemplars) Iterator() Iterator

func (*PromExemplars) MaxTs

func (t *PromExemplars) MaxTs() int64

func (*PromExemplars) OrderExemplarLabels

func (t *PromExemplars) OrderExemplarLabels(index map[string]int) (positionExists bool)

OrderExemplarLabels orders the existing labels in each exemplar based on the index of the label key in the _prom_catalog.exemplar_label_key_position table. The index received holds the position of each label key and is used to re-order the labels slice in the exemplars.

During ingestion, we need only the label's value part, as the label's key part is already handled by the index received from the _prom_catalog.exemplar_label_key_position table.

OrderExemplarLabels returns positionExists as false if the index does not contain the position of some key. This happens when, for the same metric, two different series have exemplars with different label sets, which requires the cache to hold positions for the union of those label sets. When that happens, the caller must re-fetch the positions with the missing keys and update the underlying cache (which happens in the calling function).
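
A hedged sketch of that flow, where posIndex is the cached position map and fetchPositions is a hypothetical helper that queries _prom_catalog.exemplar_label_key_position and updates the cache:

if !exemplars.OrderExemplarLabels(posIndex) {
	// Cache miss: refresh positions for the union of label keys, then retry.
	posIndex = fetchPositions(ctx, exemplars.AllExemplarLabelKeys()) // hypothetical
	if !exemplars.OrderExemplarLabels(posIndex) {
		log.Println("exemplar label positions still missing")
	}
}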

func (*PromExemplars) Series

func (t *PromExemplars) Series() *Series

func (*PromExemplars) Type

func (t *PromExemplars) Type() InsertableType

type ReusableArray

type ReusableArray[T any] struct {
	pgtype.FlatArray[T]
}

ReusableArray wraps a pgtype.FlatArray to allow DecodeBinary to reuse the existing array, so that pooling is effective.

func (*ReusableArray[T]) SetDimensions

func (a *ReusableArray[T]) SetDimensions(dimensions []pgtype.ArrayDimension) error

type RowResults

type RowResults [][]interface{}

RowResults represents a collection of multi-column row results.

type SamplesIterator

type SamplesIterator interface {
	Iterator
	// Value returns the current sample's timestamp and value.
	Value() (timestamp int64, value float64)
}

SamplesIterator iterates over samples.

type Series

type Series struct {
	// contains filtered or unexported fields
}

Series stores a Prometheus labels.Labels in its canonical string representation.

func NewSeries

func NewSeries(key string, labelPairs []prompb.Label) *Series
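
An illustrative Series lifecycle: create it from its canonical key and label pairs, then attach the database identifiers once known. The key format and ID values shown are placeholders:

s := model.NewSeries(`{__name__="up",job="api"}`, []prompb.Label{
	{Name: "__name__", Value: "up"},
	{Name: "job", Value: "api"},
})
s.SetSeriesID(model.SeriesID(42), model.SeriesEpoch(1))
if s.IsSeriesIDSet() {
	id, epoch, err := s.GetSeriesID()
	if err == nil {
		fmt.Println(id.String(), int64(epoch))
	}
}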

func (*Series) Compare

func (l *Series) Compare(b *Series) int

Compare returns a comparison int between two Series.

func (*Series) Equal

func (l *Series) Equal(b *Series) bool

Equal returns true if two Series are equal.

func (*Series) FinalSizeBytes

func (l *Series) FinalSizeBytes() uint64

FinalSizeBytes returns the size in bytes after the seriesID is set.

func (*Series) GetSeriesID

func (l *Series) GetSeriesID() (SeriesID, SeriesEpoch, error)

func (*Series) IsSeriesIDSet

func (l *Series) IsSeriesIDSet() bool

func (*Series) MetricName

func (l *Series) MetricName() string

func (*Series) NameValues

func (l *Series) NameValues() (names []string, values []string, ok bool)

NameValues returns the names and values; the result is only valid if the series ID is not set.

func (*Series) SetSeriesID

func (l *Series) SetSeriesID(sid SeriesID, eid SeriesEpoch)

SetSeriesID sets the series ID and epoch. Note that this has to be idempotent.

func (*Series) String

func (l *Series) String() string

String returns a string representation for hashing and comparison. This representation is guaranteed to uniquely represent the underlying label set, though it need not be human-readable or, indeed, valid UTF-8.

type SeriesEpoch

type SeriesEpoch int64

SeriesEpoch represents the series epoch.

type SeriesID

type SeriesID int64

SeriesID represents a globally unique id for the series. This should be equivalent to the PostgreSQL type in the series table (currently BIGINT).

func (SeriesID) String

func (s SeriesID) String() string

type SqlQuery

type SqlQuery struct {
	Sql           string
	Args          []interface{}
	ArgsUnordered bool
	Results       RowResults
	Err           error
	Copy          *Copy
}

type SqlRecorder

type SqlRecorder struct {
	// contains filtered or unexported fields
}

func NewErrorSqlRecorder

func NewErrorSqlRecorder(queries []SqlQuery, err error, t *testing.T) *SqlRecorder

func NewSqlRecorder

func NewSqlRecorder(queries []SqlQuery, t *testing.T) *SqlRecorder
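
A hedged test sketch: pre-record the queries the code under test is expected to run, along with canned results. The query, arguments, and result values here are illustrative only:

func TestGetWidget(t *testing.T) {
	conn := model.NewSqlRecorder([]model.SqlQuery{
		{
			Sql:     "SELECT name FROM widgets WHERE id = $1",
			Args:    []interface{}{int64(1)},
			Results: model.RowResults{{"gear"}},
		},
	}, t)
	var name string
	row := conn.QueryRow(context.Background(), "SELECT name FROM widgets WHERE id = $1", int64(1))
	if err := row.Scan(&name); err != nil {
		t.Fatal(err)
	}
}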

func (*SqlRecorder) Acquire

func (r *SqlRecorder) Acquire(ctx context.Context) (*pgxpool.Conn, error)

func (*SqlRecorder) BeginTx

func (r *SqlRecorder) BeginTx(ctx context.Context) (pgx.Tx, error)

func (*SqlRecorder) Close

func (r *SqlRecorder) Close()

func (*SqlRecorder) CopyFrom

func (r *SqlRecorder) CopyFrom(
	ctx context.Context,
	tx pgx.Tx,
	tableName pgx.Identifier,
	columnNames []string,
	rowSrc pgx.CopyFromSource,
	oids []uint32,
) (int64, error)

func (*SqlRecorder) CopyFromRows

func (r *SqlRecorder) CopyFromRows(rows [][]interface{}) pgx.CopyFromSource

func (*SqlRecorder) Exec

func (r *SqlRecorder) Exec(ctx context.Context, sql string, arguments ...interface{}) (pgconn.CommandTag, error)

func (*SqlRecorder) NewBatch

func (r *SqlRecorder) NewBatch() pgxconn.PgxBatch

func (*SqlRecorder) Query

func (r *SqlRecorder) Query(ctx context.Context, sql string, args ...interface{}) (pgxconn.PgxRows, error)

func (*SqlRecorder) QueryRow

func (r *SqlRecorder) QueryRow(ctx context.Context, sql string, args ...interface{}) pgx.Row

func (*SqlRecorder) SendBatch

func (r *SqlRecorder) SendBatch(ctx context.Context, b pgxconn.PgxBatch) (pgx.BatchResults, error)
