db

package
v0.0.0-...-477d63b
Published: Apr 11, 2024 License: MIT Imports: 30 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

var (
	GenesisInfo GenesisInfoType
	GenesisData GenesisType
)

var Query func(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)

The Query part of the SQL client.
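
A hedged sketch of an ad-hoc read through this function (the query text and table name are illustrative, not a contract of this package):

ctx := context.Background()
rows, err := db.Query(ctx,
	"SELECT timestamp FROM block_log WHERE height = $1", int64(1000))
if err != nil {
	return err
}
defer rows.Close()
for rows.Next() {
	// scan columns here
}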

var TheDB *sql.DB

The SQL client object used for ad-hoc DB manipulation like aggregate refreshing (and by tests).

var WebsocketNotify *chan struct{}

Functions

func AggregateList

func AggregateList() (ret []string)

Returns the list of registered aggregates. Used by the `./cmd/aggregates` tool.

func AggregatesDDL

func AggregatesDDL() []string

func CheckBatchInserterMarked

func CheckBatchInserterMarked() bool

func CombinedForkInfoMap

func CombinedForkInfoMap() *map[string]config.ForkInfo

func ConfigHasGenesis

func ConfigHasGenesis() bool

func CoreDDL

func CoreDDL() []string

func CreateWebsocketChannel

func CreateWebsocketChannel()

Creates the websocket channel; called when enabled by config.

func DebugPrintQuery

func DebugPrintQuery(msg string, query string, args ...interface{})

func DestoryGenesis

func DestoryGenesis()

func DropAggregates

func DropAggregates() (err error)

func EnsureDBMatchesChain

func EnsureDBMatchesChain()

func FullyCaughtUp

func FullyCaughtUp() bool

FullyCaughtUp returns true if the last stage of block processing (aggregation) is less than the configured amount of time in the past. At this point Midgard is fully functional and is ready to serve up-to-date data.
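
For illustration, a readiness probe could gate on this (the handler itself is hypothetical, not part of this package):

func readyHandler(w http.ResponseWriter, _ *http.Request) {
	if !db.FullyCaughtUp() {
		http.Error(w, "still syncing", http.StatusServiceUnavailable)
		return
	}
	w.WriteHeader(http.StatusOK)
}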

func GenesisExits

func GenesisExits() bool

func GetRegisteredAggregateByName

func GetRegisteredAggregateByName(name string) *aggregateDescription

Returns a registered aggregate. Used by the `./cmd/aggregates` tool.
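
A sketch of how such a tool might enumerate the registered aggregates (the printing is illustrative; aggregateDescription is unexported, so callers can only hold the pointer):

for _, name := range db.AggregateList() {
	if agg := db.GetRegisteredAggregateByName(name); agg != nil {
		fmt.Println("found aggregate:", name)
	}
}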

func GetRootFromChainIdName

func GetRootFromChainIdName(chainIdName string) string

func HeightFromEventId

func HeightFromEventId(eid int64) int64

func HeightToEventId

func HeightToEventId(height uint64) uint64
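
The two functions are inverses on block heights; a hedged sketch (assuming HeightToEventId yields the first event id of the block):

eid := db.HeightToEventId(1000)
h := db.HeightFromEventId(int64(eid))
// h == 1000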

func InitAggregatesRefresh

func InitAggregatesRefresh(ctx context.Context) jobs.NamedFunction

func InitGenesis

func InitGenesis()

func InitializeChainVars

func InitializeChainVars(chainId string, startHeight int64, hash string)

Takes the parameters of the current chain and initializes both the `CurrentChain` and `RootChain` global variables.

func InitializeChainVarsFromThorNode

func InitializeChainVarsFromThorNode()

Queries the ThorNode `status` endpoint and initializes both the `CurrentChain` and `RootChain` global variables based on the result. If the current chain is the root chain, it also initializes the `FirstBlock` variable.

Convenience function intended for standalone tools.

func InitializeChainVarsFromThorNodeStatus

func InitializeChainVarsFromThorNodeStatus(status *coretypes.ResultStatus)

Takes the result of the ThorNode `status` query and initializes both the `CurrentChain` and `RootChain` global variables. If the current chain is the root chain, it also initializes the `FirstBlock` variable.

func MarkBatchInserterFail

func MarkBatchInserterFail()

func NewAggregate

func NewAggregate(name string, table string) *aggregateDescription

func PreventOverrunAfterFork

func PreventOverrunAfterFork()

If Midgard is not updated with the correct value of HardForkHeight in time for the fork, it might add bogus blocks to the DB. In such a case we force a resync.

func PrintableHash

func PrintableHash(encodedHash string) string

func ReadDBGenesisHeight

func ReadDBGenesisHeight() int64

func RefreshAggregatesForTests

func RefreshAggregatesForTests()

func RegisterAggregate

func RegisterAggregate(agg *aggregateDescription) *aggregateDescription

func RegisterWatermarkedMaterializedView

func RegisterWatermarkedMaterializedView(name string, query string)

func RequestAggregatesRefresh

func RequestAggregatesRefresh()

func ResetGlobalVarsForTests

func ResetGlobalVarsForTests()

func SchemaCleanUp

func SchemaCleanUp(schema string) string

func SelectTruncatedTimestamp

func SelectTruncatedTimestamp(targetColumn string, buckets Buckets) string

Returns a SELECT field that truncates the target column's value according to buckets.Interval. The result is a date in seconds.
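
For example, the generated expression can be dropped into an aggregation query (table and column names are illustrative):

buckets := db.AllHistoryBuckets()
field := db.SelectTruncatedTimestamp("block_timestamp", buckets)
q := "SELECT " + field + ", COUNT(*) FROM swap_events GROUP BY 1 ORDER BY 1"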

func SetAndCheckFirstBlock

func SetAndCheckFirstBlock(hash string, height int64, timestamp Nano)

Checks that the provided values are consistent with the already initialized global variables.

func SetFetchCaughtUp

func SetFetchCaughtUp()

func SetupDoNotCallDirectly

func SetupDoNotCallDirectly()

Use `Setup` from internal/db/init instead.

func SetupWithoutUpdate

func SetupWithoutUpdate()

func SleepWithContext

func SleepWithContext(ctx context.Context, duration time.Duration)

func TableCleanup

func TableCleanup(schema string) string

We have so many tables that dropping them all in one transaction makes us run out of locks (max_locks_per_transaction is too low), so we drop the tables one by one in separate transactions.

func UpdateDDLIfNeeded

func UpdateDDLIfNeeded(dbObj *sql.DB, tag string, ddl []string, hashKey string, noauto bool)

func UpdateDDLsIfNeeded

func UpdateDDLsIfNeeded(dbObj *sql.DB, cfg config.TimeScale)

func WatermarkedMaterializedTables

func WatermarkedMaterializedTables() []string

func WebsocketsPing

func WebsocketsPing()

func Where

func Where(filters ...string) string

Helper function to join possibly empty filters for a WHERE clause. Empty strings are discarded.
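
For example (the empty filter is dropped; the exact output format is an assumption):

clause := db.Where("pool = $1", "", "block_timestamp >= $2")
// presumably: "WHERE pool = $1 AND block_timestamp >= $2"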

Types

type AppState

type AppState struct {
	Auth      JsonMap   `json:"auth"`
	Bank      Bank      `json:"bank"`
	Thorchain Thorchain `json:"thorchain"`
}

type Balance

type Balance struct {
	Address string `json:"address"`
	Coins   []Coin `json:"coins"`
}

type Bank

type Bank struct {
	Balances []Balance `json:"balances"`
	Supplies []Coin    `json:"supply"`
}

type BatchInserter

type BatchInserter struct {
	// contains filtered or unexported fields
}
var TheBatchInserter *BatchInserter

func (*BatchInserter) EndBlock

func (bi *BatchInserter) EndBlock() error

func (*BatchInserter) Flush

func (bi *BatchInserter) Flush() error

func (*BatchInserter) Insert

func (bi *BatchInserter) Insert(table string, columns []string, values ...interface{}) error

func (*BatchInserter) StartBlock

func (bi *BatchInserter) StartBlock() error

type BlockId

type BlockId struct {
	Height    int64
	Timestamp Nano
}

type Buckets

type Buckets struct {
	Timestamps Seconds
	// contains filtered or unexported fields
}

Bucketing has two modes:

a) If an interval is given, all timestamps are rounded to the interval boundaries. Timestamps contains count+1 entries, so the last timestamp is the endTime of the last bucket.

b) If the interval is nil, it's an exact search with from..to parameters. In this case there are exactly two Timestamps.
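
A sketch of the exact-search mode, using OneIntervalBuckets below (values are illustrative):

from, to := db.StrToSec("1700000000"), db.StrToSec("1700086400")
b := db.OneIntervalBuckets(from, to)
// b.OneInterval() == true; b.Start() == from, b.End() == to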

func AllHistoryBuckets

func AllHistoryBuckets() Buckets

func BucketsFromQuery

func BucketsFromQuery(ctx context.Context, urlParams *url.Values) (Buckets, miderr.Err)

func BucketsFromWindow

func BucketsFromWindow(ctx context.Context, window Window, interval Interval) (ret Buckets, merr miderr.Err)

TODO(acsaba): Migrate graphql to use GenerateBuckets.

func OneIntervalBuckets

func OneIntervalBuckets(from, to Second) Buckets

func (Buckets) AggregateName

func (b Buckets) AggregateName() string

func (Buckets) Bucket

func (b Buckets) Bucket(idx int) (startTime, endTime Second)

func (Buckets) BucketWindow

func (b Buckets) BucketWindow(idx int) Window

func (Buckets) Count

func (b Buckets) Count() int

func (Buckets) End

func (b Buckets) End() Second

func (Buckets) OneInterval

func (b Buckets) OneInterval() bool

func (Buckets) Start

func (b Buckets) Start() Second

func (Buckets) Window

func (b Buckets) Window() Window

type Coin

type Coin struct {
	Amount int64  `json:"amount,string"`
	Denom  string `json:"denom"`
}

type EventId

type EventId struct {
	BlockHeight int64
	Location    EventLocation
	TxIndex     int
	EventIndex  int
}

func ParseEventId

func ParseEventId(eid int64) (res EventId)

Opposite of AsBigint.

func (EventId) AsBigint

func (e EventId) AsBigint() int64
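
A round-trip sketch (field values are illustrative):

e := db.EventId{BlockHeight: 1000, Location: db.TxsResults, TxIndex: 2, EventIndex: 5}
eid := e.AsBigint()
back := db.ParseEventId(eid)
// back == e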

type EventLocation

type EventLocation int
const (
	BeginBlockEvents EventLocation = iota
	TxsResults
	EndBlockEvents
)

type FullyQualifiedChainId

type FullyQualifiedChainId struct {
	Name           string
	StartHash      string
	StartHeight    int64
	HardForkHeight int64
}

func EnrichAndGetRoot

func EnrichAndGetRoot(chainId *FullyQualifiedChainId) FullyQualifiedChainId

type GenesisInfoType

type GenesisInfoType struct {
	Height int64
	Hash   string
}

Genesis Type - maybe it should be in a separate file?

func (GenesisInfoType) Get

type GenesisType

type GenesisType struct {
	GenesisTime   time.Time `json:"genesis_time"`
	ChainID       string    `json:"chain_id"`
	InitialHeight string    `json:"initial_height"`
	AppState      AppState  `json:"app_state,omitempty"`
}

This genesis type is custom made from THORNode: https://gitlab.com/thorchain/thornode/-/blob/95ece18f92e363381aa0d09a9df779b4d63318f5/x/thorchain/genesis.pb.go#L132

func (*GenesisType) GetGenesisHeight

func (g *GenesisType) GetGenesisHeight() (height int64)

type ImmediateInserter

type ImmediateInserter struct {
	// contains filtered or unexported fields
}

Creates a separate transaction for every block and inserts rows as they come, each with a separate SQL query within the transaction. This is the fallback Inserter and is also used for testing.

When in a transaction, a SAVEPOINT is created before any operation, and if the operation fails the transaction is rolled back to the state before it. This is necessary at the moment, as we can't guarantee that we won't run an invalid operation while processing a block.

var TheImmediateInserter *ImmediateInserter

func (*ImmediateInserter) EndBlock

func (txi *ImmediateInserter) EndBlock() (err error)

func (*ImmediateInserter) Flush

func (txi *ImmediateInserter) Flush() error

func (*ImmediateInserter) Insert

func (txi *ImmediateInserter) Insert(table string, columns []string, values ...interface{}) (err error)

func (*ImmediateInserter) StartBlock

func (txi *ImmediateInserter) StartBlock() (err error)

type Interval

type Interval int
const (
	Min5 Interval = iota
	Hour
	Day
	Week
	Month
	Quarter
	Year
	UndefinedInterval
)

type IntervalDescription

type IntervalDescription struct {
	// contains filtered or unexported fields
}

type JsonMap

type JsonMap map[string]interface{}

type LP

type LP struct {
	Pool         string `json:"asset"`
	AssetAddr    string `json:"asset_address"`
	AssetE8      int64  `json:"asset_deposit_value,string"`
	PendingAsset int64  `json:"pending_asset,string"`
	RuneAddr     string `json:"rune_address"`
	RuneE8       int64  `json:"rune_deposit_value,string"`
	PendingRune  int64  `json:"pending_rune,string"`
	Units        int64  `json:"units,string"`
	LastHeight   int64  `json:"last_add_height,string"`
}

type Loan

type Loan struct {
	Asset               string `json:"asset"`
	Owner               string `json:"owner"`
	CollateralDeposited int64  `json:"collateral_deposited,string"`
	CollateralWithdrawn int64  `json:"collateral_withdrawn,string"`
	DebtIssued          int64  `json:"debt_issued,string"`
	DebtRepaid          int64  `json:"debt_repaid,string"`
	LastOpenHeight      int64  `json:"last_open_height,string"`
}

type Mimir

type Mimir struct {
	Key   string `json:"Key"`
	Value int64  `json:"value,string"`
}

type Nano

type Nano int64

func NowNano

func NowNano() Nano

func TimeBucketCeil

func TimeBucketCeil(time Nano, period Nano) Nano

TODO(huginn): move this to buckets

func TimeBucketFloor

func TimeBucketFloor(time Nano, period Nano) Nano

TODO(huginn): move this to buckets
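
A sketch of snapping a timestamp to 5-minute bucket boundaries (Nano counts nanoseconds, so a time.Duration converts directly):

period := db.Nano(5 * time.Minute)
now := db.NowNano()
start := db.TimeBucketFloor(now, period) // round down
end := db.TimeBucketCeil(now, period)    // round up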

func TimeToNano

func TimeToNano(t time.Time) Nano

TODO(acsaba): get rid of this function, remove time dependency.

func (Nano) ToI

func (n Nano) ToI() int64

func (Nano) ToSecond

func (n Nano) ToSecond() Second

func (Nano) ToTime

func (n Nano) ToTime() time.Time

type Node

type Node struct {
	NodeAddress string `json:"node_address"`
	Status      string `json:"status"`
	BondE8      int64  `json:"bond,string"`
	BondAddr    string `json:"bond_address"`
}

type Pool

type Pool struct {
	BalanceRune         int64  `json:"balance_rune,string"`
	BalanceAsset        int64  `json:"balance_asset,string"`
	Asset               string `json:"asset"`
	LPUnits             int64  `json:"LP_units,string"`
	Status              string `json:"status"`
	StatusSince         int64  `json:"status_since,string"`
	Decimals            int64  `json:"decimals,string"`
	SynthUnits          int64  `json:"synth_units,string"`
	PendingInboundRune  int64  `json:"pending_inbound_rune,string"`
	PendingInboundAsset int64  `json:"pending_inbound_asset,string"`
}

type RowInserter

type RowInserter interface {
	StartBlock() error
	EndBlock() error
	Flush() error
	Insert(table string, columns []string, values ...interface{}) error
}

Abstraction for block recorder inserting rows into tables.

Note: Does not support concurrent use.

var Inserter RowInserter

Global RowInserter object used by the block recorder.
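
A sketch of the per-block cycle a recorder might drive (table and column names are illustrative):

if err := db.Inserter.StartBlock(); err != nil {
	return err
}
cols := []string{"pool", "asset_e8"}
if err := db.Inserter.Insert("swap_events", cols, "BTC.BTC", int64(42)); err != nil {
	return err
}
if err := db.Inserter.EndBlock(); err != nil {
	return err
}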

type Second

type Second int64

func NowSecond

func NowSecond() Second

func StrToSec

func StrToSec(s string) Second

func TimeToSecond

func TimeToSecond(t time.Time) Second

TODO(acsaba): get rid of this function, remove time dependency.

func (Second) Add

func (s Second) Add(duration time.Duration) Second

func (Second) ToI

func (s Second) ToI() int64

func (Second) ToNano

func (s Second) ToNano() Nano

func (Second) ToTime

func (s Second) ToTime() time.Time
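
The conversions chain as expected (values are illustrative):

s := db.StrToSec("1700000000")
n := s.ToNano()          // seconds scaled to nanoseconds
later := s.Add(time.Hour)
_ = n.ToSecond()         // back to s
_ = later.ToTime()       // as time.Time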

type Seconds

type Seconds []Second

type StoredBlockId

type StoredBlockId struct {
	// contains filtered or unexported fields
}
var (
	LastThorNodeBlock  StoredBlockId
	LastFetchedBlock   StoredBlockId
	LastCommittedBlock StoredBlockId

	// Note: during batch inserting in the syncup phase the LastAggregatedBlock's Height is not
	// updated (e.g. might be 0) and the timestamp is not an exact block timestamp.
	// When in sync, both Height and Timestamp of the LastAggregatedBlock are correct.
	LastAggregatedBlock StoredBlockId

	FirstBlock StoredBlockId
)

func (*StoredBlockId) AsHeightTS

func (s *StoredBlockId) AsHeightTS() oapigen.HeightTS

func (*StoredBlockId) Get

func (s *StoredBlockId) Get() BlockId

func (*StoredBlockId) Set

func (s *StoredBlockId) Set(height int64, timestamp Nano)

type StoredFullyQualifiedChainId

type StoredFullyQualifiedChainId struct {
	// contains filtered or unexported fields
}

func (*StoredFullyQualifiedChainId) Get

type SwapDirection

type SwapDirection int8
const (
	RuneToAsset   SwapDirection = 0
	AssetToRune   SwapDirection = 1
	RuneToSynth   SwapDirection = 2
	SynthToRune   SwapDirection = 3
	RuneToDerived SwapDirection = 4
	DerivedToRune SwapDirection = 5
)

Do not change these constants. SQL queries may assume these values directly.

type THORName

type THORName struct {
	Name              string          `json:"name"`
	ExpireBlockHeight int64           `json:"expire_block_height,string"`
	Owner             string          `json:"owner"`
	PreferredAsset    string          `json:"preferred_asset"`
	Aliases           []THORNameAlias `json:"aliases"`
}

type THORNameAlias

type THORNameAlias struct {
	Chain   string `json:"chain"`
	Address string `json:"address"`
}

type Thorchain

type Thorchain struct {
	LPs       []LP       `json:"liquidity_providers"`
	Loans     []Loan     `json:"loans"`
	Pools     []Pool     `json:"pools"`
	THORNames []THORName `json:"THORNames"`
	Nodes     []Node     `json:"node_accounts"`
	Mimirs    []Mimir    `json:"mimirs"`
}

type Window

type Window struct {
	From  Second // lower bound [inclusive]
	Until Second // upper bound [exclusive]
}

Window specifies the applicable time period.
