Documentation ¶
Overview ¶
Package qdb provides an API to a quasardb server.
Index ¶
- func ClusterKeyFromFile(clusterPublicKeyFile string) (string, error)
- func CountUndefined() uint64
- func Int64Undefined() int64
- func MaxTimespec() time.Time
- func MinTimespec() time.Time
- func NeverExpires() time.Time
- func PreserveExpiration() time.Time
- func SetLogFile(filePath string)
- func UserCredentialFromFile(userCredentialFile string) (string, string, error)
- type BlobEntry
- func (entry *BlobEntry) CompareAndSwap(newValue []byte, newComparand []byte, expiry time.Time) ([]byte, error)
- func (entry BlobEntry) Get() ([]byte, error)
- func (entry BlobEntry) GetAndRemove() ([]byte, error)
- func (entry *BlobEntry) GetAndUpdate(newContent []byte, expiry time.Time) ([]byte, error)
- func (entry BlobEntry) GetNoAlloc(content []byte) (int, error)
- func (entry BlobEntry) Put(content []byte, expiry time.Time) error
- func (entry BlobEntry) RemoveIf(comparand []byte) error
- func (entry *BlobEntry) Update(newContent []byte, expiry time.Time) error
- type Cluster
- type Compression
- type DirectBlobEntry
- type DirectEntry
- type DirectHandleType
- func (h DirectHandleType) Blob(alias string) DirectBlobEntry
- func (h DirectHandleType) Close() error
- func (h DirectHandleType) Integer(alias string) DirectIntegerEntry
- func (h DirectHandleType) PrefixGet(prefix string, limit int) ([]string, error)
- func (h DirectHandleType) Release(buffer unsafe.Pointer)
- type DirectIntegerEntry
- type Encryption
- type Endpoint
- type Entry
- func (e Entry) Alias() string
- func (e Entry) AttachTag(tag string) error
- func (e Entry) AttachTags(tags []string) error
- func (e Entry) DetachTag(tag string) error
- func (e Entry) DetachTags(tags []string) error
- func (e Entry) ExpiresAt(expiry time.Time) error
- func (e Entry) ExpiresFromNow(expiry time.Duration) error
- func (e Entry) GetLocation() (NodeLocation, error)
- func (e Entry) GetMetadata() (Metadata, error)
- func (e Entry) GetTagged(tag string) ([]string, error)
- func (e Entry) GetTags() ([]string, error)
- func (e Entry) HasTag(tag string) error
- func (e Entry) Remove() error
- type EntryType
- type ErrorType
- type Find
- type HandleType
- func MustSetupHandle(clusterURI string, timeout time.Duration) HandleType
- func MustSetupSecuredHandle(clusterURI, clusterPublicKeyFile, userCredentialFile string, ...) HandleType
- func NewHandle() (HandleType, error)
- func SetupHandle(clusterURI string, timeout time.Duration) (HandleType, error)
- func SetupSecuredHandle(clusterURI, clusterPublicKeyFile, userCredentialFile string, ...) (HandleType, error)
- func (h HandleType) APIBuild() string
- func (h HandleType) APIVersion() string
- func (h HandleType) AddClusterPublicKey(secret string) error
- func (h HandleType) AddUserCredentials(name, secret string) error
- func (h HandleType) Blob(alias string) BlobEntry
- func (h HandleType) Close() error
- func (h HandleType) Cluster() *Cluster
- func (h HandleType) Connect(clusterURI string) error
- func (h HandleType) DirectConnect(nodeURI string) (DirectHandleType, error)
- func (h HandleType) Find() *Find
- func (h HandleType) GetClientMaxInBufSize() (uint, error)
- func (h HandleType) GetClientMaxParallelism() (uint, error)
- func (h HandleType) GetClusterMaxInBufSize() (uint, error)
- func (h HandleType) GetLastError() (string, error)
- func (h HandleType) GetTagged(tag string) ([]string, error)
- func (h HandleType) GetTags(entryAlias string) ([]string, error)
- func (h HandleType) Integer(alias string) IntegerEntry
- func (h HandleType) Node(uri string) *Node
- func (h HandleType) NodeStatistics(nodeID string) (Statistics, error) — deprecated
- func (h HandleType) Open(protocol Protocol) error
- func (h HandleType) PrefixCount(prefix string) (uint64, error)
- func (h HandleType) PrefixGet(prefix string, limit int) ([]string, error)
- func (h HandleType) Query(query string) *Query
- func (h HandleType) Release(buffer unsafe.Pointer)
- func (h HandleType) SetClientMaxInBufSize(bufSize uint) error
- func (h HandleType) SetClientMaxParallelism(threadCount uint) error
- func (h HandleType) SetCompression(compressionLevel Compression) error
- func (h HandleType) SetEncryption(encryption Encryption) error
- func (h HandleType) SetMaxCardinality(maxCardinality uint) error
- func (h HandleType) SetTimeout(timeout time.Duration) error
- func (h HandleType) Statistics() (map[string]Statistics, error)
- func (h HandleType) Timeseries(alias string) TimeseriesEntry
- func (h HandleType) TsBatch(cols ...TsBatchColumnInfo) (*TsBatch, error)
- type IntegerEntry
- type Metadata
- type Node
- type NodeLocation
- type NodeStatus
- type NodeTopology
- type Protocol
- type Query
- type QueryPoint
- func (r *QueryPoint) Get() QueryPointResult
- func (r *QueryPoint) GetBlob() ([]byte, error)
- func (r *QueryPoint) GetCount() (int64, error)
- func (r *QueryPoint) GetDouble() (float64, error)
- func (r *QueryPoint) GetInt64() (int64, error)
- func (r *QueryPoint) GetString() (string, error)
- func (r *QueryPoint) GetTimestamp() (time.Time, error)
- type QueryPointResult
- type QueryResult
- func (r QueryResult) Columns(row *QueryPoint) QueryRow
- func (r QueryResult) ColumnsCount() int64
- func (r QueryResult) ColumnsNames() []string
- func (r QueryResult) ErrorMessage() string
- func (r QueryResult) RowCount() int64
- func (r QueryResult) Rows() QueryRows
- func (r QueryResult) ScannedPoints() int64
- type QueryResultValueType
- type QueryRow
- type QueryRows
- type RefID
- type Statistics
- type TimeseriesEntry
- func (entry TimeseriesEntry) BlobColumn(columnName string) TsBlobColumn
- func (entry TimeseriesEntry) Bulk(cols ...TsColumnInfo) (*TsBulk, error)
- func (entry TimeseriesEntry) Columns() ([]TsBlobColumn, []TsDoubleColumn, []TsInt64Column, []TsStringColumn, ...)
- func (entry TimeseriesEntry) ColumnsInfo() ([]TsColumnInfo, error)
- func (entry TimeseriesEntry) Create(shardSize time.Duration, cols ...TsColumnInfo) error
- func (entry TimeseriesEntry) DoubleColumn(columnName string) TsDoubleColumn
- func (entry TimeseriesEntry) InsertColumns(cols ...TsColumnInfo) error
- func (entry TimeseriesEntry) Int64Column(columnName string) TsInt64Column
- func (entry TimeseriesEntry) StringColumn(columnName string) TsStringColumn
- func (entry TimeseriesEntry) SymbolColumn(columnName string, symtableName string) TsStringColumn
- func (entry TimeseriesEntry) TimestampColumn(columnName string) TsTimestampColumn
- type TsAggregationType
- type TsBatch
- func (t *TsBatch) ExtraColumns(cols ...TsBatchColumnInfo) error
- func (t *TsBatch) Push() error
- func (t *TsBatch) PushFast() error
- func (t *TsBatch) Release()
- func (t *TsBatch) RowSetBlob(index int64, content []byte) error
- func (t *TsBatch) RowSetBlobNoCopy(index int64, content []byte) error
- func (t *TsBatch) RowSetDouble(index int64, value float64) error
- func (t *TsBatch) RowSetInt64(index, value int64) error
- func (t *TsBatch) RowSetString(index int64, content string) error
- func (t *TsBatch) RowSetStringNoCopy(index int64, content string) error
- func (t *TsBatch) RowSetTimestamp(index int64, value time.Time) error
- func (t *TsBatch) StartRow(timestamp time.Time) error
- type TsBatchColumnInfo
- type TsBlobAggregation
- type TsBlobColumn
- func (column TsBlobColumn) Aggregate(aggs ...*TsBlobAggregation) ([]TsBlobAggregation, error)
- func (column TsBlobColumn) EraseRanges(rgs ...TsRange) (uint64, error)
- func (column TsBlobColumn) GetRanges(rgs ...TsRange) ([]TsBlobPoint, error)
- func (column TsBlobColumn) Insert(points ...TsBlobPoint) error
- type TsBlobPoint
- type TsBulk
- func (t *TsBulk) GetBlob() ([]byte, error)
- func (t *TsBulk) GetDouble() (float64, error)
- func (t *TsBulk) GetInt64() (int64, error)
- func (t *TsBulk) GetRanges(rgs ...TsRange) error
- func (t *TsBulk) GetString() (string, error)
- func (t *TsBulk) GetTimestamp() (time.Time, error)
- func (t *TsBulk) Ignore() *TsBulk
- func (t *TsBulk) NextRow() (time.Time, error)
- func (t *TsBulk) Release()
- func (t *TsBulk) Row(timestamp time.Time) *TsBulk
- func (t TsBulk) RowCount() int
- type TsColumnInfo
- type TsColumnType
- type TsDoubleAggregation
- type TsDoubleColumn
- func (column TsDoubleColumn) Aggregate(aggs ...*TsDoubleAggregation) ([]TsDoubleAggregation, error)
- func (column TsDoubleColumn) EraseRanges(rgs ...TsRange) (uint64, error)
- func (column TsDoubleColumn) GetRanges(rgs ...TsRange) ([]TsDoublePoint, error)
- func (column TsDoubleColumn) Insert(points ...TsDoublePoint) error
- type TsDoublePoint
- type TsInt64Aggregation
- type TsInt64Column
- func (column TsInt64Column) Aggregate(aggs ...*TsInt64Aggregation) ([]TsInt64Aggregation, error)
- func (column TsInt64Column) EraseRanges(rgs ...TsRange) (uint64, error)
- func (column TsInt64Column) GetRanges(rgs ...TsRange) ([]TsInt64Point, error)
- func (column TsInt64Column) Insert(points ...TsInt64Point) error
- type TsInt64Point
- type TsRange
- type TsStringAggregation
- type TsStringColumn
- func (column TsStringColumn) Aggregate(aggs ...*TsStringAggregation) ([]TsStringAggregation, error)
- func (column TsStringColumn) EraseRanges(rgs ...TsRange) (uint64, error)
- func (column TsStringColumn) GetRanges(rgs ...TsRange) ([]TsStringPoint, error)
- func (column TsStringColumn) Insert(points ...TsStringPoint) error
- type TsStringPoint
- type TsTimestampAggregation
- type TsTimestampColumn
- func (column TsTimestampColumn) Aggregate(aggs ...*TsTimestampAggregation) ([]TsTimestampAggregation, error)
- func (column TsTimestampColumn) EraseRanges(rgs ...TsRange) (uint64, error)
- func (column TsTimestampColumn) GetRanges(rgs ...TsRange) ([]TsTimestampPoint, error)
- func (column TsTimestampColumn) Insert(points ...TsTimestampPoint) error
- type TsTimestampPoint
Examples ¶
- BlobEntry
- Entry.Alias
- HandleType
- IntegerEntry
- Node
- Query
- TimeseriesEntry
- TimeseriesEntry.BlobColumn
- TimeseriesEntry.Bulk
- TimeseriesEntry.Columns
- TimeseriesEntry.ColumnsInfo
- TimeseriesEntry.Create
- TimeseriesEntry.DoubleColumn
- TimeseriesEntry.InsertColumns
- TimeseriesEntry.Int64Column
- TimeseriesEntry.TimestampColumn
- TsBlobColumn.Aggregate
- TsBlobColumn.EraseRanges
- TsBlobColumn.GetRanges
- TsBlobColumn.Insert
- TsDoubleColumn.Aggregate
- TsDoubleColumn.EraseRanges
- TsDoubleColumn.GetRanges
- TsDoubleColumn.Insert
- TsInt64Column.EraseRanges
- TsInt64Column.GetRanges
- TsInt64Column.Insert
- TsTimestampColumn.EraseRanges
- TsTimestampColumn.GetRanges
- TsTimestampColumn.Insert
Constants ¶
This section is empty.
Variables ¶
This section is empty.
Functions ¶
func ClusterKeyFromFile ¶
ClusterKeyFromFile : retrieve cluster key from a file
func CountUndefined ¶
func CountUndefined() uint64
CountUndefined : return a uint64 value corresponding to quasardb undefined count value
func Int64Undefined ¶
func Int64Undefined() int64
Int64Undefined : return an int64 value corresponding to quasardb undefined int64 value
func MaxTimespec ¶
MaxTimespec : return a time value corresponding to quasardb maximum timespec value
func MinTimespec ¶
MinTimespec : return a time value corresponding to quasardb minimum timespec value
func NeverExpires ¶
NeverExpires : return a time value corresponding to quasardb never expires value
func PreserveExpiration ¶
PreserveExpiration : return a time value corresponding to quasardb preserve expiration value
Types ¶
type BlobEntry ¶
type BlobEntry struct {
Entry
}
BlobEntry : blob data type
Example ¶
SetLogFile(ExamplesLogFilePath) h := MustSetupHandle(insecureURI, 120*time.Second) defer h.Close() alias := "BlobAlias" blob := h.Blob(alias) defer blob.Remove() content := []byte("content") blob.Put(content, NeverExpires()) obtainedContent, _ := blob.Get() fmt.Println("Get content:", string(obtainedContent)) updateContent := []byte("updated content") blob.Update(updateContent, PreserveExpiration()) obtainedContent, _ = blob.Get() fmt.Println("Get updated content:", string(obtainedContent)) newContent := []byte("new content") previousContent, _ := blob.GetAndUpdate(newContent, PreserveExpiration()) fmt.Println("Previous content:", string(previousContent)) obtainedContent, _ = blob.Get() fmt.Println("Get new content:", string(obtainedContent))
Output: Get content: content Get updated content: updated content Previous content: updated content Get new content: new content
func (*BlobEntry) CompareAndSwap ¶
func (entry *BlobEntry) CompareAndSwap(newValue []byte, newComparand []byte, expiry time.Time) ([]byte, error)
CompareAndSwap : Atomically compares the entry with comparand and updates it to new_value if, and only if, they match.
The function returns the original value of the entry in case of a mismatch. When it matches, no content is returned. The entry must already exist. Update will occur if and only if the content of the entry matches bit for bit the content of the comparand buffer.
func (BlobEntry) Get ¶
Get : Retrieve an entry's content
If the entry does not exist, the function will fail and return 'alias not found' error.
func (BlobEntry) GetAndRemove ¶
GetAndRemove : Atomically gets an entry from the quasardb server and removes it.
If the entry does not exist, the function will fail and return 'alias not found' error.
func (*BlobEntry) GetAndUpdate ¶
GetAndUpdate : Atomically gets and updates (in this order) the entry on the quasardb server.
The entry must already exist.
func (BlobEntry) GetNoAlloc ¶
GetNoAlloc : Retrieve an entry's content to already allocated buffer
If the entry does not exist, the function will fail and return 'alias not found' error. If the buffer is not large enough to hold the data, the function will fail and return `buffer is too small`, content length will nevertheless be returned with entry size so that the caller may resize its buffer and try again.
func (BlobEntry) Put ¶
Put : Creates a new entry and sets its content to the provided blob.
If the entry already exists the function will fail and will return 'alias already exists' error. You can specify an expiry or use NeverExpires if you don’t want the entry to expire.
func (BlobEntry) RemoveIf ¶
RemoveIf : Atomically removes the entry on the server if the content matches.
The entry must already exist. Removal will occur if and only if the content of the entry matches bit for bit the content of the comparand buffer.
type Cluster ¶
type Cluster struct {
HandleType
}
Cluster : An object permitting calls to a cluster
func (Cluster) PurgeAll ¶
PurgeAll : Removes irremediably all data from all the nodes of the cluster.
This function is useful when quasardb is used as a cache and is not the golden source. This call is not atomic: if the command cannot be dispatched on the whole cluster, it will be dispatched on as many nodes as possible and the function will return with a qdb_e_ok code. By default the cluster does not allow this operation and the function returns a qdb_e_operation_disabled error.
func (Cluster) PurgeCache ¶
PurgeCache : Removes all cached data from all the nodes of the cluster.
This function is disabled on a transient cluster. Prefer purge_all in this case. This call is not atomic: if the command cannot be dispatched on the whole cluster, it will be dispatched on as many nodes as possible and the function will return with a qdb_e_ok code.
func (Cluster) TrimAll ¶
TrimAll : Trims all data on all the nodes of the cluster.
Quasardb uses Multi-Version Concurrency Control (MVCC) as a foundation of its transaction engine. It will automatically clean up old versions as entries are accessed. This call is not atomic: if the command cannot be dispatched on the whole cluster, it will be dispatched on as many nodes as possible and the function will return with a qdb_e_ok code. Entries that are not accessed may not be cleaned up, resulting in increasing disk usage. This function will request each node to trim all entries, release unused memory and compact files on disk. Because this operation is I/O and CPU intensive it is not recommended to run it when the cluster is heavily used.
type Compression ¶
type Compression C.qdb_compression_t
Compression : compression parameter
const ( CompNone Compression = C.qdb_comp_none CompFast Compression = C.qdb_comp_fast CompBest Compression = C.qdb_comp_best )
Compression values:
CompNone : No compression. CompFast : Maximum compression speed, potentially minimum compression ratio. This is currently the default. CompBest : Maximum compression ratio, potentially minimum compression speed. This is currently not implemented.
type DirectBlobEntry ¶
type DirectBlobEntry struct {
DirectEntry
}
DirectBlobEntry is an Entry for a blob data type
func (DirectBlobEntry) Get ¶
func (e DirectBlobEntry) Get() ([]byte, error)
Get returns an entry's contents
func (DirectBlobEntry) Put ¶
func (e DirectBlobEntry) Put(content []byte, expiry time.Time) error
Put creates a new entry and sets its content to the provided blob This will return an error if the entry alias already exists You can specify an expiry or use NeverExpires if you don’t want the entry to expire.
func (*DirectBlobEntry) Update ¶
func (e *DirectBlobEntry) Update(newContent []byte, expiry time.Time) error
Update creates or updates an entry and sets its content to the provided blob. If the entry already exists, the function will modify the entry. You can specify an expiry or use NeverExpires if you don’t want the entry to expire.
type DirectEntry ¶
type DirectEntry struct { DirectHandleType // contains filtered or unexported fields }
DirectEntry is a base type for composition. Similar to a regular entry
func (DirectEntry) Remove ¶
func (e DirectEntry) Remove() error
Remove an entry from the local node's storage, regardless of its type.
This function bypasses the clustering mechanism and accesses the node local storage. Entries in the local node storage are not accessible via the regular API and vice versa.
The call is ACID, regardless of the type of the entry and a transaction will be created if need be.
type DirectHandleType ¶
type DirectHandleType struct {
// contains filtered or unexported fields
}
DirectHandleType is an opaque handle needed for maintaining a direct connection to a node.
func (DirectHandleType) Blob ¶
func (h DirectHandleType) Blob(alias string) DirectBlobEntry
Blob creates a direct blob entry object
func (DirectHandleType) Close ¶
func (h DirectHandleType) Close() error
Close releases a direct connection previously opened with DirectConnect
func (DirectHandleType) Integer ¶
func (h DirectHandleType) Integer(alias string) DirectIntegerEntry
Integer creates a direct integer entry object
func (DirectHandleType) PrefixGet ¶
func (h DirectHandleType) PrefixGet(prefix string, limit int) ([]string, error)
PrefixGet : Retrieves the list of all entries matching the provided prefix.
A prefix-based search will enable you to find all entries matching a provided prefix. This function returns the list of aliases. It’s up to the user to query the content associated with every entry, if needed.
func (DirectHandleType) Release ¶
func (h DirectHandleType) Release(buffer unsafe.Pointer)
Release frees API allocated buffers
type DirectIntegerEntry ¶
type DirectIntegerEntry struct {
DirectEntry
}
DirectIntegerEntry is an Entry for an int data type
func (DirectIntegerEntry) Add ¶
func (e DirectIntegerEntry) Add(added int64) (int64, error)
Add : Atomically increases or decreases a signed 64-bit integer.
The specified entry will be atomically increased (or decreased) according to the given addend value: to increase the value, specify a positive addend; to decrease the value, specify a negative addend. The function returns the result of the operation. The entry must already exist.
func (DirectIntegerEntry) Get ¶
func (e DirectIntegerEntry) Get() (int64, error)
Get returns the value of a signed 64-bit integer
func (DirectIntegerEntry) Put ¶
func (e DirectIntegerEntry) Put(content int64, expiry time.Time) error
Put creates a new signed 64-bit integer.
Atomically creates an entry of the given alias and sets it to a cross-platform signed 64-bit integer. If the entry already exists, the function returns an error. You can specify an expiry time or use NeverExpires if you don’t want the entry to expire. If you want to create or update an entry use Update. The value will be correctly translated independently of the endianness of the client’s platform.
func (DirectIntegerEntry) Update ¶
func (e DirectIntegerEntry) Update(newContent int64, expiry time.Time) error
Update creates or updates a signed 64-bit integer.
Atomically updates an entry of the given alias to the provided value. If the entry doesn’t exist, it will be created. You can specify an expiry time or use NeverExpires if you don’t want the entry to expire.
type Encryption ¶
type Encryption C.qdb_encryption_t
Encryption : encryption option
const ( EncryptNone Encryption = C.qdb_crypt_none EncryptAES Encryption = C.qdb_crypt_aes_gcm_256 )
Encryption values:
EncryptNone : No encryption. EncryptAES : Uses aes gcm 256 encryption.
type Entry ¶
type Entry struct { HandleType // contains filtered or unexported fields }
Entry : a base type for composition; it cannot be constructed directly
func (Entry) Alias ¶
Alias : Return an alias string of the object
Example ¶
SetLogFile(ExamplesLogFilePath) h := MustSetupHandle(insecureURI, 120*time.Second) defer h.Close() blob1 := h.Blob("BLOB_1") blob1.Put([]byte("blob 1 content"), NeverExpires()) defer blob1.Remove() blob2 := h.Blob("BLOB_2") blob2.Put([]byte("blob 2 content"), NeverExpires()) defer blob2.Remove() fmt.Println("Alias blob 1:", blob1.Alias()) fmt.Println("Alias blob 2:", blob2.Alias()) tags1 := []string{"tag blob 1", "tag both blob"} blob1.AttachTags(tags1) defer blob1.DetachTags(tags1) tags2 := []string{"tag blob 2", "tag both blob"} blob2.AttachTags(tags2) defer blob2.DetachTags(tags2) resultTagBlob1, _ := blob1.GetTagged("tag blob 1") fmt.Println("Tagged with 'tag blob 1':", resultTagBlob1) resultTagBlob2, _ := blob1.GetTagged("tag blob 2") fmt.Println("Tagged with 'tag blob 2':", resultTagBlob2) resultTagBoth, _ := blob1.GetTagged("tag both blob") fmt.Println("Tagged with 'tag both blob':", resultTagBoth)
Output: Alias blob 1: BLOB_1 Alias blob 2: BLOB_2 Tagged with 'tag blob 1': [BLOB_1] Tagged with 'tag blob 2': [BLOB_2] Tagged with 'tag both blob': [BLOB_1 BLOB_2]
func (Entry) AttachTag ¶
AttachTag : Adds a tag entry.
Tagging an entry enables you to search for entries based on their tags. Tags scale across nodes. The entry must exist. The tag may or may not exist.
func (Entry) AttachTags ¶
AttachTags : Adds a collection of tags to a single entry.
Tagging an entry enables you to search for entries based on their tags. Tags scale across nodes. The function will ignore existing tags. The entry must exist. The tag may or may not exist.
func (Entry) DetachTag ¶
DetachTag : Removes a tag from an entry.
Tagging an entry enables you to search for entries based on their tags. Tags scale across nodes. The entry must exist. The tag must exist.
func (Entry) DetachTags ¶
DetachTags : Removes a collection of tags from a single entry.
Tagging an entry enables you to search for entries based on their tags. Tags scale across nodes. The entry must exist. The tags must exist.
func (Entry) ExpiresAt ¶
ExpiresAt : Sets the absolute expiration time of an entry.
Blobs and integers can have an expiration time and will be automatically removed by the cluster when they expire. The absolute expiration time is the Unix epoch, that is, the number of milliseconds since 1 January 1970, 00:00:00 UTC. To use a relative expiration time (that is expiration relative to the time of the call), use ExpiresFromNow. To remove the expiration time of an entry, specify the value NeverExpires as ExpiryTime parameter. Values in the past are refused, but the cluster will have a certain tolerance to account for clock skews.
func (Entry) ExpiresFromNow ¶
ExpiresFromNow : Sets the expiration time of an entry, relative to the current time of the client.
Blobs and integers can have an expiration time and will automatically be removed by the cluster when they expire. The expiration is relative to the current time of the machine. To remove the expiration time of an entry or to use an absolute expiration time use ExpiresAt.
func (Entry) GetLocation ¶
func (e Entry) GetLocation() (NodeLocation, error)
GetLocation : Returns the primary node of an entry.
The exact location of an entry should be assumed random and users should not bother about its location as the API will transparently locate the best node for the requested operation. This function is intended for higher level APIs that need to optimize transfers and potentially push computation close to the data.
func (Entry) GetMetadata ¶
GetMetadata : Gets the meta-information about an entry, if it exists.
func (Entry) GetTagged ¶
GetTagged : Retrieves all entries that have the specified tag.
Tagging an entry enables you to search for entries based on their tags. Tags scale across nodes. The tag must exist. The complexity of this function is constant.
func (Entry) GetTags ¶
GetTags : Retrieves all the tags of an entry.
Tagging an entry enables you to search for entries based on their tags. Tags scale across nodes. The entry must exist.
func (Entry) HasTag ¶
HasTag : Tests if an entry has the request tag.
Tagging an entry enables you to search for entries based on their tags. Tags scale across nodes. The entry must exist.
func (Entry) Remove ¶
Remove : Removes an entry from the cluster, regardless of its type.
This call will remove the entry, whether it is a blob, integer, deque, or stream. It will properly untag the entry. If the entry spans multiple entries or nodes (deques and streams) all blocks will be properly removed. The call is ACID, regardless of the type of the entry, and a transaction will be created if need be.
type EntryType ¶
type EntryType C.qdb_entry_type_t
EntryType : An enumeration representing possible entries type.
const ( EntryUninitialized EntryType = C.qdb_entry_uninitialized EntryBlob EntryType = C.qdb_entry_blob EntryInteger EntryType = C.qdb_entry_integer EntryHSet EntryType = C.qdb_entry_hset EntryTag EntryType = C.qdb_entry_tag EntryDeque EntryType = C.qdb_entry_deque EntryStream EntryType = C.qdb_entry_stream EntryTS EntryType = C.qdb_entry_ts )
EntryType Values
EntryUninitialized : Uninitialized value. EntryBlob : A binary large object (blob). EntryInteger : A signed 64-bit integer. EntryHSet : A distributed hash set. EntryTag : A tag. EntryDeque : A distributed double-entry queue (deque). EntryTS : A distributed time series. EntryStream : A distributed binary stream.
type ErrorType ¶
type ErrorType C.qdb_error_t
ErrorType obfuscating qdb_error_t
const ( Success ErrorType = C.qdb_e_ok Created ErrorType = C.qdb_e_ok_created ErrUninitialized ErrorType = C.qdb_e_uninitialized ErrAliasNotFound ErrorType = C.qdb_e_alias_not_found ErrAliasAlreadyExists ErrorType = C.qdb_e_alias_already_exists ErrOutOfBounds ErrorType = C.qdb_e_out_of_bounds ErrSkipped ErrorType = C.qdb_e_skipped ErrIncompatibleType ErrorType = C.qdb_e_incompatible_type ErrContainerEmpty ErrorType = C.qdb_e_container_empty ErrContainerFull ErrorType = C.qdb_e_container_full ErrElementNotFound ErrorType = C.qdb_e_element_not_found ErrElementAlreadyExists ErrorType = C.qdb_e_element_already_exists ErrOverflow ErrorType = C.qdb_e_overflow ErrUnderflow ErrorType = C.qdb_e_underflow ErrTagAlreadySet ErrorType = C.qdb_e_tag_already_set ErrTagNotSet ErrorType = C.qdb_e_tag_not_set ErrTimeout ErrorType = C.qdb_e_timeout ErrConnectionRefused ErrorType = C.qdb_e_connection_refused ErrConnectionReset ErrorType = C.qdb_e_connection_reset ErrUnstableCluster ErrorType = C.qdb_e_unstable_cluster ErrTryAgain ErrorType = C.qdb_e_try_again ErrConflict ErrorType = C.qdb_e_conflict ErrNotConnected ErrorType = C.qdb_e_not_connected ErrResourceLocked ErrorType = C.qdb_e_resource_locked ErrSystemRemote ErrorType = C.qdb_e_system_remote ErrSystemLocal ErrorType = C.qdb_e_system_local ErrInternalRemote ErrorType = C.qdb_e_internal_remote ErrInternalLocal ErrorType = C.qdb_e_internal_local ErrNoMemoryRemote ErrorType = C.qdb_e_no_memory_remote ErrNoMemoryLocal ErrorType = C.qdb_e_no_memory_local ErrInvalidProtocol ErrorType = C.qdb_e_invalid_protocol ErrHostNotFound ErrorType = C.qdb_e_host_not_found ErrBufferTooSmall ErrorType = C.qdb_e_buffer_too_small ErrNotImplemented ErrorType = C.qdb_e_not_implemented ErrInvalidVersion ErrorType = C.qdb_e_invalid_version ErrInvalidArgument ErrorType = C.qdb_e_invalid_argument ErrInvalidHandle ErrorType = C.qdb_e_invalid_handle ErrReservedAlias ErrorType = C.qdb_e_reserved_alias ErrUnmatchedContent ErrorType = 
C.qdb_e_unmatched_content ErrInvalidIterator ErrorType = C.qdb_e_invalid_iterator ErrEntryTooLarge ErrorType = C.qdb_e_entry_too_large ErrTransactionPartialFailure ErrorType = C.qdb_e_transaction_partial_failure ErrOperationDisabled ErrorType = C.qdb_e_operation_disabled ErrOperationNotPermitted ErrorType = C.qdb_e_operation_not_permitted ErrIteratorEnd ErrorType = C.qdb_e_iterator_end ErrInvalidReply ErrorType = C.qdb_e_invalid_reply ErrNoSpaceLeft ErrorType = C.qdb_e_no_space_left ErrQuotaExceeded ErrorType = C.qdb_e_quota_exceeded ErrAliasTooLong ErrorType = C.qdb_e_alias_too_long ErrClockSkew ErrorType = C.qdb_e_clock_skew ErrAccessDenied ErrorType = C.qdb_e_access_denied ErrLoginFailed ErrorType = C.qdb_e_login_failed ErrColumnNotFound ErrorType = C.qdb_e_column_not_found ErrQueryTooComplex ErrorType = C.qdb_e_query_too_complex ErrInvalidCryptoKey ErrorType = C.qdb_e_invalid_crypto_key ErrInvalidQuery ErrorType = C.qdb_e_invalid_query ErrInvalidRegex ErrorType = C.qdb_e_invalid_regex ErrUnknownUser ErrorType = C.qdb_e_unknown_user ErrInterrupted ErrorType = C.qdb_e_interrupted ErrNetworkInbufTooSmall ErrorType = C.qdb_e_network_inbuf_too_small ErrNetworkError ErrorType = C.qdb_e_network_error ErrDataCorruption ErrorType = C.qdb_e_data_corruption )
Success : Success. Created : Success. A new entry has been created. ErrUninitialized : Uninitialized error. ErrAliasNotFound : Entry alias/key was not found. ErrAliasAlreadyExists : Entry alias/key already exists. ErrOutOfBounds : Index out of bounds. ErrSkipped : Skipped operation. Used in batches and transactions. ErrIncompatibleType : Entry or column is incompatible with the operation. ErrContainerEmpty : Container is empty. ErrContainerFull : Container is full. ErrElementNotFound : Element was not found. ErrElementAlreadyExists : Element already exists. ErrOverflow : Arithmetic operation overflows. ErrUnderflow : Arithmetic operation underflows. ErrTagAlreadySet : Tag is already set. ErrTagNotSet : Tag is not set. ErrTimeout : Operation timed out. ErrConnectionRefused : Connection was refused. ErrConnectionReset : Connection was reset. ErrUnstableCluster : Cluster is unstable. ErrTryAgain : Please retry. ErrConflict : There is another ongoing conflicting operation. ErrNotConnected : Handle is not connected. ErrResourceLocked : Resource is locked. ErrSystemRemote : System error on remote node (server-side). Please check errno or GetLastError() for actual error. ErrSystemLocal : System error on local system (client-side). Please check errno or GetLastError() for actual error. ErrInternalRemote : Internal error on remote node (server-side). ErrInternalLocal : Internal error on local system (client-side). ErrNoMemoryRemote : No memory on remote node (server-side). ErrNoMemoryLocal : No memory on local system (client-side). ErrInvalidProtocol : Protocol is invalid. ErrHostNotFound : Host was not found. ErrBufferTooSmall : Buffer is too small. ErrNotImplemented : Operation is not implemented. ErrInvalidVersion : Version is invalid. ErrInvalidArgument : Argument is invalid. ErrInvalidHandle : Handle is invalid. ErrReservedAlias : Alias/key is reserved. ErrUnmatchedContent : Content did not match. ErrInvalidIterator : Iterator is invalid. 
ErrEntryTooLarge : Entry is too large. ErrTransactionPartialFailure : Transaction failed partially. ErrOperationDisabled : Operation has not been enabled in cluster configuration. ErrOperationNotPermitted : Operation is not permitted. ErrIteratorEnd : Iterator reached the end. ErrInvalidReply : Cluster sent an invalid reply. ErrNoSpaceLeft : No more space on disk. ErrQuotaExceeded : Disk space quota has been reached. ErrAliasTooLong : Alias is too long. ErrClockSkew : Cluster nodes have important clock differences. ErrAccessDenied : Access is denied. ErrLoginFailed : Login failed. ErrColumnNotFound : Column was not found. ErrQueryTooComplex : Find is too complex. ErrInvalidCryptoKey : Security key is invalid. ErrInvalidQuery : Query is invalid. ErrInvalidRegex : Regular expression is invalid. ErrUnknownUser : Unknown user. ErrInterrupted : Operation has been interrupted. ErrNetworkInbufTooSmall : Network input buffer is too small to complete the operation. ErrNetworkError : Network error. ErrDataCorruption : Data corruption has been detected.
type Find ¶
type Find struct { HandleType // contains filtered or unexported fields }
Find : a builder type used to execute a query. Retrieves all entries’ aliases that match the specified query. For the complete grammar, please refer to the documentation. Queries are transactional. The complexity of this function is dependent on the complexity of the query.
func (Find) ExecuteString ¶
ExecuteString : Execute a string query immediately
type HandleType ¶
type HandleType struct {
// contains filtered or unexported fields
}
HandleType : An opaque handle to internal API-allocated structures needed for maintaining connection to a cluster.
Example ¶
var h HandleType h.Open(ProtocolTCP)
Output:
func MustSetupHandle ¶
func MustSetupHandle(clusterURI string, timeout time.Duration) HandleType
MustSetupHandle : Setup a handle, panic on error
The handle is already opened with tcp protocol The handle is already connected with the clusterURI string Panic on error
func MustSetupSecuredHandle ¶
func MustSetupSecuredHandle(clusterURI, clusterPublicKeyFile, userCredentialFile string, timeout time.Duration, encryption Encryption) HandleType
MustSetupSecuredHandle : Setup a secured handle, panic on error
The handle is already opened with tcp protocol The handle is already secured with the cluster public key and the user credential files provided (Note: the filenames are needed, not the content of the files) The handle is already connected with the clusterURI string
func NewHandle ¶
func NewHandle() (HandleType, error)
NewHandle : Create a new handle, return error if needed
The handle is already opened (not connected) with tcp protocol
func SetupHandle ¶
func SetupHandle(clusterURI string, timeout time.Duration) (HandleType, error)
SetupHandle : Setup a handle, return error if needed
The handle is already opened with tcp protocol The handle is already connected with the clusterURI string
func SetupSecuredHandle ¶
func SetupSecuredHandle(clusterURI, clusterPublicKeyFile, userCredentialFile string, timeout time.Duration, encryption Encryption) (HandleType, error)
SetupSecuredHandle : Setup a secured handle, return error if needed
The handle is already opened with tcp protocol The handle is already secured with the cluster public key and the user credential files provided (Note: the filenames are needed, not the content of the files) The handle is already connected with the clusterURI string
func (HandleType) APIBuild ¶
func (h HandleType) APIBuild() string
APIBuild : Returns a string describing the exact API build.
func (HandleType) APIVersion ¶
func (h HandleType) APIVersion() string
APIVersion : Returns a string describing the API version.
func (HandleType) AddClusterPublicKey ¶
func (h HandleType) AddClusterPublicKey(secret string) error
AddClusterPublicKey : add the cluster public key from a cluster config file.
func (HandleType) AddUserCredentials ¶
func (h HandleType) AddUserCredentials(name, secret string) error
AddUserCredentials : add user credentials from a username and secret key.
func (HandleType) Blob ¶
func (h HandleType) Blob(alias string) BlobEntry
Blob : Create a blob entry object
func (HandleType) Close ¶
func (h HandleType) Close() error
Close : Closes the handle previously opened.
This results in terminating all connections and releasing all internal buffers, including buffers which may have been allocated as a result of batch operations or get operations.
func (HandleType) Cluster ¶
func (h HandleType) Cluster() *Cluster
Cluster : Create a cluster object to execute commands on a cluster
func (HandleType) Connect ¶
func (h HandleType) Connect(clusterURI string) error
Connect : connect a previously opened handle
Binds the client instance to a quasardb cluster and connect to at least one node within. Quasardb URI are in the form qdb://<address>:<port> where <address> is either an IPv4 or IPv6 (surrounded with square brackets), or a domain name. It is recommended to specify multiple addresses should the designated node be unavailable. URI examples: qdb://myserver.org:2836 - Connects to myserver.org on the port 2836 qdb://127.0.0.1:2836 - Connects to the local IPv4 loopback on the port 2836 qdb://myserver1.org:2836,myserver2.org:2836 - Connects to myserver1.org or myserver2.org on the port 2836 qdb://[::1]:2836 - Connects to the local IPv6 loopback on the port 2836
func (HandleType) DirectConnect ¶
func (h HandleType) DirectConnect(nodeURI string) (DirectHandleType, error)
DirectConnect opens a connection to a node for use with the direct API
The returned direct handle must be freed with Close(). Releasing the handle has no impact on non-direct connections or other direct handles.
func (HandleType) GetClientMaxInBufSize ¶
func (h HandleType) GetClientMaxInBufSize() (uint, error)
GetClientMaxInBufSize : Gets the maximum incoming buffer size for all network operations of the client.
func (HandleType) GetClientMaxParallelism ¶ added in v3.13.2
func (h HandleType) GetClientMaxParallelism() (uint, error)
GetClientMaxParallelism : Gets the maximum parallelism option of the client.
func (HandleType) GetClusterMaxInBufSize ¶
func (h HandleType) GetClusterMaxInBufSize() (uint, error)
GetClusterMaxInBufSize : Gets the maximum incoming buffer size for all network operations, at cluster level.
func (HandleType) GetLastError ¶
func (h HandleType) GetLastError() (string, error)
func (HandleType) GetTagged ¶
func (h HandleType) GetTagged(tag string) ([]string, error)
GetTagged : Retrieves all entries that have the specified tag.
Tagging an entry enables you to search for entries based on their tags. Tags scale across nodes. The tag must exist. The complexity of this function is constant.
func (HandleType) GetTags ¶
func (h HandleType) GetTags(entryAlias string) ([]string, error)
GetTags : Retrieves all the tags of an entry.
Tagging an entry enables you to search for entries based on their tags. Tags scale across nodes. The entry must exist.
func (HandleType) Integer ¶
func (h HandleType) Integer(alias string) IntegerEntry
Integer : Create an integer entry object
func (HandleType) NodeStatistics
deprecated
func (h HandleType) NodeStatistics(nodeID string) (Statistics, error)
NodeStatistics : Retrieve statistics for a specific node
Deprecated: Statistics will be fetched directly from the node using the new direct API
func (HandleType) Open ¶
func (h HandleType) Open(protocol Protocol) error
Open : Creates a handle.
No connection will be established. Not needed if you created your handle with NewHandle.
func (HandleType) PrefixCount ¶
func (h HandleType) PrefixCount(prefix string) (uint64, error)
PrefixCount : Retrieves the count of all entries matching the provided prefix.
A prefix-based count counts all entries matching a provided prefix.
func (HandleType) PrefixGet ¶
func (h HandleType) PrefixGet(prefix string, limit int) ([]string, error)
PrefixGet : Retrieves the list of all entries matching the provided prefix.
A prefix-based search will enable you to find all entries matching a provided prefix. This function returns the list of aliases. It’s up to the user to query the content associated with every entry, if needed.
func (HandleType) Query ¶
func (h HandleType) Query(query string) *Query
Query : Create a query object to execute
func (HandleType) Release ¶
func (h HandleType) Release(buffer unsafe.Pointer)
Release : Releases an API-allocated buffer.
Failure to properly call this function may result in excessive memory usage. Most operations that return a content (e.g. batch operations, qdb_blob_get, qdb_blob_get_and_update, qdb_blob_compare_and_swap...) will allocate a buffer for the content and will not release the allocated buffer until you either call this function or close the handle. The function will be able to release any kind of buffer allocated by a quasardb API call, whether it’s a single buffer, an array or an array of buffers.
func (HandleType) SetClientMaxInBufSize ¶
func (h HandleType) SetClientMaxInBufSize(bufSize uint) error
SetClientMaxInBufSize : Sets the maximum incoming buffer size for all network operations of the client.
Only modify this setting if you expect to receive very large answers from the server.
func (HandleType) SetClientMaxParallelism ¶ added in v3.13.2
func (h HandleType) SetClientMaxParallelism(threadCount uint) error
SetClientMaxParallelism : Sets the maximum parallelism option of the client.
func (HandleType) SetCompression ¶
func (h HandleType) SetCompression(compressionLevel Compression) error
SetCompression : Set the compression level for all future messages emitted by the specified handle.
Regardless of this parameter, the API will be able to read whatever compression the server uses.
func (HandleType) SetEncryption ¶
func (h HandleType) SetEncryption(encryption Encryption) error
SetEncryption : Sets the encryption method of the handle.
The encryption setting applies to communications with the cluster; see SetupSecuredHandle for typical usage.
func (HandleType) SetMaxCardinality ¶
func (h HandleType) SetMaxCardinality(maxCardinality uint) error
SetMaxCardinality : Sets the maximum allowed cardinality of a quasardb query.
The default value is 10,007. The minimum allowed values is 100.
func (HandleType) SetTimeout ¶
func (h HandleType) SetTimeout(timeout time.Duration) error
SetTimeout : Sets the timeout of all network operations.
The lower the timeout, the higher the risk of having timeout errors. Keep in mind that the server-side timeout might be shorter.
func (HandleType) Statistics ¶
func (h HandleType) Statistics() (map[string]Statistics, error)
Statistics : Retrieve statistics for all nodes
func (HandleType) Timeseries ¶
func (h HandleType) Timeseries(alias string) TimeseriesEntry
Timeseries : Create a timeseries entry object
func (HandleType) TsBatch ¶
func (h HandleType) TsBatch(cols ...TsBatchColumnInfo) (*TsBatch, error)
TsBatch : create a batch object for the specified columns
type IntegerEntry ¶
type IntegerEntry struct {
Entry
}
IntegerEntry : int data type
Example ¶
SetLogFile(ExamplesLogFilePath) h := MustSetupHandle(insecureURI, 120*time.Second) defer h.Close() alias := "IntAlias" integer := h.Integer(alias) integer.Put(int64(3), NeverExpires()) defer integer.Remove() obtainedContent, _ := integer.Get() fmt.Println("Get content:", obtainedContent) newContent := int64(87) integer.Update(newContent, NeverExpires()) obtainedContent, _ = integer.Get() fmt.Println("Get updated content:", obtainedContent) integer.Add(3) obtainedContent, _ = integer.Get() fmt.Println("Get added content:", obtainedContent)
Output: Get content: 3 Get updated content: 87 Get added content: 90
func (IntegerEntry) Add ¶
func (entry IntegerEntry) Add(added int64) (int64, error)
Add : Atomically increases or decreases a signed 64-bit integer.
The specified entry will be atomically increased (or decreased) according to the given addend value: To increase the value, specify a positive added value. To decrease the value, specify a negative added value. The function returns the result of the operation. The entry must already exist.
func (IntegerEntry) Get ¶
func (entry IntegerEntry) Get() (int64, error)
Get : Atomically retrieves the value of a signed 64-bit integer.
Atomically retrieves the value of an existing 64-bit integer.
func (IntegerEntry) Put ¶
func (entry IntegerEntry) Put(content int64, expiry time.Time) error
Put : Creates a new signed 64-bit integer.
Atomically creates an entry of the given alias and sets it to a cross-platform signed 64-bit integer. If the entry already exists, the function returns an error. You can specify an expiry time or use NeverExpires if you don’t want the entry to expire. If you want to create or update an entry use Update. The value will be correctly translated independently of the endianness of the client’s platform.
func (*IntegerEntry) Update ¶
func (entry *IntegerEntry) Update(newContent int64, expiry time.Time) error
Update : Creates or updates a signed 64-bit integer.
Atomically updates an entry of the given alias to the provided value. If the entry doesn’t exist, it will be created. You can specify an expiry time or use NeverExpires if you don’t want the entry to expire.
type Metadata ¶
type Metadata struct { Ref RefID Type EntryType Size uint64 ModificationTime time.Time ExpiryTime time.Time }
Metadata : A structure representing the metadata of an entry in the database.
type Node ¶
type Node struct { HandleType // contains filtered or unexported fields }
Node : a structure giving access to various pieces of information or actions on a node
Example ¶
SetLogFile(ExamplesLogFilePath) h := MustSetupHandle(insecureURI, 120*time.Second) defer h.Close() node := h.Node(insecureURI) status, _ := node.Status() fmt.Println("Status - Network.ListeningEndpoint:", status.Network.ListeningEndpoint) config_bytes, _ := node.Config() config, _ := gabs.ParseJSON(config_bytes) fmt.Println("Config - Listen On:", config.Path("local.network.listen_on").Data().(string)) topology, _ := node.Topology() fmt.Println("Topology - Successor is same as predecessor:", topology.Successor.Endpoint == topology.Predecessor.Endpoint)
Output: Status - Network.ListeningEndpoint: 127.0.0.1:2836 Config - Listen On: 127.0.0.1:2836 Topology - Successor is same as predecessor: true
func (Node) Config ¶
Config :
Returns the configuration as a byte array of a json object, you can use a method of your choice to unmarshall it. An example is available using the gabs library The configuration is a JSON object, as described in the documentation.
func (Node) RawConfig ¶
RawConfig :
Returns the configuration of a node. The configuration is a JSON object as a byte array, as described in the documentation.
func (Node) RawStatus ¶
RawStatus :
Returns the status of a node. The status is a JSON object as a byte array and contains current information of the node state, as described in the documentation.
func (Node) RawTopology ¶
RawTopology :
Returns the topology of a node. The topology is a JSON object as a byte array containing the node address, and the addresses of its successor and predecessor.
func (Node) Status ¶
func (n Node) Status() (NodeStatus, error)
Status :
Returns the status of a node. The status is a JSON object and contains current information of the node state, as described in the documentation.
func (Node) Topology ¶
func (n Node) Topology() (NodeTopology, error)
Topology :
Returns the topology of a node. The topology is a JSON object containing the node address, and the addresses of its successor and predecessor.
type NodeLocation ¶
NodeLocation : A structure representing the address of a quasardb node.
type NodeStatus ¶
type NodeStatus struct { Memory struct { VM struct { Used int64 `json:"used"` Total int64 `json:"total"` } `json:"vm"` Physmem struct { Used int64 `json:"used"` Total int64 `json:"total"` } `json:"physmem"` } `json:"memory"` CPUTimes struct { Idle int64 `json:"idle"` System int64 `json:"system"` User int64 `json:"user"` } `json:"cpu_times"` DiskUsage struct { Free int64 `json:"free"` Total int64 `json:"total"` } `json:"disk_usage"` Network struct { ListeningEndpoint string `json:"listening_endpoint"` Partitions struct { Count int `json:"count"` MaxSessions int `json:"max_sessions"` AvailableSessions int `json:"available_sessions"` } `json:"partitions"` } `json:"network"` NodeID string `json:"node_id"` OperatingSystem string `json:"operating_system"` HardwareConcurrency int `json:"hardware_concurrency"` Timestamp time.Time `json:"timestamp"` Startup time.Time `json:"startup"` EngineVersion string `json:"engine_version"` EngineBuildDate time.Time `json:"engine_build_date"` Entries struct { Resident struct { Count int `json:"count"` Size int `json:"size"` } `json:"resident"` Persisted struct { Count int `json:"count"` Size int `json:"size"` } `json:"persisted"` } `json:"entries"` Operations struct { Get struct { Count int `json:"count"` Successes int `json:"successes"` Failures int `json:"failures"` Pageins int `json:"pageins"` Evictions int `json:"evictions"` InBytes int `json:"in_bytes"` OutBytes int `json:"out_bytes"` } `json:"get"` GetAndRemove struct { Count int `json:"count"` Successes int `json:"successes"` Failures int `json:"failures"` Pageins int `json:"pageins"` Evictions int `json:"evictions"` InBytes int `json:"in_bytes"` OutBytes int `json:"out_bytes"` } `json:"get_and_remove"` Put struct { Count int `json:"count"` Successes int `json:"successes"` Failures int `json:"failures"` Pageins int `json:"pageins"` Evictions int `json:"evictions"` InBytes int `json:"in_bytes"` OutBytes int `json:"out_bytes"` } `json:"put"` Update struct { Count int `json:"count"` 
Successes int `json:"successes"` Failures int `json:"failures"` Pageins int `json:"pageins"` Evictions int `json:"evictions"` InBytes int `json:"in_bytes"` OutBytes int `json:"out_bytes"` } `json:"update"` GetAndUpdate struct { Count int `json:"count"` Successes int `json:"successes"` Failures int `json:"failures"` Pageins int `json:"pageins"` Evictions int `json:"evictions"` InBytes int `json:"in_bytes"` OutBytes int `json:"out_bytes"` } `json:"get_and_update"` CompareAndSwap struct { Count int `json:"count"` Successes int `json:"successes"` Failures int `json:"failures"` Pageins int `json:"pageins"` Evictions int `json:"evictions"` InBytes int `json:"in_bytes"` OutBytes int `json:"out_bytes"` } `json:"compare_and_swap"` Remove struct { Count int `json:"count"` Successes int `json:"successes"` Failures int `json:"failures"` Pageins int `json:"pageins"` Evictions int `json:"evictions"` InBytes int `json:"in_bytes"` OutBytes int `json:"out_bytes"` } `json:"remove"` RemoveIf struct { Count int `json:"count"` Successes int `json:"successes"` Failures int `json:"failures"` Pageins int `json:"pageins"` Evictions int `json:"evictions"` InBytes int `json:"in_bytes"` OutBytes int `json:"out_bytes"` } `json:"remove_if"` PurgeAll struct { Count int `json:"count"` Successes int `json:"successes"` Failures int `json:"failures"` Pageins int `json:"pageins"` Evictions int `json:"evictions"` InBytes int `json:"in_bytes"` OutBytes int `json:"out_bytes"` } `json:"purge_all"` } `json:"operations"` Overall struct { Count int `json:"count"` Successes int `json:"successes"` Failures int `json:"failures"` Pageins int `json:"pageins"` Evictions int `json:"evictions"` InBytes int `json:"in_bytes"` OutBytes int `json:"out_bytes"` } `json:"overall"` }
NodeStatus : a json representation object containing the status of a node
type NodeTopology ¶
type NodeTopology struct { Predecessor struct { Reference string `json:"reference"` Endpoint string `json:"endpoint"` } `json:"predecessor"` Center struct { Reference string `json:"reference"` Endpoint string `json:"endpoint"` } `json:"center"` Successor struct { Reference string `json:"reference"` Endpoint string `json:"endpoint"` } `json:"successor"` }
type Query ¶
type Query struct { HandleType // contains filtered or unexported fields }
Query : query object
Example ¶
SetLogFile(ExamplesLogFilePath) h := MustSetupHandle(insecureURI, 120*time.Second) defer h.Close() var aliases []string aliases = append(aliases, generateAlias(16)) aliases = append(aliases, generateAlias(16)) blob := h.Blob("alias_blob") blob.Put([]byte("asd"), NeverExpires()) defer blob.Remove() blob.AttachTag("all") blob.AttachTag("first") integer := h.Integer("alias_integer") integer.Put(32, NeverExpires()) defer integer.Remove() integer.AttachTag("all") integer.AttachTag("second") var obtainedAliases []string obtainedAliases, _ = h.Find().Tag("all").Execute() fmt.Println("Get all aliases:", obtainedAliases) obtainedAliases, _ = h.Find().Tag("all").NotTag("second").Execute() fmt.Println("Get only first alias:", obtainedAliases) obtainedAliases, _ = h.Find().Tag("all").Type("int").Execute() fmt.Println("Get only integer alias:", obtainedAliases) obtainedAliases, _ = h.Find().Tag("unexisting_alias").Execute() fmt.Println("Get no aliases:", obtainedAliases) _, err := h.Find().NotTag("second").Execute() fmt.Println("Error:", err) _, err = h.Find().Type("int").Execute() fmt.Println("Error:", err)
Output: Get all aliases: [alias_blob alias_integer] Get only first alias: [alias_blob] Get only integer alias: [alias_integer] Get no aliases: [] Error: query should have at least one valid tag Error: query should have at least one valid tag
type QueryPoint ¶
type QueryPoint C.qdb_point_result_t
QueryPoint : a variadic structure holding the result type as well as the result value
func (*QueryPoint) Get ¶
func (r *QueryPoint) Get() QueryPointResult
Get : retrieve the raw interface
func (*QueryPoint) GetBlob ¶
func (r *QueryPoint) GetBlob() ([]byte, error)
GetBlob : retrieve a blob from the interface
func (*QueryPoint) GetCount ¶
func (r *QueryPoint) GetCount() (int64, error)
GetCount : retrieve the count from the interface
func (*QueryPoint) GetDouble ¶
func (r *QueryPoint) GetDouble() (float64, error)
GetDouble : retrieve a double from the interface
func (*QueryPoint) GetInt64 ¶
func (r *QueryPoint) GetInt64() (int64, error)
GetInt64 : retrieve an int64 from the interface
func (*QueryPoint) GetString ¶
func (r *QueryPoint) GetString() (string, error)
GetString : retrieve a string from the interface
func (*QueryPoint) GetTimestamp ¶
func (r *QueryPoint) GetTimestamp() (time.Time, error)
GetTimestamp : retrieve a timestamp from the interface
type QueryPointResult ¶
type QueryPointResult struct {
// contains filtered or unexported fields
}
QueryPointResult : a query result point
func (QueryPointResult) Type ¶
func (r QueryPointResult) Type() QueryResultValueType
Type : gives the type of the query point result
func (QueryPointResult) Value ¶
func (r QueryPointResult) Value() interface{}
Value : gives the interface{} value of the query point result
type QueryResult ¶
type QueryResult struct {
// contains filtered or unexported fields
}
QueryResult : a query result
func (QueryResult) Columns ¶
func (r QueryResult) Columns(row *QueryPoint) QueryRow
Columns : create columns from a row
func (QueryResult) ColumnsCount ¶
func (r QueryResult) ColumnsCount() int64
ColumnsCount : get the number of columns of each row
func (QueryResult) ColumnsNames ¶
func (r QueryResult) ColumnsNames() []string
ColumnsNames : get the column names of each row
func (QueryResult) ErrorMessage ¶
func (r QueryResult) ErrorMessage() string
ErrorMessage : the error message in case of failure
func (QueryResult) RowCount ¶
func (r QueryResult) RowCount() int64
RowCount : the number of returned rows
func (QueryResult) Rows ¶
func (r QueryResult) Rows() QueryRows
Rows : get rows of a query table result
func (QueryResult) ScannedPoints ¶
func (r QueryResult) ScannedPoints() int64
ScannedPoints : number of points scanned
The actual number of scanned points may be greater
type QueryResultValueType ¶
type QueryResultValueType int64
QueryResultValueType : an enum of possible query point result types
const ( QueryResultNone QueryResultValueType = C.qdb_query_result_none QueryResultDouble QueryResultValueType = C.qdb_query_result_double QueryResultBlob QueryResultValueType = C.qdb_query_result_blob QueryResultInt64 QueryResultValueType = C.qdb_query_result_int64 QueryResultString QueryResultValueType = C.qdb_query_result_string QueryResultTimestamp QueryResultValueType = C.qdb_query_result_timestamp QueryResultCount QueryResultValueType = C.qdb_query_result_count )
QueryResultNone : query result value none QueryResultDouble : query result value double QueryResultBlob : query result value blob QueryResultInt64 : query result value int64 QueryResultString : query result value string QueryResultSymbol : query result value symbol QueryResultTimestamp : query result value timestamp QueryResultCount : query result value count
type Statistics ¶
type Statistics struct { CPU struct { Idle int64 `json:"idle"` System int64 `json:"system"` User int64 `json:"user"` } `json:"cpu"` Disk struct { BytesFree int64 `json:"bytes_free"` BytesTotal int64 `json:"bytes_total"` Path string `json:"path"` } `json:"disk"` EngineBuildDate string `json:"engine_build_date"` EngineVersion string `json:"engine_version"` HardwareConcurrency int64 `json:"hardware_concurrency"` Memory struct { BytesResident int64 `json:"bytes_resident_size"` ResidentCount int64 `json:"resident_count"` Physmem struct { Used int64 `json:"bytes_used"` Total int64 `json:"bytes_total"` } `json:"physmem"` VM struct { Used int64 `json:"bytes_used"` Total int64 `json:"bytes_total"` } `json:"vm"` } `json:"memory"` Network struct { CurrentUsersCount int64 `json:"current_users_count"` Sessions struct { AvailableCount int64 `json:"available_count"` UnavailableCount int64 `json:"unavailable_count"` MaxCount int64 `json:"max_count"` } `json:"sessions"` } `json:"network"` PartitionsCount int64 `json:"partitions_count"` NodeID string `json:"node_id"` OperatingSystem string `json:"operating_system"` Persistence struct { BytesCapacity int64 `json:"bytes_capacity"` BytesRead int64 `json:"bytes_read"` BytesUtilized int64 `json:"bytes_utilized"` BytesWritten int64 `json:"bytes_written"` EntriesCount int64 `json:"entries_count"` } `json:"persistence"` Requests struct { BytesOut int64 `json:"bytes_out"` SuccessesCount int64 `json:"successes_count"` TotalCount int64 `json:"total_count"` } `json:"requests"` Startup int64 `json:"startup"` }
Statistics : json adaptable structure with node information
type TimeseriesEntry ¶
type TimeseriesEntry struct {
Entry
}
TimeseriesEntry : timeseries double entry data type
Example ¶
SetLogFile(ExamplesLogFilePath) h := MustSetupHandle(insecureURI, 120*time.Second) defer h.Close() timeseries := h.Timeseries("alias") fmt.Println("timeseries:", timeseries.Alias())
Output: timeseries: alias
func (TimeseriesEntry) BlobColumn ¶
func (entry TimeseriesEntry) BlobColumn(columnName string) TsBlobColumn
BlobColumn : create a column object
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithData("ExampleTimeseriesEntry_BlobColumn") defer h.Close() column := timeseries.BlobColumn("series_column_blob") fmt.Println("column:", column.Name())
Output: column: series_column_blob
func (TimeseriesEntry) Bulk ¶
func (entry TimeseriesEntry) Bulk(cols ...TsColumnInfo) (*TsBulk, error)
Bulk : create a bulk object for the specified columns
If no columns are specified it gets the server side registered columns
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithColumns("ExampleTimeseriesEntry_Bulk") defer h.Close() bulk, err := timeseries.Bulk(NewTsColumnInfo("series_column_blob", TsColumnBlob), NewTsColumnInfo("series_column_double", TsColumnDouble)) if err != nil { return // handle error } // Don't forget to release defer bulk.Release() if err != nil { return // handle error } fmt.Println("RowCount:", bulk.RowCount())
Output: RowCount: 0
func (TimeseriesEntry) Columns ¶
func (entry TimeseriesEntry) Columns() ([]TsBlobColumn, []TsDoubleColumn, []TsInt64Column, []TsStringColumn, []TsTimestampColumn, error)
Columns : return the current columns
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithColumns("ExampleTimeseriesEntry_Columns") defer h.Close() blobColumns, doubleColumns, int64Columns, stringColumns, timestampColumns, err := timeseries.Columns() if err != nil { // handle error } for _, col := range blobColumns { fmt.Println("column:", col.Name()) // do something like Insert, GetRanges with a blob column } for _, col := range doubleColumns { fmt.Println("column:", col.Name()) // do something like Insert, GetRanges with a double column } for _, col := range int64Columns { fmt.Println("column:", col.Name()) // do something like Insert, GetRanges with a int64 column } for _, col := range stringColumns { fmt.Println("column:", col.Name()) // do something like Insert, GetRanges with a string column } for _, col := range timestampColumns { fmt.Println("column:", col.Name()) // do something like Insert, GetRanges with a timestamp column }
Output: column: series_column_blob column: series_column_double column: series_column_int64 column: series_column_string column: series_column_symbol column: series_column_timestamp
func (TimeseriesEntry) ColumnsInfo ¶
func (entry TimeseriesEntry) ColumnsInfo() ([]TsColumnInfo, error)
ColumnsInfo : return the current columns information
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithColumns("ExampleTimeseriesEntry_ColumnsInfo") defer h.Close() columns, err := timeseries.ColumnsInfo() if err != nil { // handle error } for _, col := range columns { fmt.Println("column:", col.Name()) }
Output: column: series_column_blob column: series_column_double column: series_column_int64 column: series_column_string column: series_column_timestamp column: series_column_symbol
func (TimeseriesEntry) Create ¶
func (entry TimeseriesEntry) Create(shardSize time.Duration, cols ...TsColumnInfo) error
Create : create a new timeseries
First parameter is the duration limit to organize a shard Ex: shardSize := 24 * time.Hour
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseries("ExampleTimeseriesEntry_Create") defer h.Close() // duration, columns... timeseries.Create(24*time.Hour, NewTsColumnInfo("series_column_blob", TsColumnBlob), NewTsColumnInfo("series_column_double", TsColumnDouble))
Output:
func (TimeseriesEntry) DoubleColumn ¶
func (entry TimeseriesEntry) DoubleColumn(columnName string) TsDoubleColumn
DoubleColumn : create a column object
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithColumns("ExampleTimeseriesEntry_DoubleColumn") defer h.Close() column := timeseries.DoubleColumn("series_column_double") fmt.Println("column:", column.Name())
Output: column: series_column_double
func (TimeseriesEntry) InsertColumns ¶
func (entry TimeseriesEntry) InsertColumns(cols ...TsColumnInfo) error
InsertColumns : insert columns in an existing timeseries
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithColumns("ExampleTimeseriesEntry_InsertColumns") defer h.Close() err := timeseries.InsertColumns(NewTsColumnInfo("series_column_blob_2", TsColumnBlob), NewTsColumnInfo("series_column_double_2", TsColumnDouble)) if err != nil { // handle error } columns, err := timeseries.ColumnsInfo() if err != nil { // handle error } for _, col := range columns { fmt.Println("column:", col.Name()) }
Output: column: series_column_blob column: series_column_double column: series_column_int64 column: series_column_string column: series_column_timestamp column: series_column_symbol column: series_column_blob_2 column: series_column_double_2
func (TimeseriesEntry) Int64Column ¶
func (entry TimeseriesEntry) Int64Column(columnName string) TsInt64Column
Int64Column : create a column object
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithColumns("ExampleTimeseriesEntry_Int64Column") defer h.Close() column := timeseries.Int64Column("series_column_int64") fmt.Println("column:", column.Name())
Output: column: series_column_int64
func (TimeseriesEntry) StringColumn ¶
func (entry TimeseriesEntry) StringColumn(columnName string) TsStringColumn
StringColumn : create a column object
func (TimeseriesEntry) SymbolColumn ¶ added in v3.13.0
func (entry TimeseriesEntry) SymbolColumn(columnName string, symtableName string) TsStringColumn
SymbolColumn : create a column object (the symbol table name is not set)
func (TimeseriesEntry) TimestampColumn ¶
func (entry TimeseriesEntry) TimestampColumn(columnName string) TsTimestampColumn
TimestampColumn : create a column object
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithColumns("ExampleTimeseriesEntry_TimestampColumn") defer h.Close() column := timeseries.TimestampColumn("series_column_timestamp") fmt.Println("column:", column.Name())
Output: column: series_column_timestamp
type TsAggregationType ¶
type TsAggregationType C.qdb_ts_aggregation_type_t
TsAggregationType typedef of C.qdb_ts_aggregation_type
const ( AggFirst TsAggregationType = C.qdb_agg_first AggLast TsAggregationType = C.qdb_agg_last AggMin TsAggregationType = C.qdb_agg_min AggMax TsAggregationType = C.qdb_agg_max AggArithmeticMean TsAggregationType = C.qdb_agg_arithmetic_mean AggHarmonicMean TsAggregationType = C.qdb_agg_harmonic_mean AggGeometricMean TsAggregationType = C.qdb_agg_geometric_mean AggQuadraticMean TsAggregationType = C.qdb_agg_quadratic_mean AggCount TsAggregationType = C.qdb_agg_count AggSum TsAggregationType = C.qdb_agg_sum AggSumOfSquares TsAggregationType = C.qdb_agg_sum_of_squares AggSpread TsAggregationType = C.qdb_agg_spread AggSampleVariance TsAggregationType = C.qdb_agg_sample_variance AggSampleStddev TsAggregationType = C.qdb_agg_sample_stddev AggPopulationVariance TsAggregationType = C.qdb_agg_population_variance AggPopulationStddev TsAggregationType = C.qdb_agg_population_stddev AggAbsMin TsAggregationType = C.qdb_agg_abs_min AggAbsMax TsAggregationType = C.qdb_agg_abs_max AggProduct TsAggregationType = C.qdb_agg_product AggSkewness TsAggregationType = C.qdb_agg_skewness AggKurtosis TsAggregationType = C.qdb_agg_kurtosis )
Each type gets its value between the begin and end timestamps of aggregation
type TsBatch ¶
type TsBatch struct {
// contains filtered or unexported fields
}
TsBatch : A structure that allows appending data to a timeseries
func (*TsBatch) ExtraColumns ¶
func (t *TsBatch) ExtraColumns(cols ...TsBatchColumnInfo) error
ExtraColumns : Appends columns to the current batch table
func (*TsBatch) PushFast ¶
PushFast : Fast, in-place batch push that is efficient when doing lots of small, incremental pushes.
func (*TsBatch) Release ¶
func (t *TsBatch) Release()
Release : release the memory of the batch table
func (*TsBatch) RowSetBlob ¶
RowSetBlob : Set blob at specified index in current row
func (*TsBatch) RowSetBlobNoCopy ¶
RowSetBlobNoCopy : Set blob at specified index in current row without copying it
func (*TsBatch) RowSetDouble ¶
RowSetDouble : Set double at specified index in current row
func (*TsBatch) RowSetInt64 ¶
RowSetInt64 : Set int64 at specified index in current row
func (*TsBatch) RowSetString ¶
RowSetString : Set string at specified index in current row
func (*TsBatch) RowSetStringNoCopy ¶
RowSetStringNoCopy : Set string at specified index in current row without copying it
func (*TsBatch) RowSetTimestamp ¶
RowSetTimestamp : Add a timestamp to current row
type TsBatchColumnInfo ¶
TsBatchColumnInfo : Represents one column in a timeseries. Preallocate the underlying structure with the ElementCountHint.
func NewTsBatchColumnInfo ¶
func NewTsBatchColumnInfo(timeseries string, column string, hint int64) TsBatchColumnInfo
NewTsBatchColumnInfo : Creates a new TsBatchColumnInfo
type TsBlobAggregation ¶
type TsBlobAggregation struct {
// contains filtered or unexported fields
}
TsBlobAggregation : Aggregation of blob type
func NewBlobAggregation ¶
func NewBlobAggregation(kind TsAggregationType, rng TsRange) *TsBlobAggregation
NewBlobAggregation : Create new timeseries blob aggregation
func (TsBlobAggregation) Count ¶
func (t TsBlobAggregation) Count() int64
Count : returns the number of points aggregated into the result
func (TsBlobAggregation) Range ¶
func (t TsBlobAggregation) Range() TsRange
Range : returns the range of the aggregation
func (TsBlobAggregation) Result ¶
func (t TsBlobAggregation) Result() TsBlobPoint
Result : result of the aggregation
func (TsBlobAggregation) Type ¶
func (t TsBlobAggregation) Type() TsAggregationType
Type : returns the type of the aggregation
type TsBlobColumn ¶
type TsBlobColumn struct {
// contains filtered or unexported fields
}
TsBlobColumn : a time series blob column
func (TsBlobColumn) Aggregate ¶
func (column TsBlobColumn) Aggregate(aggs ...*TsBlobAggregation) ([]TsBlobAggregation, error)
Aggregate : Aggregate a sub-part of the time series.
It is an error to call this function on a non existing time-series.
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithData("ExampleTsBlobColumn_Aggregate") defer h.Close() column := timeseries.BlobColumn("series_column_blob") r := NewRange(time.Unix(0, 0), time.Unix(40, 5)) aggFirst := NewBlobAggregation(AggFirst, r) results, err := column.Aggregate(aggFirst) if err != nil { // handle error } fmt.Println("first:", string(results[0].Result().Content()))
Output: first: content_0
func (TsBlobColumn) EraseRanges ¶
func (column TsBlobColumn) EraseRanges(rgs ...TsRange) (uint64, error)
EraseRanges : erase all points in the specified ranges
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithData("ExampleTsBlobColumn_EraseRanges") defer h.Close() column := timeseries.BlobColumn("series_column_blob") r := NewRange(time.Unix(0, 0), time.Unix(40, 5)) numberOfErasedValues, err := column.EraseRanges(r) if err != nil { // handle error } fmt.Println("Number of erased values:", numberOfErasedValues)
Output: Number of erased values: 4
func (TsBlobColumn) GetRanges ¶
func (column TsBlobColumn) GetRanges(rgs ...TsRange) ([]TsBlobPoint, error)
GetRanges : Retrieves blobs in the specified range of the time series column.
It is an error to call this function on a non existing time-series.
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithData("ExampleTsBlobColumn_GetRanges") defer h.Close() column := timeseries.BlobColumn("series_column_blob") r := NewRange(time.Unix(0, 0), time.Unix(40, 5)) blobPoints, err := column.GetRanges(r) if err != nil { // handle error } for _, point := range blobPoints { fmt.Println("timestamp:", point.Timestamp().UTC(), "- value:", string(point.Content())) }
Output: timestamp: 1970-01-01 00:00:10 +0000 UTC - value: content_0 timestamp: 1970-01-01 00:00:20 +0000 UTC - value: content_1 timestamp: 1970-01-01 00:00:30 +0000 UTC - value: content_2 timestamp: 1970-01-01 00:00:40 +0000 UTC - value: content_3
func (TsBlobColumn) Insert ¶
func (column TsBlobColumn) Insert(points ...TsBlobPoint) error
Insert blob points into a timeseries
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithColumns("ExampleTsBlobColumn_Insert") defer h.Close() column := timeseries.BlobColumn("series_column_blob") // Insert only one point: column.Insert(NewTsBlobPoint(time.Now(), []byte("content"))) // Insert multiple points blobPoints := make([]TsBlobPoint, 2) blobPoints[0] = NewTsBlobPoint(time.Now(), []byte("content")) blobPoints[1] = NewTsBlobPoint(time.Now(), []byte("content_2")) err := column.Insert(blobPoints...) if err != nil { // handle error }
Output:
type TsBlobPoint ¶
type TsBlobPoint struct {
// contains filtered or unexported fields
}
TsBlobPoint : timestamped data
func NewTsBlobPoint ¶
func NewTsBlobPoint(timestamp time.Time, value []byte) TsBlobPoint
NewTsBlobPoint : Create new timeseries blob point
func (TsBlobPoint) Content ¶
func (t TsBlobPoint) Content() []byte
Content : return data point content
func (TsBlobPoint) Timestamp ¶
func (t TsBlobPoint) Timestamp() time.Time
Timestamp : return data point timestamp
type TsBulk ¶
type TsBulk struct {
// contains filtered or unexported fields
}
TsBulk : A structure that allows appending data to a timeseries
func (*TsBulk) GetTimestamp ¶
GetTimestamp : gets a timestamp in row
type TsColumnInfo ¶
type TsColumnInfo struct {
// contains filtered or unexported fields
}
TsColumnInfo : column information in timeseries
func NewSymbolColumnInfo ¶ added in v3.13.0
func NewSymbolColumnInfo(columnName string, symtableName string) TsColumnInfo
func NewTsColumnInfo ¶
func NewTsColumnInfo(columnName string, columnType TsColumnType) TsColumnInfo
NewTsColumnInfo : create a column info structure
func (TsColumnInfo) Symtable ¶ added in v3.13.0
func (t TsColumnInfo) Symtable() string
Symtable : return column symbol table name
type TsColumnType ¶
type TsColumnType C.qdb_ts_column_type_t
TsColumnType : Timeseries column types
const ( TsColumnUninitialized TsColumnType = C.qdb_ts_column_uninitialized TsColumnBlob TsColumnType = C.qdb_ts_column_blob TsColumnDouble TsColumnType = C.qdb_ts_column_double TsColumnInt64 TsColumnType = C.qdb_ts_column_int64 TsColumnString TsColumnType = C.qdb_ts_column_string TsColumnTimestamp TsColumnType = C.qdb_ts_column_timestamp TsColumnSymbol TsColumnType = C.qdb_ts_column_symbol )
Values
TsColumnDouble : column is a double point TsColumnBlob : column is a blob point TsColumnInt64 : column is an int64 point TsColumnTimestamp : column is a timestamp point TsColumnString : column is a string point TsColumnSymbol : column is a symbol point
type TsDoubleAggregation ¶
type TsDoubleAggregation struct {
// contains filtered or unexported fields
}
TsDoubleAggregation : Aggregation of double type
func NewDoubleAggregation ¶
func NewDoubleAggregation(kind TsAggregationType, rng TsRange) *TsDoubleAggregation
NewDoubleAggregation : Create new timeseries double aggregation
func (TsDoubleAggregation) Count ¶
func (t TsDoubleAggregation) Count() int64
Count : returns the number of points aggregated into the result
func (TsDoubleAggregation) Range ¶
func (t TsDoubleAggregation) Range() TsRange
Range : returns the range of the aggregation
func (TsDoubleAggregation) Result ¶
func (t TsDoubleAggregation) Result() TsDoublePoint
Result : result of the aggregation
func (TsDoubleAggregation) Type ¶
func (t TsDoubleAggregation) Type() TsAggregationType
Type : returns the type of the aggregation
type TsDoubleColumn ¶
type TsDoubleColumn struct {
// contains filtered or unexported fields
}
TsDoubleColumn : a time series double column
func (TsDoubleColumn) Aggregate ¶
func (column TsDoubleColumn) Aggregate(aggs ...*TsDoubleAggregation) ([]TsDoubleAggregation, error)
Aggregate : Aggregate a sub-part of a timeseries from the specified aggregations.
It is an error to call this function on a non existing time-series.
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithData("ExampleTsDoubleColumn_Aggregate") defer h.Close() column := timeseries.DoubleColumn("series_column_double") r := NewRange(time.Unix(0, 0), time.Unix(40, 5)) aggFirst := NewDoubleAggregation(AggFirst, r) aggMean := NewDoubleAggregation(AggArithmeticMean, r) results, err := column.Aggregate(aggFirst, aggMean) if err != nil { // handle error } fmt.Println("first:", results[0].Result().Content()) fmt.Println("mean:", results[1].Result().Content()) fmt.Println("number of elements reviewed for mean:", results[1].Count())
Output: first: 0 mean: 1.5 number of elements reviewed for mean: 4
func (TsDoubleColumn) EraseRanges ¶
func (column TsDoubleColumn) EraseRanges(rgs ...TsRange) (uint64, error)
EraseRanges : erase all points in the specified ranges
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithData("ExampleTsDoubleColumn_EraseRanges") defer h.Close() column := timeseries.DoubleColumn("series_column_double") r := NewRange(time.Unix(0, 0), time.Unix(40, 5)) numberOfErasedValues, err := column.EraseRanges(r) if err != nil { // handle error } fmt.Println("Number of erased values:", numberOfErasedValues)
Output: Number of erased values: 4
func (TsDoubleColumn) GetRanges ¶
func (column TsDoubleColumn) GetRanges(rgs ...TsRange) ([]TsDoublePoint, error)
GetRanges : Retrieves doubles in the specified range of the time series column.
It is an error to call this function on a non existing time-series.
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithData("ExampleTsDoubleColumn_GetRanges") defer h.Close() column := timeseries.DoubleColumn("series_column_double") r := NewRange(time.Unix(0, 0), time.Unix(40, 5)) doublePoints, err := column.GetRanges(r) if err != nil { // handle error } for _, point := range doublePoints { fmt.Println("timestamp:", point.Timestamp().UTC(), "- value:", point.Content()) }
Output: timestamp: 1970-01-01 00:00:10 +0000 UTC - value: 0 timestamp: 1970-01-01 00:00:20 +0000 UTC - value: 1 timestamp: 1970-01-01 00:00:30 +0000 UTC - value: 2 timestamp: 1970-01-01 00:00:40 +0000 UTC - value: 3
func (TsDoubleColumn) Insert ¶
func (column TsDoubleColumn) Insert(points ...TsDoublePoint) error
Insert double points into a timeseries
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithColumns("ExampleTsDoubleColumn_Insert") defer h.Close() column := timeseries.DoubleColumn("series_column_double") // Insert only one point: column.Insert(NewTsDoublePoint(time.Now(), 3.2)) // Insert multiple points doublePoints := make([]TsDoublePoint, 2) doublePoints[0] = NewTsDoublePoint(time.Now(), 3.2) doublePoints[1] = NewTsDoublePoint(time.Now(), 4.8) err := column.Insert(doublePoints...) if err != nil { // handle error }
Output:
type TsDoublePoint ¶
type TsDoublePoint struct {
// contains filtered or unexported fields
}
TsDoublePoint : timestamped double data point
func NewTsDoublePoint ¶
func NewTsDoublePoint(timestamp time.Time, value float64) TsDoublePoint
NewTsDoublePoint : Create new timeseries double point
func (TsDoublePoint) Content ¶
func (t TsDoublePoint) Content() float64
Content : return data point content
func (TsDoublePoint) Timestamp ¶
func (t TsDoublePoint) Timestamp() time.Time
Timestamp : return data point timestamp
type TsInt64Aggregation ¶
type TsInt64Aggregation struct {
// contains filtered or unexported fields
}
TsInt64Aggregation : Aggregation of int64 type
func NewInt64Aggregation ¶
func NewInt64Aggregation(kind TsAggregationType, rng TsRange) *TsInt64Aggregation
NewInt64Aggregation : Create new timeseries int64 aggregation
func (TsInt64Aggregation) Count ¶
func (t TsInt64Aggregation) Count() int64
Count : returns the number of points aggregated into the result
func (TsInt64Aggregation) Range ¶
func (t TsInt64Aggregation) Range() TsRange
Range : returns the range of the aggregation
func (TsInt64Aggregation) Result ¶
func (t TsInt64Aggregation) Result() TsInt64Point
Result : result of the aggregation
func (TsInt64Aggregation) Type ¶
func (t TsInt64Aggregation) Type() TsAggregationType
Type : returns the type of the aggregation
type TsInt64Column ¶
type TsInt64Column struct {
// contains filtered or unexported fields
}
TsInt64Column : a time series int64 column
func (TsInt64Column) Aggregate ¶
func (column TsInt64Column) Aggregate(aggs ...*TsInt64Aggregation) ([]TsInt64Aggregation, error)
Aggregate : Aggregate a sub-part of a timeseries from the specified aggregations.
It is an error to call this function on a non existing time-series.
func (TsInt64Column) EraseRanges ¶
func (column TsInt64Column) EraseRanges(rgs ...TsRange) (uint64, error)
EraseRanges : erase all points in the specified ranges
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithData("ExampleTsInt64Column_EraseRanges") defer h.Close() column := timeseries.Int64Column("series_column_int64") r := NewRange(time.Unix(0, 0), time.Unix(40, 5)) numberOfErasedValues, err := column.EraseRanges(r) if err != nil { // handle error } fmt.Println("Number of erased values:", numberOfErasedValues)
Output: Number of erased values: 4
func (TsInt64Column) GetRanges ¶
func (column TsInt64Column) GetRanges(rgs ...TsRange) ([]TsInt64Point, error)
GetRanges : Retrieves int64s in the specified range of the time series column.
It is an error to call this function on a non existing time-series.
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithData("ExampleTsInt64Column_GetRanges") defer h.Close() column := timeseries.Int64Column("series_column_int64") r := NewRange(time.Unix(0, 0), time.Unix(40, 5)) int64Points, err := column.GetRanges(r) if err != nil { // handle error } for _, point := range int64Points { fmt.Println("timestamp:", point.Timestamp().UTC(), "- value:", point.Content()) }
Output: timestamp: 1970-01-01 00:00:10 +0000 UTC - value: 0 timestamp: 1970-01-01 00:00:20 +0000 UTC - value: 1 timestamp: 1970-01-01 00:00:30 +0000 UTC - value: 2 timestamp: 1970-01-01 00:00:40 +0000 UTC - value: 3
func (TsInt64Column) Insert ¶
func (column TsInt64Column) Insert(points ...TsInt64Point) error
Insert int64 points into a timeseries
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithColumns("ExampleTsInt64Column_Insert") defer h.Close() column := timeseries.Int64Column("series_column_int64") // Insert only one point: column.Insert(NewTsInt64Point(time.Now(), 3)) // Insert multiple points int64Points := make([]TsInt64Point, 2) int64Points[0] = NewTsInt64Point(time.Now(), 3) int64Points[1] = NewTsInt64Point(time.Now(), 4) err := column.Insert(int64Points...) if err != nil { // handle error }
Output:
type TsInt64Point ¶
type TsInt64Point struct {
// contains filtered or unexported fields
}
TsInt64Point : timestamped int64 data point
func NewTsInt64Point ¶
func NewTsInt64Point(timestamp time.Time, value int64) TsInt64Point
NewTsInt64Point : Create new timeseries int64 point
func (TsInt64Point) Content ¶
func (t TsInt64Point) Content() int64
Content : return data point content
func (TsInt64Point) Timestamp ¶
func (t TsInt64Point) Timestamp() time.Time
Timestamp : return data point timestamp
type TsRange ¶
type TsRange struct {
// contains filtered or unexported fields
}
TsRange : timeseries range with begin and end timestamp
type TsStringAggregation ¶
type TsStringAggregation struct {
// contains filtered or unexported fields
}
TsStringAggregation : Aggregation of string type
func NewStringAggregation ¶
func NewStringAggregation(kind TsAggregationType, rng TsRange) *TsStringAggregation
NewStringAggregation : Create new timeseries string aggregation
func (TsStringAggregation) Count ¶
func (t TsStringAggregation) Count() int64
Count : returns the number of points aggregated into the result
func (TsStringAggregation) Range ¶
func (t TsStringAggregation) Range() TsRange
Range : returns the range of the aggregation
func (TsStringAggregation) Result ¶
func (t TsStringAggregation) Result() TsStringPoint
Result : result of the aggregation
func (TsStringAggregation) Type ¶
func (t TsStringAggregation) Type() TsAggregationType
Type : returns the type of the aggregation
type TsStringColumn ¶
type TsStringColumn struct {
// contains filtered or unexported fields
}
TsStringColumn : a time series string column
func (TsStringColumn) Aggregate ¶
func (column TsStringColumn) Aggregate(aggs ...*TsStringAggregation) ([]TsStringAggregation, error)
Aggregate : Aggregate a sub-part of the time series.
It is an error to call this function on a non existing time-series.
func (TsStringColumn) EraseRanges ¶
func (column TsStringColumn) EraseRanges(rgs ...TsRange) (uint64, error)
EraseRanges : erase all points in the specified ranges
func (TsStringColumn) GetRanges ¶
func (column TsStringColumn) GetRanges(rgs ...TsRange) ([]TsStringPoint, error)
GetRanges : Retrieves strings in the specified range of the time series column.
It is an error to call this function on a non existing time-series.
func (TsStringColumn) Insert ¶
func (column TsStringColumn) Insert(points ...TsStringPoint) error
Insert string points into a timeseries
type TsStringPoint ¶
type TsStringPoint struct {
// contains filtered or unexported fields
}
TsStringPoint : timestamped data
func NewTsStringPoint ¶
func NewTsStringPoint(timestamp time.Time, value string) TsStringPoint
NewTsStringPoint : Create new timeseries string point
func (TsStringPoint) Content ¶
func (t TsStringPoint) Content() string
Content : return data point content
func (TsStringPoint) Timestamp ¶
func (t TsStringPoint) Timestamp() time.Time
Timestamp : return data point timestamp
type TsTimestampAggregation ¶
type TsTimestampAggregation struct {
// contains filtered or unexported fields
}
TsTimestampAggregation : Aggregation of timestamp type
func NewTimestampAggregation ¶
func NewTimestampAggregation(kind TsAggregationType, rng TsRange) *TsTimestampAggregation
NewTimestampAggregation : Create new timeseries timestamp aggregation
func (TsTimestampAggregation) Count ¶
func (t TsTimestampAggregation) Count() int64
Count : returns the number of points aggregated into the result
func (TsTimestampAggregation) Range ¶
func (t TsTimestampAggregation) Range() TsRange
Range : returns the range of the aggregation
func (TsTimestampAggregation) Result ¶
func (t TsTimestampAggregation) Result() TsTimestampPoint
Result : result of the aggregation
func (TsTimestampAggregation) Type ¶
func (t TsTimestampAggregation) Type() TsAggregationType
Type : returns the type of the aggregation
type TsTimestampColumn ¶
type TsTimestampColumn struct {
// contains filtered or unexported fields
}
TsTimestampColumn : a time series timestamp column
func (TsTimestampColumn) Aggregate ¶
func (column TsTimestampColumn) Aggregate(aggs ...*TsTimestampAggregation) ([]TsTimestampAggregation, error)
Aggregate : Aggregate a sub-part of a timeseries from the specified aggregations.
It is an error to call this function on a non existing time-series.
func (TsTimestampColumn) EraseRanges ¶
func (column TsTimestampColumn) EraseRanges(rgs ...TsRange) (uint64, error)
EraseRanges : erase all points in the specified ranges
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithData("ExampleTsTimestampColumn_EraseRanges") defer h.Close() column := timeseries.TimestampColumn("series_column_timestamp") r := NewRange(time.Unix(0, 0), time.Unix(40, 5)) numberOfErasedValues, err := column.EraseRanges(r) if err != nil { // handle error } fmt.Println("Number of erased values:", numberOfErasedValues)
Output: Number of erased values: 4
func (TsTimestampColumn) GetRanges ¶
func (column TsTimestampColumn) GetRanges(rgs ...TsRange) ([]TsTimestampPoint, error)
GetRanges : Retrieves timestamps in the specified range of the time series column.
It is an error to call this function on a non existing time-series.
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithData("ExampleTsTimestampColumn_GetRanges") defer h.Close() column := timeseries.TimestampColumn("series_column_timestamp") r := NewRange(time.Unix(0, 0), time.Unix(40, 5)) timestampPoints, err := column.GetRanges(r) if err != nil { // handle error } for _, point := range timestampPoints { fmt.Println("timestamp:", point.Timestamp().UTC(), "- value:", point.Content().UTC()) }
Output: timestamp: 1970-01-01 00:00:10 +0000 UTC - value: 1970-01-01 00:00:10 +0000 UTC timestamp: 1970-01-01 00:00:20 +0000 UTC - value: 1970-01-01 00:00:20 +0000 UTC timestamp: 1970-01-01 00:00:30 +0000 UTC - value: 1970-01-01 00:00:30 +0000 UTC timestamp: 1970-01-01 00:00:40 +0000 UTC - value: 1970-01-01 00:00:40 +0000 UTC
func (TsTimestampColumn) Insert ¶
func (column TsTimestampColumn) Insert(points ...TsTimestampPoint) error
Insert timestamp points into a timeseries
Example ¶
SetLogFile(ExamplesLogFilePath) h, timeseries := MustCreateTimeseriesWithColumns("ExampleTsTimestampColumn_Insert") defer h.Close() column := timeseries.TimestampColumn("series_column_timestamp") // Insert only one point: column.Insert(NewTsTimestampPoint(time.Now(), time.Now())) // Insert multiple points timestampPoints := make([]TsTimestampPoint, 2) timestampPoints[0] = NewTsTimestampPoint(time.Now(), time.Now()) timestampPoints[1] = NewTsTimestampPoint(time.Now(), time.Now()) err := column.Insert(timestampPoints...) if err != nil { // handle error }
Output:
type TsTimestampPoint ¶
type TsTimestampPoint struct {
// contains filtered or unexported fields
}
TsTimestampPoint : timestamped timestamp data point
func NewTsTimestampPoint ¶
func NewTsTimestampPoint(timestamp time.Time, value time.Time) TsTimestampPoint
NewTsTimestampPoint : Create new timeseries timestamp point
func (TsTimestampPoint) Content ¶
func (t TsTimestampPoint) Content() time.Time
Content : return data point content
func (TsTimestampPoint) Timestamp ¶
func (t TsTimestampPoint) Timestamp() time.Time
Timestamp : return data point timestamp
Source Files ¶
- cluster.go
- constants.go
- direct.go
- entry.go
- entry_blob.go
- entry_integer.go
- entry_timeseries_blob.go
- entry_timeseries_common.go
- entry_timeseries_double.go
- entry_timeseries_int64.go
- entry_timeseries_string.go
- entry_timeseries_timestamp.go
- error.go
- find.go
- handle.go
- json_objects.go
- library_link.go
- logger.go
- node.go
- query.go
- statistics.go
- time.go
- utils.go