go-driver: github.com/arangodb/go-driver

package driver

import "github.com/arangodb/go-driver"

Package driver implements a Go driver for the ArangoDB database.

To get started, create a connection to the database and wrap a client around it.

// Create an HTTP connection to the database
conn, err := http.NewConnection(http.ConnectionConfig{
	Endpoints: []string{"http://localhost:8529"},
})
if err != nil {
	// Handle error
}
// Create a client
c, err := driver.NewClient(driver.ClientConfig{
	Connection: conn,
})
if err != nil {
	// Handle error
}

Code:

//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

// +build !auth

// This example demonstrates how to create a single document.
package main

import (
    "fmt"
    "log"

    driver "github.com/arangodb/go-driver"
    "github.com/arangodb/go-driver/http"
)

type Book struct {
    Title   string `json:"title"`
    NoPages int    `json:"no_pages"`
}

func main() {
    conn, err := http.NewConnection(http.ConnectionConfig{
        Endpoints: []string{"http://localhost:8529"},
    })
    if err != nil {
        log.Fatalf("Failed to create HTTP connection: %v", err)
    }
    c, err := driver.NewClient(driver.ClientConfig{
        Connection: conn,
    })
    if err != nil {
        log.Fatalf("Failed to create client: %v", err)
    }

    // Create database
    db, err := c.CreateDatabase(nil, "examples_books", nil)
    if err != nil {
        log.Fatalf("Failed to create database: %v", err)
    }

    // Create collection
    col, err := db.CreateCollection(nil, "books", nil)
    if err != nil {
        log.Fatalf("Failed to create collection: %v", err)
    }

    // Create document
    book := Book{
        Title:   "ArangoDB Cookbook",
        NoPages: 257,
    }
    meta, err := col.CreateDocument(nil, book)
    if err != nil {
        log.Fatalf("Failed to create document: %v", err)
    }
    fmt.Printf("Created document in collection '%s' in database '%s'\n", col.Name(), db.Name())

    // Read the document back
    var result Book
    if _, err := col.ReadDocument(nil, meta.Key, &result); err != nil {
        log.Fatalf("Failed to read document: %v", err)
    }
    fmt.Printf("Read book '%+v'\n", result)

}

Code:

//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

// +build !auth

// This example demonstrates how to create multiple documents at once.
package main

import (
    "flag"
    "fmt"
    "log"
    "strings"

    driver "github.com/arangodb/go-driver"
    "github.com/arangodb/go-driver/http"
)

type User struct {
    Name string `json:"name"`
    Age  int    `json:"age"`
}

func main() {
    flag.Parse()
    conn, err := http.NewConnection(http.ConnectionConfig{
        Endpoints: []string{"http://localhost:8529"},
    })
    if err != nil {
        log.Fatalf("Failed to create HTTP connection: %v", err)
    }
    c, err := driver.NewClient(driver.ClientConfig{
        Connection: conn,
    })
    if err != nil {
        log.Fatalf("Failed to create client: %v", err)
    }

    // Create database
    db, err := c.CreateDatabase(nil, "examples_users", nil)
    if err != nil {
        log.Fatalf("Failed to create database: %v", err)
    }

    // Create collection
    col, err := db.CreateCollection(nil, "users", nil)
    if err != nil {
        log.Fatalf("Failed to create collection: %v", err)
    }

    // Create documents
    users := []User{
        User{
            Name: "John",
            Age:  65,
        },
        User{
            Name: "Tina",
            Age:  25,
        },
        User{
            Name: "George",
            Age:  31,
        },
    }
    metas, errs, err := col.CreateDocuments(nil, users)
    if err != nil {
        log.Fatalf("Failed to create documents: %v", err)
    } else if err := errs.FirstNonNil(); err != nil {
        log.Fatalf("Failed to create documents: first error: %v", err)
    }

    fmt.Printf("Created documents with keys '%s' in collection '%s' in database '%s'\n", strings.Join(metas.Keys(), ","), col.Name(), db.Name())
}

Code:

//
// DISCLAIMER
//
// Copyright 2017 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//

// +build !auth

// This example demonstrates how to create a graph, how to add vertices and edges and how to delete it again.
package main

import (
    "fmt"
    "log"

    driver "github.com/arangodb/go-driver"
    "github.com/arangodb/go-driver/http"
)

type MyObject struct {
    Name string `json:"_key"`
    Age  int    `json:"age"`
}

type MyEdgeObject struct {
    From string `json:"_from"`
    To   string `json:"_to"`
}

func main() {
    fmt.Println("Hello World")

    // Create an HTTP connection to the database
    conn, err := http.NewConnection(http.ConnectionConfig{
        Endpoints: []string{"http://localhost:8529"},
    })
    if err != nil {
        log.Fatalf("Failed to create HTTP connection: %v", err)
    }
    // Create a client
    c, err := driver.NewClient(driver.ClientConfig{
        Connection: conn,
    })
    if err != nil {
        log.Fatalf("Failed to create client: %v", err)
    }

    // Create database
    db, err := c.CreateDatabase(nil, "my_graph_db", nil)
    if err != nil {
        log.Fatalf("Failed to create database: %v", err)
    }

    // define the edgeCollection to store the edges
    var edgeDefinition driver.EdgeDefinition
    edgeDefinition.Collection = "myEdgeCollection"
    // define a set of collections where an edge is going out...
    edgeDefinition.From = []string{"myCollection1", "myCollection2"}

    // repeat this for the collections where an edge is going into
    edgeDefinition.To = []string{"myCollection1", "myCollection3"}

    // A graph can contain additional vertex collections, defined in the set of orphan collections
    var options driver.CreateGraphOptions
    options.OrphanVertexCollections = []string{"myCollection4", "myCollection5"}
    options.EdgeDefinitions = []driver.EdgeDefinition{edgeDefinition}

    // now it's possible to create a graph
    graph, err := db.CreateGraph(nil, "myGraph", &options)
    if err != nil {
        log.Fatalf("Failed to create graph: %v", err)
    }

    // add vertex
    vertexCollection1, err := graph.VertexCollection(nil, "myCollection1")
    if err != nil {
        log.Fatalf("Failed to get vertex collection: %v", err)
    }

    myObjects := []MyObject{
        MyObject{
            "Homer",
            38,
        },
        MyObject{
            "Marge",
            36,
        },
    }
    _, _, err = vertexCollection1.CreateDocuments(nil, myObjects)
    if err != nil {
        log.Fatalf("Failed to create vertex documents: %v", err)
    }

    // add edge
    edgeCollection, _, err := graph.EdgeCollection(nil, "myEdgeCollection")
    if err != nil {
        log.Fatalf("Failed to select edge collection: %v", err)
    }

    edge := MyEdgeObject{From: "myCollection1/Homer", To: "myCollection1/Marge"}
    _, err = edgeCollection.CreateDocument(nil, edge)
    if err != nil {
        log.Fatalf("Failed to create edge document: %v", err)
    }

    // delete graph
    if err := graph.Remove(nil); err != nil {
        log.Fatalf("Failed to delete graph: %v", err)
    }
}

Package Files

authentication.go client.go client_admin_backup.go client_admin_backup_impl.go client_cluster.go client_cluster_impl.go client_databases.go client_databases_impl.go client_impl.go client_replication.go client_replication_impl.go client_server_admin.go client_server_admin_impl.go client_server_info.go client_server_info_impl.go client_users.go client_users_impl.go cluster.go cluster_impl.go collection.go collection_document_impl.go collection_documents.go collection_impl.go collection_indexes.go collection_indexes_impl.go connection.go content_type.go context.go cursor.go cursor_impl.go database.go database_arangosearch_analyzers.go database_arangosearch_analyzers_impl.go database_collections.go database_collections_impl.go database_graphs.go database_graphs_impl.go database_impl.go database_transactions.go database_transactions_impl.go database_views.go database_views_impl.go doc.go edge.go edge_collection_documents_impl.go edge_collection_impl.go edge_collection_indexes_impl.go encode-go_1_8.go error.go graph.go graph_edge_collections.go graph_edge_collections_impl.go graph_impl.go graph_vertex_collections.go graph_vertex_collections_impl.go id.go index.go index_impl.go meta.go protocol.go query.go replication.go replication_impl.go transaction.go user.go user_impl.go version.go vertex_collection_documents_impl.go vertex_collection_impl.go vertex_collection_indexes_impl.go view.go view_arangosearch.go view_arangosearch_impl.go view_impl.go

Constants

const (
    CollectionStatusNewBorn   = CollectionStatus(1)
    CollectionStatusUnloaded  = CollectionStatus(2)
    CollectionStatusLoaded    = CollectionStatus(3)
    CollectionStatusUnloading = CollectionStatus(4)
    CollectionStatusDeleted   = CollectionStatus(5)
    CollectionStatusLoading   = CollectionStatus(6)
)
const (
    // ImportOnDuplicateError will not import the current document because of the unique key constraint violation.
    // This is the default setting.
    ImportOnDuplicateError = ImportOnDuplicate("error")
    // ImportOnDuplicateUpdate will update an existing document in the database with the data specified in the request.
    // Attributes of the existing document that are not present in the request will be preserved.
    ImportOnDuplicateUpdate = ImportOnDuplicate("update")
    // ImportOnDuplicateReplace will replace an existing document in the database with the data specified in the request.
    ImportOnDuplicateReplace = ImportOnDuplicate("replace")
    // ImportOnDuplicateIgnore will not update an existing document and simply ignore the error caused by a unique key constraint violation.
    ImportOnDuplicateIgnore = ImportOnDuplicate("ignore")
)
const (
    EngineTypeMMFiles = EngineType("mmfiles")
    EngineTypeRocksDB = EngineType("rocksdb")
)
const (
    // CollectionTypeDocument specifies a document collection
    CollectionTypeDocument = CollectionType(2)
    // CollectionTypeEdge specifies an edges collection
    CollectionTypeEdge = CollectionType(3)
)
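
A minimal sketch (not part of the package's example set) of creating an edge collection by passing the collection type, assuming db is an already opened Database as in the examples above:

col, err := db.CreateCollection(nil, "myEdges", &driver.CreateCollectionOptions{
    Type: driver.CollectionTypeEdge,
})
if err != nil {
    log.Fatalf("Failed to create edge collection: %v", err)
}
fmt.Printf("Created edge collection '%s'\n", col.Name())
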
const (
    KeyGeneratorTraditional   = KeyGeneratorType("traditional")
    KeyGeneratorAutoIncrement = KeyGeneratorType("autoincrement")
)
const (
    PrimaryIndex    = IndexType("primary")
    FullTextIndex   = IndexType("fulltext")
    HashIndex       = IndexType("hash")
    SkipListIndex   = IndexType("skiplist")
    PersistentIndex = IndexType("persistent")
    GeoIndex        = IndexType("geo")
    EdgeIndex       = IndexType("edge")
    TTLIndex        = IndexType("ttl")
)

Symbolic constants for index types
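
A minimal sketch of ensuring an index of one of these types, assuming collection is an already opened Collection and that EnsurePersistentIndex and its options are part of the collection index API:

// Ensure a (sparse) persistent index on the "name" field.
_, created, err := collection.EnsurePersistentIndex(nil, []string{"name"}, &driver.EnsurePersistentIndexOptions{
    Sparse: true,
})
if err != nil {
    log.Fatalf("Failed to ensure index: %v", err)
}
fmt.Printf("Persistent index ensured (newly created: %v)\n", created)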

const (
    // ReplicationFactorSatellite represents a satellite collection's replication factor
    ReplicationFactorSatellite int = -1
)
const (
    // ViewTypeArangoSearch specifies an ArangoSearch view type.
    ViewTypeArangoSearch = ViewType("arangosearch")
)

Variables

var (
    // WithStack is called on every return of an error to add stacktrace information to the error.
    // When setting this function, also set the Cause function.
    // The interface of this function is compatible with functions in github.com/pkg/errors.
    WithStack = func(err error) error { return err }
    // Cause is used to get the root cause of the given error.
    // The interface of this function is compatible with functions in github.com/pkg/errors.
    Cause = func(err error) error { return err }
)
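
Since the interfaces are compatible, these hooks can be pointed at github.com/pkg/errors so that errors returned by the driver carry stack traces; a minimal sketch:

import (
    driver "github.com/arangodb/go-driver"
    "github.com/pkg/errors"
)

func init() {
    // Annotate driver errors with stack traces and unwrap them again via Cause.
    driver.WithStack = errors.WithStack
    driver.Cause = errors.Cause
}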

func IsArangoError Uses

func IsArangoError(err error) bool

IsArangoError returns true when the given error is an ArangoError.

func IsArangoErrorWithCode Uses

func IsArangoErrorWithCode(err error, code int) bool

IsArangoErrorWithCode returns true when the given error is an ArangoError and its Code field is equal to the given code.

func IsArangoErrorWithErrorNum Uses

func IsArangoErrorWithErrorNum(err error, errorNum ...int) bool

IsArangoErrorWithErrorNum returns true when the given error is an ArangoError and its ErrorNum field is equal to one of the given numbers.
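
For example, to distinguish a missing document from other failures (a sketch in the fragment style of the examples below; 1202 is the ArangoDB "document not found" error number):

var result Book
_, err := collection.ReadDocument(nil, "someKey", &result)
if driver.IsArangoErrorWithCode(err, 404) {
    // The server answered with HTTP status 404
}
if driver.IsArangoErrorWithErrorNum(err, 1202) {
    // The server reported ArangoDB error number 1202 (document not found)
}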

func IsCanceled Uses

func IsCanceled(err error) bool

IsCanceled returns true if the given error is the result of a cancelled context.

func IsConflict Uses

func IsConflict(err error) bool

IsConflict returns true if the given error is an ArangoError with code 409, indicating a conflict.

func IsForbidden Uses

func IsForbidden(err error) bool

IsForbidden returns true if the given error is an ArangoError with code 403, indicating a forbidden request.

func IsInvalidArgument Uses

func IsInvalidArgument(err error) bool

IsInvalidArgument returns true if the given error is an InvalidArgumentError.

func IsInvalidRequest Uses

func IsInvalidRequest(err error) bool

IsInvalidRequest returns true if the given error is an ArangoError with code 400, indicating an invalid request.

func IsNoLeader Uses

func IsNoLeader(err error) bool

IsNoLeader returns true if the given error is an ArangoError with code 503 error number 1496.

func IsNoLeaderOrOngoing Uses

func IsNoLeaderOrOngoing(err error) bool

IsNoLeaderOrOngoing returns true if the given error is an ArangoError with code 503 and error number 1496 or 1495.

func IsNoMoreDocuments Uses

func IsNoMoreDocuments(err error) bool

IsNoMoreDocuments returns true if the given error is an NoMoreDocumentsError.

func IsNotFound Uses

func IsNotFound(err error) bool

IsNotFound returns true if the given error is an ArangoError with code 404, indicating an object was not found.

Code:

var result Book
if _, err := collection.ReadDocument(nil, "keyDoesNotExist", &result); driver.IsNotFound(err) {
    // No document with given key exists
}

func IsPreconditionFailed Uses

func IsPreconditionFailed(err error) bool

IsPreconditionFailed returns true if the given error is an ArangoError with code 412, indicating a failed precondition.

Code:

var result Book
ctx := driver.WithRevision(context.Background(), "an-old-revision")
if _, err := collection.ReadDocument(ctx, "someValidKey", &result); driver.IsPreconditionFailed(err) {
    // Document is found, but its revision is incorrect
}

func IsResponse Uses

func IsResponse(err error) bool

IsResponse returns true if the given error is (or is caused by) a ResponseError.

func IsTimeout Uses

func IsTimeout(err error) bool

IsTimeout returns true if the given error is the result of a deadline that has been exceeded.

func IsUnauthorized Uses

func IsUnauthorized(err error) bool

IsUnauthorized returns true if the given error is an ArangoError with code 401, indicating an unauthorized request.

func WithAllowDirtyReads Uses

func WithAllowDirtyReads(parent context.Context, wasDirtyRead *bool) context.Context

WithAllowDirtyReads is used in an active failover deployment to allow reads from the follower. You can pass a reference to a boolean that will be set according to whether a potentially dirty read happened or not; nil is allowed. This is valid for document reads, AQL queries, and Gharial vertex and edge reads.
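
A minimal sketch, assuming collection and Book as in the other examples:

// Allow a potentially dirty read from a follower and check afterwards whether one happened.
var wasDirty bool
ctx := driver.WithAllowDirtyReads(context.Background(), &wasDirty)
var result Book
if _, err := collection.ReadDocument(ctx, "someValidKey", &result); err != nil {
    // Handle error
}
fmt.Printf("Read was dirty: %v\n", wasDirty)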

func WithBatchID Uses

func WithBatchID(parent context.Context, id string) context.Context

WithBatchID is used to configure a context that includes an ID of a Batch. This is used in replication functions.

func WithConfigured Uses

func WithConfigured(parent context.Context, value ...bool) context.Context

WithConfigured is used to configure a context to return the configured value of a user grant instead of the effective grant.

func WithDBServerID Uses

func WithDBServerID(parent context.Context, id string) context.Context

WithDBServerID is used to configure a context that includes an ID of a specific DBServer.

func WithDetails Uses

func WithDetails(parent context.Context, value ...bool) context.Context

WithDetails is used to configure a context to make Client.Version return additional details. You can pass a single (optional) boolean. If that is set to false, you explicitly ask to not provide details.
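
A minimal sketch, assuming c is a connected Client as in the NewClient example:

ctx := driver.WithDetails(context.Background())
versionInfo, err := c.Version(ctx)
if err != nil {
    log.Fatalf("Failed to get version info: %v", err)
}
fmt.Printf("Version info (with details): %+v\n", versionInfo)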

func WithEndpoint Uses

func WithEndpoint(parent context.Context, endpoint string) context.Context

WithEndpoint is used to configure a context that forces a request to be executed on a specific endpoint. If you specify an endpoint like this, failover is disabled. If you specify an unknown endpoint, an InvalidArgumentError is returned from requests.

func WithEnforceReplicationFactor Uses

func WithEnforceReplicationFactor(parent context.Context, value bool) context.Context

WithEnforceReplicationFactor is used to configure a context to make adding collections fail when the replication factor is too high (default, or when the value is true), or to silently accept it (when the value is false).

func WithFollowLeaderRedirect Uses

func WithFollowLeaderRedirect(parent context.Context, value bool) context.Context

WithFollowLeaderRedirect is used to configure a context to turn on/off following redirection responses from the server when the request is answered by a follower. Default behavior is "on".

func WithIgnoreRevisions Uses

func WithIgnoreRevisions(parent context.Context, value ...bool) context.Context

WithIgnoreRevisions is used to configure a context to make modification functions ignore revisions in the update. Do not use in combination with WithRevision or WithRevisions.

func WithImportDetails Uses

func WithImportDetails(parent context.Context, value *[]string) context.Context

WithImportDetails is used to configure a context that will make import document requests return details about documents that could not be imported.

func WithIsRestore Uses

func WithIsRestore(parent context.Context, value bool) context.Context

WithIsRestore is used to configure a context to make insert functions use the "isRestore=<value>" setting. Note: This function is intended for internal (replication) use. It is NOT intended to be used by normal clients. This CAN screw up your database.

func WithIsSystem Uses

func WithIsSystem(parent context.Context, value bool) context.Context

WithIsSystem is used to configure a context to make insert functions use the "isSystem=<value>" setting.

func WithJobIDResponse Uses

func WithJobIDResponse(parent context.Context, jobID *string) context.Context

WithJobIDResponse is used to configure a context that includes a reference to a JobID that is filled on an error-free response. This is used in cluster functions.

func WithKeepNull Uses

func WithKeepNull(parent context.Context, value bool) context.Context

WithKeepNull is used to configure a context to make update functions keep null fields (value==true) or remove fields with null values (value==false).
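
A minimal sketch, assuming collection as in the other examples; sending null for a field while keepNull is false removes that field:

ctx := driver.WithKeepNull(context.Background(), false)
patch := map[string]interface{}{
    "age": nil, // will be removed from the document because keepNull is false
}
if _, err := collection.UpdateDocument(ctx, "someValidKey", patch); err != nil {
    // Handle error
}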

func WithMergeObjects Uses

func WithMergeObjects(parent context.Context, value bool) context.Context

WithMergeObjects is used to configure a context to make update functions merge objects present in both the existing document and the patch document (value==true) or overwrite objects in the existing document with objects found in the patch document (value==false).

func WithQueryBatchSize Uses

func WithQueryBatchSize(parent context.Context, value int) context.Context

WithQueryBatchSize is used to configure a context that will set the BatchSize of a query request.

func WithQueryCache Uses

func WithQueryCache(parent context.Context, value ...bool) context.Context

WithQueryCache is used to configure a context that will set the Cache of a query request. If value is not given, it defaults to true.

func WithQueryCount Uses

func WithQueryCount(parent context.Context, value ...bool) context.Context

WithQueryCount is used to configure a context that will set the Count of a query request. If value is not given, it defaults to true.
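
A minimal sketch, assuming db is an already opened Database containing a "books" collection:

ctx := driver.WithQueryCount(context.Background())
cursor, err := db.Query(ctx, "FOR b IN books RETURN b", nil)
if err != nil {
    log.Fatalf("Query failed: %v", err)
}
defer cursor.Close()
// Count is only available because the query was executed with the count flag set.
fmt.Printf("Query matches %d documents\n", cursor.Count())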

func WithQueryFullCount Uses

func WithQueryFullCount(parent context.Context, value ...bool) context.Context

WithQueryFullCount is used to configure whether the query returns the full count of results before the last LIMIT statement.

func WithQueryMaxRuntime Uses

func WithQueryMaxRuntime(parent context.Context, value ...float64) context.Context

func WithQueryMemoryLimit Uses

func WithQueryMemoryLimit(parent context.Context, value int64) context.Context

WithQueryMemoryLimit is used to configure a context that will set the MemoryLimit of a query request.

func WithQuerySatelliteSyncWait Uses

func WithQuerySatelliteSyncWait(parent context.Context, value time.Duration) context.Context

WithQuerySatelliteSyncWait sets the satelliteSyncWait query value on the query cursor request.

func WithQueryStream Uses

func WithQueryStream(parent context.Context, value ...bool) context.Context

WithQueryStream is used to configure whether this becomes a stream query. A stream query is not executed right away, but continually evaluated when the client is requesting more results. Should the cursor expire, the query transaction is canceled. This means that for writing queries, clients have to read the query cursor until the HasMore() method returns false.
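
A minimal sketch of a stream query, assuming db and Book as in the other examples:

ctx := driver.WithQueryStream(context.Background())
cursor, err := db.Query(ctx, "FOR b IN books RETURN b", nil)
if err != nil {
    log.Fatalf("Query failed: %v", err)
}
defer cursor.Close()
// Read the cursor until HasMore() returns false.
for cursor.HasMore() {
    var book Book
    if _, err := cursor.ReadDocument(ctx, &book); err != nil {
        log.Fatalf("Failed to read document: %v", err)
    }
    fmt.Printf("Got '%s'\n", book.Title)
}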

func WithQueryTTL Uses

func WithQueryTTL(parent context.Context, value time.Duration) context.Context

WithQueryTTL is used to configure a context that will set the TTL of a query request.

func WithRawResponse Uses

func WithRawResponse(parent context.Context, value *[]byte) context.Context

WithRawResponse is used to configure a context that will make all functions store the raw response into a buffer.
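
A minimal sketch, assuming collection and Book as in the other examples:

var rawResponse []byte
ctx := driver.WithRawResponse(context.Background(), &rawResponse)
var result Book
if _, err := collection.ReadDocument(ctx, "someValidKey", &result); err != nil {
    // Handle error
}
fmt.Printf("Raw server response: %s\n", string(rawResponse))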

func WithResponse Uses

func WithResponse(parent context.Context, value *Response) context.Context

WithResponse is used to configure a context that will make all functions store the response into the given value.

func WithReturnNew Uses

func WithReturnNew(parent context.Context, result interface{}) context.Context

WithReturnNew is used to configure a context to make create, update & replace document functions return the new document into the given result.
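
A minimal sketch, assuming collection and Book as in the other examples:

book := Book{Title: "ArangoDB Cookbook", NoPages: 257}
var newDoc Book
ctx := driver.WithReturnNew(context.Background(), &newDoc)
if _, err := collection.CreateDocument(ctx, book); err != nil {
    // Handle error
}
fmt.Printf("Created document: %+v\n", newDoc)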

func WithReturnOld Uses

func WithReturnOld(parent context.Context, result interface{}) context.Context

WithReturnOld is used to configure a context to make update & replace document functions return the old document into the given result.

func WithRevision Uses

func WithRevision(parent context.Context, revision string) context.Context

WithRevision is used to configure a context to make document functions specify an explicit revision of the document using an `If-Match` condition.

Code:

var result Book
// Using WithRevision we get an error when the current revision of the document is different.
ctx := driver.WithRevision(context.Background(), "a-specific-revision")
if _, err := collection.ReadDocument(ctx, "someValidKey", &result); err != nil {
    // This call will fail when a document does not exist, or when its current revision is different.
}

func WithRevisions Uses

func WithRevisions(parent context.Context, revisions []string) context.Context

WithRevisions is used to configure a context to make multi-document functions specify explicit revisions of the documents.

func WithSilent Uses

func WithSilent(parent context.Context, value ...bool) context.Context

WithSilent is used to configure a context to make functions return an empty result (silent==true), instead of a metadata result (silent==false, default). You can pass a single (optional) boolean. If that is set to false, you explicitly ask for a metadata result to be returned.

Code:

var result Book
// Using WithSilent we do not care about any returned meta data.
ctx := driver.WithSilent(context.Background())
if _, err := collection.ReadDocument(ctx, "someValidKey", &result); err != nil {
    // No meta data is returned
}

func WithTransactionID Uses

func WithTransactionID(parent context.Context, tid TransactionID) context.Context

WithTransactionID is used to bind a request to a specific transaction.

func WithWaitForSync Uses

func WithWaitForSync(parent context.Context, value ...bool) context.Context

WithWaitForSync is used to configure a context to make modification functions wait until the data has been synced to disk (or not). You can pass a single (optional) boolean. If that is set to false, you explicitly do not wait for data to be synced to disk.

type AbortTransactionOptions Uses

type AbortTransactionOptions struct{}

AbortTransactionOptions provides options for AbortTransaction. Currently unused.

type AccessTarget Uses

type AccessTarget interface {
    // Name returns the name of the database/collection.
    Name() string
}

AccessTarget is implemented by Database & Collection and is used to get/set/remove collection permissions.

type ArangoError Uses

type ArangoError struct {
    HasError     bool   `json:"error"`
    Code         int    `json:"code"`
    ErrorNum     int    `json:"errorNum"`
    ErrorMessage string `json:"errorMessage"`
}

ArangoError is a Go error with ArangoDB-specific error information.

func (ArangoError) Error Uses

func (ae ArangoError) Error() string

Error returns the error message of an ArangoError.

func (ArangoError) Temporary Uses

func (ae ArangoError) Temporary() bool

Temporary returns true when the given error is a temporary error.

func (ArangoError) Timeout Uses

func (ae ArangoError) Timeout() bool

Timeout returns true when the given error is a timeout error.

type ArangoSearchAnalyzer Uses

type ArangoSearchAnalyzer interface {
    // Name returns the analyzer name
    Name() string

    // Type returns the analyzer type
    Type() ArangoSearchAnalyzerType

    // UniqueName returns the unique name: <database>::<analyzer-name>
    UniqueName() string

    // Definition returns the analyzer definition
    Definition() ArangoSearchAnalyzerDefinition

    // Properties returns the analyzer properties
    Properties() ArangoSearchAnalyzerProperties

    // Database returns the database of this analyzer
    Database() Database

    // Remove removes the analyzer.
    Remove(ctx context.Context, force bool) error
}

type ArangoSearchAnalyzerDefinition Uses

type ArangoSearchAnalyzerDefinition struct {
    Name       string                         `json:"name,omitempty"`
    Type       ArangoSearchAnalyzerType       `json:"type,omitempty"`
    Properties ArangoSearchAnalyzerProperties `json:"properties,omitempty"`
    Features   []ArangoSearchAnalyzerFeature  `json:"features,omitempty"`
}

ArangoSearchAnalyzerDefinition provides definition of an analyzer

type ArangoSearchAnalyzerFeature Uses

type ArangoSearchAnalyzerFeature string

ArangoSearchAnalyzerFeature specifies a feature of an analyzer

const (
    // ArangoSearchAnalyzerFeatureFrequency how often a term is seen, required for PHRASE()
    ArangoSearchAnalyzerFeatureFrequency ArangoSearchAnalyzerFeature = "frequency"
    // ArangoSearchAnalyzerFeatureNorm the field normalization factor
    ArangoSearchAnalyzerFeatureNorm ArangoSearchAnalyzerFeature = "norm"
    // ArangoSearchAnalyzerFeaturePosition sequentially increasing term position, required for PHRASE(). If present then the frequency feature is also required
    ArangoSearchAnalyzerFeaturePosition ArangoSearchAnalyzerFeature = "position"
)

type ArangoSearchAnalyzerProperties Uses

type ArangoSearchAnalyzerProperties struct {
    // Locale used by Stem, Norm, Text
    Locale string `json:"locale,omitempty"`
    // Delimiter used by Delimiter
    Delimiter string `json:"delimiter,omitempty"`
    // Accent used by Norm, Text
    Accent *bool `json:"accent,omitempty"`
    // Case used by Norm, Text
    Case ArangoSearchCaseType `json:"case,omitempty"`

    // Min used by NGram
    Min *int64 `json:"min,omitempty"`
    // Max used by NGram
    Max *int64 `json:"max,omitempty"`
    // PreserveOriginal used by NGram
    PreserveOriginal *bool `json:"preserveOriginal,omitempty"`

    // StartMarker used by NGram
    StartMarker *string `json:"startMarker,omitempty"`
    // EndMarker used by NGram
    EndMarker *string `json:"endMarker,omitempty"`
    // StreamType used by NGram
    StreamType *ArangoSearchNGramStreamType `json:"streamType,omitempty"`

    // Stemming used by Text
    Stemming *bool `json:"stemming,omitempty"`
    // Stopword used by Text
    Stopwords []string `json:"stopwords,omitempty"`
    // StopwordsPath used by Text
    StopwordsPath []string `json:"stopwordsPath,omitempty"`
}

ArangoSearchAnalyzerProperties specifies options for the analyzer. Which fields are required and respected depends on the analyzer type. More information can be found here: https://www.arangodb.com/docs/stable/arangosearch-analyzers.html#analyzer-properties

type ArangoSearchAnalyzerType Uses

type ArangoSearchAnalyzerType string

ArangoSearchAnalyzerType specifies the type of an analyzer

const (
    // ArangoSearchAnalyzerTypeIdentity treat value as atom (no transformation)
    ArangoSearchAnalyzerTypeIdentity ArangoSearchAnalyzerType = "identity"
    // ArangoSearchAnalyzerTypeDelimiter split into tokens at user-defined character
    ArangoSearchAnalyzerTypeDelimiter ArangoSearchAnalyzerType = "delimiter"
    // ArangoSearchAnalyzerTypeStem apply stemming to the value as a whole
    ArangoSearchAnalyzerTypeStem ArangoSearchAnalyzerType = "stem"
    // ArangoSearchAnalyzerTypeNorm apply normalization to the value as a whole
    ArangoSearchAnalyzerTypeNorm ArangoSearchAnalyzerType = "norm"
    // ArangoSearchAnalyzerTypeNGram create n-grams from value with user-defined lengths
    ArangoSearchAnalyzerTypeNGram ArangoSearchAnalyzerType = "ngram"
    // ArangoSearchAnalyzerTypeText tokenize into words, optionally with stemming, normalization and stop-word filtering
    ArangoSearchAnalyzerTypeText ArangoSearchAnalyzerType = "text"
)

type ArangoSearchCaseType Uses

type ArangoSearchCaseType string
const (
    // ArangoSearchCaseUpper to convert to all upper-case characters
    ArangoSearchCaseUpper ArangoSearchCaseType = "upper"
    // ArangoSearchCaseLower to convert to all lower-case characters
    ArangoSearchCaseLower ArangoSearchCaseType = "lower"
    // ArangoSearchCaseNone to not change character case (default)
    ArangoSearchCaseNone ArangoSearchCaseType = "none"
)

type ArangoSearchConsolidationPolicy Uses

type ArangoSearchConsolidationPolicy struct {
    // Type returns the type of the ConsolidationPolicy. This interface can then be cast to the corresponding ArangoSearchConsolidationPolicy* struct.
    Type ArangoSearchConsolidationPolicyType `json:"type,omitempty"`

    ArangoSearchConsolidationPolicyBytesAccum
    ArangoSearchConsolidationPolicyTier
}

ArangoSearchConsolidationPolicy holds threshold values specifying when to consolidate view data. Semantics of the values depend on where they are used.

type ArangoSearchConsolidationPolicyBytesAccum Uses

type ArangoSearchConsolidationPolicyBytesAccum struct {
    // Threshold, see ArangoSearchConsolidationTypeBytesAccum
    Threshold *float64 `json:"threshold,omitempty"`
}

ArangoSearchConsolidationPolicyBytesAccum contains fields used for ArangoSearchConsolidationPolicyTypeBytesAccum

type ArangoSearchConsolidationPolicyTier Uses

type ArangoSearchConsolidationPolicyTier struct {
    // MinSegments specifies the minimum number of segments that will be evaluated as candidates for consolidation.
    MinSegments *int64 `json:"minSegments,omitempty"`
    // MaxSegments specifies the maximum number of segments that will be evaluated as candidates for consolidation.
    MaxSegments *int64 `json:"maxSegments,omitempty"`
    // SegmentsBytesMax specifies the maximum allowed size of all consolidated segments in bytes.
    SegmentsBytesMax *int64 `json:"segmentsBytesMax,omitempty"`
    // SegmentsBytesFloor defines the value (in bytes) to treat all smaller segments as equal for consolidation selection.
    SegmentsBytesFloor *int64 `json:"segmentsBytesFloor,omitempty"`
    // Lookahead specifies the number of additionally searched tiers except initially chosen candidates based on min_segments,
    // max_segments, segments_bytes_max, segments_bytes_floor with respect to defined values.
    // Default value falls to integer_traits<size_t>::const_max (in C++ source code).
    Lookahead *int64 `json:"lookahead,omitempty"`
}

ArangoSearchConsolidationPolicyTier contains fields used for ArangoSearchConsolidationPolicyTypeTier

type ArangoSearchConsolidationPolicyType Uses

type ArangoSearchConsolidationPolicyType string

ArangoSearchConsolidationPolicyType strings for consolidation types

const (
    // ArangoSearchConsolidationPolicyTypeTier consolidate based on segment byte size and live document count as dictated by the customization attributes.
    ArangoSearchConsolidationPolicyTypeTier ArangoSearchConsolidationPolicyType = "tier"
    // ArangoSearchConsolidationPolicyTypeBytesAccum consolidate if and only if ({threshold} range [0.0, 1.0])
    // {threshold} > (segment_bytes + sum_of_merge_candidate_segment_bytes) / all_segment_bytes,
    // i.e. the sum of all candidate segment's byte size is less than the total segment byte size multiplied by the {threshold}.
    ArangoSearchConsolidationPolicyTypeBytesAccum ArangoSearchConsolidationPolicyType = "bytes_accum"
)

type ArangoSearchElementProperties Uses

type ArangoSearchElementProperties struct {
    AnalyzerDefinitions []ArangoSearchAnalyzerDefinition `json:"analyzerDefinitions,omitempty"`
    // The list of analyzers to be used for indexing of string values. Defaults to ["identity"].
    Analyzers []string `json:"analyzers,omitempty"`
    // If set to true, all fields of this element will be indexed. Defaults to false.
    IncludeAllFields *bool `json:"includeAllFields,omitempty"`
    // If set to true, values in a list are treated as separate values. Defaults to false.
    TrackListPositions *bool `json:"trackListPositions,omitempty"`
    // This value specifies how the view should track values.
    StoreValues ArangoSearchStoreValues `json:"storeValues,omitempty"`
    // Fields contains the properties for individual fields of the element.
    // The keys of the map are field names.
    Fields ArangoSearchFields `json:"fields,omitempty"`
}

ArangoSearchElementProperties contains properties that specify how an element is indexed in an ArangoSearch view. Note that this structure is recursive. Settings not specified (nil) at a given level will inherit their setting from a lower level.

type ArangoSearchFields Uses

type ArangoSearchFields map[string]ArangoSearchElementProperties

ArangoSearchFields is a strongly typed map containing properties per field. The keys in the map are field names.

type ArangoSearchLinks Uses

type ArangoSearchLinks map[string]ArangoSearchElementProperties

ArangoSearchLinks is a strongly typed map containing links between a collection and a view. The keys in the map are collection names.

type ArangoSearchNGramStreamType Uses

type ArangoSearchNGramStreamType string
const (
    // ArangoSearchNGramStreamBinary used by NGram. Default value
    ArangoSearchNGramStreamBinary ArangoSearchNGramStreamType = "binary"
    // ArangoSearchNGramStreamUTF8 used by NGram
    ArangoSearchNGramStreamUTF8 ArangoSearchNGramStreamType = "utf8"
)

type ArangoSearchPrimarySortEntry Uses

type ArangoSearchPrimarySortEntry struct {
    Field     string                     `json:"field,omitempty"`
    Ascending *bool                      `json:"asc,omitempty"`
    Direction *ArangoSearchSortDirection `json:"direction,omitempty"`
}

ArangoSearchPrimarySortEntry describes an entry for the primarySort list

func (ArangoSearchPrimarySortEntry) GetAscending Uses

func (pse ArangoSearchPrimarySortEntry) GetAscending() bool

GetAscending returns the value of Ascending or false if not set

func (ArangoSearchPrimarySortEntry) GetDirection Uses

func (pse ArangoSearchPrimarySortEntry) GetDirection() ArangoSearchSortDirection

GetDirection returns the sort direction or empty string if not set

type ArangoSearchSortDirection Uses

type ArangoSearchSortDirection string

ArangoSearchSortDirection describes the sorting direction

const (
    // ArangoSearchSortDirectionAsc sort ascending
    ArangoSearchSortDirectionAsc ArangoSearchSortDirection = "ASC"
    // ArangoSearchSortDirectionDesc sort descending
    ArangoSearchSortDirectionDesc ArangoSearchSortDirection = "DESC"
)

type ArangoSearchStoreValues Uses

type ArangoSearchStoreValues string

ArangoSearchStoreValues is the type of the StoreValues option of an ArangoSearch element.

const (
    // ArangoSearchStoreValuesNone specifies that a view should not store values.
    ArangoSearchStoreValuesNone ArangoSearchStoreValues = "none"
    // ArangoSearchStoreValuesID specifies that a view should only store
    // information about value presence, to allow use of the EXISTS() function.
    ArangoSearchStoreValuesID ArangoSearchStoreValues = "id"
)

type ArangoSearchView Uses

type ArangoSearchView interface {
    // Include generic View functions
    View

    // Properties fetches extended information about the view.
    Properties(ctx context.Context) (ArangoSearchViewProperties, error)

    // SetProperties changes properties of the view.
    SetProperties(ctx context.Context, options ArangoSearchViewProperties) error
}

ArangoSearchView provides access to the information of a view. Views are only available in ArangoDB 3.4 and higher.

type ArangoSearchViewProperties Uses

type ArangoSearchViewProperties struct {
    // CleanupIntervalStep specifies the minimum number of commits to wait between
    // removing unused files in the data directory.
    // Defaults to 10.
    // Use 0 to disable waiting.
    // For the case where the consolidation policies merge segments often
    // (i.e. a lot of commit+consolidate), a lower value will cause a lot of
    // disk space to be wasted.
    // For the case where the consolidation policies rarely merge segments
    // (i.e. few inserts/deletes), a higher value will impact performance
    // without any added benefits.
    CleanupIntervalStep *int64 `json:"cleanupIntervalStep,omitempty"`
    // ConsolidationInterval specifies the minimum number of milliseconds that must be waited
    // between committing index data changes and making them visible to queries.
    // Defaults to 60000.
    // Use 0 to disable.
    // For the case where there are a lot of inserts/updates, a lower value,
    // until commit, will cause the index not to account for them and memory usage
    // would continue to grow.
    // For the case where there are a few inserts/updates, a higher value will
    // impact performance and waste disk space for each commit call without
    // any added benefits.
    ConsolidationInterval *int64 `json:"consolidationIntervalMsec,omitempty"`
    // ConsolidationPolicy specifies thresholds for consolidation.
    ConsolidationPolicy *ArangoSearchConsolidationPolicy `json:"consolidationPolicy,omitempty"`

    // CommitInterval ArangoSearch waits at least this many milliseconds between committing view data store changes and making documents visible to queries
    CommitInterval *int64 `json:"commitIntervalMsec,omitempty"`

    // WriteBufferIdel specifies the maximum number of writers (segments) cached in the pool.
    // A value of 0 turns off caching; the default value is 64.
    WriteBufferIdel *int64 `json:"writebufferIdle,omitempty"`

    // WriteBufferActive specifies the maximum number of concurrent active writers (segments) that perform a transaction.
    // Other writers (segments) wait until the current active writers (segments) finish.
    // A value of 0 turns off this limit and is used by default.
    WriteBufferActive *int64 `json:"writebufferActive,omitempty"`

    // WriteBufferSizeMax specifies the maximum memory byte size per writer (segment) before a writer (segment) flush is triggered.
    // A value of 0 turns off this limit for any writer (buffer); data will then be flushed only after a period defined for a special thread during ArangoDB server startup.
    // A value of 0 should be used carefully due to the high potential memory consumption.
    WriteBufferSizeMax *int64 `json:"writebufferSizeMax,omitempty"`

    // Links contains the properties for how individual collections
    // are indexed in this view.
    // The keys of the map are collection names.
    Links ArangoSearchLinks `json:"links,omitempty"`

    // PrimarySort describes how individual fields are sorted
    PrimarySort []ArangoSearchPrimarySortEntry `json:"primarySort,omitempty"`
}

ArangoSearchViewProperties contains properties of an ArangoSearch view.
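
A minimal sketch of creating such a view, assuming db is an already opened Database with a "books" collection and that CreateArangoSearchView is part of the database views API:

includeAll := true
props := driver.ArangoSearchViewProperties{
    Links: driver.ArangoSearchLinks{
        "books": driver.ArangoSearchElementProperties{
            IncludeAllFields: &includeAll,
        },
    },
}
view, err := db.CreateArangoSearchView(nil, "booksView", &props)
if err != nil {
    log.Fatalf("Failed to create ArangoSearch view: %v", err)
}
fmt.Printf("Created view '%s'\n", view.Name())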

type Authentication Uses

type Authentication interface {
    // Returns the type of authentication
    Type() AuthenticationType
    // Get returns a configuration property of the authentication.
    // Supported properties depend on type of authentication.
    Get(property string) string
}

Authentication implements a kind of authentication.

func BasicAuthentication Uses

func BasicAuthentication(userName, password string) Authentication

BasicAuthentication creates an authentication implementation based on the given username & password.
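
A minimal sketch of connecting with basic authentication (endpoint and credentials are placeholders):

conn, err := http.NewConnection(http.ConnectionConfig{
    Endpoints: []string{"http://localhost:8529"},
})
if err != nil {
    log.Fatalf("Failed to create HTTP connection: %v", err)
}
c, err := driver.NewClient(driver.ClientConfig{
    Connection:     conn,
    Authentication: driver.BasicAuthentication("user", "password"),
})
if err != nil {
    log.Fatalf("Failed to create client: %v", err)
}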

func JWTAuthentication Uses

func JWTAuthentication(userName, password string) Authentication

JWTAuthentication creates a JWT token authentication implementation based on the given username & password.

func RawAuthentication Uses

func RawAuthentication(value string) Authentication

RawAuthentication creates a raw authentication implementation based on the given value for the Authorization header.

type AuthenticationType Uses

type AuthenticationType int
const (
    // AuthenticationTypeBasic uses username+password basic authentication
    AuthenticationTypeBasic AuthenticationType = iota
    // AuthenticationTypeJWT uses username+password JWT token based authentication
    AuthenticationTypeJWT
    // AuthenticationTypeRaw uses a raw value for the Authorization header
    AuthenticationTypeRaw
)

type BackupCreateOptions Uses

type BackupCreateOptions struct {
    Label             string        `json:"label,omitempty"`
    AllowInconsistent bool          `json:"allowInconsistent,omitempty"`
    Timeout           time.Duration `json:"timeout,omitempty"`
}

BackupCreateOptions provides options for Create

type BackupCreateResponse Uses

type BackupCreateResponse struct {
    NumberOfFiles           uint
    NumberOfDBServers       uint
    SizeInBytes             uint64
    PotentiallyInconsistent bool
    CreationTime            time.Time
}

BackupCreateResponse contains information about a newly created backup

type BackupID Uses

type BackupID string

BackupID identifies a backup

type BackupListOptions Uses

type BackupListOptions struct {
    // Only receive meta data about a specific id
    ID BackupID `json:"id,omitempty"`
}

BackupListOptions provides options for List

type BackupMeta Uses

type BackupMeta struct {
    ID                      BackupID  `json:"id,omitempty"`
    Version                 string    `json:"version,omitempty"`
    DateTime                time.Time `json:"datetime,omitempty"`
    NumberOfFiles           uint      `json:"nrFiles,omitempty"`
    NumberOfDBServers       uint      `json:"nrDBServers,omitempty"`
    SizeInBytes             uint64    `json:"sizeInBytes,omitempty"`
    PotentiallyInconsistent bool      `json:"potentiallyInconsistent,omitempty"`
    Available               bool      `json:"available,omitempty"`
    NumberOfPiecesPresent   uint      `json:"nrPiecesPresent,omitempty"`
}

BackupMeta provides meta data of a backup

type BackupRestoreOptions Uses

type BackupRestoreOptions struct {
    // do not version check when doing a restore (expert only)
    IgnoreVersion bool `json:"ignoreVersion,omitempty"`
}

BackupRestoreOptions provides options for Restore

type BackupTransferJobID Uses

type BackupTransferJobID string

BackupTransferJobID represents a Transfer (upload/download) job

type BackupTransferProgressReport Uses

type BackupTransferProgressReport struct {
    BackupID  BackupID                        `json:"BackupID,omitempty"`
    Cancelled bool                            `json:"Cancelled,omitempty"`
    Timestamp string                          `json:"Timestamp,omitempty"`
    DBServers map[string]BackupTransferReport `json:"DBServers,omitempty"`
}

BackupTransferProgressReport provides progress information for a backup transfer job

type BackupTransferReport Uses

type BackupTransferReport struct {
    Status       BackupTransferStatus `json:"Status,omitempty"`
    Error        int                  `json:"Error,omitempty"`
    ErrorMessage string               `json:"ErrorMessage,omitempty"`
    Progress     struct {
        Total     int    `json:"Total,omitempty"`
        Done      int    `json:"Done,omitempty"`
        Timestamp string `json:"Timestamp,omitempty"`
    }   `json:"Progress,omitempty"`
}

BackupTransferReport provides progress information of a backup transfer job for a single dbserver

type BackupTransferStatus Uses

type BackupTransferStatus string

BackupTransferStatus represents all possible states a transfer job can be in

const (
    TransferAcknowledged BackupTransferStatus = "ACK"
    TransferStarted      BackupTransferStatus = "STARTED"
    TransferCompleted    BackupTransferStatus = "COMPLETED"
    TransferFailed       BackupTransferStatus = "FAILED"
    TransferCancelled    BackupTransferStatus = "CANCELLED"
)

type Batch Uses

type Batch interface {
    // id of this batch
    BatchID() string
    // LastTick reported by the server for this batch
    LastTick() Tick
    // Extend the lifetime of an existing batch on the server
    Extend(ctx context.Context, ttl time.Duration) error
    // DeleteBatch deletes an existing batch on the server
    Delete(ctx context.Context) error
}

Batch represents state on the server used during certain replication operations to keep state required by the client (such as Write-Ahead Log, inventory and data-files)

type BeginTransactionOptions Uses

type BeginTransactionOptions struct {
    WaitForSync        bool
    AllowImplicit      bool
    LockTimeout        time.Duration
    MaxTransactionSize uint64
}

BeginTransactionOptions provides options for BeginTransaction call
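
A minimal sketch of a transaction using this type, assuming db and collection as in the other examples and the BeginTransaction/CommitTransaction/AbortTransaction methods of the database transaction API:

tid, err := db.BeginTransaction(nil, driver.TransactionCollections{Write: []string{"books"}}, nil)
if err != nil {
    log.Fatalf("Failed to begin transaction: %v", err)
}
// Bind subsequent requests to this transaction.
ctx := driver.WithTransactionID(context.Background(), tid)
if _, err := collection.CreateDocument(ctx, Book{Title: "In a transaction", NoPages: 1}); err != nil {
    _ = db.AbortTransaction(nil, tid, nil)
    log.Fatalf("Failed to create document: %v", err)
}
if err := db.CommitTransaction(nil, tid, nil); err != nil {
    log.Fatalf("Failed to commit transaction: %v", err)
}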

type Client Uses

type Client interface {
    // SynchronizeEndpoints fetches all endpoints from an ArangoDB cluster and updates the
    // connection to use those endpoints.
    // When this client is connected to a single server, nothing happens.
    // When this client is connected to a cluster of servers, the connection will be updated to reflect
    // the layout of the cluster.
    // This function requires ArangoDB 3.1.15 or up.
    SynchronizeEndpoints(ctx context.Context) error

    // SynchronizeEndpoints2 fetches all endpoints from an ArangoDB cluster and updates the
    // connection to use those endpoints.
    // When this client is connected to a single server, nothing happens.
    // When this client is connected to a cluster of servers, the connection will be updated to reflect
    // the layout of the cluster.
    // Compared to SynchronizeEndpoints, this function expects a database name as additional parameter.
    // This database name is used to call `_db/<dbname>/_api/cluster/endpoints`. SynchronizeEndpoints uses
    // the default database, i.e. `_system`. In the case the user does not have access to `_system`,
    // SynchronizeEndpoints does not work with earlier versions of arangodb.
    SynchronizeEndpoints2(ctx context.Context, dbname string) error

    // Connection returns the connection used by this client
    Connection() Connection

    // Database functions
    ClientDatabases

    // User functions
    ClientUsers

    // Cluster functions
    ClientCluster

    // Individual server information functions
    ClientServerInfo

    // Server/cluster administration functions
    ClientServerAdmin

    // Replication functions
    ClientReplication

    // Backup functions
    ClientAdminBackup
}

Client provides access to a single arangodb database server, or an entire cluster of arangodb servers.

func NewClient Uses

func NewClient(config ClientConfig) (Client, error)

NewClient creates a new Client based on the given config setting.

Code:

// Create an HTTP connection to the database
conn, err := http.NewConnection(http.ConnectionConfig{
    Endpoints: []string{"http://localhost:8529"},
})
if err != nil {
    log.Fatalf("Failed to create HTTP connection: %v", err)
}
// Create a client
c, err := driver.NewClient(driver.ClientConfig{
    Connection: conn,
})
// Ask the version of the server
versionInfo, err := c.Version(nil)
if err != nil {
    log.Fatalf("Failed to get version info: %v", err)
}
fmt.Printf("Database has version '%s' and license '%s'\n", versionInfo.Version, versionInfo.License)

type ClientAdminBackup Uses

type ClientAdminBackup interface {
    Backup() ClientBackup
}

ClientAdminBackup provides access to the Backup API via the Client interface

type ClientBackup Uses

type ClientBackup interface {
    // Create creates a new backup and returns its id
    Create(ctx context.Context, opt *BackupCreateOptions) (BackupID, BackupCreateResponse, error)

    // Delete deletes the backup with given id
    Delete(ctx context.Context, id BackupID) error

    // Restore restores the backup with given id
    Restore(ctx context.Context, id BackupID, opt *BackupRestoreOptions) error

    // List returns meta data about some/all backups available
    List(ctx context.Context, opt *BackupListOptions) (map[BackupID]BackupMeta, error)

    // Upload triggers an upload to the remote repository of backup with id using the given config
    // and returns the job id.
    Upload(ctx context.Context, id BackupID, remoteRepository string, config interface{}) (BackupTransferJobID, error)

    // Download triggers a download of the backup with given id from the remote repository using the given config
    // and returns the job id.
    Download(ctx context.Context, id BackupID, remoteRepository string, config interface{}) (BackupTransferJobID, error)

    // Progress returns the progress state of the given Transfer job
    Progress(ctx context.Context, job BackupTransferJobID) (BackupTransferProgressReport, error)

    // Abort aborts the Transfer job if possible
    Abort(ctx context.Context, job BackupTransferJobID) error
}

ClientBackup provides access to server/cluster backup functions of an arangodb database server or an entire cluster of arangodb servers.
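
A minimal sketch of creating and listing backups, assuming c is a connected Client with backup support:

id, resp, err := c.Backup().Create(nil, nil)
if err != nil {
    log.Fatalf("Failed to create backup: %v", err)
}
fmt.Printf("Created backup '%s' (%d files, %d bytes)\n", id, resp.NumberOfFiles, resp.SizeInBytes)

list, err := c.Backup().List(nil, nil)
if err != nil {
    log.Fatalf("Failed to list backups: %v", err)
}
for backupID, meta := range list {
    fmt.Printf("Backup '%s' was created at %s\n", backupID, meta.DateTime)
}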

type ClientCluster Uses

type ClientCluster interface {
    // Cluster provides access to cluster wide specific operations.
    // To use this interface, an ArangoDB cluster is required.
    // If this method is called without a cluster, a PreconditionFailed error is returned.
    Cluster(ctx context.Context) (Cluster, error)
}

ClientCluster provides methods needed to access cluster functionality from a client.

type ClientConfig Uses

type ClientConfig struct {
    // Connection is the actual server/cluster connection.
    // See http.NewConnection.
    Connection Connection
    // Authentication implements authentication on the server.
    Authentication Authentication
    // SynchronizeEndpointsInterval is the interval between automatic synchronizations of endpoints.
    // If this value is 0, no automatic synchronization is performed.
    // If this value is > 0, automatic synchronization is started on a goroutine.
    // This feature requires ArangoDB 3.1.15 or up.
    SynchronizeEndpointsInterval time.Duration
}

ClientConfig contains all settings needed to create a client.

type ClientDatabases Uses

type ClientDatabases interface {
    // Database opens a connection to an existing database.
    // If no database with given name exists, a NotFoundError is returned.
    Database(ctx context.Context, name string) (Database, error)

    // DatabaseExists returns true if a database with given name exists.
    DatabaseExists(ctx context.Context, name string) (bool, error)

    // Databases returns a list of all databases found by the client.
    Databases(ctx context.Context) ([]Database, error)

    // AccessibleDatabases returns a list of all databases that can be accessed by the authenticated user.
    AccessibleDatabases(ctx context.Context) ([]Database, error)

    // CreateDatabase creates a new database with given name and opens a connection to it.
    // If a database with given name already exists, a DuplicateError is returned.
    CreateDatabase(ctx context.Context, name string, options *CreateDatabaseOptions) (Database, error)
}

ClientDatabases provides access to the databases in a single arangodb database server, or an entire cluster of arangodb servers.
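
A minimal sketch of opening a database if it exists and creating it otherwise, assuming c is a connected Client:

var db driver.Database
exists, err := c.DatabaseExists(nil, "examples_books")
if err != nil {
    log.Fatalf("Failed to check for database: %v", err)
}
if exists {
    db, err = c.Database(nil, "examples_books")
} else {
    db, err = c.CreateDatabase(nil, "examples_books", nil)
}
if err != nil {
    log.Fatalf("Failed to open or create database: %v", err)
}
fmt.Printf("Using database '%s'\n", db.Name())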

type ClientReplication Uses

type ClientReplication interface {
    // Replication provides access to replication specific operations.
    Replication() Replication
}

ClientReplication provides methods needed to access replication functionality from a client.

type ClientServerAdmin Uses

type ClientServerAdmin interface {
    // ServerMode returns the current mode in which the server/cluster is operating.
    // This call needs ArangoDB 3.3 and up.
    ServerMode(ctx context.Context) (ServerMode, error)
    // SetServerMode changes the current mode in which the server/cluster is operating.
    // This call needs a client that uses JWT authentication.
    // This call needs ArangoDB 3.3 and up.
    SetServerMode(ctx context.Context, mode ServerMode) error

    // Shutdown a specific server, optionally removing it from its cluster.
    Shutdown(ctx context.Context, removeFromCluster bool) error

    // Statistics queries statistics from a specific server
    Statistics(ctx context.Context) (ServerStatistics, error)
}

ClientServerAdmin provides access to server administration functions of an arangodb database server or an entire cluster of arangodb servers.

type ClientServerInfo Uses

type ClientServerInfo interface {
    // Version returns version information from the connected database server.
    // Use WithDetails to configure a context that will include additional details in the returned VersionInfo.
    Version(ctx context.Context) (VersionInfo, error)

    // ServerRole returns the role of the server that answers the request.
    ServerRole(ctx context.Context) (ServerRole, error)

    // Gets the ID of this server in the cluster.
    // An error is returned when calling this on a server that is not part of a cluster.
    ServerID(ctx context.Context) (string, error)
}

ClientServerInfo provides access to information about a single ArangoDB server. When your client uses multiple endpoints, it is undefined which server will respond to requests of this interface.

type ClientStats Uses

type ClientStats struct {
    HTTPConnections int64 `json:"httpConnections"`
    ConnectionTime  Stats `json:"connectionTime"`
    TotalTime       Stats `json:"totalTime"`
    RequestTime     Stats `json:"requestTime"`
    QueueTime       Stats `json:"queueTime"`
    IoTime          Stats `json:"ioTime"`
    BytesSent       Stats `json:"bytesSent"`
    BytesReceived   Stats `json:"bytesReceived"`
}

type ClientUsers Uses

type ClientUsers interface {
    // User opens a connection to an existing user.
    // If no user with given name exists, a NotFoundError is returned.
    User(ctx context.Context, name string) (User, error)

    // UserExists returns true if a user with given name exists.
    UserExists(ctx context.Context, name string) (bool, error)

    // Users returns a list of all users found by the client.
    Users(ctx context.Context) ([]User, error)

    // CreateUser creates a new user with given name and opens a connection to it.
    // If a user with given name already exists, a Conflict error is returned.
    CreateUser(ctx context.Context, name string, options *UserOptions) (User, error)
}

ClientUsers provides access to the users in a single ArangoDB database server, or an entire cluster of ArangoDB servers.
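
A sketch of creating a user when it does not yet exist (the user name "jane" and the password are placeholders; UserOptions is assumed to carry the initial password, as in this package's UserOptions struct):

ctx := context.Background()
exists, err := c.UserExists(ctx, "jane")
if err != nil {
    // Handle error
}
if !exists {
    u, err := c.CreateUser(ctx, "jane", &driver.UserOptions{Password: "secret"})
    if err != nil {
        // Handle error
    }
    fmt.Println("created user:", u.Name())
}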

type Cluster Uses

type Cluster interface {
    // Health returns the cluster configuration & health.
    Health(ctx context.Context) (ClusterHealth, error)

    // DatabaseInventory returns the inventory of the cluster, containing all collections (with full details) of a database.
    DatabaseInventory(ctx context.Context, db Database) (DatabaseInventory, error)

    // MoveShard moves a single shard of the given collection from server `fromServer` to
    // server `toServer`.
    MoveShard(ctx context.Context, col Collection, shard ShardID, fromServer, toServer ServerID) error

    // CleanOutServer triggers activities to clean out a DBServer.
    CleanOutServer(ctx context.Context, serverID string) error

    // ResignServer triggers activities to let a DBServer resign for all shards.
    ResignServer(ctx context.Context, serverID string) error

    // IsCleanedOut checks if the dbserver with given ID has been cleaned out.
    IsCleanedOut(ctx context.Context, serverID string) (bool, error)

    // RemoveServer is a low-level option to remove a server from a cluster.
    // This function is suitable for servers of type coordinator or dbserver.
    // The use of `ClientServerAdmin.Shutdown` is highly recommended over this function.
    RemoveServer(ctx context.Context, serverID ServerID) error
}

Cluster provides access to cluster wide specific operations. To use this interface, an ArangoDB cluster is required.
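
A sketch of inspecting cluster health; it assumes `c` is a driver.Client and that the Cluster interface is obtained via `c.Cluster(ctx)` (the ClientCluster part of the Client interface):

ctx := context.Background()
cluster, err := c.Cluster(ctx)
if err != nil {
    // Handle error (for example, the deployment is not a cluster)
}
health, err := cluster.Health(ctx)
if err != nil {
    // Handle error
}
fmt.Println("cluster ID:", health.ID)
for serverID, serverHealth := range health.Health {
    fmt.Printf("server %s: %+v\n", serverID, serverHealth)
}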

type ClusterHealth Uses

type ClusterHealth struct {
    // Unique identifier of the entire cluster.
    // This ID is created when the cluster was first created.
    ID  string `json:"ClusterId"`
    // Health per server
    Health map[ServerID]ServerHealth `json:"Health"`
}

ClusterHealth contains health information for all servers in a cluster.

type Collection Uses

type Collection interface {
    // Name returns the name of the collection.
    Name() string

    // Database returns the database containing the collection.
    Database() Database

    // Status fetches the current status of the collection.
    Status(ctx context.Context) (CollectionStatus, error)

    // Count fetches the number of documents in the collection.
    Count(ctx context.Context) (int64, error)

    // Statistics returns the number of documents and additional statistical information about the collection.
    Statistics(ctx context.Context) (CollectionStatistics, error)

    // Revision fetches the revision ID of the collection.
    // The revision ID is a server-generated string that clients can use to check whether data
    // in a collection has changed since the last revision check.
    Revision(ctx context.Context) (string, error)

    // Properties fetches extended information about the collection.
    Properties(ctx context.Context) (CollectionProperties, error)

    // SetProperties changes properties of the collection.
    SetProperties(ctx context.Context, options SetCollectionPropertiesOptions) error

    // Load the collection into memory.
    Load(ctx context.Context) error

    // Unload the collection from memory.
    Unload(ctx context.Context) error

    // Remove removes the entire collection.
    // If the collection does not exist, a NotFoundError is returned.
    Remove(ctx context.Context) error

    // Truncate removes all documents from the collection, but leaves the indexes intact.
    Truncate(ctx context.Context) error

    // All index functions
    CollectionIndexes

    // All document functions
    CollectionDocuments
}

Collection provides access to the information of a single collection, all its documents and all its indexes.
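
A minimal sketch of reading basic collection information (assuming `db` is a Database and the "books" collection exists, as in the package example):

ctx := context.Background()
col, err := db.Collection(ctx, "books")
if err != nil {
    // Handle error
}
count, err := col.Count(ctx)
if err != nil {
    // Handle error
}
props, err := col.Properties(ctx)
if err != nil {
    // Handle error
}
fmt.Printf("collection %s: %d documents, waitForSync=%v\n", col.Name(), count, props.WaitForSync)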

type CollectionDocuments Uses

type CollectionDocuments interface {
    // DocumentExists checks if a document with given key exists in the collection.
    DocumentExists(ctx context.Context, key string) (bool, error)

    // ReadDocument reads a single document with given key from the collection.
    // The document data is stored into result, the document meta data is returned.
    // If no document exists with given key, a NotFoundError is returned.
    ReadDocument(ctx context.Context, key string, result interface{}) (DocumentMeta, error)

    // ReadDocuments reads multiple documents with given keys from the collection.
    // The documents data is stored into elements of the given results slice,
    // the documents meta data is returned.
    // If no document exists with a given key, a NotFoundError is returned at its errors index.
    ReadDocuments(ctx context.Context, keys []string, results interface{}) (DocumentMetaSlice, ErrorSlice, error)

    // CreateDocument creates a single document in the collection.
    // The document data is loaded from the given document, the document meta data is returned.
    // If the document data already contains a `_key` field, this will be used as key of the new document,
    // otherwise a unique key is created.
    // A ConflictError is returned when a `_key` field contains a duplicate key or any other field violates an index constraint.
    // To return the NEW document, prepare a context with `WithReturnNew`.
    // To wait until the document has been synced to disk, prepare a context with `WithWaitForSync`.
    CreateDocument(ctx context.Context, document interface{}) (DocumentMeta, error)

    // CreateDocuments creates multiple documents in the collection.
    // The document data is loaded from the given documents slice, the documents meta data is returned.
    // If a documents element already contains a `_key` field, this will be used as key of the new document,
    // otherwise a unique key is created.
    // If a documents element contains a `_key` field with a duplicate key, or any other field violates an index constraint,
    // a ConflictError is returned at its index in the errors slice.
    // To return the NEW documents, prepare a context with `WithReturnNew`. The data argument passed to `WithReturnNew` must be
    // a slice with the same number of entries as the `documents` slice.
    // To wait until the documents have been synced to disk, prepare a context with `WithWaitForSync`.
    // If the create request itself fails or one of the arguments is invalid, an error is returned.
    CreateDocuments(ctx context.Context, documents interface{}) (DocumentMetaSlice, ErrorSlice, error)

    // UpdateDocument updates a single document with given key in the collection.
    // The document meta data is returned.
    // To return the NEW document, prepare a context with `WithReturnNew`.
    // To return the OLD document, prepare a context with `WithReturnOld`.
    // To wait until the document has been synced to disk, prepare a context with `WithWaitForSync`.
    // If no document exists with given key, a NotFoundError is returned.
    UpdateDocument(ctx context.Context, key string, update interface{}) (DocumentMeta, error)

    // UpdateDocuments updates multiple documents with given keys in the collection.
    // The updates are loaded from the given updates slice, the documents meta data are returned.
    // To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents.
    // To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents.
    // To wait until the documents have been synced to disk, prepare a context with `WithWaitForSync`.
    // If no document exists with a given key, a NotFoundError is returned at its errors index.
    // If keys is nil, each element in the updates slice must contain a `_key` field.
    UpdateDocuments(ctx context.Context, keys []string, updates interface{}) (DocumentMetaSlice, ErrorSlice, error)

    // ReplaceDocument replaces a single document with given key in the collection with the document given in the document argument.
    // The document meta data is returned.
    // To return the NEW document, prepare a context with `WithReturnNew`.
    // To return the OLD document, prepare a context with `WithReturnOld`.
    // To wait until the document has been synced to disk, prepare a context with `WithWaitForSync`.
    // If no document exists with given key, a NotFoundError is returned.
    ReplaceDocument(ctx context.Context, key string, document interface{}) (DocumentMeta, error)

    // ReplaceDocuments replaces multiple documents with given keys in the collection with the documents given in the documents argument.
    // The replacements are loaded from the given documents slice, the documents meta data are returned.
    // To return the NEW documents, prepare a context with `WithReturnNew` with a slice of documents.
    // To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents.
    // To wait until the documents have been synced to disk, prepare a context with `WithWaitForSync`.
    // If no document exists with a given key, a NotFoundError is returned at its errors index.
    // If keys is nil, each element in the documents slice must contain a `_key` field.
    ReplaceDocuments(ctx context.Context, keys []string, documents interface{}) (DocumentMetaSlice, ErrorSlice, error)

    // RemoveDocument removes a single document with given key from the collection.
    // The document meta data is returned.
    // To return the OLD document, prepare a context with `WithReturnOld`.
    // To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`.
    // If no document exists with given key, a NotFoundError is returned.
    RemoveDocument(ctx context.Context, key string) (DocumentMeta, error)

    // RemoveDocuments removes multiple documents with given keys from the collection.
    // The document meta data are returned.
    // To return the OLD documents, prepare a context with `WithReturnOld` with a slice of documents.
    // To wait until removal has been synced to disk, prepare a context with `WithWaitForSync`.
    // If no document exists with a given key, a NotFoundError is returned at its errors index.
    RemoveDocuments(ctx context.Context, keys []string) (DocumentMetaSlice, ErrorSlice, error)

    // ImportDocuments imports one or more documents into the collection.
    // The document data is loaded from the given documents argument, statistics are returned.
    // The documents argument can be one of the following:
    // - An array of structs: All structs will be imported as individual documents.
    // - An array of maps: All maps will be imported as individual documents.
    // To wait until all documents have been synced to disk, prepare a context with `WithWaitForSync`.
    // To return details about documents that could not be imported, prepare a context with `WithImportDetails`.
    ImportDocuments(ctx context.Context, documents interface{}, options *ImportDocumentOptions) (ImportDocumentStatistics, error)
}

CollectionDocuments provides access to the documents in a single collection.
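
A sketch of creating and reading back multiple documents (assuming `col` is a Collection and reusing the Book struct from the package example; WithWaitForSync is the context helper provided by this package):

ctx := context.Background()
books := []Book{
    {Title: "Book A", NoPages: 100},
    {Title: "Book B", NoPages: 200},
}
// Wait until the documents have been synced to disk before returning.
metas, errs, err := col.CreateDocuments(driver.WithWaitForSync(ctx), books)
if err != nil {
    // Handle error (the request itself failed)
} else if err := errs.FirstNonNil(); err != nil {
    // Handle error (at least one document could not be created)
}

// Read the documents back by key.
results := make([]Book, len(metas))
if _, _, err := col.ReadDocuments(ctx, metas.Keys(), results); err != nil {
    // Handle error
}
fmt.Printf("read back: %+v\n", results)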

type CollectionIndexes Uses

type CollectionIndexes interface {
    // Index opens a connection to an existing index within the collection.
    // If no index with given name exists, a NotFoundError is returned.
    Index(ctx context.Context, name string) (Index, error)

    // IndexExists returns true if an index with given name exists within the collection.
    IndexExists(ctx context.Context, name string) (bool, error)

    // Indexes returns a list of all indexes in the collection.
    Indexes(ctx context.Context) ([]Index, error)

    // EnsureFullTextIndex creates a fulltext index in the collection, if it does not already exist.
    // Fields is a slice of attribute names. Currently, the slice is limited to exactly one attribute.
    // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
    EnsureFullTextIndex(ctx context.Context, fields []string, options *EnsureFullTextIndexOptions) (Index, bool, error)

    // EnsureGeoIndex creates a geo index in the collection, if it does not already exist.
    // Fields is a slice with one or two attribute paths. If it is a slice with one attribute path location,
    // then a geo-spatial index on all documents is created using location as path to the coordinates.
    // The value of the attribute must be a slice with at least two double values. The slice must contain the latitude (first value)
    // and the longitude (second value). All documents that do not have the attribute path, or whose values are not suitable, are ignored.
    // If it is a slice with two attribute paths, latitude and longitude, then a geo-spatial index on all documents is created
    // using latitude and longitude as paths to the latitude and the longitude. The values of the latitude and longitude
    // attributes must be doubles. All documents that do not have the attribute paths, or whose values are not suitable, are ignored.
    // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
    EnsureGeoIndex(ctx context.Context, fields []string, options *EnsureGeoIndexOptions) (Index, bool, error)

    // EnsureHashIndex creates a hash index in the collection, if it does not already exist.
    // Fields is a slice of attribute paths.
    // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
    EnsureHashIndex(ctx context.Context, fields []string, options *EnsureHashIndexOptions) (Index, bool, error)

    // EnsurePersistentIndex creates a persistent index in the collection, if it does not already exist.
    // Fields is a slice of attribute paths.
    // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
    EnsurePersistentIndex(ctx context.Context, fields []string, options *EnsurePersistentIndexOptions) (Index, bool, error)

    // EnsureSkipListIndex creates a skiplist index in the collection, if it does not already exist.
    // Fields is a slice of attribute paths.
    // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
    EnsureSkipListIndex(ctx context.Context, fields []string, options *EnsureSkipListIndexOptions) (Index, bool, error)

    // EnsureTTLIndex creates a TTL index in the collection, if it does not already exist.
    // The index is returned, together with a boolean indicating if the index was newly created (true) or pre-existing (false).
    EnsureTTLIndex(ctx context.Context, field string, expireAfter int, options *EnsureTTLIndexOptions) (Index, bool, error)
}

CollectionIndexes provides access to the indexes in a single collection.
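
A sketch of ensuring a unique persistent index (assuming `col` is a Collection; the field and index names are placeholders):

ctx := context.Background()
idx, created, err := col.EnsurePersistentIndex(ctx, []string{"title"}, &driver.EnsurePersistentIndexOptions{
    Unique: true,
    Name:   "idx_title",
})
if err != nil {
    // Handle error
}
fmt.Printf("index %s (newly created: %v)\n", idx.Name(), created)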

type CollectionInfo Uses

type CollectionInfo struct {
    // The identifier of the collection.
    ID  string `json:"id,omitempty"`
    // The name of the collection.
    Name string `json:"name,omitempty"`
    // The status of the collection
    Status CollectionStatus `json:"status,omitempty"`
    // The type of the collection
    Type CollectionType `json:"type,omitempty"`
    // If true then the collection is a system collection.
    IsSystem bool `json:"isSystem,omitempty"`
}

CollectionInfo contains information about a collection

type CollectionKeyOptions Uses

type CollectionKeyOptions struct {
    // If set to true, then it is allowed to supply own key values in the _key attribute of a document.
    // If set to false, then the key generator will solely be responsible for generating keys and supplying own
    // key values in the _key attribute of documents is considered an error.
    AllowUserKeys bool `json:"allowUserKeys,omitempty"`
    // Specifies the type of the key generator. The currently available generators are traditional and autoincrement.
    Type KeyGeneratorType `json:"type,omitempty"`
    // Increment value for autoincrement key generator. Not used for other key generator types.
    Increment int `json:"increment,omitempty"`
    // Initial offset value for autoincrement key generator. Not used for other key generator types.
    Offset int `json:"offset,omitempty"`
}

CollectionKeyOptions specifies ways for creating keys of a collection.

type CollectionProperties Uses

type CollectionProperties struct {
    CollectionInfo

    // WaitForSync; If true then creating, changing or removing documents will wait until the data has been synchronized to disk.
    WaitForSync bool `json:"waitForSync,omitempty"`
    // DoCompact specifies whether or not the collection will be compacted.
    DoCompact bool `json:"doCompact,omitempty"`
    // JournalSize is the maximal size setting for journals / datafiles in bytes.
    JournalSize int64 `json:"journalSize,omitempty"`
    KeyOptions  struct {
        // Type specifies the type of the key generator. The currently available generators are traditional and autoincrement.
        Type KeyGeneratorType `json:"type,omitempty"`
        // AllowUserKeys; if set to true, then it is allowed to supply own key values in the _key attribute of a document.
        // If set to false, then the key generator is solely responsible for generating keys and supplying own key values in
        // the _key attribute of documents is considered an error.
        AllowUserKeys bool `json:"allowUserKeys,omitempty"`
    }   `json:"keyOptions,omitempty"`
    // NumberOfShards is the number of shards of the collection.
    // Only available in cluster setup.
    NumberOfShards int `json:"numberOfShards,omitempty"`
    // ShardKeys contains the names of document attributes that are used to determine the target shard for documents.
    // Only available in cluster setup.
    ShardKeys []string `json:"shardKeys,omitempty"`
    // ReplicationFactor contains how many copies of each shard are kept on different DBServers.
    // Only available in cluster setup.
    ReplicationFactor int `json:"replicationFactor,omitempty"`
    // Deprecated: use 'WriteConcern' instead
    MinReplicationFactor int `json:"minReplicationFactor,omitempty"`
    // WriteConcern contains how many copies must be available before a collection can be written.
    // It is required that 1 <= WriteConcern <= ReplicationFactor.
    // Default is 1. Not available for satellite collections.
    // Available from 3.6 arangod version.
    WriteConcern int `json:"writeConcern,omitempty"`
    // SmartJoinAttribute
    // See documentation for smart joins.
    // This requires ArangoDB Enterprise Edition.
    SmartJoinAttribute string `json:"smartJoinAttribute,omitempty"`
    // This attribute specifies the name of the sharding strategy to use for the collection.
    // Can not be changed after creation.
    ShardingStrategy ShardingStrategy `json:"shardingStrategy,omitempty"`
}

CollectionProperties contains extended information about a collection.

func (*CollectionProperties) IsSatellite Uses

func (p *CollectionProperties) IsSatellite() bool

IsSatellite returns true if the collection is a satellite collection

func (*CollectionProperties) MarshalJSON Uses

func (p *CollectionProperties) MarshalJSON() ([]byte, error)

MarshalJSON converts CollectionProperties into json

func (*CollectionProperties) UnmarshalJSON Uses

func (p *CollectionProperties) UnmarshalJSON(d []byte) error

UnmarshalJSON loads CollectionProperties from json

type CollectionStatistics Uses

type CollectionStatistics struct {
    // The number of documents currently present in the collection.
    Count int64 `json:"count,omitempty"`
    // The maximal size of a journal or datafile in bytes.
    JournalSize int64 `json:"journalSize,omitempty"`
    Figures     struct {
        DataFiles struct {
            // The number of datafiles.
            Count int64 `json:"count,omitempty"`
            // The total filesize of datafiles (in bytes).
            FileSize int64 `json:"fileSize,omitempty"`
        }   `json:"datafiles"`
        // The number of markers in the write-ahead log for this collection that have not been transferred to journals or datafiles.
        UncollectedLogfileEntries int64 `json:"uncollectedLogfileEntries,omitempty"`
        // The number of references to documents in datafiles that JavaScript code currently holds. This information can be used for debugging compaction and unload issues.
        DocumentReferences int64 `json:"documentReferences,omitempty"`
        CompactionStatus   struct {
            // The action that was performed when the compaction was last run for the collection. This information can be used for debugging compaction issues.
            Message string `json:"message,omitempty"`
            // The point in time the compaction for the collection was last executed. This information can be used for debugging compaction issues.
            Time time.Time `json:"time,omitempty"`
        }   `json:"compactionStatus"`
        Compactors struct {
            // The number of compactor files.
            Count int64 `json:"count,omitempty"`
            // The total filesize of all compactor files (in bytes).
            FileSize int64 `json:"fileSize,omitempty"`
        }   `json:"compactors"`
        Dead struct {
            // The number of dead documents. This includes document versions that have been deleted or replaced by a newer version. Documents deleted or replaced that are contained in the write-ahead log only are not reported in this figure.
            Count int64 `json:"count,omitempty"`
            // The total number of deletion markers. Deletion markers only contained in the write-ahead log are not reported in this figure.
            Deletion int64 `json:"deletion,omitempty"`
            // The total size in bytes used by all dead documents.
            Size int64 `json:"size,omitempty"`
        }   `json:"dead"`
        Indexes struct {
            // The total number of indexes defined for the collection, including the pre-defined indexes (e.g. primary index).
            Count int64 `json:"count,omitempty"`
            // The total memory allocated for indexes in bytes.
            Size int64 `json:"size,omitempty"`
        }   `json:"indexes"`
        ReadCache struct {
            // The number of revisions of this collection stored in the document revisions cache.
            Count int64 `json:"count,omitempty"`
            // The memory used for storing the revisions of this collection in the document revisions cache (in bytes). This figure does not include the document data but only mappings from document revision ids to cache entry locations.
            Size int64 `json:"size,omitempty"`
        }   `json:"readcache"`
        // An optional string value that contains information about which object type is at the head of the collection's cleanup queue. This information can be used for debugging compaction and unload issues.
        WaitingFor string `json:"waitingFor,omitempty"`
        Alive      struct {
            // The number of currently active documents in all datafiles and journals of the collection. Documents that are contained in the write-ahead log only are not reported in this figure.
            Count int64 `json:"count,omitempty"`
            // The total size in bytes used by all active documents of the collection. Documents that are contained in the write-ahead log only are not reported in this figure.
            Size int64 `json:"size,omitempty"`
        }   `json:"alive"`
        // The tick of the last marker that was stored in a journal of the collection. This might be 0 if the collection does not yet have a journal.
        LastTick int64 `json:"lastTick,omitempty"`
        Journals struct {
            // The number of journal files.
            Count int64 `json:"count,omitempty"`
            // The total filesize of all journal files (in bytes).
            FileSize int64 `json:"fileSize,omitempty"`
        }   `json:"journals"`
        Revisions struct {
            // The number of revisions of this collection managed by the storage engine.
            Count int64 `json:"count,omitempty"`
            // The memory used for storing the revisions of this collection in the storage engine (in bytes). This figure does not include the document data but only mappings from document revision ids to storage engine datafile positions.
            Size int64 `json:"size,omitempty"`
        }   `json:"revisions"`
    }   `json:"figures"`
}

CollectionStatistics contains the number of documents and additional statistical information about a collection.

type CollectionStatus Uses

type CollectionStatus int

CollectionStatus indicates the status of a collection.

type CollectionType Uses

type CollectionType int

CollectionType is the type of a collection.

type CommitTransactionOptions Uses

type CommitTransactionOptions struct{}

CommitTransactionOptions provides options for CommitTransaction. Currently unused.

type Connection Uses

type Connection interface {
    // NewRequest creates a new request with given method and path.
    NewRequest(method, path string) (Request, error)

    // Do performs a given request, returning its response.
    Do(ctx context.Context, req Request) (Response, error)

    // Unmarshal unmarshals the given raw object into the given result interface.
    Unmarshal(data RawObject, result interface{}) error

    // Endpoints returns the endpoints used by this connection.
    Endpoints() []string

    // UpdateEndpoints reconfigures the connection to use the given endpoints.
    UpdateEndpoints(endpoints []string) error

    // Configure the authentication used for this connection.
    SetAuthentication(Authentication) (Connection, error)

    // Protocols returns all protocols used by this connection.
    Protocols() ProtocolSet
}

Connection is a connection to a database server using a specific protocol.

type ContentType Uses

type ContentType int

ContentType identifies the type of encoding to use for the data.

const (
    // ContentTypeJSON encodes data as json
    ContentTypeJSON ContentType = iota
    // ContentTypeVelocypack encodes data as Velocypack
    ContentTypeVelocypack
)

func (ContentType) String Uses

func (ct ContentType) String() string

type ContextKey Uses

type ContextKey string

ContextKey is an internal type used for holding values in a `context.Context`. Do not use.

type CreateCollectionOptions Uses

type CreateCollectionOptions struct {
    // The maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MiB). (The default is a configuration parameter)
    JournalSize int `json:"journalSize,omitempty"`
    // ReplicationFactor in a cluster (default is 1), this attribute determines how many copies of each shard are kept on different DBServers.
    // The value 1 means that only one copy (no synchronous replication) is kept.
    // A value of k means that k-1 replicas are kept. Any two copies reside on different DBServers.
    // Replication between them is synchronous, that is, every write operation to the "leader" copy will be replicated to all "follower" replicas,
    // before the write operation is reported successful. If a server fails, this is detected automatically
    // and one of the servers holding copies takes over, usually without an error being reported.
    ReplicationFactor int `json:"replicationFactor,omitempty"`
    // Deprecated: use 'WriteConcern' instead
    MinReplicationFactor int `json:"minReplicationFactor,omitempty"`
    // WriteConcern contains how many copies must be available before a collection can be written.
    // It is required that 1 <= WriteConcern <= ReplicationFactor.
    // Default is 1. Not available for satellite collections.
    // Available from 3.6 arangod version.
    WriteConcern int `json:"writeConcern,omitempty"`
    // If true then the data is synchronized to disk before returning from a document create, update, replace or removal operation. (default: false)
    WaitForSync bool `json:"waitForSync,omitempty"`
    // Whether or not the collection will be compacted (default is true)
    DoCompact *bool `json:"doCompact,omitempty"`
    // If true then the collection data is kept in-memory only and not made persistent.
    // Unloading the collection will cause the collection data to be discarded. Stopping or re-starting the server will also
    // cause full loss of data in the collection. Setting this option will make the resulting collection be slightly faster
    // than regular collections because ArangoDB does not enforce any synchronization to disk and does not calculate any
    // CRC checksums for datafiles (as there are no datafiles). This option should therefore be used for cache-type collections only,
    // and not for data that cannot be re-created otherwise. (The default is false)
    IsVolatile bool `json:"isVolatile,omitempty"`
    // In a cluster, this attribute determines which document attributes are used to
    // determine the target shard for documents. Documents are sent to shards based on the values of their shard key attributes.
    // The values of all shard key attributes in a document are hashed, and the hash value is used to determine the target shard.
    // Note: Values of shard key attributes cannot be changed once set. This option is meaningless in a single server setup.
    // The default is []string{"_key"}.
    ShardKeys []string `json:"shardKeys,omitempty"`
    // In a cluster, this value determines the number of shards to create for the collection. In a single server setup, this option is meaningless. (default is 1)
    NumberOfShards int `json:"numberOfShards,omitempty"`
    // If true, create a system collection. In this case collection-name should start with an underscore.
    // End users should normally create non-system collections only. API implementors may be required to create system
    // collections on very special occasions, but normally a regular collection will do. (The default is false)
    IsSystem bool `json:"isSystem,omitempty"`
    // The type of the collection to create. (default is CollectionTypeDocument)
    Type CollectionType `json:"type,omitempty"`
    // The number of buckets into which indexes using a hash table are split. The default is 16 and this number has to be a power
    // of 2 and less than or equal to 1024. For very large collections one should increase this to avoid long pauses when the hash
    // table has to be initially built or resized, since buckets are resized individually and can be initially built in parallel.
    // For example, 64 might be a sensible value for a collection with 100 000 000 documents.
    // Currently, only the edge index respects this value, but other index types might follow in future ArangoDB versions.
    // Changes are applied when the collection is loaded the next time.
    IndexBuckets int `json:"indexBuckets,omitempty"`
    // Specifies how keys in the collection are created.
    KeyOptions *CollectionKeyOptions `json:"keyOptions,omitempty"`
    // This field is used for internal purposes only. DO NOT USE.
    DistributeShardsLike string `json:"distributeShardsLike,omitempty"`
    // Set to create a smart edge or vertex collection.
    // This requires ArangoDB Enterprise Edition.
    IsSmart bool `json:"isSmart,omitempty"`
    // This field must be set to the attribute that will be used for sharding or smart graphs.
    // All vertices are required to have this attribute set. Edges derive the attribute from their connected vertices.
    // This requires ArangoDB Enterprise Edition.
    SmartGraphAttribute string `json:"smartGraphAttribute,omitempty"`
    // SmartJoinAttribute
    // In the specific case that the two collections have the same number of shards, the data of the two collections can
    // be co-located on the same server for the same shard key values. In this case the extra hop via the coordinator will not be necessary.
    // See documentation for smart joins.
    // This requires ArangoDB Enterprise Edition.
    SmartJoinAttribute string `json:"smartJoinAttribute,omitempty"`
    // This attribute specifies the name of the sharding strategy to use for the collection.
    // Must be one of ShardingStrategy* values.
    ShardingStrategy ShardingStrategy `json:"shardingStrategy,omitempty"`
}

CreateCollectionOptions contains options that customize the creating of a collection.
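
A sketch of creating a collection with a few of these options set (assuming `db` is a Database; the sharding settings only have effect in a cluster):

ctx := context.Background()
col, err := db.CreateCollection(ctx, "orders", &driver.CreateCollectionOptions{
    NumberOfShards:    3,    // only meaningful in a cluster
    ReplicationFactor: 2,    // keep 2 copies of every shard
    WaitForSync:       true, // sync writes to disk before returning
    KeyOptions: &driver.CollectionKeyOptions{
        AllowUserKeys: true,
    },
})
if err != nil {
    // Handle error
}
fmt.Println("created collection:", col.Name())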

type CreateDatabaseDefaultOptions Uses

type CreateDatabaseDefaultOptions struct {
    // Default replication factor for collections in database
    ReplicationFactor int `json:"replicationFactor,omitempty"`
    // Default write concern for collections in database
    WriteConcern int `json:"writeConcern,omitempty"`
    // Default sharding for collections in database
    Sharding DatabaseSharding `json:"sharding,omitempty"`
}

CreateDatabaseDefaultOptions contains options that change defaults for collections

type CreateDatabaseOptions Uses

type CreateDatabaseOptions struct {
    // List of users to initially create for the new database. User information will not be changed for users that already exist.
    // If users is not specified or does not contain any users, a default user root will be created with an empty string password.
    // This ensures that the new database will be accessible after it is created.
    Users []CreateDatabaseUserOptions `json:"users,omitempty"`

    // Options database defaults
    Options CreateDatabaseDefaultOptions `json:"options,omitempty"`
}

CreateDatabaseOptions contains options that customize the creating of a database.
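
A sketch of creating a database together with an initial user (assuming `c` is a driver.Client; names and password are placeholders):

ctx := context.Background()
db, err := c.CreateDatabase(ctx, "shop", &driver.CreateDatabaseOptions{
    Users: []driver.CreateDatabaseUserOptions{
        {UserName: "shop-admin", Password: "secret"},
    },
})
if err != nil {
    // Handle error
}
fmt.Println("created database:", db.Name())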

type CreateDatabaseUserOptions Uses

type CreateDatabaseUserOptions struct {
    // Login name of the user to be created.
    UserName string `json:"user,omitempty"`
    // The user password as a string. If not specified, it will default to an empty string.
    Password string `json:"passwd,omitempty"`
    // A flag indicating whether the user account should be activated or not. The default value is true. If set to false, the user won't be able to log into the database.
    Active *bool `json:"active,omitempty"`
    // A JSON object with extra user information. The data contained in extra will be stored for the user but not be interpreted further by ArangoDB.
    Extra interface{} `json:"extra,omitempty"`
}

CreateDatabaseUserOptions contains options for creating a single user for a database.

type CreateGraphOptions Uses

type CreateGraphOptions struct {
    // OrphanVertexCollections is an array of additional vertex collections used in the graph.
    // These are vertices for which there are no edges linking these vertices with anything.
    OrphanVertexCollections []string
    // EdgeDefinitions is an array of edge definitions for the graph.
    EdgeDefinitions []EdgeDefinition
    // IsSmart defines if the created graph should be smart.
    // This only has effect in Enterprise Edition.
    IsSmart bool
    // SmartGraphAttribute is the attribute name that is used to smartly shard the vertices of a graph.
    // Every vertex in this Graph has to have this attribute.
    // Cannot be modified later.
    SmartGraphAttribute string
    // NumberOfShards is the number of shards that is used for every collection within this graph.
    // Cannot be modified later.
    NumberOfShards int
}

CreateGraphOptions contains options that customize the creating of a graph.
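
A sketch of creating a graph with a single edge definition (assuming `db` is a Database; the graph and collection names are placeholders):

ctx := context.Background()
g, err := db.CreateGraph(ctx, "social", &driver.CreateGraphOptions{
    EdgeDefinitions: []driver.EdgeDefinition{
        {
            Collection: "knows",             // edge collection
            From:       []string{"persons"}, // source vertex collections
            To:         []string{"persons"}, // target vertex collections
        },
    },
})
if err != nil {
    // Handle error
}
fmt.Println("created graph:", g.Name())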

type Cursor Uses

type Cursor interface {
    io.Closer

    // HasMore returns true if the next call to ReadDocument does not return a NoMoreDocuments error.
    HasMore() bool

    // ReadDocument reads the next document from the cursor.
    // The document data is stored into result, the document meta data is returned.
    // If the cursor has no more documents, a NoMoreDocuments error is returned.
    // Note: If the query (resulting in this cursor) does not return documents,
    //       then the returned DocumentMeta will be empty.
    ReadDocument(ctx context.Context, result interface{}) (DocumentMeta, error)

    // Count returns the total number of result documents available.
    // A valid return value is only available when the cursor has been created with a context that was
    // prepared with `WithQueryCount` and not with `WithQueryStream`.
    Count() int64

    // Statistics returns the query execution statistics for this cursor.
    // This might not be valid if the cursor has been created with a context that was
    // prepared with `WithQueryStream`
    Statistics() QueryStatistics
}

Cursor is returned from a query, used to iterate over a list of documents. Note that a Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed.
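
A sketch of the usual iteration pattern (assuming `db` is a Database and reusing the Book struct and "books" collection from the package example; WithQueryCount and IsNoMoreDocuments are helpers provided by this package):

ctx := driver.WithQueryCount(context.Background())
query := "FOR b IN books FILTER b.no_pages > @pages RETURN b"
bindVars := map[string]interface{}{"pages": 100}
cursor, err := db.Query(ctx, query, bindVars)
if err != nil {
    // Handle error
}
defer cursor.Close()
fmt.Println("total results:", cursor.Count())
for {
    var book Book
    if _, err := cursor.ReadDocument(ctx, &book); driver.IsNoMoreDocuments(err) {
        break
    } else if err != nil {
        // Handle error
    }
    fmt.Printf("got book: %+v\n", book)
}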

type Database Uses

type Database interface {
    // Name returns the name of the database.
    Name() string

    // Info fetches information about the database.
    Info(ctx context.Context) (DatabaseInfo, error)

    // EngineInfo returns information about the database engine being used.
    // Note: When your client uses multiple endpoints (e.g. in a cluster), you will get information
    // from the server that is currently being used.
    // If you want to know exactly which server the information is from, use a client
    // with only a single endpoint and avoid automatic synchronization of endpoints.
    EngineInfo(ctx context.Context) (EngineInfo, error)

    // Remove removes the entire database.
    // If the database does not exist, a NotFoundError is returned.
    Remove(ctx context.Context) error

    // Collection functions
    DatabaseCollections

    // View functions
    DatabaseViews

    // Graph functions
    DatabaseGraphs

    // Streaming Transactions functions
    DatabaseStreamingTransactions

    // ArangoSearch Analyzers API
    DatabaseArangoSearchAnalyzers

    // Query performs an AQL query, returning a cursor used to iterate over the returned documents.
    // Note that the returned Cursor must always be closed to avoid holding on to resources in the server while they are no longer needed.
    Query(ctx context.Context, query string, bindVars map[string]interface{}) (Cursor, error)

    // ValidateQuery validates an AQL query.
    // When the query is valid, nil is returned; otherwise an error is returned.
    // The query is not executed.
    ValidateQuery(ctx context.Context, query string) error

    // Transaction performs a JavaScript transaction. The result of the transaction function is returned.
    Transaction(ctx context.Context, action string, options *TransactionOptions) (interface{}, error)
}

Database provides access to all collections & graphs in a single database.

type DatabaseArangoSearchAnalyzers Uses

type DatabaseArangoSearchAnalyzers interface {

    // EnsureAnalyzer ensures that the given analyzer exists. If it does not exist, it is created.
    // It returns whether the analyzer already existed, the analyzer itself, and any error.
    EnsureAnalyzer(ctx context.Context, analyzer ArangoSearchAnalyzerDefinition) (bool, ArangoSearchAnalyzer, error)

    // Analyzer returns the definition of the analyzer with given name, or an error.
    Analyzer(ctx context.Context, name string) (ArangoSearchAnalyzer, error)

    // Analyzers returns a list of all analyzers.
    Analyzers(ctx context.Context) ([]ArangoSearchAnalyzer, error)
}

type DatabaseCollections Uses

type DatabaseCollections interface {
    // Collection opens a connection to an existing collection within the database.
    // If no collection with given name exists, a NotFoundError is returned.
    Collection(ctx context.Context, name string) (Collection, error)

    // CollectionExists returns true if a collection with given name exists within the database.
    CollectionExists(ctx context.Context, name string) (bool, error)

    // Collections returns a list of all collections in the database.
    Collections(ctx context.Context) ([]Collection, error)

    // CreateCollection creates a new collection with given name and options, and opens a connection to it.
    // If a collection with given name already exists within the database, a DuplicateError is returned.
    CreateCollection(ctx context.Context, name string, options *CreateCollectionOptions) (Collection, error)
}

DatabaseCollections provides access to all collections in a single database.

type DatabaseGraphs Uses

type DatabaseGraphs interface {
    // Graph opens a connection to an existing graph within the database.
    // If no graph with given name exists, a NotFoundError is returned.
    Graph(ctx context.Context, name string) (Graph, error)

    // GraphExists returns true if a graph with given name exists within the database.
    GraphExists(ctx context.Context, name string) (bool, error)

    // Graphs returns a list of all graphs in the database.
    Graphs(ctx context.Context) ([]Graph, error)

    // CreateGraph creates a new graph with given name and options, and opens a connection to it.
    // If a graph with given name already exists within the database, a DuplicateError is returned.
    CreateGraph(ctx context.Context, name string, options *CreateGraphOptions) (Graph, error)
}

DatabaseGraphs provides access to all graphs in a single database.

type DatabaseInfo Uses

type DatabaseInfo struct {
    // The identifier of the database.
    ID  string `json:"id,omitempty"`
    // The name of the database.
    Name string `json:"name,omitempty"`
    // The filesystem path of the database.
    Path string `json:"path,omitempty"`
    // If true then the database is the _system database.
    IsSystem bool `json:"isSystem,omitempty"`
    // Default replication factor for collections in database
    ReplicationFactor int `json:"replicationFactor,omitempty"`
    // Default write concern for collections in database
    WriteConcern int `json:"writeConcern,omitempty"`
    // Default sharding for collections in database
    Sharding DatabaseSharding `json:"sharding,omitempty"`
}

DatabaseInfo contains information about a database

type DatabaseInventory Uses

type DatabaseInventory struct {
    // Details of all collections
    Collections []InventoryCollection `json:"collections,omitempty"`
    // Details of all views
    Views []InventoryView `json:"views,omitempty"`
}

DatabaseInventory describes a detailed state of the collections & shards of a specific database within a cluster.

func (DatabaseInventory) CollectionByName Uses

func (i DatabaseInventory) CollectionByName(name string) (InventoryCollection, bool)

CollectionByName returns the InventoryCollection with given name. Returns false if not found.

func (DatabaseInventory) IsReady Uses

func (i DatabaseInventory) IsReady() bool

IsReady returns true if the IsReady flag of all collections is set.

func (DatabaseInventory) PlanVersion Uses

func (i DatabaseInventory) PlanVersion() int64

PlanVersion returns the plan version of the first collection in the given inventory.

func (DatabaseInventory) ViewByName Uses

func (i DatabaseInventory) ViewByName(name string) (InventoryView, bool)

ViewByName returns the InventoryView with given name. Returns false if not found.

type DatabaseSharding Uses

type DatabaseSharding string

const (
    DatabaseShardingSingle DatabaseSharding = "single"
    DatabaseShardingNone   DatabaseSharding = ""
)

type DatabaseStreamingTransactions Uses

type DatabaseStreamingTransactions interface {
    BeginTransaction(ctx context.Context, cols TransactionCollections, opts *BeginTransactionOptions) (TransactionID, error)
    CommitTransaction(ctx context.Context, tid TransactionID, opts *CommitTransactionOptions) error
    AbortTransaction(ctx context.Context, tid TransactionID, opts *AbortTransactionOptions) error

    TransactionStatus(ctx context.Context, tid TransactionID) (TransactionStatusRecord, error)
}

DatabaseStreamingTransactions provides access to the Streaming Transactions API
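
A hedged sketch of a streaming transaction (assuming `db` is a Database and `col` a Collection within it, reusing the Book struct from the package example; it assumes TransactionCollections has a Write field listing the involved collections and that WithTransactionID binds follow-up requests to the transaction, as provided by this package):

ctx := context.Background()
tid, err := db.BeginTransaction(ctx, driver.TransactionCollections{
    Write: []string{"books"},
}, nil)
if err != nil {
    // Handle error
}
// Bind subsequent operations to the transaction via the context.
tctx := driver.WithTransactionID(ctx, tid)
if _, err := col.CreateDocument(tctx, Book{Title: "Tx Book", NoPages: 1}); err != nil {
    _ = db.AbortTransaction(ctx, tid, nil)
    // Handle error
}
if err := db.CommitTransaction(ctx, tid, nil); err != nil {
    // Handle error
}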

type DatabaseViews Uses

type DatabaseViews interface {
    // View opens a connection to an existing view within the database.
    // If no view with given name exists, a NotFoundError is returned.
    View(ctx context.Context, name string) (View, error)

    // ViewExists returns true if a view with given name exists within the database.
    ViewExists(ctx context.Context, name string) (bool, error)

    // Views returns a list of all views in the database.
    Views(ctx context.Context) ([]View, error)

    // CreateArangoSearchView creates a new view of type ArangoSearch,
    // with given name and options, and opens a connection to it.
    // If a view with given name already exists within the database, a ConflictError is returned.
    CreateArangoSearchView(ctx context.Context, name string, options *ArangoSearchViewProperties) (ArangoSearchView, error)
}

DatabaseViews provides access to all views in a single database. Views are only available in ArangoDB 3.4 and higher.
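
A minimal sketch of creating an ArangoSearch view with default properties (assuming `db` is a Database; the view name is a placeholder):

ctx := context.Background()
v, err := db.CreateArangoSearchView(ctx, "booksView", nil)
if err != nil {
    // Handle error (a ConflictError is returned if the view already exists)
}
fmt.Println("created view:", v.Name())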

type DocumentID Uses

type DocumentID string

DocumentID references a document in a collection. Format: collection/_key

func NewDocumentID Uses

func NewDocumentID(collection, key string) DocumentID

NewDocumentID creates a new document ID from the given collection and key.
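
A tiny sketch of constructing and splitting a document ID (collection and key are placeholders):

id := driver.NewDocumentID("books", "4711")
fmt.Println(id.String())     // "books/4711"
fmt.Println(id.Collection()) // "books"
fmt.Println(id.Key())        // "4711"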

func (DocumentID) Collection Uses

func (id DocumentID) Collection() string

Collection returns the collection part of the ID.

func (DocumentID) IsEmpty Uses

func (id DocumentID) IsEmpty() bool

IsEmpty returns true if the given ID is empty, false otherwise.

func (DocumentID) Key Uses

func (id DocumentID) Key() string

Key returns the key part of the ID.

func (DocumentID) String Uses

func (id DocumentID) String() string

String returns a string representation of the document ID.

func (DocumentID) Validate Uses

func (id DocumentID) Validate() error

Validate validates the given id.

func (DocumentID) ValidateOrEmpty Uses

func (id DocumentID) ValidateOrEmpty() error

ValidateOrEmpty validates the given id unless it is empty. In case of empty, nil is returned.

type DocumentMeta Uses

type DocumentMeta struct {
    Key string     `json:"_key,omitempty"`
    ID  DocumentID `json:"_id,omitempty"`
    Rev string     `json:"_rev,omitempty"`
}

DocumentMeta contains all meta data used to identify a document.

type DocumentMetaSlice Uses

type DocumentMetaSlice []DocumentMeta

DocumentMetaSlice is a slice of DocumentMeta elements

func (DocumentMetaSlice) IDs Uses

func (l DocumentMetaSlice) IDs() []DocumentID

IDs returns the IDs of all elements.

func (DocumentMetaSlice) Keys Uses

func (l DocumentMetaSlice) Keys() []string

Keys returns the keys of all elements.

func (DocumentMetaSlice) Revs Uses

func (l DocumentMetaSlice) Revs() []string

Revs returns the revisions of all elements.

type EdgeDefinition Uses

type EdgeDefinition struct {
    // The name of the edge collection to be used.
    Collection string `json:"collection"`
    // To contains the names of one or more vertex collections that can contain target vertices.
    To  []string `json:"to"`
    // From contains the names of one or more vertex collections that can contain source vertices.
    From []string `json:"from"`
}

EdgeDefinition contains all information needed to define a single edge in a graph.

type EdgeDocument Uses

type EdgeDocument struct {
    From DocumentID `json:"_from,omitempty"`
    To   DocumentID `json:"_to,omitempty"`
}

EdgeDocument is a minimal document for use in an edge collection. You can embed it in your own edge document structures or use a completely custom structure. If you use your own, make sure to include `_from` and `_to` fields.
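
A sketch of embedding EdgeDocument in a custom edge type (assuming `edgeCol` is an edge Collection, for example obtained via GraphEdgeCollections; collection and key names are placeholders):

// Knows is a custom edge document; embedding EdgeDocument provides the _from/_to fields.
type Knows struct {
    driver.EdgeDocument
    Since int `json:"since,omitempty"`
}

edge := Knows{
    EdgeDocument: driver.EdgeDocument{
        From: driver.NewDocumentID("persons", "alice"),
        To:   driver.NewDocumentID("persons", "bob"),
    },
    Since: 2017,
}
meta, err := edgeCol.CreateDocument(context.Background(), edge)
if err != nil {
    // Handle error
}
fmt.Println("created edge:", meta.Key)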

type EngineInfo Uses

type EngineInfo struct {
    Type EngineType `json:"name"`
}

EngineInfo contains information about the database engine being used.

type EngineType Uses

type EngineType string

EngineType indicates type of database engine being used.

func (EngineType) String Uses

func (t EngineType) String() string

type EnsureFullTextIndexOptions Uses

type EnsureFullTextIndexOptions struct {
    // MinLength is the minimum character length of words to index. Will default to a server-defined
    // value if unspecified (0). It is thus recommended to set this value explicitly when creating the index.
    MinLength int
    // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only).
    InBackground bool
    // Name optional user defined name used for hints in AQL queries
    Name string
}

EnsureFullTextIndexOptions contains specific options for creating a full text index.

type EnsureGeoIndexOptions Uses

type EnsureGeoIndexOptions struct {
    // If a geo-spatial index on a location is constructed and GeoJSON is true, then the order within the array
    // is longitude followed by latitude. This corresponds to the format described in http://geojson.org/geojson-spec.html#positions
    GeoJSON bool
    // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only).
    InBackground bool
    // Name optional user defined name used for hints in AQL queries
    Name string
}

EnsureGeoIndexOptions contains specific options for creating a geo index.

type EnsureHashIndexOptions Uses

type EnsureHashIndexOptions struct {
    // If true, then create a unique index.
    Unique bool
    // If true, then create a sparse index.
    Sparse bool
    // If true, de-duplication of array-values, before being added to the index, will be turned off.
    // This flag requires ArangoDB 3.2.
    // Note: this setting is only relevant for indexes with array fields (e.g. "fieldName[*]")
    NoDeduplicate bool
    // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only).
    InBackground bool
    // Name optional user defined name used for hints in AQL queries
    Name string
}

EnsureHashIndexOptions contains specific options for creating a hash index.

type EnsurePersistentIndexOptions Uses

type EnsurePersistentIndexOptions struct {
    // If true, then create a unique index.
    Unique bool
    // If true, then create a sparse index.
    Sparse bool
    // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only).
    InBackground bool
    // Name optional user defined name used for hints in AQL queries
    Name string
}

EnsurePersistentIndexOptions contains specific options for creating a persistent index.

type EnsureSkipListIndexOptions Uses

type EnsureSkipListIndexOptions struct {
    // If true, then create a unique index.
    Unique bool
    // If true, then create a sparse index.
    Sparse bool
    // If true, de-duplication of array-values, before being added to the index, will be turned off.
    // This flag requires ArangoDB 3.2.
    // Note: this setting is only relevant for indexes with array fields (e.g. "fieldName[*]")
    NoDeduplicate bool
    // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only).
    InBackground bool
    // Name optional user defined name used for hints in AQL queries
    Name string
}

EnsureSkipListIndexOptions contains specific options for creating a skip-list index.

type EnsureTTLIndexOptions Uses

type EnsureTTLIndexOptions struct {
    // InBackground if true will not hold an exclusive collection lock for the entire index creation period (rocksdb only).
    InBackground bool
    // Name optional user defined name used for hints in AQL queries
    Name string
}

EnsureTTLIndexOptions provides specific options for creating a TTL index

type ErrorSlice Uses

type ErrorSlice []error

ErrorSlice is a slice of errors

func (ErrorSlice) FirstNonNil Uses

func (l ErrorSlice) FirstNonNil() error

FirstNonNil returns the first error in the slice that is not nil. If all errors in the slice are nil, nil is returned.

type Grant Uses

type Grant string

Grant specifies access rights for an object

const (
    // GrantReadWrite indicates read/write access to an object
    GrantReadWrite Grant = "rw"
    // GrantReadOnly indicates read-only access to an object
    GrantReadOnly Grant = "ro"
    // GrantNone indicates no access to an object
    GrantNone Grant = "none"
)

type Graph Uses

type Graph interface {
    // Name returns the name of the graph.
    Name() string

    // Remove removes the entire graph.
    // If the graph does not exist, a NotFoundError is returned.
    Remove(ctx context.Context) error

    // Edge collection functions
    GraphEdgeCollections

    // Vertex collection functions
    GraphVertexCollections
}

Graph provides access to all edge & vertex collections of a single graph in a database.

type GraphEdgeCollections Uses

type GraphEdgeCollections interface {
    // EdgeCollection opens a connection to an existing edge-collection within the graph.
    // If no edge-collection with given name exists, a NotFoundError is returned.
    // Note: When calling Remove on the returned Collection, the collection is removed from the graph. Not from the database.
    EdgeCollection(ctx context.Context, name string) (Collection, VertexConstraints, error)

    // EdgeCollectionExists returns true if an edge-collection with given name exists within the graph.
    EdgeCollectionExists(ctx context.Context, name string) (bool, error)

    // EdgeCollections returns all edge collections of this graph
    // Note: When calling Remove on any of the returned Collection's, the collection is removed from the graph. Not from the database.
    EdgeCollections(ctx context.Context) ([]Collection, []VertexConstraints, error)

    // CreateEdgeCollection creates an edge collection in the graph.
    // collection: The name of the edge collection to be used.
    // constraints.From: contains the names of one or more vertex collections that can contain source vertices.
    // constraints.To: contains the names of one or more vertex collections that can contain target vertices.
    CreateEdgeCollection(ctx context.Context, collection string, constraints VertexConstraints) (Collection, error)

    // SetVertexConstraints modifies the vertex constraints of an existing edge collection in the graph.
    SetVertexConstraints(ctx context.Context, collection string, constraints VertexConstraints) error
}

GraphEdgeCollections provides access to all edge collections of a single graph in a database.
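
A sketch of adding an edge collection to a graph (assuming `g` is a Graph; it assumes VertexConstraints carries the From/To vertex collection names used by CreateEdgeCollection):

ctx := context.Background()
knows, err := g.CreateEdgeCollection(ctx, "knows", driver.VertexConstraints{
    From: []string{"persons"},
    To:   []string{"persons"},
})
if err != nil {
    // Handle error
}
fmt.Println("created edge collection:", knows.Name())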

type GraphVertexCollections Uses

type GraphVertexCollections interface {
    // VertexCollection opens a connection to an existing vertex-collection within the graph.
    // If no vertex-collection with given name exists, a NotFoundError is returned.
    // Note: When calling Remove on the returned Collection, the collection is removed from the graph. Not from the database.
    VertexCollection(ctx context.Context, name string) (Collection, error)

    // VertexCollectionExists returns true if a vertex-collection with given name exists within the graph.
    VertexCollectionExists(ctx context.Context, name string) (bool, error)

    // VertexCollections returns all vertex collections of this graph
    // Note: When calling Remove on any of the returned Collection's, the collection is removed from the graph. Not from the database.
    VertexCollections(ctx context.Context) ([]Collection, error)

    // CreateVertexCollection creates a vertex collection in the graph.
    // collection: The name of the vertex collection to be used.
    CreateVertexCollection(ctx context.Context, collection string) (Collection, error)
}

GraphVertexCollections provides access to all vertex collections of a single graph in a database.
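
A minimal sketch of adding a vertex collection to a graph (assuming `g` is a Graph; the collection name is a placeholder):

ctx := context.Background()
persons, err := g.CreateVertexCollection(ctx, "persons")
if err != nil {
    // Handle error
}
fmt.Println("created vertex collection:", persons.Name())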

type HTTPStats Uses

type HTTPStats struct {
    RequestsTotal   int64 `json:"requestsTotal"`
    RequestsAsync   int64 `json:"requestsAsync"`
    RequestsGet     int64 `json:"requestsGet"`
    RequestsHead    int64 `json:"requestsHead"`
    RequestsPost    int64 `json:"requestsPost"`
    RequestsPut     int64 `json:"requestsPut"`
    RequestsPatch   int64 `json:"requestsPatch"`
    RequestsDelete  int64 `json:"requestsDelete"`
    RequestsOptions int64 `json:"requestsOptions"`
    RequestsOther   int64 `json:"requestsOther"`
}

HTTPStats contains statistics about the HTTP traffic.

type ImportDocumentOptions Uses

type ImportDocumentOptions struct {
    // FromPrefix is an optional prefix for the values in _from attributes. If specified, the value is automatically
    // prepended to each _from input value. This allows specifying just the keys for _from.
    FromPrefix string `json:"fromPrefix,omitempty"`
    // ToPrefix is an optional prefix for the values in _to attributes. If specified, the value is automatically
    // prepended to each _to input value. This allows specifying just the keys for _to.
    ToPrefix string `json:"toPrefix,omitempty"`
    // Overwrite is a flag that if set, then all data in the collection will be removed prior to the import.
    // Note that any existing index definitions will be preserved.
    Overwrite bool `json:"overwrite,omitempty"`
    // OnDuplicate controls what action is carried out in case of a unique key constraint violation.
    // Possible values are:
    // - ImportOnDuplicateError
    // - ImportOnDuplicateUpdate
    // - ImportOnDuplicateReplace
    // - ImportOnDuplicateIgnore
    OnDuplicate ImportOnDuplicate `json:"onDuplicate,omitempty"`
    // Complete is a flag that if set, will make the whole import fail if any error occurs.
    // Otherwise the import will continue even if some documents cannot be imported.
    Complete bool `json:"complete,omitempty"`
}

ImportDocumentOptions holds optional options that control the import document process.
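
These options are passed to a collection's ImportDocuments call. A minimal sketch, reusing the Book type from the introductory example and assuming an existing collection handle `col`:

books := []Book{
    {Title: "ArangoDB Cookbook", NoPages: 257},
    {Title: "ArangoDB Graph Course", NoPages: 112},
}
stats, err := col.ImportDocuments(nil, books, &driver.ImportDocumentOptions{
    // Ignore documents whose key already exists instead of failing.
    OnDuplicate: driver.ImportOnDuplicateIgnore,
})
if err != nil {
    // Handle error
}
fmt.Printf("Imported %d documents, ignored %d\n", stats.Created, stats.Ignored)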

type ImportDocumentStatistics Uses

type ImportDocumentStatistics struct {
    // Created holds the number of documents imported.
    Created int64 `json:"created,omitempty"`
    // Errors holds the number of documents that were not imported due to an error.
    Errors int64 `json:"errors,omitempty"`
    // Empty holds the number of empty lines found in the input (will only contain a value greater than zero for types documents or auto).
    Empty int64 `json:"empty,omitempty"`
    // Updated holds the number of updated/replaced documents (in case onDuplicate was set to either update or replace).
    Updated int64 `json:"updated,omitempty"`
    // Ignored holds the number of failed but ignored insert operations (in case onDuplicate was set to ignore).
    Ignored int64 `json:"ignored,omitempty"`
}

ImportDocumentStatistics holds statistics of an import action.

type ImportOnDuplicate Uses

type ImportOnDuplicate string

ImportOnDuplicate is a type to control what action is carried out in case of a unique key constraint violation.

type Index Uses

type Index interface {
    // Name returns the collection specific ID of the index. This value should be used for all functions
    // that require an index _name_.
    Name() string

    // ID returns the ID of the index. Effectively this is `<collection-name>/<index.Name()>`.
    ID() string

    // UserName returns the user provided name of the index or an empty string if none is provided. This _name_
    // is used in queries to provide hints for the optimizer about preferred indexes.
    UserName() string

    // Type returns the type of the index
    Type() IndexType

    // Remove removes the entire index.
    // If the index does not exist, a NotFoundError is returned.
    Remove(ctx context.Context) error
}

Index provides access to a single index in a single collection.
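
A short sketch that lists the indexes of a collection, assuming an existing collection handle `col`:

indexes, err := col.Indexes(nil)
if err != nil {
    // Handle error
}
for _, idx := range indexes {
    fmt.Printf("Index %s: type=%s, user name=%q\n", idx.ID(), idx.Type(), idx.UserName())
}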

type IndexType Uses

type IndexType string

IndexType represents an index type as a string.

type InvalidArgumentError Uses

type InvalidArgumentError struct {
    Message string
}

InvalidArgumentError is returned when a go function argument is invalid.

func (InvalidArgumentError) Error Uses

func (e InvalidArgumentError) Error() string

Error implements the error interface for InvalidArgumentError.

type InventoryCollection Uses

type InventoryCollection struct {
    Parameters  InventoryCollectionParameters `json:"parameters"`
    Indexes     []InventoryIndex              `json:"indexes,omitempty"`
    PlanVersion int64                         `json:"planVersion,omitempty"`
    IsReady     bool                          `json:"isReady,omitempty"`
    AllInSync   bool                          `json:"allInSync,omitempty"`
}

InventoryCollection is a single element of a DatabaseInventory, containing all information of a specific collection.

func (InventoryCollection) IndexByFieldsAndType Uses

func (i InventoryCollection) IndexByFieldsAndType(fields []string, indexType string) (InventoryIndex, bool)

IndexByFieldsAndType returns the InventoryIndex with the given fields & type. Returns false if not found.

type InventoryCollectionParameters Uses

type InventoryCollectionParameters struct {
    Deleted             bool             `json:"deleted,omitempty"`
    DoCompact           bool             `json:"doCompact,omitempty"`
    ID                  string           `json:"id,omitempty"`
    IndexBuckets        int              `json:"indexBuckets,omitempty"`
    Indexes             []InventoryIndex `json:"indexes,omitempty"`
    IsSmart             bool             `json:"isSmart,omitempty"`
    SmartGraphAttribute string           `json:"smartGraphAttribute,omitempty"`
    IsSystem            bool             `json:"isSystem,omitempty"`
    IsVolatile          bool             `json:"isVolatile,omitempty"`
    JournalSize         int64            `json:"journalSize,omitempty"`
    KeyOptions          struct {
        Type          string `json:"type,omitempty"`
        AllowUserKeys bool   `json:"allowUserKeys,omitempty"`
        LastValue     int64  `json:"lastValue,omitempty"`
    }   `json:"keyOptions"`
    Name              string `json:"name,omitempty"`
    NumberOfShards    int    `json:"numberOfShards,omitempty"`
    Path              string `json:"path,omitempty"`
    PlanID            string `json:"planId,omitempty"`
    ReplicationFactor int    `json:"replicationFactor,omitempty"`
    // Deprecated: use 'WriteConcern' instead
    MinReplicationFactor int `json:"minReplicationFactor,omitempty"`
    // Available from arangod version 3.6.
    WriteConcern         int                    `json:"writeConcern,omitempty"`
    ShardKeys            []string               `json:"shardKeys,omitempty"`
    Shards               map[ShardID][]ServerID `json:"shards,omitempty"`
    Status               CollectionStatus       `json:"status,omitempty"`
    Type                 CollectionType         `json:"type,omitempty"`
    WaitForSync          bool                   `json:"waitForSync,omitempty"`
    DistributeShardsLike string                 `json:"distributeShardsLike,omitempty"`
    SmartJoinAttribute   string                 `json:"smartJoinAttribute,omitempty"`
    ShardingStrategy     ShardingStrategy       `json:"shardingStrategy,omitempty"`
}

InventoryCollectionParameters contains all configuration parameters of a collection in a database inventory.

func (*InventoryCollectionParameters) IsSatellite Uses

func (icp *InventoryCollectionParameters) IsSatellite() bool

IsSatellite returns true if the collection is a satellite collection

func (*InventoryCollectionParameters) MarshalJSON Uses

func (p *InventoryCollectionParameters) MarshalJSON() ([]byte, error)

MarshalJSON converts InventoryCollectionParameters into json

func (*InventoryCollectionParameters) UnmarshalJSON Uses

func (p *InventoryCollectionParameters) UnmarshalJSON(d []byte) error

UnmarshalJSON loads InventoryCollectionParameters from json

type InventoryIndex Uses

type InventoryIndex struct {
    ID          string   `json:"id,omitempty"`
    Type        string   `json:"type,omitempty"`
    Fields      []string `json:"fields,omitempty"`
    Unique      bool     `json:"unique"`
    Sparse      bool     `json:"sparse"`
    Deduplicate bool     `json:"deduplicate"`
    MinLength   int      `json:"minLength,omitempty"`
    GeoJSON     bool     `json:"geoJson,omitempty"`
    Name        string   `json:"name,omitempty"`
    ExpireAfter int      `json:"expireAfter,omitempty"`
}

InventoryIndex contains all configuration parameters of a single index of a collection in a database inventory.

func (InventoryIndex) FieldsEqual Uses

func (i InventoryIndex) FieldsEqual(fields []string) bool

FieldsEqual returns true when the given fields list equals the Fields list in the InventoryIndex. The order of fields is irrelevant.

type InventoryView Uses

type InventoryView struct {
    Name     string   `json:"name,omitempty"`
    Deleted  bool     `json:"deleted,omitempty"`
    ID       string   `json:"id,omitempty"`
    IsSystem bool     `json:"isSystem,omitempty"`
    PlanID   string   `json:"planId,omitempty"`
    Type     ViewType `json:"type,omitempty"`
    // Include all properties from an arangosearch view.
    ArangoSearchViewProperties
}

InventoryView is a single element of a DatabaseInventory, containing all information of a specific view.

type KeyGeneratorType Uses

type KeyGeneratorType string

KeyGeneratorType is a type of key generated, used in `CollectionKeyOptions`.

type MemoryStats Uses

type MemoryStats struct {
    ContextID    int64   `json:"contextId"`
    TMax         float64 `json:"tMax"`
    CountOfTimes int64   `json:"countOfTimes"`
    HeapMax      int64   `json:"heapMax"`
    HeapMin      int64   `json:"heapMin"`
}

MemoryStats contains statistics about memory usage.

type NoMoreDocumentsError Uses

type NoMoreDocumentsError struct{}

NoMoreDocumentsError is returned by Cursors when an attempt is made to read documents while there are no more.
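
The usual pattern is to read from a cursor until this error is returned, detecting it with driver.IsNoMoreDocuments. A sketch, assuming an existing database handle `db` and the Book type from the introductory example:

cursor, err := db.Query(nil, "FOR b IN books RETURN b", nil)
if err != nil {
    // Handle error
}
defer cursor.Close()
for {
    var book Book
    if _, err := cursor.ReadDocument(nil, &book); driver.IsNoMoreDocuments(err) {
        break
    } else if err != nil {
        // Handle other errors
    }
    fmt.Printf("Got book '%s'\n", book.Title)
}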

func (NoMoreDocumentsError) Error Uses

func (e NoMoreDocumentsError) Error() string

Error implements the error interface for NoMoreDocumentsError.

type NumberOfServersResponse Uses

type NumberOfServersResponse struct {
    NoCoordinators   int      `json:"numberOfCoordinators,omitempty"`
    NoDBServers      int      `json:"numberOfDBServers,omitempty"`
    CleanedServerIDs []string `json:"cleanedServers,omitempty"`
}

NumberOfServersResponse holds the data returned from a NumberOfServer request.

type Protocol Uses

type Protocol int
const (
    ProtocolHTTP Protocol = iota
    ProtocolVST1_0
    ProtocolVST1_1
)

type ProtocolSet Uses

type ProtocolSet []Protocol

ProtocolSet is a set of protocols.

func (ProtocolSet) Contains Uses

func (ps ProtocolSet) Contains(p Protocol) bool

Contains returns true if the given protocol is contained in the given set, false otherwise.

func (ProtocolSet) ContainsAny Uses

func (ps ProtocolSet) ContainsAny(p ...Protocol) bool

ContainsAny returns true if any of the given protocols is contained in the given set, false otherwise.

type QueryStatistics Uses

type QueryStatistics interface {
    // WritesExecuted returns the total number of data-modification operations successfully executed.
    WritesExecuted() int64
    // WritesIgnored returns the total number of data-modification operations that were unsuccessful.
    WritesIgnored() int64
    // ScannedFull returns the total number of documents iterated over when scanning a collection without an index.
    ScannedFull() int64
    // ScannedIndex returns the total number of documents iterated over when scanning a collection using an index.
    ScannedIndex() int64
    // Filtered returns the total number of documents that were removed after executing a filter condition in a FilterNode.
    Filtered() int64
    // FullCount returns the number of results before the last LIMIT in the query was applied.
    // A valid return value is only available when the query has been created with a context that was
    // prepared with `WithFullCount`. Additionally this will not return a valid value if
    // the context was prepared with `WithStream`.
    FullCount() int64
    // ExecutionTime returns the execution time of the query (wall-clock time). The value is set from the outside.
    ExecutionTime() time.Duration
}

QueryStatistics contains statistics returned with the query cursor.
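
The statistics are available from the cursor after the query has been executed. A sketch, assuming an existing database handle `db` and the Statistics accessor on the returned Cursor:

cursor, err := db.Query(nil, "FOR b IN books FILTER b.no_pages > 100 RETURN b", nil)
if err != nil {
    // Handle error
}
defer cursor.Close()
// ... read documents from the cursor as usual ...
stats := cursor.Statistics()
fmt.Printf("scanned (full): %d, scanned (index): %d, filtered: %d\n",
    stats.ScannedFull(), stats.ScannedIndex(), stats.Filtered())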

type RawObject Uses

type RawObject []byte

RawObject is a raw encoded object. Connection implementations must be able to unmarshal *RawObject into Go objects.

func (*RawObject) MarshalJSON Uses

func (r *RawObject) MarshalJSON() ([]byte, error)

MarshalJSON returns *r as the JSON encoding of r.

func (RawObject) MarshalVPack Uses

func (r RawObject) MarshalVPack() (velocypack.Slice, error)

MarshalVPack returns r as the VelocyPack encoding of r.

func (*RawObject) UnmarshalJSON Uses

func (r *RawObject) UnmarshalJSON(data []byte) error

UnmarshalJSON sets *r to a copy of data.

func (*RawObject) UnmarshalVPack Uses

func (r *RawObject) UnmarshalVPack(data velocypack.Slice) error

UnmarshalVPack sets *r to a copy of data.

type Replication Uses

type Replication interface {
    // CreateBatch creates a "batch" to prevent removal of state required for replication
    CreateBatch(ctx context.Context, db Database, serverID int64, ttl time.Duration) (Batch, error)
    // Get the inventory of the server containing all collections (with all details) of a database.
    // When this function is called on a coordinator in a cluster, the ID of a DBServer must be provided
    // using a context that is prepared with `WithDBServerID`.
    DatabaseInventory(ctx context.Context, db Database) (DatabaseInventory, error)
}

Replication provides access to replication related operations.

type Request Uses

type Request interface {
    // SetQuery sets a single query argument of the request.
    // Any existing query argument with the same key is overwritten.
    SetQuery(key, value string) Request
    // SetBody sets the content of the request.
    // The protocol of the connection determines what kind of marshalling takes place.
    // When multiple bodies are given, they are merged, with fields in the first document prevailing.
    SetBody(body ...interface{}) (Request, error)
    // SetBodyArray sets the content of the request as an array.
    // If the given mergeArray is not nil, its elements are merged with the elements in the body array (mergeArray data overrides bodyArray data).
    // The merge is NOT recursive.
    // The protocol of the connection determines what kind of marshalling takes place.
    SetBodyArray(bodyArray interface{}, mergeArray []map[string]interface{}) (Request, error)
    // SetBodyImportArray sets the content of the request as an array formatted for importing documents.
    // The protocol of the connection determines what kind of marshalling takes place.
    SetBodyImportArray(bodyArray interface{}) (Request, error)
    // SetHeader sets a single header argument of the request.
    // Any existing header argument with the same key is overwritten.
    SetHeader(key, value string) Request
    // Written returns true as soon as this request has been written completely to the network.
    // This does not guarantee that the server has received or processed the request.
    Written() bool
    // Clone creates a new request containing the same data as this request
    Clone() Request
    // Path returns the Request path
    Path() string
    // Method returns the Request method
    Method() string
}

Request represents the input to a request on the server.

type Response Uses

type Response interface {
    // StatusCode returns an HTTP compatible status code of the response.
    StatusCode() int
    // Endpoint returns the endpoint that handled the request.
    Endpoint() string
    // CheckStatus checks if the status of the response equals to one of the given status codes.
    // If so, nil is returned.
    // If not, an attempt is made to parse an error response in the body and an error is returned.
    CheckStatus(validStatusCodes ...int) error
    // Header returns the value of a response header with given key.
    // If no such header is found, an empty string is returned.
    // On nested Responses, this function will always return an empty string.
    Header(key string) string
    // ParseBody performs protocol specific unmarshalling of the response data into the given result.
    // If the given field is non-empty, the contents of that field will be parsed into the given result.
    // This can only be used for requests that return a single object.
    ParseBody(field string, result interface{}) error
    // ParseArrayBody performs protocol specific unmarshalling of the response array data into individual response objects.
    // This can only be used for requests that return an array of objects.
    ParseArrayBody() ([]Response, error)
}

Response represents the response from the server on a given request.
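
Request and Response are mostly used internally by the driver, but they can also be used to issue a raw call through a Connection. A sketch, assuming the connection `conn` created in the introduction; the path shown is illustrative and the standard context package is required:

req, err := conn.NewRequest("GET", "/_api/version")
if err != nil {
    // Handle error
}
resp, err := conn.Do(context.Background(), req)
if err != nil {
    // Handle error
}
if err := resp.CheckStatus(200); err != nil {
    // Handle unexpected status code
}
var result struct {
    Server  string `json:"server"`
    Version string `json:"version"`
}
if err := resp.ParseBody("", &result); err != nil {
    // Handle error
}
fmt.Printf("%s %s\n", result.Server, result.Version)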

type ResponseError Uses

type ResponseError struct {
    Err error
}

A ResponseError is returned when a request was completely written to a server, but the server did not respond, or some kind of network error occurred during the response.

func (*ResponseError) Error Uses

func (e *ResponseError) Error() string

Error returns the Error() result of the underlying error.

type ServerHealth Uses

type ServerHealth struct {
    Endpoint            string           `json:"Endpoint"`
    LastHeartbeatAcked  time.Time        `json:"LastHeartbeatAcked"`
    LastHeartbeatSent   time.Time        `json:"LastHeartbeatSent"`
    LastHeartbeatStatus string           `json:"LastHeartbeatStatus"`
    Role                ServerRole       `json:"Role"`
    ShortName           string           `json:"ShortName"`
    Status              ServerStatus     `json:"Status"`
    CanBeDeleted        bool             `json:"CanBeDeleted"`
    HostID              string           `json:"Host,omitempty"`
    Version             Version          `json:"Version,omitempty"`
    Engine              EngineType       `json:"Engine,omitempty"`
    SyncStatus          ServerSyncStatus `json:"SyncStatus,omitempty"`

    // Only for Coordinators
    AdvertisedEndpoint *string `json:"AdvertisedEndpoint,omitempty"`

    // Only for Agents
    Leader  *string `json:"Leader,omitempty"`
    Leading *bool   `json:"Leading,omitempty"`
}

ServerHealth contains health information of a single server in a cluster.

type ServerID Uses

type ServerID string

ServerID identifies an arangod server in a cluster.

type ServerMode Uses

type ServerMode string
const (
    // ServerModeDefault is the normal mode of the database in which read and write requests
    // are allowed.
    ServerModeDefault ServerMode = "default"
    // ServerModeReadOnly is the mode in which all modifications to the database are blocked.
    // Behavior is the same as for a user that has read-only access to all databases & collections.
    ServerModeReadOnly ServerMode = "readonly"
)

type ServerRole Uses

type ServerRole string

ServerRole is the role of an arangod server

const (
    // ServerRoleSingle indicates that the server is a single-server instance
    ServerRoleSingle ServerRole = "Single"
    // ServerRoleSingleActive indicates that the server is the leader of a single-server resilient pair
    ServerRoleSingleActive ServerRole = "SingleActive"
    // ServerRoleSinglePassive indicates that the server is a follower of a single-server resilient pair
    ServerRoleSinglePassive ServerRole = "SinglePassive"
    // ServerRoleDBServer indicates that the server is a dbserver within a cluster
    ServerRoleDBServer ServerRole = "DBServer"
    // ServerRoleCoordinator indicates that the server is a coordinator within a cluster
    ServerRoleCoordinator ServerRole = "Coordinator"
    // ServerRoleAgent indicates that the server is an agent within a cluster
    ServerRoleAgent ServerRole = "Agent"
    // ServerRoleUndefined indicates that the role of the server cannot be determined
    ServerRoleUndefined ServerRole = "Undefined"
)

type ServerStatistics Uses

type ServerStatistics struct {
    Time       float64     `json:"time"`
    Enabled    bool        `json:"enabled"`
    System     SystemStats `json:"system"`
    Client     ClientStats `json:"client"`
    ClientUser ClientStats `json:"clientUser,omitempty"`
    HTTP       HTTPStats   `json:"http"`
    Server     ServerStats `json:"server"`
}

ServerStatistics contains statistical data about the server as a whole.

type ServerStats Uses

type ServerStats struct {
    Uptime         float64          `json:"uptime"`
    PhysicalMemory int64            `json:"physicalMemory"`
    Transactions   TransactionStats `json:"transactions"`
    V8Context      V8ContextStats   `json:"v8Context"`
    Threads        ThreadStats      `json:"threads"`
}

ServerStats contains statistics about the server.

type ServerStatus Uses

type ServerStatus string

ServerStatus describes the health status of a server

const (
    // ServerStatusGood indicates the server is in a good state
    ServerStatusGood ServerStatus = "GOOD"
    // ServerStatusBad indicates the server has missed 1 heartbeat
    ServerStatusBad ServerStatus = "BAD"
    // ServerStatusFailed indicates the server has been declared failed by the supervision; this happens after about 15s of being bad.
    ServerStatusFailed ServerStatus = "FAILED"
)

type ServerSyncStatus Uses

type ServerSyncStatus string

ServerSyncStatus describes a server's sync status.

const (
    ServerSyncStatusUnknown   ServerSyncStatus = "UNKNOWN"
    ServerSyncStatusUndefined ServerSyncStatus = "UNDEFINED"
    ServerSyncStatusStartup   ServerSyncStatus = "STARTUP"
    ServerSyncStatusStopping  ServerSyncStatus = "STOPPING"
    ServerSyncStatusStopped   ServerSyncStatus = "STOPPED"
    ServerSyncStatusServing   ServerSyncStatus = "SERVING"
    ServerSyncStatusShutdown  ServerSyncStatus = "SHUTDOWN"
)

type SetCollectionPropertiesOptions Uses

type SetCollectionPropertiesOptions struct {
    // If true then creating or changing a document will wait until the data has been synchronized to disk.
    WaitForSync *bool `json:"waitForSync,omitempty"`
    // The maximal size of a journal or datafile in bytes. The value must be at least 1048576 (1 MB). Note that when changing the journalSize value, it will only have an effect for additional journals or datafiles that are created. Already existing journals or datafiles will not be affected.
    JournalSize int64 `json:"journalSize,omitempty"`
    // ReplicationFactor contains how many copies of each shard are kept on different DBServers.
    // Only available in cluster setup.
    ReplicationFactor int `json:"replicationFactor,omitempty"`
    // Deprecated: use 'WriteConcern' instead
    MinReplicationFactor int `json:"minReplicationFactor,omitempty"`
    // WriteConcern contains how many copies must be available before a collection can be written.
    // Available from arangod version 3.6.
    WriteConcern int `json:"writeConcern,omitempty"`
}

SetCollectionPropertiesOptions contains data for Collection.SetProperties.
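
A sketch of changing properties on an existing collection handle `col`; note that WaitForSync is a pointer so that "unset" can be distinguished from false:

waitForSync := true
err := col.SetProperties(nil, driver.SetCollectionPropertiesOptions{
    WaitForSync: &waitForSync,
})
if err != nil {
    // Handle error
}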

func (*SetCollectionPropertiesOptions) MarshalJSON Uses

func (p *SetCollectionPropertiesOptions) MarshalJSON() ([]byte, error)

MarshalJSON converts SetCollectionPropertiesOptions into json

func (*SetCollectionPropertiesOptions) UnmarshalJSON Uses

func (p *SetCollectionPropertiesOptions) UnmarshalJSON(d []byte) error

UnmarshalJSON loads SetCollectionPropertiesOptions from json

type ShardID Uses

type ShardID string

ShardID is an internal identifier of a specific shard

type ShardingStrategy Uses

type ShardingStrategy string

ShardingStrategy describes the sharding strategy of a collection

const (
    ShardingStrategyCommunityCompat           ShardingStrategy = "community-compat"
    ShardingStrategyEnterpriseCompat          ShardingStrategy = "enterprise-compat"
    ShardingStrategyEnterpriseSmartEdgeCompat ShardingStrategy = "enterprise-smart-edge-compat"
    ShardingStrategyHash                      ShardingStrategy = "hash"
    ShardingStrategyEnterpriseHashSmartEdge   ShardingStrategy = "enterprise-hash-smart-edge"
)

type Stats Uses

type Stats struct {
    Sum    float64 `json:"sum"`
    Count  int64   `json:"count"`
    Counts []int64 `json:"counts"`
}

Stats is used for various time-related statistics.

type SystemStats Uses

type SystemStats struct {
    MinorPageFaults     int64   `json:"minorPageFaults"`
    MajorPageFaults     int64   `json:"majorPageFaults"`
    UserTime            float64 `json:"userTime"`
    SystemTime          float64 `json:"systemTime"`
    NumberOfThreads     int64   `json:"numberOfThreads"`
    ResidentSize        int64   `json:"residentSize"`
    ResidentSizePercent float64 `json:"residentSizePercent"`
    VirtualSize         int64   `json:"virtualSize"`
}

SystemStats contains statistical data about the system, this is part of ServerStatistics.

type ThreadStats Uses

type ThreadStats struct {
    SchedulerThreads int64 `json:"scheduler-threads"`
    Blocked          int64 `json:"blocked"`
    Queued           int64 `json:"queued"`
    InProgress       int64 `json:"in-progress"`
    DirectExec       int64 `json:"direct-exec"`
}

ThreadStats contains statistics about threads.

type Tick Uses

type Tick string

Tick represents a place in the Write-Ahead Log, journals, or datafiles, as reported by the server.

type TransactionCollections Uses

type TransactionCollections struct {
    Read      []string `json:"read,omitempty"`
    Write     []string `json:"write,omitempty"`
    Exclusive []string `json:"exclusive,omitempty"`
}

TransactionCollections is used to specify which collections are accessed by a transaction and how.

type TransactionID Uses

type TransactionID string

TransactionID identifies a transaction

type TransactionOptions Uses

type TransactionOptions struct {
    // Transaction size limit in bytes. Honored by the RocksDB storage engine only.
    MaxTransactionSize int

    // An optional numeric value that can be used to set a timeout for waiting on collection
    // locks. If not specified, a default value will be used.
    // Setting lockTimeout to 0 will make ArangoDB not time out waiting for a lock.
    LockTimeout *int

    // An optional boolean flag that, if set, will force the transaction to write
    // all data to disk before returning.
    WaitForSync bool

    // Maximum number of operations after which an intermediate commit is performed
    // automatically. Honored by the RocksDB storage engine only.
    IntermediateCommitCount *int

    // Optional arguments passed to action.
    Params []interface{}

    // Maximum total size of operations after which an intermediate commit is
    // performed automatically. Honored by the RocksDB storage engine only.
    IntermediateCommitSize *int

    // Collections that the transaction reads from.
    ReadCollections []string

    // Collections that the transaction writes to.
    WriteCollections []string
}

TransactionOptions contains options that customize the transaction.
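
These options are used with a database's Transaction method, which executes a JavaScript action on the server. A minimal sketch, assuming an existing database handle `db`; the action shown is illustrative:

action := `function(params) { return params[0] * 2; }`
result, err := db.Transaction(nil, action, &driver.TransactionOptions{
    Params: []interface{}{21},
})
if err != nil {
    // Handle error
}
fmt.Printf("Transaction returned %v\n", result)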

type TransactionStats Uses

type TransactionStats struct {
    Started             int64 `json:"started"`
    Aborted             int64 `json:"aborted"`
    Committed           int64 `json:"committed"`
    IntermediateCommits int64 `json:"intermediateCommits"`
}

TransactionStats contains statistics about transactions.

type TransactionStatus Uses

type TransactionStatus string

TransactionStatus describes the status of a transaction.

const (
    TransactionRunning   TransactionStatus = "running"
    TransactionCommitted TransactionStatus = "committed"
    TransactionAborted   TransactionStatus = "aborted"
)

type TransactionStatusRecord Uses

type TransactionStatusRecord struct {
    Status TransactionStatus
}

TransactionStatusRecord provides insight into the status of a transaction.
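
For stream transactions the database handle offers BeginTransaction, CommitTransaction, AbortTransaction and TransactionStatus, and the WithTransactionID context helper binds individual operations to a transaction. A sketch under those assumptions, with existing handles `db` and `col` and the Book type from the introduction:

tid, err := db.BeginTransaction(nil, driver.TransactionCollections{
    Write: []string{"books"},
}, nil)
if err != nil {
    // Handle error
}
// Bind subsequent operations to the transaction via the context.
txCtx := driver.WithTransactionID(context.Background(), tid)
if _, err := col.CreateDocument(txCtx, Book{Title: "Written in a transaction", NoPages: 1}); err != nil {
    // Abort on failure
    _ = db.AbortTransaction(nil, tid, nil)
} else if err := db.CommitTransaction(nil, tid, nil); err != nil {
    // Handle error
}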

type User Uses

type User interface {
    // Name returns the name of the user.
    Name() string

    //  Is this an active user?
    IsActive() bool

    // Is a password change for this user needed?
    IsPasswordChangeNeeded() bool

    // Get extra information about this user that was passed during its creation/update/replacement
    Extra(result interface{}) error

    // Remove removes the user.
    // If the user does not exist, a NotFoundError is returned.
    Remove(ctx context.Context) error

    // Update updates individual properties of the user.
    // If the user does not exist, a NotFoundError is returned.
    Update(ctx context.Context, options UserOptions) error

    // Replace replaces all properties of the user.
    // If the user does not exist, a NotFoundError is returned.
    Replace(ctx context.Context, options UserOptions) error

    // AccessibleDatabases returns a list of all databases that can be accessed (read/write or read-only) by this user.
    AccessibleDatabases(ctx context.Context) ([]Database, error)

    // SetDatabaseAccess sets the access this user has to the given database.
    // Pass a `nil` database to set the default access this user has to any new database.
    // This function requires ArangoDB 3.2 and up for access value `GrantReadOnly`.
    SetDatabaseAccess(ctx context.Context, db Database, access Grant) error

    // GetDatabaseAccess gets the access rights for this user to the given database.
    // Pass a `nil` database to get the default access this user has to any new database.
    // This function requires ArangoDB 3.2 and up.
    // By default this function returns the "effective" grant.
    // To return the "configured" grant, pass a context configured with `WithConfigured`.
    // This distinction is only relevant in ArangoDB 3.3 in the context of a readonly database.
    GetDatabaseAccess(ctx context.Context, db Database) (Grant, error)

    // RemoveDatabaseAccess removes the access this user has to the given database.
    // As a result the users access falls back to its default access.
    // If you remove default access (db==`nil`) for a user (and there are no specific access
    // rules for a database), the user's access falls back to no-access.
    // Pass a `nil` database to set the default access this user has to any new database.
    // This function requires ArangoDB 3.2 and up.
    RemoveDatabaseAccess(ctx context.Context, db Database) error

    // SetCollectionAccess sets the access this user has to a collection.
    // If you pass a `Collection`, it will set access for that collection.
    // If you pass a `Database`, it will set the default collection access for that database.
    // If you pass `nil`, it will set the default collection access for the default database.
    // This function requires ArangoDB 3.2 and up.
    SetCollectionAccess(ctx context.Context, col AccessTarget, access Grant) error

    // GetCollectionAccess gets the access rights for this user to the given collection.
    // If you pass a `Collection`, it will get access for that collection.
    // If you pass a `Database`, it will get the default collection access for that database.
    // If you pass `nil`, it will get the default collection access for the default database.
    // By default this function returns the "effective" grant.
    // To return the "configured" grant, pass a context configured with `WithConfigured`.
    // This distinction is only relevant in ArangoDB 3.3 in the context of a readonly database.
    GetCollectionAccess(ctx context.Context, col AccessTarget) (Grant, error)

    // RemoveCollectionAccess removes the access this user has to a collection.
    // If you pass a `Collection`, it will remove access for that collection.
    // If you pass a `Database`, it will remove the default collection access for that database.
    // If you pass `nil`, it will remove the default collection access for the default database.
    // This function requires ArangoDB 3.2 and up.
    RemoveCollectionAccess(ctx context.Context, col AccessTarget) error

    // GrantReadWriteAccess grants this user read/write access to the given database.
    //
    // Deprecated: use GrantDatabaseReadWriteAccess instead.
    GrantReadWriteAccess(ctx context.Context, db Database) error

    // RevokeAccess revokes this user access to the given database.
    //
    // Deprecated: use `SetDatabaseAccess(ctx, db, GrantNone)` instead.
    RevokeAccess(ctx context.Context, db Database) error
}

User provides access to a single user of a single server / cluster of servers.

type UserOptions Uses

type UserOptions struct {
    // The user password as a string. If not specified, it will default to an empty string.
    Password string `json:"passwd,omitempty"`
    // A flag indicating whether the user account should be activated or not. The default value is true. If set to false, the user won't be able to log into the database.
    Active *bool `json:"active,omitempty"`
    // A JSON object with extra user information. The data contained in extra will be stored for the user but not be interpreted further by ArangoDB.
    Extra interface{} `json:"extra,omitempty"`
}

UserOptions contains options for creating a new user, updating or replacing a user.
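
A sketch of creating a user and granting it access to a database, assuming the client `c` and database handle `db` from the introductory example:

active := true
u, err := c.CreateUser(nil, "bookworm", &driver.UserOptions{
    Password: "secret",
    Active:   &active,
})
if err != nil {
    // Handle error
}
if err := u.SetDatabaseAccess(nil, db, driver.GrantReadWrite); err != nil {
    // Handle error
}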

type V8ContextStats Uses

type V8ContextStats struct {
    Available int64         `json:"available"`
    Busy      int64         `json:"busy"`
    Dirty     int64         `json:"dirty"`
    Free      int64         `json:"free"`
    Max       int64         `json:"max"`
    Memory    []MemoryStats `json:"memory"`
}

V8ContextStats contains statistics about V8 contexts.

type Version Uses

type Version string

Version holds a server version string. The string has the format "major.minor.sub". Major and minor will be numeric, and sub may contain a number or a textual version.

func (Version) CompareTo Uses

func (v Version) CompareTo(other Version) int

CompareTo returns an integer comparing two version. The result will be 0 if v==other, -1 if v < other, and +1 if v > other. If major & minor parts are equal and sub part is not a number, the sub part will be compared using lexicographical string comparison.

func (Version) Major Uses

func (v Version) Major() int

Major returns the major part of the version. E.g. "3.1.7" -> 3

func (Version) Minor Uses

func (v Version) Minor() int

Minor returns the minor part of the version. E.g. "3.1.7" -> 1

func (Version) Sub Uses

func (v Version) Sub() string

Sub returns the sub part of the version. E.g. "3.1.7" -> "7"

func (Version) SubInt Uses

func (v Version) SubInt() (int, bool)

SubInt returns the sub part of the version as an integer. The bool return value indicates if the sub part is indeed a number. E.g. "3.1.7" -> 7, true; "3.1.foo" -> 0, false.

type VersionInfo Uses

type VersionInfo struct {
    // This will always contain "arango"
    Server string `json:"server,omitempty"`
    //  The server version string. The string has the format "major.minor.sub".
    // Major and minor will be numeric, and sub may contain a number or a textual version.
    Version Version `json:"version,omitempty"`
    // Type of license of the server
    License string `json:"license,omitempty"`
    // Optional additional details. This is returned only if the context is configured using WithDetails.
    Details map[string]interface{} `json:"details,omitempty"`
}

VersionInfo describes the version of a database server.
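
A sketch of a version check, assuming the client `c` from the introductory example:

info, err := c.Version(nil)
if err != nil {
    // Handle error
}
fmt.Printf("Connected to %s\n", info.String())
if info.Version.CompareTo("3.6") >= 0 {
    // Features such as WriteConcern are available here.
}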

func (*VersionInfo) IsEnterprise Uses

func (v *VersionInfo) IsEnterprise() bool

func (VersionInfo) String Uses

func (v VersionInfo) String() string

String creates a string representation of the given VersionInfo.

type VertexConstraints Uses

type VertexConstraints struct {
    // From contains names of vertex collections that are allowed to be used in the From part of an edge.
    From []string
    // To contains names of vertex collections that are allowed to be used in the To part of an edge.
    To  []string
}

VertexConstraints limit the vertex collection you can use in an edge.

type View Uses

type View interface {
    // Name returns the name of the view.
    Name() string

    // Type returns the type of this view.
    Type() ViewType

    // ArangoSearchView returns this view as an ArangoSearch view.
    // When the type of the view is not ArangoSearch, an error is returned.
    ArangoSearchView() (ArangoSearchView, error)

    // Database returns the database containing the view.
    Database() Database

    // Remove removes the entire view.
    // If the view does not exist, a NotFoundError is returned.
    Remove(ctx context.Context) error
}

View provides access to the information of a view. Views are only available in ArangoDB 3.4 and higher.
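
A sketch that lists the views of a database and narrows ArangoSearch views down to their specific interface, assuming an existing database handle `db` on ArangoDB 3.4 or higher:

views, err := db.Views(nil)
if err != nil {
    // Handle error
}
for _, v := range views {
    fmt.Printf("View %s has type %s\n", v.Name(), v.Type())
    if v.Type() == driver.ViewTypeArangoSearch {
        asView, err := v.ArangoSearchView()
        if err != nil {
            // Handle error
        }
        _ = asView // inspect ArangoSearch specific properties here
    }
}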

type ViewType Uses

type ViewType string

ViewType is the type of a view.

Directories

Path          Synopsis
agency        Package agency provides an API to access the ArangoDB agency (it is unlikely that you need this package directly).
cluster       Package cluster implements a driver.Connection that provides cluster failover support (it is not intended to be used directly).
http          Package http implements driver.Connection using an HTTP connection.
jwt           Package jwt provides a helper function used to access ArangoDB servers using a JWT secret.
test          Package test implements tests for the go-driver.
util          Package util provides some helper methods for the go-driver (it is unlikely that you need this package directly).
vst           Package vst implements driver.Connection using a VelocyStream connection.
vst/protocol  Package protocol implements the VelocyStream protocol (it is not intended to be used directly).

Package driver imports 19 packages and is imported by 50 packages. Updated 2020-01-07.