bolt

package module
v0.0.0-...-4ebab81
Published: Nov 3, 2016 License: MIT Imports: 25 Imported by: 2

README

Golang Neo4J Bolt Driver

Tested against Golang 1.4.3 and up

Implements the Neo4J Bolt Protocol specification. As of the time of writing, the current version is v3.1.0-M02.

go get github.com/johnnadratowski/golang-neo4j-bolt-driver

Features

  • Neo4j Bolt low-level binary protocol support
  • Message Pipelining for high concurrency
  • Connection Pooling
  • TLS support
  • Compatible with sql.driver

Usage

Please see the statement tests or the conn tests for a lot of usage examples.

Examples
Quick n' Dirty
func quickNDirty() {
	driver := bolt.NewDriver()
	conn, _ := driver.OpenNeo("bolt://localhost:7687")
	defer conn.Close()

	// Start by creating a node
	result, _ := conn.ExecNeo("CREATE (n:NODE {foo: {foo}, bar: {bar}})", map[string]interface{}{"foo": 1, "bar": 2.2})
	numResult, _ := result.RowsAffected()
	fmt.Printf("CREATED ROWS: %d\n", numResult) // CREATED ROWS: 1

	// Let's get the node
	data, rowsMetadata, _, _ := conn.QueryNeoAll("MATCH (n:NODE) RETURN n.foo, n.bar", nil)
	fmt.Printf("COLUMNS: %#v\n", rowsMetadata["fields"].([]interface{}))  // COLUMNS: n.foo,n.bar
	fmt.Printf("FIELDS: %d %f\n", data[0][0].(int64), data[0][1].(float64)) // FIELDS: 1 2.2

	// Oh cool, that worked. Let's blast this baby and tell it to run a bunch of statements
	// in neo concurrently with a pipeline
	results, _ := conn.ExecPipeline([]string{
		"MATCH (n:NODE) CREATE (n)-[:REL]->(f:FOO)",
		"MATCH (n:NODE) CREATE (n)-[:REL]->(b:BAR)",
		"MATCH (n:NODE) CREATE (n)-[:REL]->(z:BAZ)",
		"MATCH (n:NODE) CREATE (n)-[:REL]->(f:FOO)",
		"MATCH (n:NODE) CREATE (n)-[:REL]->(b:BAR)",
		"MATCH (n:NODE) CREATE (n)-[:REL]->(z:BAZ)",
	}, nil, nil, nil, nil, nil, nil)
	for _, result := range results {
		numResult, _ := result.RowsAffected()
		fmt.Printf("CREATED ROWS: %d\n", numResult) // CREATED ROWS: 2 (per each iteration)
	}

	data, _, _, _ = conn.QueryNeoAll("MATCH (n:NODE)-[:REL]->(m) RETURN m", nil)
	for _, row := range data {
		fmt.Printf("NODE: %#v\n", row[0].(graph.Node)) // Prints all nodes
	}

	result, _ = conn.ExecNeo(`MATCH (n) DETACH DELETE n`, nil)
	numResult, _ = result.RowsAffected()
	fmt.Printf("Rows Deleted: %d", numResult) // Rows Deleted: 13
}
Slow n' Clean
func slowNClean() {
	driver := bolt.NewDriver()
	conn, err := driver.OpenNeo("bolt://localhost:7687")
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// Here we prepare a new statement. This gives us the flexibility to
	// cancel that statement without any request sent to Neo
	stmt, err := conn.PrepareNeo("CREATE (n:NODE {foo: {foo}, bar: {bar}})")
	if err != nil {
		panic(err)
	}

	// Executing a statement just returns summary information
	result, err := stmt.ExecNeo(map[string]interface{}{"foo": 1, "bar": 2.2})
	if err != nil {
		panic(err)
	}
	numResult, err := result.RowsAffected()
	if err != nil {
		panic(err)
	}
	fmt.Printf("CREATED ROWS: %d\n", numResult) // CREATED ROWS: 1

	// Closing the statement will also close the rows
	stmt.Close()

	// Let's get the node. Once again I can cancel this with no penalty
	stmt, err = conn.PrepareNeo("MATCH (n:NODE) RETURN n.foo, n.bar")
	if err != nil {
		panic(err)
	}

	// Even once I get the rows, if I do not consume them and close the
	// rows, Neo will discard and not send the data
	rows, err := stmt.QueryNeo(nil)
	if err != nil {
		panic(err)
	}

	// This interface allows you to consume rows one-by-one, as they
	// come off the bolt stream. This is more efficient especially
	// if you're only looking for a particular row/set of rows, as
	// you don't need to load up the entire dataset into memory
	data, _, err := rows.NextNeo()
	if err != nil {
		panic(err)
	}

	// This query only returns 1 row, so once it's done, it will return
	// the metadata associated with the query completion, along with
	// io.EOF as the error
	_, _, err = rows.NextNeo()
	if err != io.EOF {
		panic(err)
	}
	fmt.Printf("COLUMNS: %#v\n", rows.Metadata()["fields"].([]interface{})) // COLUMNS: n.foo,n.bar
	fmt.Printf("FIELDS: %d %f\n", data[0].(int64), data[1].(float64))       // FIELDS: 1 2.2

	stmt.Close()

	// Here we prepare a new pipeline statement for running multiple
	// queries concurrently
	pipeline, err := conn.PreparePipeline(
		"MATCH (n:NODE) CREATE (n)-[:REL]->(f:FOO)",
		"MATCH (n:NODE) CREATE (n)-[:REL]->(b:BAR)",
		"MATCH (n:NODE) CREATE (n)-[:REL]->(z:BAZ)",
		"MATCH (n:NODE) CREATE (n)-[:REL]->(f:FOO)",
		"MATCH (n:NODE) CREATE (n)-[:REL]->(b:BAR)",
		"MATCH (n:NODE) CREATE (n)-[:REL]->(z:BAZ)",
	)
	if err != nil {
		panic(err)
	}

	pipelineResults, err := pipeline.ExecPipeline(nil, nil, nil, nil, nil, nil)
	if err != nil {
		panic(err)
	}

	for _, result := range pipelineResults {
		numResult, _ := result.RowsAffected()
		fmt.Printf("CREATED ROWS: %d\n", numResult) // CREATED ROWS: 2 (per each iteration)
	}

	err = pipeline.Close()
	if err != nil {
		panic(err)
	}

	stmt, err = conn.PrepareNeo("MATCH path=(n:NODE)-[:REL]->(m) RETURN path")
	if err != nil {
		panic(err)
	}

	rows, err = stmt.QueryNeo(nil)
	if err != nil {
		panic(err)
	}

	// Here we loop through the rows until we get the metadata object
	// back, meaning the row stream has been fully consumed
	for err == nil {
		var row []interface{}
		row, _, err = rows.NextNeo()
		if err != nil && err != io.EOF {
			panic(err)
		} else if err != io.EOF {
			fmt.Printf("PATH: %#v\n", row[0].(graph.Path)) // Prints all paths
		}
	}

	stmt.Close()

	result, _ = conn.ExecNeo(`MATCH (n) DETACH DELETE n`, nil)
	fmt.Println(result)
	numResult, _ = result.RowsAffected()
	fmt.Printf("Rows Deleted: %d", numResult) // Rows Deleted: 13
}

API

There is much more detailed information in the godoc.

This implementation attempts to follow the best practices as per the Bolt specification, but also implements compatibility with Golang's sql.driver interface.

As such, these interfaces closely match the sql.driver interfaces, while also providing Neo4j Bolt-specific functionality on top of them.

It is recommended that you use the Neo4j Bolt-specific interfaces if possible. The implementation is more efficient and can more closely support the Neo4j Bolt feature set.

The URL format is: bolt://(user):(password)@(host):(port). The scheme must be bolt. User and password are only necessary if you are authenticating.

Connection pooling is provided out of the box with the NewDriverPool method. You can give it the maximum number of connections to have at a time.

You can get logs from the driver by setting the log level using the log package's SetLevel.
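
For example, a pooled connection might look like the sketch below. The exact NewDriverPool signature and the pool's method for checking out a connection are not documented on this page, so treat them as assumptions and check the godoc.

func pooledSketch() {
	// Assumed signature: NewDriverPool(connStr string, max int).
	pool, err := bolt.NewDriverPool("bolt://user:password@localhost:7687", 10)
	if err != nil {
		panic(err)
	}

	// Assumed method name for checking a connection out of the pool.
	conn, err := pool.OpenPool()
	if err != nil {
		panic(err)
	}
	defer conn.Close() // returns the connection to the pool

	result, err := conn.ExecNeo("CREATE (n:NODE {foo: {foo}})", map[string]interface{}{"foo": 1})
	if err != nil {
		panic(err)
	}
	rows, _ := result.RowsAffected()
	fmt.Printf("CREATED ROWS: %d\n", rows)
}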

Dev Quickstart

# Put in git hooks
ln -s ../../scripts/pre-commit .git/hooks/pre-commit
ln -s ../../scripts/pre-push .git/hooks/pre-push

# No special build steps necessary
go build

# Testing with log info and a local bolt DB, getting coverage output
BOLT_DRIVER_LOG=info NEO4J_BOLT=bolt://localhost:7687 go test -coverprofile=./tmp/cover.out -coverpkg=./... -v -race && go tool cover -html=./tmp/cover.out

# Testing with trace output for debugging
BOLT_DRIVER_LOG=trace NEO4J_BOLT=bolt://localhost:7687 go test -v -race

# Testing with running recorder to record tests for CI
BOLT_DRIVER_LOG=trace NEO4J_BOLT=bolt://localhost:7687 RECORD_OUTPUT=1 go test -v -race

The tests are written in an integration testing style. Most of them are in the statement tests, but should be made more granular in the future.

In order to get CI, I made a recorder mechanism so you don't need to run Neo4j alongside the tests on the CI server. Run the tests locally against a Neo4j instance with the RECORD_OUTPUT=1 environment variable set, and the recordings are generated in the ./recordings folder. This is necessary if the tests have changed, or if the internals have changed significantly. Installing the git hooks will run the tests automatically on push. If there are updated tests, you will need to re-run the recorder to add the new recordings and push them as well.

You need access to a running Neo4J database to develop for this project, so that you can run the tests to generate the recordings.

TODO

  • Cypher Parser to implement NumInput and pre-flight checking
  • More Tests
  • Benchmark Tests

Documentation

Overview

Package bolt implements a driver for the Neo4J Bolt Protocol.

The driver is compatible with Golang's sql.driver interface, but aims to implement a more complete feature set in line with what Neo4J and Bolt provide.

As such, there are multiple interfaces the user can choose from. It's highly recommended that the user use the Neo4J-specific interfaces as they are more flexible and efficient than the provided sql.driver compatible methods.

The interface tries to be consistent throughout. The sql.driver interfaces are standard, but the Neo4J-specific ones contain a naming convention of either "Neo" or "Pipeline".

The "Neo" ones are the basic interfaces for making queries to Neo4j and it's expected that these would be used the most.

The "Pipeline" ones are to support Bolt's pipelining features. Pipelines allow the user to send Neo4j many queries at once and have them executed by the database concurrently. This is useful if you have a bunch of queries that aren't necessarily dependant on one another, and you want to get better performance. The internal APIs will also pipeline statements where it is able to reliably do so, but by manually using the pipelining feature you can maximize your throughput.

The API provides connection pooling using the `NewDriverPool` method. This allows you to pass it the maximum number of open connections to be used in the pool. Once this limit is hit, any new clients will have to wait for a connection to become available again.

The sql driver is registered as "neo4j-bolt". The sql.driver interface is much more limited than what Bolt and Neo4j support. In some cases, concessions were made in order to make that interface work with the Neo4j way of doing things. The main instance of this is the marshalling of objects to/from the sql.driver.Value interface.

In order to support object types that aren't supported by this interface, the internal encoding package is used to marshal these objects to byte strings. This ultimately makes for a less efficient and more 'clunky' implementation.

A glaring instance of this is passing parameters. Neo4j expects named parameters but the driver interface can only really support positional parameters. To get around this, the user must create a map[string]interface{} of their parameters and marshal it to a driver.Value using the encoding.Marshal function. Similarly, the user must unmarshal data returned from the queries using the encoding.Unmarshal function, then use type assertions to retrieve the proper type.
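
As a sketch, the database/sql path looks roughly like the following. The encoding.Marshal and encoding.Unmarshal signatures are assumed from the description above; consult the encoding package's godoc for the actual API.

func sqlInterfaceSketch() {
	// Assumes: import "database/sql", a blank import of this driver package for
	// registration, and the encoding package from this repository.
	db, err := sql.Open("neo4j-bolt", "bolt://localhost:7687")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Named parameters are packed into one map and marshalled to a byte string
	// so they fit through the positional driver.Value interface.
	params, err := encoding.Marshal(map[string]interface{}{"foo": 1})
	if err != nil {
		panic(err)
	}

	rows, err := db.Query("MATCH (n:NODE) WHERE n.foo = {foo} RETURN n", params)
	if err != nil {
		panic(err)
	}
	defer rows.Close()

	for rows.Next() {
		var raw []byte
		if err := rows.Scan(&raw); err != nil {
			panic(err)
		}
		// Values that aren't native driver.Value types come back marshalled and
		// must be unmarshalled, then type-asserted (e.g. to graph.Node).
		val, _ := encoding.Unmarshal(raw)
		fmt.Printf("%#v\n", val)
	}
}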

In most cases the driver will return the data from neo as the proper go-specific types. For integers they always come back as int64 and floats always come back as float64. This is for the convenience of the user and acts similarly to go's JSON interface. This prevents the user from having to use reflection to get these values. Internally, the types are always transmitted over the wire with as few bytes as possible.

There are also cases where no go-specific type matches the returned values, such as when you query for a node, relationship, or path. The driver exposes specific structs which represent this data in the 'structures.graph' package. There are 4 types - Node, Relationship, UnboundRelationship, and Path. The driver returns interface{} objects which must have their types properly asserted to get the data out.
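
For instance, asserting a returned node might look like this small sketch; the Labels and Properties field names on graph.Node are assumptions, so check the structures/graph package for the actual definitions.

// conn is an open connection from driver.OpenNeo (error handling elided).
data, _, _, _ := conn.QueryNeoAll("MATCH (n:NODE) RETURN n LIMIT 1", nil)
if len(data) > 0 {
	node := data[0][0].(graph.Node)
	fmt.Println(node.Labels, node.Properties) // assumed field names
}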

There are some limitations to the types of collections the driver supports. Specifically, maps should always be of type map[string]interface{} and lists should always be of type []interface{}. It doesn't seem that the Bolt protocol supports uint64 either, so the biggest number it can send right now is the int64 max.
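
As a sketch, parameters containing collections would therefore be shaped like this:

// Collections in parameters must use the generic Go types noted above.
// conn is an open connection from driver.OpenNeo (error handling elided).
params := map[string]interface{}{
	"props": map[string]interface{}{"name": "example"}, // not map[string]string
	"tags":  []interface{}{"a", "b"},                   // not []string
}
if _, err := conn.ExecNeo("CREATE (n:NODE {name: {props}.name, tags: {tags}})", params); err != nil {
	panic(err)
}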

The URL format is: `bolt://(user):(password)@(host):(port)`. The scheme must be `bolt`. User and password are only necessary if you are authenticating. TLS is supported by using query parameters on the connection string, like so: `bolt://host:port?tls=true&tls_no_verify=false`

The supported query params are:

  • timeout - the number of seconds to set the connection timeout to. Defaults to 60 seconds.
  • tls - Set to 'true' or '1' if you want to use TLS encryption.
  • tls_no_verify - Set to 'true' or '1' if you want to accept any server certificate (for testing; not secure).
  • tls_ca_cert_file - path to a custom CA cert for a self-signed TLS cert.
  • tls_cert_file - path to a cert file for this client (need to verify this is processed by Neo4j).
  • tls_key_file - path to a key file for this client (need to verify this is processed by Neo4j).
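
Putting that together, a TLS connection might be opened like the sketch below; the host, credentials, and certificate path are placeholders.

driver := bolt.NewDriver()
conn, err := driver.OpenNeo(
	"bolt://user:password@localhost:7687" +
		"?timeout=30&tls=true&tls_no_verify=0&tls_ca_cert_file=/path/to/ca.pem")
if err != nil {
	panic(err)
}
defer conn.Close()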

Package sql provides a generic interface around SQL (or SQL-like) databases.

The sql package must be used in conjunction with a database driver. See https://golang.org/s/sqldrivers for a list of drivers.

For more usage examples, see the wiki page at https://golang.org/s/sqlwiki.

Index

Constants

const (
	// Version is the current version of this driver
	Version = "1.0"

	// ClientID is the id of this client
	ClientID = "GolangNeo4jBolt/" + Version
)
const (
	// HostEnv is the environment variable read to gather the host information.
	HostEnv = "BOLT_DRIVER_HOST"

	// PortEnv is the environment variable read to gather the port information.
	PortEnv = "BOLT_DRIVER_PORT"

	// UserEnv is the environment variable read to gather the username.
	UserEnv = "BOLT_DRIVER_USER"

	// PassEnv is the environment variable read to gather the password.
	PassEnv = "BOLT_DRIVER_PASS"

	// TLSEnv is the environment variable read to determine whether the Open
	// and OpenNeo methods should attempt to connect with TLS.
	TLSEnv = "BOLT_DRIVER_TLS"

	// TLSNoVerifyEnv is the environment variable read to determine whether
	// the TLS certificate's verification should be skipped.
	TLSNoVerifyEnv = "BOLT_DRIVER_NO_VERIFY"

	// TLSCACertFileEnv is the environment variable read that should contain
	// the CA certificate's path.
	TLSCACertFileEnv = "BOLT_DRIVER_TLS_CA_CERT_FILE"

	// TLSCertFileEnv is the environment variable read that should contain the
	// public key path.
	TLSCertFileEnv = "BOLT_TLS_CERT_FILE"

	// TLSKeyFileEnv is the environment variable read that should contain the
	// private key path.
	TLSKeyFileEnv = "BOLT_TLS_KEY_FILE"
)

Variables

var ErrInFailedTransaction = errors.New("bolt: Could not complete operation in a failed transaction")
var ErrTxDone = errors.New("sql: Transaction has already been committed or rolled back")

Functions

func DialOpen

func DialOpen(d Dialer, name string) (driver.Conn, error)

DialOpen opens a driver.Conn with the given Dialer and network configuration.

func Open

func Open(name string) (driver.Conn, error)

Open calls DialOpen with the default dialer.

Types

type Conn

type Conn interface {
	// Prepare returns a prepared statement, bound to this connection.
	Prepare(query string) (stmt, error)

	// Query queries using the Neo4j-specific interface.
	Query(query string, params map[string]interface{}) (rows, error)

	// Exec executes a query using the Neo4j-specific interface.
	Exec(query string, params map[string]interface{}) (Result, error)

	// Close closes the current connection, invalidating any transactions
	// and statements.
	Close() error

	// Begin starts a new transaction.
	Begin() (driver.Tx, error)

	// SetChunkSize sets the maximum chunk size for writes to Neo4j.
	SetChunkSize(uint16)

	// SetTimeout sets the read and write timeouts for the connection.
	SetTimeout(time.Duration)
}

Conn is a connection to Neo4J. Its interface is similar to that of sql.DB, with some minor modifications for ease of use. In particular, the variadic arguments in Query, Exec, etc. have been replaced with map[string]interface{}.

func DialOpenNeo

func DialOpenNeo(d Dialer, name string) (Conn, error)

DialOpenNeo opens a Conn with the given Dialer and network configuration.

func OpenNeo

func OpenNeo(name string) (Conn, error)

OpenNeo calls DialOpenNeo with the default dialer.

type DB

type DB struct {
	// contains filtered or unexported fields
}

DB is a database handle representing a pool of zero or more underlying connections. It's safe for concurrent use by multiple goroutines.

The sql package creates and frees connections automatically; it also maintains a free pool of idle connections. If the database has a concept of per-connection state, such state can only be reliably observed within a transaction. Once DB.Begin is called, the returned Tx is bound to a single connection. Once Commit or Rollback is called on the transaction, that transaction's connection is returned to DB's idle connection pool. The pool size can be controlled with SetMaxIdleConns.

func OpenPool

func OpenPool(driverName, dataSourceName string) (*DB, error)

OpenPool opens a database specified by its database driver name and a driver-specific data source name, usually consisting of at least a database name and connection information.

Most users will open a database via a driver-specific connection helper function that returns a *DB. No database drivers are included in the Go standard library. See https://golang.org/s/sqldrivers for a list of third-party drivers.

Open may just validate its arguments without creating a connection to the database. To verify that the data source name is valid, call Ping.

The returned DB is safe for concurrent use by multiple goroutines and maintains its own pool of idle connections. Thus, the OpenPool function should be called just once. It is rarely necessary to close a DB.
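
A short sketch of this entry point, assuming the driver name is the "neo4j-bolt" name registered in the Overview and using a placeholder connection string:

db, err := bolt.OpenPool("neo4j-bolt", "bolt://localhost:7687")
if err != nil {
	panic(err)
}
defer db.Close()

if err := db.Ping(); err != nil { // establishes a connection if necessary
	panic(err)
}
db.SetMaxOpenConns(10) // optionally cap the pool size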

func OpenRecorder

func OpenRecorder(name, dataSourceName string) (*DB, error)

OpenRecorder opens a *DB that records the session. Name will be the name of the gzipped JSON file containing the recorded session.

func (*DB) Begin

func (db *DB) Begin() (*Tx, error)

Begin starts a transaction. The isolation level is dependent on the driver.

func (*DB) Close

func (db *DB) Close() error

Close closes the database, releasing any open resources.

It is rare to Close a DB, as the DB handle is meant to be long-lived and shared between many goroutines.

func (*DB) Exec

func (db *DB) Exec(query string, args map[string]interface{}) (Result, error)

Exec executes a query without returning any rows. The args are for any placeholder parameters in the query.

func (*DB) Ping

func (db *DB) Ping() error

Ping verifies a connection to the database is still alive, establishing a connection if necessary.

func (*DB) Prepare

func (db *DB) Prepare(query string) (*Stmt, error)

Prepare creates a prepared statement for later queries or executions. Multiple queries or executions may be run concurrently from the returned statement. The caller must call the statement's Close method when the statement is no longer needed.

func (*DB) Query

func (db *DB) Query(query string, args map[string]interface{}) (*Rows, error)

Query executes a query that returns rows, typically a SELECT. The args are for any placeholder parameters in the query.

func (*DB) QueryRow

func (db *DB) QueryRow(query string, args map[string]interface{}) *Row

QueryRow executes a query that is expected to return at most one row. QueryRow always returns a non-nil value. Errors are deferred until Row's Scan method is called.

func (*DB) SetChunkSize

func (db *DB) SetChunkSize(size uint16)

func (*DB) SetConnMaxLifetime

func (db *DB) SetConnMaxLifetime(d time.Duration)

SetConnMaxLifetime sets the maximum amount of time a connection may be reused.

Expired connections may be closed lazily before reuse.

If d <= 0, connections are reused forever.

func (*DB) SetMaxIdleConns

func (db *DB) SetMaxIdleConns(n int)

SetMaxIdleConns sets the maximum number of connections in the idle connection pool.

If MaxOpenConns is greater than 0 but less than the new MaxIdleConns, then the new MaxIdleConns will be reduced to match the MaxOpenConns limit.

If n <= 0, no idle connections are retained.

func (*DB) SetMaxOpenConns

func (db *DB) SetMaxOpenConns(n int)

SetMaxOpenConns sets the maximum number of open connections to the database.

If MaxIdleConns is greater than 0 and the new MaxOpenConns is less than MaxIdleConns, then MaxIdleConns will be reduced to match the new MaxOpenConns limit.

If n <= 0, then there is no limit on the number of open connections. The default is 0 (unlimited).

func (*DB) Stats

func (db *DB) Stats() DBStats

Stats returns database statistics.

type DBStats

type DBStats struct {
	// OpenConnections is the number of open connections to the database.
	OpenConnections int
}

DBStats contains database statistics.

type Dialer

type Dialer interface {
	// Dial connects to the address on the named network.
	Dial(network, address string) (net.Conn, error)

	// DialTimeout acts like Dial but takes a timeout. The timeout should
	// include name resolution, if required.
	DialTimeout(network, address string, timeout time.Duration) (net.Conn, error)
}

Dialer is a generic interface for types that can dial network addresses.

func TLSDialer

func TLSDialer(caFile, certFile, keyFile string, noVerify bool) (Dialer, error)

TLSDialer returns a Dialer that is compatible with Neo4j. It can be passed to DialOpen and DialOpenNeo. It reads configuration information from environment variables, although the function parameters take precedence. noVerify will only be read from an environment variable if noVerify is false.
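
For example, a TLS dialer might be built like this sketch; the certificate paths and connection string are placeholders.

dialer, err := bolt.TLSDialer("/path/to/ca.pem", "/path/to/client.crt", "/path/to/client.key", false)
if err != nil {
	panic(err)
}

conn, err := bolt.DialOpenNeo(dialer, "bolt://user:password@localhost:7687")
if err != nil {
	panic(err)
}
defer conn.Close()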

type Event

type Event struct {
	Timestamp int64 `json:"-"`
	Event     []byte
	IsWrite   bool
	Completed bool
	Error     error
}

Event represents a single recording (read or write) event in the recorder

type Result

type Result interface {
	// LastInsertId returns the integer generated by the database
	// in response to a command. Typically this will be from an
	// "auto increment" column when inserting a new row. Not all
	// databases support this feature, and the syntax of such
	// statements varies.
	LastInsertId() (int64, error)

	// RowsAffected returns the number of rows affected by an
	// update, insert, or delete. Not every database or database
	// driver may support this.
	RowsAffected() (int64, error)

	Metadata() map[string]interface{}
}

A Result summarizes an executed SQL command.

type Row

type Row struct {
	// contains filtered or unexported fields
}

Row is the result of calling QueryRow to select a single row.

func (*Row) Scan

func (r *Row) Scan(dest ...interface{}) error

Scan copies the columns from the matched row into the values pointed at by dest. See the documentation on Rows.Scan for details. If more than one row matches the query, Scan uses the first row and discards the rest. If no row matches the query, Scan returns ErrNoRows.

type Rows

type Rows struct {
	// contains filtered or unexported fields
}

Rows is the result of a query. Its cursor starts before the first row of the result set. Use Next to advance through the rows:

rows, err := db.Query("SELECT ...")
...
defer rows.Close()
for rows.Next() {
    var id int
    var name string
    err = rows.Scan(&id, &name)
    ...
}
err = rows.Err() // get any error encountered during iteration
...

func (*Rows) Close

func (rs *Rows) Close() error

Close closes the Rows, preventing further enumeration. If Next returns false, the Rows are closed automatically and it will suffice to check the result of Err. Close is idempotent and does not affect the result of Err.

func (*Rows) Columns

func (rs *Rows) Columns() ([]string, error)

Columns returns the column names. Columns returns an error if the rows are closed, or if the rows are from QueryRow and there was a deferred error.

func (*Rows) Err

func (rs *Rows) Err() error

Err returns the error, if any, that was encountered during iteration. Err may be called after an explicit or implicit Close.

func (*Rows) Metadata

func (rs *Rows) Metadata() (map[string]interface{}, error)

Metadata returns the metadata. Next does not need to be called prior to calling Metadata. Metadata may return different values for each invocation of Next.

func (*Rows) Next

func (rs *Rows) Next() bool

Next prepares the next result row for reading with the Scan method. It returns true on success, or false if there is no next result row or an error happened while preparing it. Err should be consulted to distinguish between the two cases.

Every call to Scan, even the first one, must be preceded by a call to Next.

func (*Rows) Scan

func (rs *Rows) Scan(dest ...interface{}) error

Scan copies the columns in the current row into the values pointed at by dest. The number of values in dest must be the same as the number of columns in Rows.

Scan converts columns read from the database into the following common Go types and special types provided by the sql package:

*string
*[]byte
*int, *int8, *int16, *int32, *int64
*uint, *uint8, *uint16, *uint32, *uint64
*bool
*float32, *float64
*interface{}
*RawBytes
any type implementing Scanner (see Scanner docs)

In the most simple case, if the type of the value from the source column is an integer, bool or string type T and dest is of type *T, Scan simply assigns the value through the pointer.

Scan also converts between string and numeric types, as long as no information would be lost. While Scan stringifies all numbers scanned from numeric database columns into *string, scans into numeric types are checked for overflow. For example, a float64 with value 300 or a string with value "300" can scan into a uint16, but not into a uint8, though float64(255) or "255" can scan into a uint8. One exception is that scans of some float64 numbers to strings may lose information when stringifying. In general, scan floating point columns into *float64.

If a dest argument has type *[]byte, Scan saves in that argument a copy of the corresponding data. The copy is owned by the caller and can be modified and held indefinitely. The copy can be avoided by using an argument of type *RawBytes instead; see the documentation for RawBytes for restrictions on its use.

If an argument has type *interface{}, Scan copies the value provided by the underlying driver without conversion. When scanning from a source value of type []byte to *interface{}, a copy of the slice is made and the caller owns the result.

Source values of type time.Time may be scanned into values of type *time.Time, *interface{}, *string, or *[]byte. When converting to the latter two, time.RFC3339Nano is used.

Source values of type bool may be scanned into types *bool, *interface{}, *string, *[]byte, or *RawBytes.

For scanning into *bool, the source may be true, false, 1, 0, or string inputs parseable by strconv.ParseBool.

type Stmt

type Stmt struct {
	// contains filtered or unexported fields
}

Stmt is a prepared statement. A Stmt is safe for concurrent use by multiple goroutines.

func (*Stmt) Close

func (s *Stmt) Close() error

Close closes the statement.

func (*Stmt) Exec

func (s *Stmt) Exec(args map[string]interface{}) (Result, error)

Exec executes a prepared statement with the given arguments and returns a Result summarizing the effect of the statement.

func (*Stmt) Query

func (s *Stmt) Query(args map[string]interface{}) (*Rows, error)

Query executes a prepared query statement with the given arguments and returns the query results as a *Rows.

func (*Stmt) QueryRow

func (s *Stmt) QueryRow(args map[string]interface{}) *Row

QueryRow executes a prepared query statement with the given arguments. If an error occurs during the execution of the statement, that error will be returned by a call to Scan on the returned *Row, which is always non-nil. If the query selects no rows, the *Row's Scan will return ErrNoRows. Otherwise, the *Row's Scan scans the first selected row and discards the rest.

Example usage:

var name string
err := nameByUseridStmt.QueryRow(id).Scan(&name)

type Tx

type Tx struct {
	// contains filtered or unexported fields
}

Tx is an in-progress database transaction.

A transaction must end with a call to Commit or Rollback.

After a call to Commit or Rollback, all operations on the transaction fail with ErrTxDone.

The statements prepared for a transaction by calling the transaction's Prepare or Stmt methods are closed by the call to Commit or Rollback.
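
A minimal transaction sketch using the signatures documented below, assuming db is a *DB opened with OpenPool; the query and parameters are placeholders.

tx, err := db.Begin()
if err != nil {
	panic(err)
}

if _, err := tx.Exec("CREATE (n:NODE {foo: {foo}})", map[string]interface{}{"foo": 1}); err != nil {
	tx.Rollback() // abort the transaction on failure
	panic(err)
}

if err := tx.Commit(); err != nil { // statements prepared on tx are closed here
	panic(err)
}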

func (*Tx) Commit

func (tx *Tx) Commit() error

Commit commits the transaction.

func (*Tx) Exec

func (tx *Tx) Exec(query string, args map[string]interface{}) (Result, error)

Exec executes a query that doesn't return rows. For example: an INSERT and UPDATE.

func (*Tx) Prepare

func (tx *Tx) Prepare(query string) (*Stmt, error)

Prepare creates a prepared statement for use within a transaction.

The returned statement operates within the transaction and can no longer be used once the transaction has been committed or rolled back.

To use an existing prepared statement on this transaction, see Tx.Stmt.

func (*Tx) Query

func (tx *Tx) Query(query string, args map[string]interface{}) (*Rows, error)

Query executes a query that returns rows, typically a SELECT.

func (*Tx) QueryRow

func (tx *Tx) QueryRow(query string, args map[string]interface{}) *Row

QueryRow executes a query that is expected to return at most one row. QueryRow always returns a non-nil value. Errors are deferred until Row's Scan method is called.

func (*Tx) Rollback

func (tx *Tx) Rollback() error

Rollback aborts the transaction.

func (*Tx) Stmt

func (tx *Tx) Stmt(stmt *Stmt) *Stmt

Stmt returns a transaction-specific prepared statement from an existing statement.

Example:

updateMoney, err := db.Prepare("UPDATE balance SET money=money+? WHERE id=?")
...
tx, err := db.Begin()
...
res, err := tx.Stmt(updateMoney).Exec(123.45, 98293203)

The returned statement operates within the transaction and can no longer be used once the transaction has been committed or rolled back.

Directories

Path Synopsis
_examples
encoding: Package encoding is used to encode/decode data going to/from the bolt protocol.
structures: Package structures contains various structures which are used by the Bolt protocol.
structures/graph: Package graph contains structs that can be returned from the Neo4j Graph.
structures/messages: Package messages contains structs that represent the messages that get sent using the Bolt protocol.
