base

package
v0.0.0-...-1b51d53 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Jun 2, 2017 License: Apache-2.0 Imports: 39 Imported by: 0

Documentation

Index

Constants

View Source
const (
	TapFeedType      = "tap"
	DcpFeedType      = "dcp"
	DcpShardFeedType = "dcpshard"
)
View Source
const (
	MaxConcurrentSingleOps = 1000 // Max 1000 concurrent single bucket ops
	MaxConcurrentBulkOps   = 35   // Max 35 concurrent bulk ops
	MaxConcurrentViewOps   = 100  // Max concurrent view ops
	MaxBulkBatchSize       = 100  // Maximum number of ops per bulk call

)
View Source
const (

	// The username of the special "GUEST" user
	GuestUsername = "GUEST"
	ISO8601Format = "2006-01-02T15:04:05.000Z07:00"
)
View Source
const (
	MemcachedDataTypeJSON = 1 << iota
	MemcachedDataTypeSnappy
	MemcachedDataTypeXattr
)

Memcached binary protocol datatype bit flags (https://github.com/couchbase/memcached/blob/master/docs/BinaryProtocol.md#data-types), used in MCRequest.DataType

View Source
const (
	KMaxVbNo           = 1024 // TODO: load from cluster config
	KStableSequenceKey = "_idx_stableSeq"
)
View Source
const (
	PartitionRangeWithin = PartitionRangeCompare(iota)
	PartitionRangeBefore
	PartitionRangeAfter
	PartitionRangeUnknown
)
View Source
const (
	KIndexPartitionKey = "_idxPartitionMap"
	KIndexPrefix       = "_idx"

	KPrincipalCountKeyFormat = "_idx_p_count:%s"  // key for principal count
	KPrincipalCountKeyPrefix = "_idx_p_count:"    // key prefix for principal count
	KTotalPrincipalCountKey  = "_idx_p_count_all" // key for overall principal count
)
View Source
const (
	KTimingExpvarVbNo      = 0
	KTimingExpvarFrequency = 200
)
View Source
const (
	ViewQueryParamStale         = "stale"
	ViewQueryParamReduce        = "reduce"
	ViewQueryParamStartKey      = "startkey"
	ViewQueryParamEndKey        = "endkey"
	ViewQueryParamInclusiveEnd  = "inclusive_end"
	ViewQueryParamLimit         = "limit"
	ViewQueryParamIncludeDocs   = "include_docs" // Ignored -- see https://forums.couchbase.com/t/do-the-viewquery-options-omit-include-docs-on-purpose/12399
	ViewQueryParamDescending    = "descending"
	ViewQueryParamGroup         = "group"
	ViewQueryParamSkip          = "skip"
	ViewQueryParamGroupLevel    = "group_level"
	ViewQueryParamStartKeyDocId = "startkey_docid"
	ViewQueryParamEndKeyDocId   = "endkey_docid"
	ViewQueryParamKey           = "key"
	ViewQueryParamKeys          = "keys"
)
View Source
const (
	DefaultContinuousRetryTimeMs = 500
)
View Source
const (
	// If true, all HTTP request/response bodies will be logged.
	// Use this sparingly as it will probably dump sensitive information into the logs.
	EnableLogHTTPBodies = false
)

Variables

View Source
var (
	ErrAlreadyImported = &SGError{alreadyImported}
	ErrImportCancelled = &SGError{importCancelled}
)
View Source
var LogKeys map[string]bool

Set of LogTo() key strings that are enabled.

View Source
var StatsExpvars *expvar.Map = expvar.NewMap("syncGateway_stats")
View Source
var TimingExpvarsEnabled = false

Functions

func AddDbPathToCookie

func AddDbPathToCookie(rq *http.Request, cookie *http.Cookie)

Needed due to https://github.com/tophatch/sync_gateway/issues/1345

func CbsExpiryToTime

func CbsExpiryToTime(expiry uint32) time.Time

This function takes a CBS expiry and returns as a time

func ConvertBackQuotedStrings

func ConvertBackQuotedStrings(data []byte) []byte

Preprocesses a string containing `...`-delimited strings. Converts the backquotes into double-quotes, and escapes any literal backslashes, newlines or double-quotes within them with backslashes.

func CouchHTTPErrorName

func CouchHTTPErrorName(status int) string

Returns the standard CouchDB error string for an HTTP error status. These are important for compatibility, as some REST APIs don't show numeric statuses, only these strings.

func CouchbaseUrlWithAuth

func CouchbaseUrlWithAuth(serverUrl, username, password, bucketname string) (string, error)

func CreateRollingLogger

func CreateRollingLogger(logConfig *LogAppenderConfig)

func CreateUUID

func CreateUUID() string

Returns a cryptographically-random 160-bit number encoded as a hex string.

func DurationToCbsExpiry

func DurationToCbsExpiry(ttl time.Duration) int

This is how Couchbase Server handles document expiration times

The actual value sent may either be Unix time (number of seconds since January 1, 1970, as a 32-bit value), or a number of seconds starting from current time. In the latter case, this number of seconds may not exceed 60*60*24*30 (number of seconds in 30 days); if the number sent by a client is larger than that, the server will consider it to be real Unix time value rather than an offset from current time.

This function takes a ttl as a Duration and returns an int formatted as required by CBS expiry processing

func EnableGoCBLogging

func EnableGoCBLogging()

func EnableLogKey

func EnableLogKey(key string)

func EnableSgReplicateLogging

func EnableSgReplicateLogging()

func ErrorAsHTTPStatus

func ErrorAsHTTPStatus(err error) (int, string)

Attempts to map an error to an HTTP status code and message. Defaults to 500 if it doesn't recognize the error. Returns 200 for a nil error.

func FixJSONNumbers

func FixJSONNumbers(value interface{}) interface{}

This is a workaround for an incompatibility between Go's JSON marshaler and CouchDB. Go parses JSON numbers into float64 type, and then when it marshals float64 to JSON it uses scientific notation if the number is more than six digits long, even if it's an integer. However, CouchDB doesn't seem to like scientific notation and throws an exception. (See <https://issues.apache.org/jira/browse/COUCHDB-1670>) Thus, this function, which walks through a JSON-compatible object and converts float64 values to int64 when possible. NOTE: This function works on generic map[string]interface{}, but *not* on types based on it, like db.Body. Thus, db.Body has a special FixJSONNumbers method -- call that instead. TODO: In Go 1.1 we will be able to use a new option in the JSON parser that converts numbers to a special number type that preserves the exact formatting.

func GenerateRandomSecret

func GenerateRandomSecret() string

func GetCallersName

func GetCallersName(depth int) string

Returns a string identifying a function on the call stack. Use depth=1 for the caller of the function that calls GetCallersName, etc.

func GetLogKeys

func GetLogKeys() map[string]bool

func GetStatsVbSeqno

func GetStatsVbSeqno(stats map[string]map[string]string, maxVbno uint16, useAbsHighSeqNo bool) (uuids map[uint16]uint64, highSeqnos map[uint16]uint64, seqErr error)

func IsCasMismatch

func IsCasMismatch(bucket Bucket, err error) bool

func IsDocNotFoundError

func IsDocNotFoundError(err error) bool

Returns true if an error is a doc-not-found error

func IsFilePathWritable

func IsFilePathWritable(fp string) (bool, error)

Validates path argument is a path to a writable file

func IsKeyNotFoundError

func IsKeyNotFoundError(bucket Bucket, err error) bool

func IsPowerOfTwo

func IsPowerOfTwo(n uint16) bool

func ListenAndServeHTTP

func ListenAndServeHTTP(addr string, connLimit int, certFile *string, keyFile *string, handler http.Handler, readTimeout *int, writeTimeout *int, http2Enabled bool) error

This is like a combination of http.ListenAndServe and http.ListenAndServeTLS, which also uses ThrottledListen to limit the number of open HTTP connections.

func LoadClockCounter

func LoadClockCounter(baseKey string, bucket Bucket) (uint64, error)

Count retrieval - utility for use outside of the context of a sharded clock.

func Log

func Log(message string)

Logs a message to the console.

func LogColor

func LogColor()

func LogEnabled

func LogEnabled(key string) bool

func LogEnabledExcludingLogStar

func LogEnabledExcludingLogStar(key string) bool

func LogError

func LogError(err error) error

If the error is not nil, logs its description and the name of the calling function. Returns the input error for easy chaining.

func LogFatal

func LogFatal(format string, args ...interface{})

Logs a warning to the console, then exits the process.

func LogLevel

func LogLevel() int

func LogNoColor

func LogNoColor()

Disables ANSI color in log output.

func LogNoTime

func LogNoTime()

func LogPanic

func LogPanic(format string, args ...interface{})

Logs a warning to the console, then panics.

func LogTo

func LogTo(key string, format string, args ...interface{})

Logs a message to the console, but only if the corresponding key is true in LogKeys.

func Logf

func Logf(format string, args ...interface{})

Logs a formatted message to the console.

func MergeStringArrays

func MergeStringArrays(arrays ...[]string) (merged []string)

Concatenates and merges multiple string arrays into one, discarding all duplicates (including duplicates within a single array.) Ordering is preserved.

func ParseLogFlag

func ParseLogFlag(flag string)

Parses a comma-separated list of log keys, probably coming from an argv flag. The key "bw" is interpreted as a call to LogNoColor, not a key.

func ParseLogFlags

func ParseLogFlags(flags []string)

Parses an array of log keys, probably coming from argv flags. The key "bw" is interpreted as a call to LogNoColor, not a key.

func PrintClock

func PrintClock(clock SequenceClock) string

Clock utility functions

func RetryLoop

func RetryLoop(description string, worker RetryWorker, sleeper RetrySleeper) (error, interface{})

func SanitizeRequestURL

func SanitizeRequestURL(requestURL *url.URL) string

Replaces sensitive data from the URL query string with ******. Have to use string replacement instead of writing directly to the Values URL object, as only the URL's raw query is mutable.

func SecondsToCbsExpiry

func SecondsToCbsExpiry(ttl int) int

This function takes a ttl in seconds and returns an int formatted as required by CBS expiry processing

func SetLogLevel

func SetLogLevel(level int)

func StartDCPFeed

func StartDCPFeed(args sgbucket.TapArguments, spec BucketSpec, bucket Bucket) (sgbucket.TapFeed, error)

This starts a cbdatasource powered DCP Feed using an entirely separate connection to Couchbase Server than anything the existing bucket is using, and it uses the go-couchbase cbdatasource DCP abstraction layer

func SyncSourceFromURL

func SyncSourceFromURL(u *url.URL) string

Returns a URL formatted string which excludes the path, query and fragment. This is used by _replicate to split the single URL passed in a CouchDB style request into a source URL and a database name as used in sg_replicate.

func TEMP

func TEMP(format string, args ...interface{})

Logs a highlighted message prefixed with "TEMP". This function is intended for temporary logging calls added during development and not to be checked in, hence its distinctive name (which is visible and easy to search for before committing.)

func ThrottledListen

func ThrottledListen(protocol string, addr string, limit int) (net.Listener, error)

Equivalent to net.Listen except that the returned listener allows only a limited number of open connections at a time. When the limit is reached it will block until some are closed before accepting any more. If the 'limit' parameter is 0, there is no limit and the behavior is identical to net.Listen.

func ToInt64

func ToInt64(value interface{}) (int64, bool)

func TransformBucketCredentials

func TransformBucketCredentials(inputUsername, inputPassword, inputBucketname string) (username, password, bucketname string)

This transforms raw input bucket credentials (for example, from config), to input credentials expected by Couchbase server, based on a few rules

func UnitTestUrl

func UnitTestUrl() string

func UpdateLogKeys

func UpdateLogKeys(keys map[string]bool, replace bool)

func UpdateLogger

func UpdateLogger(logFilePath string)

func VBHash

func VBHash(key string, numVb int) uint32

VBHash finds the vbucket for the given key.

func ValueToStringArray

func ValueToStringArray(value interface{}) []string

Convert string or array into a string array, otherwise return nil

func VerifyBucketSequenceParity

func VerifyBucketSequenceParity(indexBucketStableClock SequenceClock, bucket Bucket) error

Make sure that the index bucket and data bucket have correct sequence parity See https://github.com/tophatch/sync_gateway/issues/1133 for more details

func Warn

func Warn(format string, args ...interface{})

Logs a warning to the console

func WriteCasJSON

func WriteCasJSON(bucket Bucket, key string, value interface{}, cas uint64, exp int, callback func(v interface{}) (interface{}, error)) (casOut uint64, err error)

func WriteCasRaw

func WriteCasRaw(bucket Bucket, key string, value []byte, cas uint64, exp int, callback func([]byte) ([]byte, error)) (casOut uint64, err error)

func WriteHistogram

func WriteHistogram(expvarMap *expvar.Map, since time.Time, prefix string)

func WriteHistogramForDuration

func WriteHistogramForDuration(expvarMap *expvar.Map, duration time.Duration, prefix string)

Types

type ActiveTask

type ActiveTask struct {
	TaskType         string      `json:"type"`
	ReplicationID    string      `json:"replication_id"`
	Continuous       bool        `json:"continuous"`
	Source           string      `json:"source"`
	Target           string      `json:"target"`
	DocsRead         uint32      `json:"docs_read"`
	DocsWritten      uint32      `json:"docs_written"`
	DocWriteFailures uint32      `json:"doc_write_failures"`
	StartLastSeq     uint32      `json:"start_last_seq"`
	EndLastSeq       interface{} `json:"end_last_seq"`
}

type AuthHandler

type AuthHandler couchbase.AuthHandler

func UnitTestAuthHandler

func UnitTestAuthHandler() AuthHandler

type BinaryDocument

type BinaryDocument []byte

BinaryDocument is a type alias that allows SGTranscoder to differentiate between documents that are intended to be written as binary docs, versus json documents that are being sent as raw bytes. Some additional context here: https://play.golang.org/p/p4fkKiZD59

type Bucket

type Bucket sgbucket.Bucket

TODO: unalias these and just pass around sgbucket.X everywhere

func GetBucket

func GetBucket(spec BucketSpec, callback sgbucket.BucketNotifyFn) (bucket Bucket, err error)

func NewLeakyBucket

func NewLeakyBucket(bucket Bucket, config LeakyBucketConfig) Bucket

type BucketSpec

type BucketSpec struct {
	Server, PoolName, BucketName, FeedType string
	Auth                                   AuthHandler
	CouchbaseDriver                        CouchbaseDriver
	MaxNumRetries                          int  // max number of retries before giving up
	InitialRetrySleepTimeMS                int  // the initial time to sleep in between retry attempts (in millisecond), which will double each retry
	UseXattrs                              bool // Whether to use xattrs to store _sync metadata.  Used during view initialization
}

Full specification of how to connect to a bucket

func (BucketSpec) RetrySleeper

func (spec BucketSpec) RetrySleeper() RetrySleeper

Create a RetrySleeper based on the bucket spec properties. Used to retry bucket operations after transient errors.

type CompareResult

type CompareResult int

Priority of a journal message

const (
	CompareLessThan CompareResult = iota - 1
	CompareEquals
	CompareGreaterThan
)

func CompareVbAndSequence

func CompareVbAndSequence(vb1 uint16, s1 uint64, vb2 uint16, s2 uint64) CompareResult

Compares based on vbno, then sequence. Returns 0 if identical, 1 if s1 > s2, -1 if s1 < s2

func CompareVbSequence

func CompareVbSequence(s1, s2 VbSeq) CompareResult

Compares based on vbno, then sequence. Returns 0 if identical, 1 if s1 > s2, -1 if s1 < s2

type CouchbaseBucket

type CouchbaseBucket struct {
	*couchbase.Bucket // the underlying go-couchbase bucket
	// contains filtered or unexported fields
}

Implementation of sgbucket.Bucket that talks to a Couchbase server

func GetCouchbaseBucket

func GetCouchbaseBucket(spec BucketSpec, callback sgbucket.BucketNotifyFn) (bucket *CouchbaseBucket, err error)

Creates a Bucket that talks to a real live Couchbase server.

func (CouchbaseBucket) CouchbaseServerVersion

func (bucket CouchbaseBucket) CouchbaseServerVersion() (major uint64, minor uint64, micro string, err error)

func (CouchbaseBucket) DeleteWithXattr

func (bucket CouchbaseBucket) DeleteWithXattr(k string, xattr string) error

func (CouchbaseBucket) Dump

func (bucket CouchbaseBucket) Dump()

func (CouchbaseBucket) Get

func (bucket CouchbaseBucket) Get(k string, v interface{}) (cas uint64, err error)

func (CouchbaseBucket) GetMaxVbno

func (bucket CouchbaseBucket) GetMaxVbno() (uint16, error)

func (CouchbaseBucket) GetName

func (bucket CouchbaseBucket) GetName() string

func (CouchbaseBucket) GetRaw

func (bucket CouchbaseBucket) GetRaw(k string) (v []byte, cas uint64, err error)

func (CouchbaseBucket) GetStatsVbSeqno

func (bucket CouchbaseBucket) GetStatsVbSeqno(maxVbno uint16, useAbsHighSeqNo bool) (uuids map[uint16]uint64, highSeqnos map[uint16]uint64, seqErr error)

Goes out to the bucket and gets the high sequence number for all vbuckets and returns a map of UUIDS and a map of high sequence numbers (map from vbno -> seq)

func (CouchbaseBucket) GetWithXattr

func (bucket CouchbaseBucket) GetWithXattr(k string, xattr string, rv interface{}, xv interface{}) (cas uint64, err error)

func (CouchbaseBucket) Remove

func (bucket CouchbaseBucket) Remove(k string, cas uint64) (casOut uint64, err error)

func (CouchbaseBucket) SetBulk

func (bucket CouchbaseBucket) SetBulk(entries []*sgbucket.BulkSetEntry) (err error)

func (CouchbaseBucket) StartCouchbaseTapFeed

func (bucket CouchbaseBucket) StartCouchbaseTapFeed(args sgbucket.TapArguments) (sgbucket.TapFeed, error)

func (CouchbaseBucket) StartTapFeed

func (bucket CouchbaseBucket) StartTapFeed(args sgbucket.TapArguments) (sgbucket.TapFeed, error)

TODO: change to StartMutationFeed

func (CouchbaseBucket) UUID

func (bucket CouchbaseBucket) UUID() (string, error)

func (CouchbaseBucket) Update

func (bucket CouchbaseBucket) Update(k string, exp int, callback sgbucket.UpdateFunc) error

func (CouchbaseBucket) View

func (bucket CouchbaseBucket) View(ddoc, name string, params map[string]interface{}) (sgbucket.ViewResult, error)

func (CouchbaseBucket) Write

func (bucket CouchbaseBucket) Write(k string, flags int, exp int, v interface{}, opt sgbucket.WriteOptions) (err error)

func (CouchbaseBucket) WriteCas

func (bucket CouchbaseBucket) WriteCas(k string, flags int, exp int, cas uint64, v interface{}, opt sgbucket.WriteOptions) (casOut uint64, err error)

func (CouchbaseBucket) WriteCasWithXattr

func (bucket CouchbaseBucket) WriteCasWithXattr(k string, xattr string, exp int, cas uint64, v interface{}, xv interface{}) (casOut uint64, err error)

func (CouchbaseBucket) WriteUpdate

func (bucket CouchbaseBucket) WriteUpdate(k string, exp int, callback sgbucket.WriteUpdateFunc) error

func (CouchbaseBucket) WriteUpdateWithXattr

func (bucket CouchbaseBucket) WriteUpdateWithXattr(k string, xattr string, exp int, callback sgbucket.WriteUpdateWithXattrFunc) (casOut uint64, err error)

type CouchbaseBucketGoCB

type CouchbaseBucketGoCB struct {
	*gocb.Bucket // the underlying gocb bucket
	// contains filtered or unexported fields
}

Implementation of sgbucket.Bucket that talks to a Couchbase server and uses gocb

func GetCouchbaseBucketGoCB

func GetCouchbaseBucketGoCB(spec BucketSpec) (bucket *CouchbaseBucketGoCB, err error)

Creates a Bucket that talks to a real live Couchbase server.

func GetGoCBBucketFromBaseBucket

func GetGoCBBucketFromBaseBucket(baseBucket Bucket) (bucket CouchbaseBucketGoCB, err error)

func (CouchbaseBucketGoCB) Add

func (bucket CouchbaseBucketGoCB) Add(k string, exp int, v interface{}) (added bool, err error)

func (CouchbaseBucketGoCB) AddRaw

func (bucket CouchbaseBucketGoCB) AddRaw(k string, exp int, v []byte) (added bool, err error)

GoCB AddRaw writes as BinaryDocument, which results in the document having the binary doc common flag set. Callers that want to write JSON documents as raw bytes should pass v as []byte to the standard bucket.Add

func (CouchbaseBucketGoCB) Append

func (bucket CouchbaseBucketGoCB) Append(k string, data []byte) error

func (CouchbaseBucketGoCB) Close

func (bucket CouchbaseBucketGoCB) Close()

func (CouchbaseBucketGoCB) CouchbaseServerVersion

func (bucket CouchbaseBucketGoCB) CouchbaseServerVersion() (major uint64, minor uint64, micro string, err error)

func (CouchbaseBucketGoCB) Delete

func (bucket CouchbaseBucketGoCB) Delete(k string) error

func (CouchbaseBucketGoCB) DeleteDDoc

func (bucket CouchbaseBucketGoCB) DeleteDDoc(docname string) error

func (CouchbaseBucketGoCB) DeleteWithXattr

func (bucket CouchbaseBucketGoCB) DeleteWithXattr(k string, xattrKey string) error

Delete a document and its associated named xattr. Couchbase server will preserve system xattrs as part of the (CBS) tombstone when a document is deleted. To remove the system xattr as well, an explicit subdoc delete operation is required.

func (CouchbaseBucketGoCB) Dump

func (bucket CouchbaseBucketGoCB) Dump()

func (CouchbaseBucketGoCB) Get

func (bucket CouchbaseBucketGoCB) Get(k string, rv interface{}) (cas uint64, err error)

func (CouchbaseBucketGoCB) GetAndTouchRaw

func (bucket CouchbaseBucketGoCB) GetAndTouchRaw(k string, exp int) (rv []byte, cas uint64, err error)

func (CouchbaseBucketGoCB) GetBucketCredentials

func (bucket CouchbaseBucketGoCB) GetBucketCredentials() (username, password string)

func (CouchbaseBucketGoCB) GetBulkCounters

func (bucket CouchbaseBucketGoCB) GetBulkCounters(keys []string) (map[string]uint64, error)

Retrieve keys in bulk for increased efficiency. If any keys are not found, they will not be returned, and so the size of the map may be less than the size of the keys slice, and no error will be returned in that case since it's an expected situation.

If there is an "overall error" calling the underlying GoCB bulk operation, then that error will be returned.

If there are errors on individual keys -- aside from "not found" errors -- such as QueueOverflow errors that can be retried successfully, they will be retried with a backoff loop.

func (CouchbaseBucketGoCB) GetBulkRaw

func (bucket CouchbaseBucketGoCB) GetBulkRaw(keys []string) (map[string][]byte, error)

Retrieve keys in bulk for increased efficiency. If any keys are not found, they will not be returned, and so the size of the map may be less than the size of the keys slice, and no error will be returned in that case since it's an expected situation.

If there is an "overall error" calling the underlying GoCB bulk operation, then that error will be returned.

If there are errors on individual keys -- aside from "not found" errors -- such as QueueOverflow errors that can be retried successfully, they will be retried with a backoff loop.

func (CouchbaseBucketGoCB) GetDDoc

func (bucket CouchbaseBucketGoCB) GetDDoc(docname string, into interface{}) error

func (CouchbaseBucketGoCB) GetMaxVbno

func (bucket CouchbaseBucketGoCB) GetMaxVbno() (uint16, error)

func (CouchbaseBucketGoCB) GetName

func (bucket CouchbaseBucketGoCB) GetName() string

func (CouchbaseBucketGoCB) GetRaw

func (bucket CouchbaseBucketGoCB) GetRaw(k string) (rv []byte, cas uint64, err error)

func (CouchbaseBucketGoCB) GetStatsVbSeqno

func (bucket CouchbaseBucketGoCB) GetStatsVbSeqno(maxVbno uint16, useAbsHighSeqNo bool) (uuids map[uint16]uint64, highSeqnos map[uint16]uint64, seqErr error)

func (CouchbaseBucketGoCB) GetWithXattr

func (bucket CouchbaseBucketGoCB) GetWithXattr(k string, xattrKey string, rv interface{}, xv interface{}) (cas uint64, err error)

Retrieve a document and its associated named xattr

func (CouchbaseBucketGoCB) Incr

func (bucket CouchbaseBucketGoCB) Incr(k string, amt, def uint64, exp int) (uint64, error)

func (CouchbaseBucketGoCB) IsKeyNotFoundError

func (bucket CouchbaseBucketGoCB) IsKeyNotFoundError(err error) bool

func (CouchbaseBucketGoCB) PutDDoc

func (bucket CouchbaseBucketGoCB) PutDDoc(docname string, value interface{}) error

func (CouchbaseBucketGoCB) Refresh

func (bucket CouchbaseBucketGoCB) Refresh() error

This is a "better-than-nothing" version of Refresh(). See https://forums.couchbase.com/t/equivalent-of-go-couchbase-bucket-refresh/12498/2

func (CouchbaseBucketGoCB) Remove

func (bucket CouchbaseBucketGoCB) Remove(k string, cas uint64) (casOut uint64, err error)

func (CouchbaseBucketGoCB) Set

func (bucket CouchbaseBucketGoCB) Set(k string, exp int, v interface{}) error

func (CouchbaseBucketGoCB) SetBulk

func (bucket CouchbaseBucketGoCB) SetBulk(entries []*sgbucket.BulkSetEntry) (err error)

Retry up to the retry limit, then return. Does not retry items if they had CAS failures, and it's up to the caller to handle those.

func (CouchbaseBucketGoCB) SetRaw

func (bucket CouchbaseBucketGoCB) SetRaw(k string, exp int, v []byte) error

func (CouchbaseBucketGoCB) StartTapFeed

func (bucket CouchbaseBucketGoCB) StartTapFeed(args sgbucket.TapArguments) (sgbucket.TapFeed, error)

TODO: Change to StartMutationFeed

func (CouchbaseBucketGoCB) UUID

func (bucket CouchbaseBucketGoCB) UUID() (string, error)

func (CouchbaseBucketGoCB) Update

func (bucket CouchbaseBucketGoCB) Update(k string, exp int, callback sgbucket.UpdateFunc) error

func (CouchbaseBucketGoCB) VBHash

func (bucket CouchbaseBucketGoCB) VBHash(docID string) uint32

func (CouchbaseBucketGoCB) View

func (bucket CouchbaseBucketGoCB) View(ddoc, name string, params map[string]interface{}) (sgbucket.ViewResult, error)

func (CouchbaseBucketGoCB) ViewCustom

func (bucket CouchbaseBucketGoCB) ViewCustom(ddoc, name string, params map[string]interface{}, vres interface{}) error

func (CouchbaseBucketGoCB) Write

func (bucket CouchbaseBucketGoCB) Write(k string, flags int, exp int, v interface{}, opt sgbucket.WriteOptions) error

func (CouchbaseBucketGoCB) WriteCas

func (bucket CouchbaseBucketGoCB) WriteCas(k string, flags int, exp int, cas uint64, v interface{}, opt sgbucket.WriteOptions) (casOut uint64, err error)

func (CouchbaseBucketGoCB) WriteCasWithXattr

func (bucket CouchbaseBucketGoCB) WriteCasWithXattr(k string, xattrKey string, exp int, cas uint64, v interface{}, xv interface{}) (casOut uint64, err error)

CAS-safe write of a document and its associated named xattr

func (CouchbaseBucketGoCB) WriteUpdate

func (bucket CouchbaseBucketGoCB) WriteUpdate(k string, exp int, callback sgbucket.WriteUpdateFunc) error

func (CouchbaseBucketGoCB) WriteUpdateWithXattr

func (bucket CouchbaseBucketGoCB) WriteUpdateWithXattr(k string, xattrKey string, exp int, callback sgbucket.WriteUpdateWithXattrFunc) (casOut uint64, err error)

type CouchbaseBucketType

type CouchbaseBucketType int
const (
	DataBucket CouchbaseBucketType = iota
	IndexBucket
)

type CouchbaseDriver

type CouchbaseDriver int
const (
	GoCouchbase            CouchbaseDriver = iota
	GoCB                                   // Use GoCB driver with default Transcoder
	GoCBCustomSGTranscoder                 // Use GoCB driver with a custom Transcoder
)

func ChooseCouchbaseDriver

func ChooseCouchbaseDriver(bucketType CouchbaseBucketType) CouchbaseDriver

func (CouchbaseDriver) String

func (couchbaseDriver CouchbaseDriver) String() string

type DCPLoggingReceiver

type DCPLoggingReceiver struct {
	// contains filtered or unexported fields
}

DCPReceiver implements cbdatasource.Receiver to manage updates coming from a cbdatasource BucketDataSource. See go-couchbase/cbdatasource for additional details

func (*DCPLoggingReceiver) DataDelete

func (r *DCPLoggingReceiver) DataDelete(vbucketId uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error

func (*DCPLoggingReceiver) DataUpdate

func (r *DCPLoggingReceiver) DataUpdate(vbucketId uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error

func (*DCPLoggingReceiver) GetBucketNotifyFn

func (r *DCPLoggingReceiver) GetBucketNotifyFn() sgbucket.BucketNotifyFn

func (*DCPLoggingReceiver) GetEventFeed

func (r *DCPLoggingReceiver) GetEventFeed() <-chan sgbucket.TapEvent

func (*DCPLoggingReceiver) GetMetaData

func (r *DCPLoggingReceiver) GetMetaData(vbucketId uint16) (
	value []byte, lastSeq uint64, err error)

func (*DCPLoggingReceiver) GetOutput

func (r *DCPLoggingReceiver) GetOutput() chan sgbucket.TapEvent

func (*DCPLoggingReceiver) OnError

func (r *DCPLoggingReceiver) OnError(err error)

func (*DCPLoggingReceiver) Rollback

func (r *DCPLoggingReceiver) Rollback(vbucketId uint16, rollbackSeq uint64) error

func (*DCPLoggingReceiver) SeedSeqnos

func (r *DCPLoggingReceiver) SeedSeqnos(uuids map[uint16]uint64, seqs map[uint16]uint64)

func (*DCPLoggingReceiver) SetBucketNotifyFn

func (r *DCPLoggingReceiver) SetBucketNotifyFn(notify sgbucket.BucketNotifyFn)

func (*DCPLoggingReceiver) SetEventFeed

func (r *DCPLoggingReceiver) SetEventFeed(c chan sgbucket.TapEvent)

func (*DCPLoggingReceiver) SetMetaData

func (r *DCPLoggingReceiver) SetMetaData(vbucketId uint16, value []byte) error

func (*DCPLoggingReceiver) SnapshotStart

func (r *DCPLoggingReceiver) SnapshotStart(vbucketId uint16,
	snapStart, snapEnd uint64, snapType uint32) error

type DCPReceiver

type DCPReceiver struct {
	// contains filtered or unexported fields
}

DCPReceiver implements cbdatasource.Receiver to manage updates coming from a cbdatasource BucketDataSource. See go-couchbase/cbdatasource for additional details

func (*DCPReceiver) DataDelete

func (r *DCPReceiver) DataDelete(vbucketId uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error

func (*DCPReceiver) DataUpdate

func (r *DCPReceiver) DataUpdate(vbucketId uint16, key []byte, seq uint64,
	req *gomemcached.MCRequest) error

func (*DCPReceiver) GetBucketNotifyFn

func (r *DCPReceiver) GetBucketNotifyFn() sgbucket.BucketNotifyFn

func (*DCPReceiver) GetEventFeed

func (r *DCPReceiver) GetEventFeed() <-chan sgbucket.TapEvent

func (*DCPReceiver) GetMetaData

func (r *DCPReceiver) GetMetaData(vbucketId uint16) (
	value []byte, lastSeq uint64, err error)

func (*DCPReceiver) GetOutput

func (r *DCPReceiver) GetOutput() chan sgbucket.TapEvent

func (*DCPReceiver) OnError

func (r *DCPReceiver) OnError(err error)

func (*DCPReceiver) Rollback

func (r *DCPReceiver) Rollback(vbucketId uint16, rollbackSeq uint64) error

Until we have CBL client support for rollback, we just rollback the sequence for the vbucket to unblock the DCP stream.

func (*DCPReceiver) SeedSeqnos

func (r *DCPReceiver) SeedSeqnos(uuids map[uint16]uint64, seqs map[uint16]uint64)

Seeds the sequence numbers returned by GetMetadata to support starting DCP from a particular sequence.

func (*DCPReceiver) SetBucketNotifyFn

func (r *DCPReceiver) SetBucketNotifyFn(notify sgbucket.BucketNotifyFn)

func (*DCPReceiver) SetEventFeed

func (r *DCPReceiver) SetEventFeed(c chan sgbucket.TapEvent)

func (*DCPReceiver) SetMetaData

func (r *DCPReceiver) SetMetaData(vbucketId uint16, value []byte) error

SetMetaData and GetMetaData used internally by cbdatasource. Expects send/receive of opaque []byte data. cbdatasource is multithreaded so need to manage synchronization

func (*DCPReceiver) SnapshotStart

func (r *DCPReceiver) SnapshotStart(vbucketId uint16,
	snapStart, snapEnd uint64, snapType uint32) error

type DebugIntMeanVar

type DebugIntMeanVar struct {
	// contains filtered or unexported fields
}

func (*DebugIntMeanVar) AddSince

func (d *DebugIntMeanVar) AddSince(start time.Time)

func (*DebugIntMeanVar) AddValue

func (d *DebugIntMeanVar) AddValue(value int64)

func (*DebugIntMeanVar) String

func (d *DebugIntMeanVar) String() string

type EventUpdateFunc

type EventUpdateFunc func(event *sgbucket.TapEvent) bool

type GoCBLogger

type GoCBLogger struct{}

func (GoCBLogger) Log

func (l GoCBLogger) Log(level gocbcore.LogLevel, offset int, format string, v ...interface{}) error

type HTTPError

type HTTPError struct {
	Status  int
	Message string
}

Simple error implementation wrapping an HTTP response status.

func HTTPErrorf

func HTTPErrorf(status int, format string, args ...interface{}) *HTTPError

func (*HTTPError) Error

func (err *HTTPError) Error() string

type IndexPartitionMap

type IndexPartitionMap []uint16 // Maps vbuckets to index partition value

type IndexPartitions

type IndexPartitions struct {
	PartitionDefs  PartitionStorageSet      // Partition definitions, as stored in bucket _idxPartitionMap
	VbMap          IndexPartitionMap        // Map from vbucket to partition
	VbPositionMaps map[uint16]VbPositionMap // VBPositionMaps, keyed by partition
}

func NewIndexPartitions

func NewIndexPartitions(partitions PartitionStorageSet) *IndexPartitions

func (IndexPartitions) PartitionCount

func (i IndexPartitions) PartitionCount() int

func (*IndexPartitions) PartitionForVb

func (i *IndexPartitions) PartitionForVb(vbNo uint16) uint16

Returns the partition the vb is assigned to

type IndexablePartitionClock

type IndexablePartitionClock struct {
	Cas            uint64
	Key            string
	PartitionClock PartitionClock
	// contains filtered or unexported fields
}

IndexablePartitionClock is used to persist clocks as JSON. Used for rollback view query.

func NewIndexablePartitionClock

func NewIndexablePartitionClock(key string, channelName string) *IndexablePartitionClock

func (*IndexablePartitionClock) MarshalJSON

func (i *IndexablePartitionClock) MarshalJSON() ([]byte, error)

func (*IndexablePartitionClock) UnmarshalJSON

func (i *IndexablePartitionClock) UnmarshalJSON(data []byte) error

func (*IndexablePartitionClock) Update

func (s *IndexablePartitionClock) Update(clock PartitionClock, allowRollback bool) (changed bool)

Updates clock from another clock. Will not lower sequence numbers (in case of concurrent writers) - logs warning in this scenario.

type IndexablePartitionClockStorage

type IndexablePartitionClockStorage struct {
	Channel string   `json:"channel"`
	VbNos   []uint16 `json:"vb"`
	Seqs    []uint64 `json:"seq"`
}

type IntMax

type IntMax struct {
	// contains filtered or unexported fields
}

IntMax is an expvar.Value that tracks the maximum value it's given.

func (*IntMax) SetIfMax

func (v *IntMax) SetIfMax(value int64)

func (*IntMax) String

func (v *IntMax) String() string

type IntMeanVar

type IntMeanVar struct {
	// contains filtered or unexported fields
}

IntMeanVar is an expvar.Value that returns the mean of all values that are sent via AddValue or AddSince.

func (*IntMeanVar) AddSince

func (v *IntMeanVar) AddSince(start time.Time)

func (*IntMeanVar) AddValue

func (v *IntMeanVar) AddValue(value int64)

Adds value. Calculates new mean as iterative mean (avoids int overflow)

func (*IntMeanVar) String

func (v *IntMeanVar) String() string

type IntRollingMeanVar

type IntRollingMeanVar struct {
	// contains filtered or unexported fields
}

IntRollingMeanVar is an expvar.Value that returns the mean of the [size] latest values sent via AddValue. Uses a slice to track values, so setting a large size has memory implications

func NewIntRollingMeanVar

func NewIntRollingMeanVar(capacity int) IntRollingMeanVar

func (*IntRollingMeanVar) AddSince

func (v *IntRollingMeanVar) AddSince(start time.Time)

func (*IntRollingMeanVar) AddSincePerItem

func (v *IntRollingMeanVar) AddSincePerItem(start time.Time, numItems int)

func (*IntRollingMeanVar) AddValue

func (v *IntRollingMeanVar) AddValue(value int64)

Adds value

func (*IntRollingMeanVar) String

func (v *IntRollingMeanVar) String() string

type LRUCache

type LRUCache struct {
	// contains filtered or unexported fields
}

An LRU cache of document revision bodies, together with their channel access.

func NewLRUCache

func NewLRUCache(capacity int) (*LRUCache, error)

Creates an LRU cache with the given capacity and an optional loader function.

func (*LRUCache) Count

func (lc *LRUCache) Count() int

func (*LRUCache) Get

func (lc *LRUCache) Get(key string) (result interface{}, found bool)

Looks up an entry from the cache.

func (*LRUCache) Put

func (lc *LRUCache) Put(key string, value interface{})

Adds an entry to the cache.

type LRUCacheLoaderFunc

type LRUCacheLoaderFunc func(key string) (value interface{}, err error)

type LeakyBucket

type LeakyBucket struct {
	// contains filtered or unexported fields
}

A wrapper around a Bucket to support forced errors. For testing use only.

func (*LeakyBucket) Add

func (b *LeakyBucket) Add(k string, exp int, v interface{}) (added bool, err error)

func (*LeakyBucket) AddRaw

func (b *LeakyBucket) AddRaw(k string, exp int, v []byte) (added bool, err error)

func (*LeakyBucket) Append

func (b *LeakyBucket) Append(k string, data []byte) error

func (*LeakyBucket) Close

func (b *LeakyBucket) Close()

func (*LeakyBucket) CloseAndDelete

func (b *LeakyBucket) CloseAndDelete() error

func (*LeakyBucket) CouchbaseServerVersion

func (b *LeakyBucket) CouchbaseServerVersion() (major uint64, minor uint64, micro string, err error)

func (*LeakyBucket) Delete

func (b *LeakyBucket) Delete(k string) error

func (*LeakyBucket) DeleteDDoc

func (b *LeakyBucket) DeleteDDoc(docname string) error

func (*LeakyBucket) DeleteWithXattr

func (b *LeakyBucket) DeleteWithXattr(k string, xattr string) error

func (*LeakyBucket) Dump

func (b *LeakyBucket) Dump()

func (*LeakyBucket) Get

func (b *LeakyBucket) Get(k string, rv interface{}) (cas uint64, err error)

func (*LeakyBucket) GetAndTouchRaw

func (b *LeakyBucket) GetAndTouchRaw(k string, exp int) (v []byte, cas uint64, err error)

func (*LeakyBucket) GetBulkRaw

func (b *LeakyBucket) GetBulkRaw(keys []string) (map[string][]byte, error)

func (*LeakyBucket) GetDDoc

func (b *LeakyBucket) GetDDoc(docname string, value interface{}) error

func (*LeakyBucket) GetMaxVbno

func (b *LeakyBucket) GetMaxVbno() (uint16, error)

func (*LeakyBucket) GetName

func (b *LeakyBucket) GetName() string

func (*LeakyBucket) GetRaw

func (b *LeakyBucket) GetRaw(k string) (v []byte, cas uint64, err error)

func (*LeakyBucket) GetStatsVbSeqno

func (b *LeakyBucket) GetStatsVbSeqno(maxVbno uint16, useAbsHighSeqNo bool) (uuids map[uint16]uint64, highSeqnos map[uint16]uint64, seqErr error)

func (*LeakyBucket) GetWithXattr

func (b *LeakyBucket) GetWithXattr(k string, xattr string, rv interface{}, xv interface{}) (cas uint64, err error)

func (*LeakyBucket) Incr

func (b *LeakyBucket) Incr(k string, amt, def uint64, exp int) (uint64, error)

func (*LeakyBucket) PutDDoc

func (b *LeakyBucket) PutDDoc(docname string, value interface{}) error

func (*LeakyBucket) Refresh

func (b *LeakyBucket) Refresh() error

func (*LeakyBucket) Remove

func (b *LeakyBucket) Remove(k string, cas uint64) (casOut uint64, err error)

func (*LeakyBucket) Set

func (b *LeakyBucket) Set(k string, exp int, v interface{}) error

func (*LeakyBucket) SetBulk

func (b *LeakyBucket) SetBulk(entries []*sgbucket.BulkSetEntry) (err error)

func (*LeakyBucket) SetRaw

func (b *LeakyBucket) SetRaw(k string, exp int, v []byte) error

func (*LeakyBucket) StartTapFeed

func (b *LeakyBucket) StartTapFeed(args sgbucket.TapArguments) (sgbucket.TapFeed, error)

func (*LeakyBucket) UUID

func (b *LeakyBucket) UUID() (string, error)

func (*LeakyBucket) Update

func (b *LeakyBucket) Update(k string, exp int, callback sgbucket.UpdateFunc) (err error)

func (*LeakyBucket) VBHash

func (b *LeakyBucket) VBHash(docID string) uint32

func (*LeakyBucket) View

func (b *LeakyBucket) View(ddoc, name string, params map[string]interface{}) (sgbucket.ViewResult, error)

func (*LeakyBucket) ViewCustom

func (b *LeakyBucket) ViewCustom(ddoc, name string, params map[string]interface{}, vres interface{}) error

func (*LeakyBucket) Write

func (b *LeakyBucket) Write(k string, flags int, exp int, v interface{}, opt sgbucket.WriteOptions) error

func (*LeakyBucket) WriteCas

func (b *LeakyBucket) WriteCas(k string, flags int, exp int, cas uint64, v interface{}, opt sgbucket.WriteOptions) (uint64, error)

func (*LeakyBucket) WriteCasWithXattr

func (b *LeakyBucket) WriteCasWithXattr(k string, xattr string, exp int, cas uint64, v interface{}, xv interface{}) (casOut uint64, err error)

func (*LeakyBucket) WriteUpdate

func (b *LeakyBucket) WriteUpdate(k string, exp int, callback sgbucket.WriteUpdateFunc) (err error)

func (*LeakyBucket) WriteUpdateWithXattr

func (b *LeakyBucket) WriteUpdateWithXattr(k string, xattr string, exp int, callback sgbucket.WriteUpdateWithXattrFunc) (casOut uint64, err error)

type LeakyBucketConfig

type LeakyBucketConfig struct {

	// Incr() fails 3 times before finally succeeding
	IncrTemporaryFailCount uint16

	// Emulate TAP/DCP feed de-duplication behavior, such that within a
	// window of # of mutations or a timeout, mutations for a given document
	// will be filtered such that only the _latest_ mutation will make it through.
	TapFeedDeDuplication bool
	TapFeedVbuckets      bool     // Emulate vbucket numbers on feed
	TapFeedMissingDocs   []string // Emulate entry not appearing on tap feed
}

The config object that controls the LeakyBucket behavior

type Level

type Level int32
const (
	// DebugLevel logs are typically voluminous, and are usually disabled in
	// production.
	DebugLevel Level = iota - 1
	// InfoLevel is the default logging priority.
	InfoLevel
	// WarnLevel logs are more important than Info, but don't need individual
	// human review.
	WarnLevel
	// ErrorLevel logs are high-priority. If an application is running smoothly,
	// it shouldn't generate any error-level logs.
	ErrorLevel
	// PanicLevel logs a message, then panics.
	PanicLevel
	// FatalLevel logs a message, then calls os.Exit(1).
	FatalLevel
)

By setting DebugLevel to -1, if LogLevel is not set in the logging config it will default to the zero value for int32 (0) which will disable debug logging, InfoLevel logging will be the default output.

func (*Level) MarshalText

func (l *Level) MarshalText() ([]byte, error)

Implementation of the Go encoding.TextMarshaler interface for the Level type. This method will also be called by the JSON marshaller.

MarshalText marshals the Level to text. Note that the text representation drops the -Level suffix (see example).

func (Level) String

func (l Level) String() string

String returns a lower-case ASCII representation of the log level.

func (*Level) UnmarshalText

func (l *Level) UnmarshalText(text []byte) error

Implementation of the Go encoding.TextUnmarshaler interface for the Level type. This method will also be called by the JSON unmarshaller, e.g. when loading from logging configuration.

UnmarshalText unmarshals text to a level. Like MarshalText, UnmarshalText expects the text representation of a Level to drop the -Level suffix (see example).

In particular, this makes it easy to configure logging levels using YAML, TOML, or JSON files.

type LogAppenderConfig

type LogAppenderConfig struct {
	// Filename is the file to write logs to.  Backup log files will be retained
	// in the same directory.  It uses <processname>-lumberjack.log in
	// os.TempDir() if empty.
	LogFilePath *string            `json:",omitempty"`
	LogKeys     []string           `json:",omitempty"` // Log keywords to enable
	LogLevel    Level              `json:",omitempty"`
	Rotation    *LogRotationConfig `json:",omitempty"`
}

func (*LogAppenderConfig) ValidateLogAppender

func (config *LogAppenderConfig) ValidateLogAppender() error

type LogRotationConfig

type LogRotationConfig struct {
	// MaxSize is the maximum size in megabytes of the log file before it gets
	// rotated. It defaults to 100 megabytes.
	MaxSize int `json:",omitempty"`

	// MaxAge is the maximum number of days to retain old log files based on the
	// timestamp encoded in their filename.  Note that a day is defined as 24
	// hours and may not exactly correspond to calendar days due to daylight
	// savings, leap seconds, etc. The default is not to remove old log files
	// based on age.
	MaxAge int `json:",omitempty"`

	// MaxBackups is the maximum number of old log files to retain.  The default
	// is to retain all old log files (though MaxAge may still cause them to get
	// deleted.)
	MaxBackups int `json:",omitempty"`

	// LocalTime determines if the time used for formatting the timestamps in
	// backup files is the computer's local time.  The default is to use UTC
	// time.
	LocalTime bool `json:",omitempty"`
}

type LoggerWriter

type LoggerWriter struct {
	LogKey       string        // The log key to log to, eg, "HTTP+"
	SerialNumber uint64        // The request ID
	Request      *http.Request // The request
}

This provides an io.Writer interface around the base.Log API

func NewLoggerWriter

func NewLoggerWriter(logKey string, serialNumber uint64, req *http.Request) *LoggerWriter

Create a new LoggerWriter

func (*LoggerWriter) Write

func (lw *LoggerWriter) Write(p []byte) (n int, err error)

Write() method to satisfy the io.Writer interface

type LoggingBucket

type LoggingBucket struct {
	// contains filtered or unexported fields
}

A wrapper around a Bucket that transparently adds logging of all the API calls.

func (*LoggingBucket) Add

func (b *LoggingBucket) Add(k string, exp int, v interface{}) (added bool, err error)

func (*LoggingBucket) AddRaw

func (b *LoggingBucket) AddRaw(k string, exp int, v []byte) (added bool, err error)

func (*LoggingBucket) Append

func (b *LoggingBucket) Append(k string, data []byte) error

func (*LoggingBucket) Close

func (b *LoggingBucket) Close()

func (*LoggingBucket) CouchbaseServerVersion

func (b *LoggingBucket) CouchbaseServerVersion() (major uint64, minor uint64, micro string, err error)

func (*LoggingBucket) Delete

func (b *LoggingBucket) Delete(k string) error

func (*LoggingBucket) DeleteDDoc

func (b *LoggingBucket) DeleteDDoc(docname string) error

func (*LoggingBucket) DeleteWithXattr

func (b *LoggingBucket) DeleteWithXattr(k string, xattr string) error

func (*LoggingBucket) Dump

func (b *LoggingBucket) Dump()

func (*LoggingBucket) Get

func (b *LoggingBucket) Get(k string, rv interface{}) (uint64, error)

func (*LoggingBucket) GetAndTouchRaw

func (b *LoggingBucket) GetAndTouchRaw(k string, exp int) (v []byte, cas uint64, err error)

func (*LoggingBucket) GetBulkRaw

func (b *LoggingBucket) GetBulkRaw(keys []string) (map[string][]byte, error)

func (*LoggingBucket) GetDDoc

func (b *LoggingBucket) GetDDoc(docname string, value interface{}) error

func (*LoggingBucket) GetMaxVbno

func (b *LoggingBucket) GetMaxVbno() (uint16, error)

func (*LoggingBucket) GetName

func (b *LoggingBucket) GetName() string

func (*LoggingBucket) GetRaw

func (b *LoggingBucket) GetRaw(k string) (v []byte, cas uint64, err error)

func (*LoggingBucket) GetStatsVbSeqno

func (b *LoggingBucket) GetStatsVbSeqno(maxVbno uint16, useAbsHighSeqNo bool) (uuids map[uint16]uint64, highSeqnos map[uint16]uint64, seqErr error)

func (*LoggingBucket) GetWithXattr

func (b *LoggingBucket) GetWithXattr(k string, xattr string, rv interface{}, xv interface{}) (cas uint64, err error)

func (*LoggingBucket) Incr

func (b *LoggingBucket) Incr(k string, amt, def uint64, exp int) (uint64, error)

func (*LoggingBucket) PutDDoc

func (b *LoggingBucket) PutDDoc(docname string, value interface{}) error

func (*LoggingBucket) Refresh

func (b *LoggingBucket) Refresh() error

func (*LoggingBucket) Remove

func (b *LoggingBucket) Remove(k string, cas uint64) (casOut uint64, err error)

func (*LoggingBucket) Set

func (b *LoggingBucket) Set(k string, exp int, v interface{}) error

func (*LoggingBucket) SetBulk

func (b *LoggingBucket) SetBulk(entries []*sgbucket.BulkSetEntry) (err error)

func (*LoggingBucket) SetRaw

func (b *LoggingBucket) SetRaw(k string, exp int, v []byte) error

func (*LoggingBucket) StartTapFeed

func (b *LoggingBucket) StartTapFeed(args sgbucket.TapArguments) (sgbucket.TapFeed, error)

func (*LoggingBucket) UUID

func (b *LoggingBucket) UUID() (string, error)

func (*LoggingBucket) Update

func (b *LoggingBucket) Update(k string, exp int, callback sgbucket.UpdateFunc) (err error)

func (*LoggingBucket) VBHash

func (b *LoggingBucket) VBHash(docID string) uint32

func (*LoggingBucket) View

func (b *LoggingBucket) View(ddoc, name string, params map[string]interface{}) (sgbucket.ViewResult, error)

func (*LoggingBucket) ViewCustom

func (b *LoggingBucket) ViewCustom(ddoc, name string, params map[string]interface{}, vres interface{}) error

func (*LoggingBucket) Write

func (b *LoggingBucket) Write(k string, flags int, exp int, v interface{}, opt sgbucket.WriteOptions) error

func (*LoggingBucket) WriteCas

func (b *LoggingBucket) WriteCas(k string, flags int, exp int, cas uint64, v interface{}, opt sgbucket.WriteOptions) (uint64, error)

func (*LoggingBucket) WriteCasWithXattr

func (b *LoggingBucket) WriteCasWithXattr(k string, xattr string, exp int, cas uint64, v interface{}, xv interface{}) (casOut uint64, err error)

func (*LoggingBucket) WriteUpdate

func (b *LoggingBucket) WriteUpdate(k string, exp int, callback sgbucket.WriteUpdateFunc) (err error)

func (*LoggingBucket) WriteUpdateWithXattr

func (b *LoggingBucket) WriteUpdateWithXattr(k string, xattr string, exp int, callback sgbucket.WriteUpdateWithXattrFunc) (casOut uint64, err error)

type LoggingConfigMap

type LoggingConfigMap map[string]*LogAppenderConfig

type PartitionClock

type PartitionClock map[uint16]uint64

PartitionClock is simplified version of SequenceClock for a single partition

func ConvertClockToPartitionClocks

func ConvertClockToPartitionClocks(clock SequenceClock, partitions IndexPartitions) []*PartitionClock

func (PartitionClock) Add

func (clock PartitionClock) Add(other PartitionClock)

Adds the values from another clock to the current clock

func (PartitionClock) AddToClock

func (clock PartitionClock) AddToClock(seqClock SequenceClock)

func (PartitionClock) Copy

func (clock PartitionClock) Copy() PartitionClock

func (PartitionClock) GetSequence

func (clock PartitionClock) GetSequence(vbNo uint16) uint64

func (PartitionClock) Set

func (clock PartitionClock) Set(other PartitionClock)

Set the values in the current clock to the values in other clock

func (PartitionClock) SetSequence

func (clock PartitionClock) SetSequence(vbNo uint16, seq uint64)

func (PartitionClock) String

func (clock PartitionClock) String() string

type PartitionRange

type PartitionRange struct {
	// contains filtered or unexported fields
}

PartitionRange is a pair of clocks defining a range of sequences with a partition. Defines helper functions for range comparison

func NewPartitionRange

func NewPartitionRange() PartitionRange

func (PartitionRange) Compare

func (p PartitionRange) Compare(vbNo uint16, sequence uint64) PartitionRangeCompare

Identifies where the specified vbNo, sequence is relative to the partition range

func (PartitionRange) GetSequenceRange

func (p PartitionRange) GetSequenceRange(vbNo uint16) SequenceRange

func (PartitionRange) SetRange

func (p PartitionRange) SetRange(vbNo uint16, sinceSeq, toSeq uint64)

func (PartitionRange) SinceAfter

func (p PartitionRange) SinceAfter(clock PartitionClock) bool

SinceAfter returns true if all since sequences in the partition range are equal to or later than the partition clock

func (PartitionRange) SinceBefore

func (p PartitionRange) SinceBefore(clock PartitionClock) bool

SinceBefore returns true if any non-nil since sequences in the partition range are earlier than the partition clock

type PartitionRangeCompare

type PartitionRangeCompare int

PartitionRange.Compare Outcomes:

Within, Before, After are returned if the sequence is within/before/after the range
Unknown is returned if the range doesn't include since/to values for the vbno

type PartitionStorage

type PartitionStorage struct {
	Uuid  string   `json:"uuid"`
	Index uint16   `json:"index"`
	VbNos []uint16 `json:"vbNos"`
}

type PartitionStorageSet

type PartitionStorageSet []PartitionStorage

func SeedTestPartitionMap

func SeedTestPartitionMap(bucket Bucket, numPartitions uint16) (PartitionStorageSet, error)

Index partitions for unit tests

func (PartitionStorageSet) Len

func (c PartitionStorageSet) Len() int

Implementation of sort.Interface

func (PartitionStorageSet) Less

func (c PartitionStorageSet) Less(i, j int) bool

func (PartitionStorageSet) Sort

func (c PartitionStorageSet) Sort()

Sorts the PartitionStorageSet by Uuid

func (PartitionStorageSet) String

func (c PartitionStorageSet) String() string

func (PartitionStorageSet) Swap

func (c PartitionStorageSet) Swap(i, j int)

type Receiver

type Receiver interface {
	cbdatasource.Receiver
	SeedSeqnos(map[uint16]uint64, map[uint16]uint64)
	GetEventFeed() <-chan sgbucket.TapEvent
	SetEventFeed(chan sgbucket.TapEvent)
	GetOutput() chan sgbucket.TapEvent

	SetBucketNotifyFn(sgbucket.BucketNotifyFn)
	GetBucketNotifyFn() sgbucket.BucketNotifyFn
	// contains filtered or unexported methods
}

func NewDCPReceiver

func NewDCPReceiver() Receiver

type Replicator

type Replicator struct {
	// contains filtered or unexported fields
}

func NewReplicator

func NewReplicator() *Replicator

func (*Replicator) ActiveTasks

func (r *Replicator) ActiveTasks() (tasks []ActiveTask)

func (*Replicator) Replicate

func (r *Replicator) Replicate(params sgreplicate.ReplicationParameters, isCancel bool) (task *ActiveTask, err error)

type RetrySleeper

type RetrySleeper func(retryCount int) (shouldContinue bool, timeTosleepMs int)

A retry sleeper is called back by the retry loop and passed the current retryCount, and should return the number of milliseconds that the retry should sleep.

func CreateDoublingSleeperFunc

func CreateDoublingSleeperFunc(maxNumAttempts, initialTimeToSleepMs int) RetrySleeper

Create a RetrySleeper that will double the retry time on every iteration and use the given parameters

type RetryWorker

type RetryWorker func() (shouldRetry bool, err error, value interface{})

A RetryWorker encapsulates the work being done in a Retry Loop. The shouldRetry return value determines whether the worker will retry, regardless of the err value. If the worker has exceeded its retry attempts, then it will not be called again even if it returns shouldRetry = true.

type SGError

type SGError struct {
	// contains filtered or unexported fields
}

func (SGError) Error

func (e SGError) Error() string

type SGTranscoder

type SGTranscoder struct {
}

func (SGTranscoder) Decode

func (t SGTranscoder) Decode(bytes []byte, flags uint32, out interface{}) error

Decode applies the default Couchbase transcoding behaviour to decode into a Go type.

func (SGTranscoder) Encode

func (t SGTranscoder) Encode(value interface{}) ([]byte, uint32, error)

Encode applies the default Couchbase transcoding behaviour to encode a Go type. Figures out how to convert the given struct into bytes and then sets the json flag.

type SequenceClock

type SequenceClock interface {
	SetSequence(vbNo uint16, vbSequence uint64)     // Sets the sequence value for a vbucket
	SetMaxSequence(vbNo uint16, vbSequence uint64)  // Sets the sequence value for a vbucket - must be larger than existing sequence
	GetSequence(vbNo uint16) (vbSequence uint64)    // Retrieves the sequence value for a vbucket
	Cas() (casOut uint64)                           // Gets the last known cas for this sequence clock
	SetCas(cas uint64)                              // Sets the last known cas for this sequence clock
	Marshal() (value []byte, err error)             // Marshals the sequence value
	Unmarshal(value []byte) error                   // Unmarshals the sequence value
	UpdateWithClock(updateClock SequenceClock)      // Updates the clock with values from updateClock
	Value() []uint64                                // Returns the raw vector clock
	ValueAsMap() map[uint16]uint64                  // Returns the raw vector clock
	GetHashedValue() string                         // Returns previously hashed value, if present.  If not present, does NOT generate hash
	SetHashedValue(value string)                    // Sets the hashed value for this clock
	Equals(otherClock SequenceClock) bool           // Evaluates whether two clocks are identical
	IsEmptyClock() bool                             // Evaluates if this an empty clock
	AllAfter(otherClock SequenceClock) bool         // True if all entries in clock are greater than or equal to the corresponding values in otherClock
	AllBefore(otherClock SequenceClock) bool        // True if all entries in clock are less than or equal to the corresponding values in otherClock
	AnyAfter(otherClock SequenceClock) bool         // True if any entries in clock are greater than the corresponding values in otherClock
	AnyBefore(otherClock SequenceClock) bool        // True if any entries in clock are less than the corresponding values in otherClock
	SetTo(otherClock SequenceClock)                 // Sets the current clock to a copy of the other clock
	Copy() SequenceClock                            // Returns a copy of the clock
	LimitTo(otherClock SequenceClock) SequenceClock // Returns a new clock where any values in clock that are greater than otherClock, are set to otherClock
}

type SequenceClockImpl

type SequenceClockImpl struct {
	// contains filtered or unexported fields
}

Vector-clock based sequence. Not thread-safe - use SyncSequenceClock for usages with potential for concurrent access.

func HighSeqNosToSequenceClock

func HighSeqNosToSequenceClock(highSeqs map[uint16]uint64) (*SequenceClockImpl, error)

Convert a map of vbno->seq high sequences (as returned by couchbasebucket.GetStatsVbSeqno()) to a SequenceClock

func NewSequenceClockForBytes

func NewSequenceClockForBytes(bytes []byte) (*SequenceClockImpl, error)

func NewSequenceClockImpl

func NewSequenceClockImpl() *SequenceClockImpl

func (*SequenceClockImpl) AllAfter

func (c *SequenceClockImpl) AllAfter(other SequenceClock) bool

Compares another sequence clock with this one. Returns true only if ALL vb values in the clock are greater than or equal to corresponding values in other

func (*SequenceClockImpl) AllBefore

func (c *SequenceClockImpl) AllBefore(other SequenceClock) bool

Compares another sequence clock with this one. Returns true only if ALL vb values in the clock are less than or equal to the corresponding values in other

func (*SequenceClockImpl) AnyAfter

func (c *SequenceClockImpl) AnyAfter(other SequenceClock) bool

Compares another sequence clock with this one. Returns true if ANY vb values in the clock are greater than the corresponding values in other

func (*SequenceClockImpl) AnyBefore

func (c *SequenceClockImpl) AnyBefore(other SequenceClock) bool

Compares another sequence clock with this one. Returns true if ANY vb values in the clock are less than the corresponding values in other

func (*SequenceClockImpl) Cas

func (c *SequenceClockImpl) Cas() uint64

func (*SequenceClockImpl) Copy

func (c *SequenceClockImpl) Copy() SequenceClock

Deep-copies a SequenceClock

func (*SequenceClockImpl) Equals

func (c *SequenceClockImpl) Equals(other SequenceClock) bool

Compares another sequence clock with this one

func (*SequenceClockImpl) GetHashedValue

func (c *SequenceClockImpl) GetHashedValue() string

func (*SequenceClockImpl) GetSequence

func (c *SequenceClockImpl) GetSequence(vbNo uint16) (vbSequence uint64)

func (*SequenceClockImpl) Init

func (c *SequenceClockImpl) Init(value []uint64, hash string)

func (*SequenceClockImpl) IsEmptyClock

func (c *SequenceClockImpl) IsEmptyClock() bool

func (*SequenceClockImpl) LimitTo

func (c *SequenceClockImpl) LimitTo(other SequenceClock) SequenceClock

func (*SequenceClockImpl) Marshal

func (c *SequenceClockImpl) Marshal() ([]byte, error)

TODO: replace with something more intelligent than gob encode, to take advantage of known

clock structure?

func (*SequenceClockImpl) SetCas

func (c *SequenceClockImpl) SetCas(cas uint64)

func (*SequenceClockImpl) SetHashedValue

func (c *SequenceClockImpl) SetHashedValue(value string)

func (*SequenceClockImpl) SetMaxSequence

func (c *SequenceClockImpl) SetMaxSequence(vbNo uint16, vbSequence uint64)

func (*SequenceClockImpl) SetSequence

func (c *SequenceClockImpl) SetSequence(vbNo uint16, vbSequence uint64)

func (*SequenceClockImpl) SetTo

func (c *SequenceClockImpl) SetTo(other SequenceClock)

Sets a sequence clock equal to the specified clock

func (*SequenceClockImpl) Unmarshal

func (c *SequenceClockImpl) Unmarshal(value []byte) error

func (*SequenceClockImpl) UpdateWithClock

func (c *SequenceClockImpl) UpdateWithClock(updateClock SequenceClock)

func (*SequenceClockImpl) UpdateWithPartitionClocks

func (c *SequenceClockImpl) UpdateWithPartitionClocks(partitionClocks []*PartitionClock, allowRollback bool) error

func (*SequenceClockImpl) Value

func (c *SequenceClockImpl) Value() []uint64

func (*SequenceClockImpl) ValueAsMap

func (c *SequenceClockImpl) ValueAsMap() map[uint16]uint64

type SequenceRange

type SequenceRange struct {
	Since uint64
	To    uint64
}

type SequenceTimingExpvar

type SequenceTimingExpvar struct {
	// contains filtered or unexported fields
}

SequenceTimingExpvar attempts to track timing information for targeted sequences as they move through the system. Creates a map that looks like the following, where Indexed, Polled, Changes are the incoming stages, the values are nanosecond timestamps, and the sequences are the target sequences, based on the specified vb and frequency (in the example frequency=1000). Since we won't necessarily see every vb sequence, we track the first sequence we see higher than the target frequency. (e.g. if our last sequence was 1000 and frequency is 1000, it will track the first sequence seen higher than 2000). Note: Frequency needs to be high enough that a sequence can move through the system before the next sequence is seen, otherwise earlier stages could be updating current before the later stages have processed it.

{
	"timingMap": {
		"seq1000.Indexed" :  4738432432,
		"seq1000.Polled" : 5743785947,
		"seq1000.Changes" :
		"seq2002.Indexed" :  4738432432,
		"seq2002.Polled" : 5743785947,
		"seq2002.Changes" :
	}
}
var TimingExpvars SequenceTimingExpvar

func NewSequenceTimingExpvar

func NewSequenceTimingExpvar(frequency uint64, targetVbNo uint16, name string) SequenceTimingExpvar

func (SequenceTimingExpvar) String

func (s SequenceTimingExpvar) String() string

func (*SequenceTimingExpvar) UpdateBySequence

func (s *SequenceTimingExpvar) UpdateBySequence(stage string, vbNo uint16, seq uint64)

func (*SequenceTimingExpvar) UpdateBySequenceAt

func (s *SequenceTimingExpvar) UpdateBySequenceAt(stage string, vbNo uint16, seq uint64, time time.Time)

func (*SequenceTimingExpvar) UpdateBySequenceRange

func (s *SequenceTimingExpvar) UpdateBySequenceRange(stage string, vbNo uint16, startSeq uint64, endSeq uint64)

Update by sequence range is used for events (like clock polling) that don't see every sequence. Writes when current target sequence is in range. Assumes callers don't report overlapping ranges

type Set

type Set map[string]present

An immutable set of strings, represented as a map.

func SetFromArray

func SetFromArray(names []string) Set

Creates a new Set from an array of strings.

func SetOf

func SetOf(names ...string) Set

Creates a new Set from zero or more inline string arguments.

func (Set) Contains

func (set Set) Contains(ch string) bool

Returns true if the set includes the channel.

func (Set) Equals

func (set Set) Equals(other Set) bool

func (Set) MarshalJSON

func (set Set) MarshalJSON() ([]byte, error)

func (Set) Removing

func (set Set) Removing(str string) Set

Returns a set with any instance of 'str' removed

func (Set) String

func (set Set) String() string

func (Set) ToArray

func (set Set) ToArray() []string

Converts a Set to an array of strings (ordering is undefined).

func (Set) Union

func (set Set) Union(other Set) Set

Returns the union of two sets.

func (*Set) UnmarshalJSON

func (setPtr *Set) UnmarshalJSON(data []byte) error

type ShardedClock

type ShardedClock struct {
	// contains filtered or unexported fields
}

ShardedClock is a full clock for the bucket. ShardedClock manages the collection of clock shards (ShardedClockPartitions), and also manages the counter for the clock.

func NewShardedClock

func NewShardedClock(baseKey string, partitions *IndexPartitions, bucket Bucket) *ShardedClock

func NewShardedClockWithPartitions

func NewShardedClockWithPartitions(baseKey string, partitions *IndexPartitions, bucket Bucket) *ShardedClock

func (*ShardedClock) AsClock

func (s *ShardedClock) AsClock() *SequenceClockImpl

func (*ShardedClock) GetSequence

func (s *ShardedClock) GetSequence(vbNo uint16) (vbSequence uint64)

func (*ShardedClock) Load

func (s *ShardedClock) Load() (isChanged bool, err error)

Loads clock from bucket. If counter isn't changed, returns false and leaves as-is. For newly initialized ShardedClocks (counter=0), this will only happen if there are no entries in the bucket for the clock.

func (*ShardedClock) UpdateAndWrite

func (s *ShardedClock) UpdateAndWrite(updates map[uint16]uint64) (err error)

Update and write a sharded clock with the specified values.

type ShardedClockPartition

type ShardedClockPartition struct {
	Key string // Clock partition document key
	// contains filtered or unexported fields
}

ShardedClockPartition manages storage for one clock partition, where a clock partition is a set of {vb, seq} values for a subset of vbuckets. Modifying clock values and metadata is done directly to the []byte storage, to avoid marshal/unmarshal overhead. SeqSize defines how many bytes are used to store each clock value. It is initialized at 2 bytes/value (uint16 capacity), but gets increased via the resize() operation when a call to SetSequence would exceed the current capacity. Structure of []byte:

index : 2 bytes.  Partition Index, as uint16.
seqSize: 1 byte.  Sequence Size.  Supports values 1-4, where max sequence for that size is defined in kClockMaxSequences[size]
vbucket sequences: 2-8 bytes per sequence (depending on seqSize)

func NewShardedClockPartition

func NewShardedClockPartition(baseKey string, index uint16, vbuckets []uint16) *ShardedClockPartition

func NewShardedClockPartitionForBytes

func NewShardedClockPartitionForBytes(key string, bytes []byte, partitions *IndexPartitions) *ShardedClockPartition

func (*ShardedClockPartition) AddToClock

func (p *ShardedClockPartition) AddToClock(clock SequenceClock) error

func (*ShardedClockPartition) GetIndex

func (p *ShardedClockPartition) GetIndex() uint16

func (*ShardedClockPartition) GetSeqSize

func (p *ShardedClockPartition) GetSeqSize() uint8

Sequence Size - used as variable-length encoding, but for all sequences in the partition.

func (*ShardedClockPartition) GetSequence

func (p *ShardedClockPartition) GetSequence(vb uint16) (seq uint64)

func (*ShardedClockPartition) Init

func (p *ShardedClockPartition) Init(vbuckets []uint16)

Initializes vbucketOffsets

func (*ShardedClockPartition) Marshal

func (p *ShardedClockPartition) Marshal() ([]byte, error)

func (*ShardedClockPartition) SetIndex

func (p *ShardedClockPartition) SetIndex(index uint16)

func (*ShardedClockPartition) SetSeqSize

func (p *ShardedClockPartition) SetSeqSize(size uint8)

func (*ShardedClockPartition) SetSequence

func (p *ShardedClockPartition) SetSequence(vb uint16, seq uint64)

Sets sequence. Uses big endian byte ordering.

func (*ShardedClockPartition) Unmarshal

func (p *ShardedClockPartition) Unmarshal(value []byte) error

type SimpleFeed

type SimpleFeed struct {
	// contains filtered or unexported fields
}

func (*SimpleFeed) Close

func (s *SimpleFeed) Close() error

func (*SimpleFeed) Events

func (s *SimpleFeed) Events() <-chan sgbucket.TapEvent

func (*SimpleFeed) WriteEvents

func (s *SimpleFeed) WriteEvents() chan<- sgbucket.TapEvent

type StableSequenceFunc

type StableSequenceFunc func() (clock SequenceClock, err error)

Callback function to return the stable sequence

type StatsBucket

type StatsBucket struct {
	// contains filtered or unexported fields
}

A wrapper around a Bucket that tracks bucket usage statistics as basic read/write counts. Doesn't break down by operation type, to better identify counts for bulk operations

func NewStatsBucket

func NewStatsBucket(bucket Bucket) *StatsBucket

func (*StatsBucket) Add

func (b *StatsBucket) Add(k string, exp int, v interface{}) (added bool, err error)

func (*StatsBucket) AddRaw

func (b *StatsBucket) AddRaw(k string, exp int, v []byte) (added bool, err error)

func (*StatsBucket) Append

func (b *StatsBucket) Append(k string, data []byte) error

func (*StatsBucket) Close

func (b *StatsBucket) Close()

func (*StatsBucket) CouchbaseServerVersion

func (b *StatsBucket) CouchbaseServerVersion() (major uint64, minor uint64, micro string, err error)

func (*StatsBucket) Delete

func (b *StatsBucket) Delete(k string) error

func (*StatsBucket) DeleteDDoc

func (b *StatsBucket) DeleteDDoc(docname string) error

func (*StatsBucket) DeleteWithXattr

func (b *StatsBucket) DeleteWithXattr(k string, xattr string) error

func (*StatsBucket) Dump

func (b *StatsBucket) Dump()

func (*StatsBucket) Get

func (b *StatsBucket) Get(k string, rv interface{}) (uint64, error)

func (*StatsBucket) GetAndTouchRaw

func (b *StatsBucket) GetAndTouchRaw(k string, exp int) (v []byte, cas uint64, err error)

func (*StatsBucket) GetBulkRaw

func (b *StatsBucket) GetBulkRaw(keys []string) (map[string][]byte, error)

func (*StatsBucket) GetDDoc

func (b *StatsBucket) GetDDoc(docname string, value interface{}) error

func (*StatsBucket) GetMaxVbno

func (b *StatsBucket) GetMaxVbno() (uint16, error)

func (*StatsBucket) GetName

func (b *StatsBucket) GetName() string

func (*StatsBucket) GetRaw

func (b *StatsBucket) GetRaw(k string) (v []byte, cas uint64, err error)

func (*StatsBucket) GetStats

func (b *StatsBucket) GetStats() StatsBucketStats

func (*StatsBucket) GetStatsVbSeqno

func (b *StatsBucket) GetStatsVbSeqno(maxVbno uint16, useAbsHighSeqNo bool) (uuids map[uint16]uint64, highSeqnos map[uint16]uint64, seqErr error)

func (*StatsBucket) GetWithXattr

func (b *StatsBucket) GetWithXattr(k string, xattr string, rv interface{}, xv interface{}) (cas uint64, err error)

func (*StatsBucket) Incr

func (b *StatsBucket) Incr(k string, amt, def uint64, exp int) (uint64, error)

func (*StatsBucket) PutDDoc

func (b *StatsBucket) PutDDoc(docname string, value interface{}) error

func (*StatsBucket) Refresh

func (b *StatsBucket) Refresh() error

func (*StatsBucket) Remove

func (b *StatsBucket) Remove(k string, cas uint64) (casOut uint64, err error)

func (*StatsBucket) Set

func (b *StatsBucket) Set(k string, exp int, v interface{}) error

func (*StatsBucket) SetBulk

func (b *StatsBucket) SetBulk(entries []*sgbucket.BulkSetEntry) (err error)

func (*StatsBucket) SetRaw

func (b *StatsBucket) SetRaw(k string, exp int, v []byte) error

func (*StatsBucket) StartTapFeed

func (b *StatsBucket) StartTapFeed(args sgbucket.TapArguments) (sgbucket.TapFeed, error)

func (*StatsBucket) UUID

func (b *StatsBucket) UUID() (string, error)

func (*StatsBucket) Update

func (b *StatsBucket) Update(k string, exp int, callback sgbucket.UpdateFunc) (err error)

func (*StatsBucket) VBHash

func (b *StatsBucket) VBHash(docID string) uint32

func (*StatsBucket) View

func (b *StatsBucket) View(ddoc, name string, params map[string]interface{}) (sgbucket.ViewResult, error)

func (*StatsBucket) ViewCustom

func (b *StatsBucket) ViewCustom(ddoc, name string, params map[string]interface{}, vres interface{}) error

func (*StatsBucket) Write

func (b *StatsBucket) Write(k string, flags int, exp int, v interface{}, opt sgbucket.WriteOptions) error

func (*StatsBucket) WriteCas

func (b *StatsBucket) WriteCas(k string, flags int, exp int, cas uint64, v interface{}, opt sgbucket.WriteOptions) (uint64, error)

func (*StatsBucket) WriteCasWithXattr

func (b *StatsBucket) WriteCasWithXattr(k string, xattr string, exp int, cas uint64, v interface{}, xv interface{}) (casOut uint64, err error)

func (*StatsBucket) WriteUpdate

func (b *StatsBucket) WriteUpdate(k string, exp int, callback sgbucket.WriteUpdateFunc) (err error)

func (*StatsBucket) WriteUpdateWithXattr

func (b *StatsBucket) WriteUpdateWithXattr(k string, xattr string, exp int, callback sgbucket.WriteUpdateWithXattrFunc) (casOut uint64, err error)

type StatsBucketStats

type StatsBucketStats struct {
	DocsRead           uint64
	DocsWritten        uint64
	BytesRead          uint64
	BytesWritten       uint64
	UnknownSizeRead    uint64
	UnknownSizeWritten uint64
}

func (*StatsBucketStats) PerIteration

func (sbs *StatsBucketStats) PerIteration(iterationCount uint64) string

func (*StatsBucketStats) String

func (sbs *StatsBucketStats) String() string

type SyncSequenceClock

type SyncSequenceClock struct {
	Clock *SequenceClockImpl
	// contains filtered or unexported fields
}

Synchronized Sequence Clock - should be used in shared usage scenarios

func ConvertToSyncSequenceClock

func ConvertToSyncSequenceClock(clock *SequenceClockImpl) *SyncSequenceClock

func NewSyncSequenceClock

func NewSyncSequenceClock() *SyncSequenceClock

func (*SyncSequenceClock) AllAfter

func (c *SyncSequenceClock) AllAfter(other SequenceClock) bool

func (*SyncSequenceClock) AllBefore

func (c *SyncSequenceClock) AllBefore(other SequenceClock) bool

func (*SyncSequenceClock) AnyAfter

func (c *SyncSequenceClock) AnyAfter(other SequenceClock) bool

func (*SyncSequenceClock) AnyBefore

func (c *SyncSequenceClock) AnyBefore(other SequenceClock) bool

func (*SyncSequenceClock) Cas

func (c *SyncSequenceClock) Cas() uint64

func (*SyncSequenceClock) Copy

func (c *SyncSequenceClock) Copy() SequenceClock

func (*SyncSequenceClock) Equals

func (c *SyncSequenceClock) Equals(other SequenceClock) bool

func (*SyncSequenceClock) GetHashedValue

func (c *SyncSequenceClock) GetHashedValue() string

func (*SyncSequenceClock) GetSequence

func (c *SyncSequenceClock) GetSequence(vbNo uint16) (sequence uint64)

func (*SyncSequenceClock) IsEmptyClock

func (c *SyncSequenceClock) IsEmptyClock() bool

func (*SyncSequenceClock) LimitTo

func (c *SyncSequenceClock) LimitTo(other SequenceClock) SequenceClock

func (*SyncSequenceClock) Marshal

func (c *SyncSequenceClock) Marshal() ([]byte, error)

TODO: possibly replace with something more intelligent than gob encode, to take advantage of the known clock structure.

func (*SyncSequenceClock) SetCas

func (c *SyncSequenceClock) SetCas(cas uint64)

func (*SyncSequenceClock) SetHashedValue

func (c *SyncSequenceClock) SetHashedValue(value string)

func (*SyncSequenceClock) SetMaxSequence

func (c *SyncSequenceClock) SetMaxSequence(vbNo uint16, vbSequence uint64)

func (*SyncSequenceClock) SetSequence

func (c *SyncSequenceClock) SetSequence(vbNo uint16, sequence uint64)

func (*SyncSequenceClock) SetTo

func (c *SyncSequenceClock) SetTo(other SequenceClock)

Copies a channel clock

func (*SyncSequenceClock) Unmarshal

func (c *SyncSequenceClock) Unmarshal(value []byte) error

func (*SyncSequenceClock) UpdateWithClock

func (c *SyncSequenceClock) UpdateWithClock(updateClock SequenceClock)

func (*SyncSequenceClock) Value

func (c *SyncSequenceClock) Value() []uint64

func (*SyncSequenceClock) ValueAsMap

func (c *SyncSequenceClock) ValueAsMap() map[uint16]uint64

type TapArguments

type TapArguments sgbucket.TapArguments

type TapFeed

type TapFeed sgbucket.TapFeed

type TimingStatus

type TimingStatus int
const (
	TimingStatusCurrent TimingStatus = iota
	TimingStatusNext
	TimingStatusNone
	TimingStatusInit
)

type Uint64Slice

type Uint64Slice []uint64

Uint64Slice attaches the methods of sort.Interface to []uint64, sorting in increasing order.

func (Uint64Slice) Len

func (s Uint64Slice) Len() int

func (Uint64Slice) Less

func (s Uint64Slice) Less(i, j int) bool

func (Uint64Slice) Sort

func (s Uint64Slice) Sort()

Sort is a convenience method.

func (Uint64Slice) Swap

func (s Uint64Slice) Swap(i, j int)

type UnitTestAuth

type UnitTestAuth struct {
	// contains filtered or unexported fields
}

func (*UnitTestAuth) GetCredentials

func (u *UnitTestAuth) GetCredentials() (string, string, string)

type VbPositionMap

type VbPositionMap map[uint16]uint64 // Map from vbucket to position within partition.  Stored as uint64 to avoid cast during arithmetic

type VbSeq

type VbSeq struct {
	Vb  uint16
	Seq uint64
}

VbSeq stores a vbucket number and vbucket sequence pair

func (VbSeq) CompareTo

func (v VbSeq) CompareTo(vb uint16, seq uint64) CompareResult

func (VbSeq) LessThanOrEqualsClock

func (v VbSeq) LessThanOrEqualsClock(clock SequenceClock) bool

func (*VbSeq) UpdateIfEarlier

func (v *VbSeq) UpdateIfEarlier(other VbSeq) bool

Updates v to the other sequence value if v is empty (seq=0) or if the other value compares as earlier than v. Returns true if v was updated.

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL