Documentation ¶
Index ¶
- Constants
- Variables
- func Base58Decode(enc string) ([]byte, error)
- func Base58Encode(buf []byte) string
- func Blob(i interface{}) ([]byte, error)
- func Decrypt(data, key []byte) ([]byte, error)
- func DeleteKeys(svc *s3.S3, bucket string, keys []string) error
- func Divide(list []string, max int) (out [][]string)
- func Encrypt(data, key []byte) ([]byte, error)
- func Halve(list []string) (left, right []string)
- type APICombinator
- type APIEngine
- type Appender
- type AppendingCombinator
- func (ac AppendingCombinator) Delete(r Reference) error
- func (ac AppendingCombinator) Find(p string) (Reference, error)
- func (ac AppendingCombinator) Get(r Reference) (interface{}, error)
- func (ac AppendingCombinator) Merge(r Reference, i interface{}) error
- func (ac AppendingCombinator) Put(r Reference, i interface{}) error
- type Cache
- type DatabaseCombinator
- type Deferred
- type Directory
- type EncodedRefs
- type Encrypter
- type FTPCombinator
- type FileReference
- type FileSystem
- type FredEngine
- type HashReference
- type HashedContent
- type KeyValue
- type KeyValueCombinator
- type ListRecord
- type Listing
- type ListingCombinator
- type LogRecord
- type LoggingCombinator
- type Memory
- type Multiplexer
- type Observation
- type Observations
- type Passthrough
- type ProgrammaticCombinator
- type ReadOnly
- type Ref
- type RefFunc
- type Reference
- type S3Collection
- type S3KeyValue
- type S3Record
- type S3Reference
- type Stdio
- type StorageCombinator
- type VersionRecord
- type Versioning
- type Versions
Constants ¶
const (
	Algo      = "AES_256"
	KeyLength = 32
)
const (
	DefaultHashAlgo = MD5
	MD5             = "md5"
	Shake256        = "shake256"
)
const HashPrefix = `EAE18B82-F047-4913-BFE7-CF5B9E3B35AB`
const MaxKeys = 10
Variables ¶
var (
	// if the Get method cannot find a reference, the combinator should return something that wraps this error:
	NotFound = errors.New("reference not found")
	// if any method cannot be performed, the combinator should return something that wraps this error:
	NotSupported = errors.New("operation not supported")
)
var ReadOnlyError = errors.New("read only")
Functions ¶
func Base58Decode ¶ added in v1.0.3
func Base58Encode ¶ added in v1.0.3
avoids problematic characters for both people and filesystems
Types ¶
type APICombinator ¶
type APICombinator struct {
// contains filtered or unexported fields
}
func NewAPICombinator ¶
func NewAPICombinator(e APIEngine) *APICombinator
func (APICombinator) Delete ¶
func (a APICombinator) Delete(Reference) error
func (APICombinator) Get ¶
func (a APICombinator) Get(r Reference) (interface{}, error)
func (APICombinator) Merge ¶
func (a APICombinator) Merge(Reference, interface{}) error
func (APICombinator) Put ¶
func (a APICombinator) Put(Reference, interface{}) error
type Appender ¶
type Appender struct {
// contains filtered or unexported fields
}
adds an append merge function
func NewAppender ¶
func NewAppender(c StorageCombinator) *Appender
type AppendingCombinator ¶
type AppendingCombinator struct {
// contains filtered or unexported fields
}
will simply append data upon Merge call
func NewFileAppender ¶
func NewFileAppender(dir string) (*AppendingCombinator, error)
func (AppendingCombinator) Delete ¶
func (ac AppendingCombinator) Delete(r Reference) error
func (AppendingCombinator) Get ¶
func (ac AppendingCombinator) Get(r Reference) (interface{}, error)
func (AppendingCombinator) Merge ¶
func (ac AppendingCombinator) Merge(r Reference, i interface{}) error
simply appends or creates
func (AppendingCombinator) Put ¶
func (ac AppendingCombinator) Put(r Reference, i interface{}) error
type Cache ¶
type Cache struct {
// contains filtered or unexported fields
}
func NewCache ¶
func NewCache(underlying, cache StorageCombinator) *Cache
type DatabaseCombinator ¶
type DatabaseCombinator struct {
// contains filtered or unexported fields
}
func NewDatabaseCombinator ¶
func NewDatabaseCombinator(db *sql.DB) *DatabaseCombinator
func (DatabaseCombinator) Delete ¶
func (dc DatabaseCombinator) Delete(Reference) error
func (DatabaseCombinator) Get ¶
func (dc DatabaseCombinator) Get(r Reference) (interface{}, error)
All that matters are the URI's query parameters: query — the query to run (required); format — csv or json (defaults to csv); na — the "null" string (defaults to na); interface — string, reader, or bytes (defaults to bytes); header — true or false (defaults to true), whether to output a header row (metadata).
func (DatabaseCombinator) Merge ¶
func (dc DatabaseCombinator) Merge(Reference, interface{}) error
func (DatabaseCombinator) Put ¶
func (dc DatabaseCombinator) Put(Reference, interface{}) error
type Deferred ¶
type Deferred struct {
// contains filtered or unexported fields
}
func NewDeferred ¶
func NewDeferred(factory func() (StorageCombinator, error)) *Deferred
type Directory ¶
type Directory []FileReference
type EncodedRefs ¶ added in v1.0.3
type EncodedRefs struct {
// contains filtered or unexported fields
}
func NewEncodedRefs ¶ added in v1.0.3
func NewEncodedRefs(c StorageCombinator) EncodedRefs
func (EncodedRefs) Delete ¶ added in v1.0.3
func (e EncodedRefs) Delete(r Reference) error
func (EncodedRefs) Get ¶ added in v1.0.3
func (e EncodedRefs) Get(r Reference) (interface{}, error)
func (EncodedRefs) Merge ¶ added in v1.0.3
func (e EncodedRefs) Merge(r Reference, i interface{}) error
func (EncodedRefs) Put ¶ added in v1.0.3
func (e EncodedRefs) Put(r Reference, i interface{}) error
type Encrypter ¶
type Encrypter struct {
// contains filtered or unexported fields
}
Uses AWS KMS master-key data encryption to encrypt/decrypt content stored via the embedded combinator.
func NewEncrypter ¶
type FTPCombinator ¶
type FTPCombinator struct {
// contains filtered or unexported fields
}
func NewFTPCombinator ¶
func NewFTPCombinator(host, user, password string) (*FTPCombinator, error)
func (FTPCombinator) Delete ¶
func (f FTPCombinator) Delete(Reference) error
func (FTPCombinator) Get ¶
func (f FTPCombinator) Get(r Reference) (interface{}, error)
func (FTPCombinator) Merge ¶
func (f FTPCombinator) Merge(Reference, interface{}) error
func (FTPCombinator) Put ¶
func (f FTPCombinator) Put(Reference, interface{}) error
type FileReference ¶
func NewFileReference ¶
func NewFileReference(fi os.FileInfo) FileReference
func (FileReference) String ¶
func (f FileReference) String() string
func (FileReference) URI ¶
func (f FileReference) URI() *url.URL
type FileSystem ¶
type FileSystem struct {
// contains filtered or unexported fields
}
FileSystem is a storage combinator based on files
func NewFileSystem ¶
func NewFileSystem(mount string) (*FileSystem, error)
NewFileSystem creates a new filesystem storage combinator with the given scheme, mountpoint, and default file mode. It simply uses the URI's path to map to the underlying file system; if there is no path, it hashes the URI string to derive one.
func (FileSystem) Delete ¶
func (fs FileSystem) Delete(r Reference) error
func (FileSystem) Get ¶
func (fs FileSystem) Get(r Reference) (interface{}, error)
func (FileSystem) Merge ¶
func (fs FileSystem) Merge(r Reference, i interface{}) error
appends to the file or creates it
func (FileSystem) Put ¶
func (fs FileSystem) Put(r Reference, i interface{}) error
type FredEngine ¶ added in v0.1.67
type FredEngine struct {
Key []byte
}
func (FredEngine) Get ¶ added in v0.1.67
func (e FredEngine) Get(r Reference) (*http.Response, error)
func (FredEngine) Process ¶ added in v0.1.67
func (e FredEngine) Process(rc io.ReadCloser) (interface{}, error)
type HashReference ¶ added in v1.0.8
type HashReference struct {
// contains filtered or unexported fields
}
func ParseHashRef ¶ added in v1.0.3
func ParseHashRef(r Reference) (*HashReference, error)
func (HashReference) Algo ¶ added in v1.0.9
func (h HashReference) Algo() string
func (HashReference) String ¶ added in v1.0.8
func (h HashReference) String() string
func (HashReference) URI ¶ added in v1.0.8
func (h HashReference) URI() *url.URL
func (HashReference) Value ¶ added in v1.0.9
func (h HashReference) Value() []byte
type HashedContent ¶ added in v1.0.3
type HashedContent struct {
// contains filtered or unexported fields
}
Enforces that refs and content are related by a hash. References are of the form <algo>:<value>, where <algo> is the name of the hash algorithm and <value> is the base58-encoded value of the hash.
func NewHashedContent ¶ added in v1.0.3
func NewHashedContent(c StorageCombinator) HashedContent
func (HashedContent) Delete ¶ added in v1.0.3
func (hc HashedContent) Delete(r Reference) error
func (HashedContent) Get ¶ added in v1.0.3
func (hc HashedContent) Get(r Reference) (interface{}, error)
func (HashedContent) Merge ¶ added in v1.0.3
func (hc HashedContent) Merge(r Reference, i interface{}) error
func (HashedContent) Put ¶ added in v1.0.3
func (hc HashedContent) Put(r Reference, i interface{}) error
type KeyValueCombinator ¶ added in v1.0.6
type KeyValueCombinator struct {
// contains filtered or unexported fields
}
func NewKeyValue ¶ added in v1.0.6
func NewKeyValue(kv KeyValue) *KeyValueCombinator
func (KeyValueCombinator) Delete ¶ added in v1.0.6
func (c KeyValueCombinator) Delete(Reference) error
func (KeyValueCombinator) Get ¶ added in v1.0.6
func (c KeyValueCombinator) Get(r Reference) (interface{}, error)
func (KeyValueCombinator) Merge ¶ added in v1.0.6
func (c KeyValueCombinator) Merge(Reference, interface{}) error
func (KeyValueCombinator) Put ¶ added in v1.0.6
func (c KeyValueCombinator) Put(r Reference, i interface{}) error
type Listing ¶
func NewListing ¶
type ListingCombinator ¶
type ListingCombinator struct {
// contains filtered or unexported fields
}
func NewListingCombinator ¶
func NewListingCombinator(raw StorageCombinator, listReference Reference) *ListingCombinator
embedded combinator's merge method has to simply append
func (ListingCombinator) Delete ¶
func (lc ListingCombinator) Delete(r Reference) error
func (ListingCombinator) Get ¶
func (lc ListingCombinator) Get(r Reference) (interface{}, error)
func (ListingCombinator) Merge ¶
func (lc ListingCombinator) Merge(r Reference, i interface{}) error
func (ListingCombinator) Put ¶
func (lc ListingCombinator) Put(r Reference, i interface{}) error
type LogRecord ¶
type LoggingCombinator ¶
type LoggingCombinator struct {
// contains filtered or unexported fields
}
func NewLoggingCombinator ¶
func NewLoggingCombinator(storage, list StorageCombinator, listRef Reference) *LoggingCombinator
logs get/put/merge/delete calls
func (LoggingCombinator) Delete ¶
func (c LoggingCombinator) Delete(r Reference) error
func (LoggingCombinator) Get ¶
func (c LoggingCombinator) Get(r Reference) (interface{}, error)
func (LoggingCombinator) Merge ¶
func (c LoggingCombinator) Merge(r Reference, i interface{}) error
func (LoggingCombinator) Put ¶
func (c LoggingCombinator) Put(r Reference, i interface{}) error
type Multiplexer ¶
type Multiplexer struct {
// contains filtered or unexported fields
}
func NewMultiplexer ¶
func NewMultiplexer(m map[string]StorageCombinator) *Multiplexer
NewMultiplexer creates a switching storage combinator, based on longest match to first path component
func (Multiplexer) Delete ¶
func (m Multiplexer) Delete(r Reference) error
func (Multiplexer) Get ¶
func (m Multiplexer) Get(r Reference) (interface{}, error)
func (Multiplexer) Merge ¶
func (m Multiplexer) Merge(r Reference, i interface{}) error
func (Multiplexer) Put ¶
func (m Multiplexer) Put(r Reference, i interface{}) error
type Observation ¶ added in v0.1.67
type Observations ¶ added in v0.1.67
type Observations struct {
	Start        string         `xml:"observation_start,attr"`
	End          string         `xml:"observation_end,attr"`
	Observations []*Observation `xml:"observation"`
}
func (Observations) String ¶ added in v0.1.67
func (o Observations) String() string
type Passthrough ¶
type Passthrough struct {
// contains filtered or unexported fields
}
func NewPassthrough ¶
func NewPassthrough(msg string, c StorageCombinator) *Passthrough
func (Passthrough) Delete ¶
func (pt Passthrough) Delete(r Reference) error
func (Passthrough) Get ¶
func (pt Passthrough) Get(r Reference) (interface{}, error)
func (Passthrough) Merge ¶
func (pt Passthrough) Merge(r Reference, i interface{}) error
func (Passthrough) Put ¶
func (pt Passthrough) Put(r Reference, i interface{}) error
type ProgrammaticCombinator ¶
type ProgrammaticCombinator struct {
// contains filtered or unexported fields
}
func NewProgrammatic ¶
func NewProgrammatic(f RefFunc) *ProgrammaticCombinator
func (ProgrammaticCombinator) Delete ¶
func (c ProgrammaticCombinator) Delete(Reference) error
func (ProgrammaticCombinator) Get ¶
func (c ProgrammaticCombinator) Get(r Reference) (interface{}, error)
func (ProgrammaticCombinator) Merge ¶
func (c ProgrammaticCombinator) Merge(Reference, interface{}) error
func (ProgrammaticCombinator) Put ¶
func (c ProgrammaticCombinator) Put(Reference, interface{}) error
type ReadOnly ¶
type ReadOnly struct {
// contains filtered or unexported fields
}
func NewReadOnly ¶
func NewReadOnly(c StorageCombinator) *ReadOnly
type S3Collection ¶
type S3Collection struct {
// contains filtered or unexported fields
}
maintains a collection of json-encoded records
func NewS3Collection ¶
func NewS3CollectionDebug ¶ added in v1.0.18
func NewS3CollectionDebug(bucket, prefix string, ref Reference, svc *s3.S3, debug bool) (*S3Collection, error)
ref is the one single valid reference for Get and Merge methods
func (S3Collection) Delete ¶
func (c S3Collection) Delete(r Reference) error
func (S3Collection) Get ¶
func (c S3Collection) Get(r Reference) (interface{}, error)
Think about supporting a query of "after=isotime" or "before=isotime" to return only matching records, or a "count" fragment for just the count.
func (S3Collection) Merge ¶
func (c S3Collection) Merge(r Reference, i interface{}) error
func (S3Collection) Put ¶
func (c S3Collection) Put(r Reference, i interface{}) error
type S3KeyValue ¶
type S3KeyValue struct {
// contains filtered or unexported fields
}
func NewS3KeyValue ¶
func (S3KeyValue) Delete ¶
func (fs S3KeyValue) Delete(r Reference) error
func (S3KeyValue) Get ¶
func (fs S3KeyValue) Get(r Reference) (interface{}, error)
func (S3KeyValue) Merge ¶
func (fs S3KeyValue) Merge(r Reference, i interface{}) error
func (S3KeyValue) Put ¶
func (fs S3KeyValue) Put(r Reference, i interface{}) error
type S3Reference ¶
func (S3Reference) String ¶
func (o S3Reference) String() string
func (S3Reference) URI ¶
func (o S3Reference) URI() *url.URL
type StorageCombinator ¶
type VersionRecord ¶
type Versioning ¶
type Versioning struct {
// contains filtered or unexported fields
}
adds versioning to an existing combinator
func NewVersioning ¶
func NewVersioning(c StorageCombinator) *Versioning
usurps the uri fragment for versioning operations
func (Versioning) Delete ¶
func (v Versioning) Delete(r Reference) error
Note: this needs rethinking — it should add a "delete" version rather than actually deleting the data.
func (Versioning) Get ¶
func (v Versioning) Get(r Reference) (interface{}, error)
URI fragment "versions" returns the list of versions; if there is also a "version" query parameter, that specific version is retrieved instead.
func (Versioning) Merge ¶
func (v Versioning) Merge(r Reference, i interface{}) error
func (Versioning) Put ¶
func (v Versioning) Put(r Reference, i interface{}) error
Source Files ¶
- api.go
- appender.go
- base58.go
- cache.go
- db.go
- deferred.go
- encoded.go
- encrypter.go
- file_appender.go
- filesystem.go
- fred.go
- ftp.go
- hashedcontent.go
- interface.go
- keyvalue.go
- lister.go
- logging.go
- memory.go
- multiplexer.go
- passthrough.go
- programmatic.go
- read_only.go
- s3.go
- s3collection.go
- stdio.go
- util.go
- versioning.go