Documentation ¶
Index ¶
- Constants
- Variables
- type Chunk
- type Conf
- type K
- type KeyOp
- type Op
- type Remote
- type Repository
- func (repo *Repository) Combine(r io.Reader, w io.Writer) (err error)
- func (repo *Repository) Fetch(r io.Reader, w io.Writer) (err error)
- func (repo *Repository) ForEach(r io.Reader, fn func(K) error) error
- func (repo *Repository) Git(ctx context.Context, in io.Reader, out io.Writer, args ...string) (err error)
- func (repo *Repository) Install(w io.Writer, conf *Conf) (err error)
- func (repo *Repository) LocalStore() (db *bolt.DB, err error)
- func (repo *Repository) Path(k K, mkdir bool) (p string, err error)
- func (repo *Repository) Pull(ref string, w io.Writer) (err error)
- func (repo *Repository) Push(store *bolt.DB, r io.Reader, remoteName string) (err error)
- func (repo *Repository) Scan(left, right string, w io.Writer) (err error)
- func (repo *Repository) ScanEach(r io.Reader, w io.Writer) (err error)
- func (repo *Repository) Split(r io.Reader, w io.Writer) (err error)
- type S3Remote
Constants ¶
const KeySize = 32
KeySize describes the size of each chunk key
Variables ¶
var ( //PushOp tells a chunk was/is pushed to a remote PushOp = Op("push") //FetchOp tells a chunk was/is fetched from a remote FetchOp = Op("fetch") //StageOp tells a chunk is staged locally StageOp = Op("stage") //IndexOp tells a remote chunk is indexed IndexOp = Op("index") )
var ( //ChunkBufferSize determines the size of the buffer that will hold each chunk ChunkBufferSize = 8 * 1024 * 1024 //8MiB //RemoteBranchSuffix identifies the specialty branches used for persisting remote information RemoteBranchSuffix = "bits-remote" )
var (
ErrAlreadyPushed = fmt.Errorf("chunk is already pushed to the remote")
)
var ( //IndexBucket holds whether chunks are stored remotely IndexBucket = []byte("index") )
var RemoteChunk = []byte{}
RemoteChunk indicates a certain chunk is known but stored remotely
Functions ¶
This section is empty.
Types ¶
type Conf ¶
type Conf struct { //holds the aws s3 bucket name AWSS3BucketName string `json:"aws_s3_bucket_name"` //The aws key that has access to the above bucket AWSAccessKeyID string `json:"aws_access_key_id"` //the aws secret that authorizes access to the s3 bucket AWSSecretAccessKey string `json:"aws_secret_access_key"` //holds the chunking polynomial DeduplicationScope uint64 `json:"deduplication_scope"` }
Conf for the bits repository we're using
func (*Conf) OverwriteFromGit ¶
func (conf *Conf) OverwriteFromGit(repo *Repository) (err error)
OverwriteFromGit will overwrite values based on configuration set through git
type K ¶
K are 32-byte chunk keys, de-duplicated lookups and convergent encryption setup assume this to be a (cryptographic) hash of plain-text chunk content
type KeyOp ¶
type KeyOp struct { Op Op K K Skipped bool CopyN int64 //if any bytes were copied in the operation, its recorded here }
KeyOp describes a key operation
type Remote ¶
type Remote interface { ChunkReader(k K) (rc io.ReadCloser, err error) ChunkWriter(k K) (wc io.WriteCloser, err error) ListChunks(w io.Writer) (err error) }
Remote describes a method for streaming chunk information
type Repository ¶
type Repository struct { //is called when a chunk was handled in any operation, can be called //concurrently KeyProgressFn func(KeyOp, float64) // contains filtered or unexported fields }
Repository provides an abstraction on top of a Git repository for a certain directory that is queried by git commands
func NewRepository ¶
func NewRepository(dir string, output io.Writer) (repo *Repository, err error)
NewRepository sets up an interface on top of a Git repository in the provided directory. It will fail if the git executable is not in the shell's PATH or if the directory doesn't seem to be a Git repository
func (*Repository) Combine ¶
Combine reads a newline-separated list of chunk keys from 'r' and looks each one up in the project's local store. The chunks are then decrypted and combined into the original file, which is written to writer 'w'
func (*Repository) Fetch ¶
Fetch takes a list of chunk keys on reader 'r' and will try to fetch chunks that are not yet stored locally. Chunks that are already stored locally should result in a no-op, all keys (fetched or not) will be written to 'w'.
func (*Repository) ForEach ¶
ForEach is a convenient method for running logic for each chunk key in stream 'r', it will skip the chunk header and footer
func (*Repository) Git ¶
func (repo *Repository) Git(ctx context.Context, in io.Reader, out io.Writer, args ...string) (err error)
Git runs the git executable with the working directory set to the repository directory
func (*Repository) Install ¶
func (repo *Repository) Install(w io.Writer, conf *Conf) (err error)
Install will prepare a git repository for usage with git bits, it configures filters, installs hooks and pulls chunks to write files in the current working tree. A configuration struct can be provided to populate local git configuration for future bits commands
func (*Repository) LocalStore ¶
func (repo *Repository) LocalStore() (db *bolt.DB, err error)
LocalStore will return the local chunk store, creating it in the repository's chunk directory if it doesn't exist yet. It creates the necessary buckets if they don't exist yet
func (*Repository) Path ¶
func (repo *Repository) Path(k K, mkdir bool) (p string, err error)
Path returns the local path to the chunk file based on the key, it can create required directories when 'mkdir' is set to true, in that case err might contain a directory creation failure.
func (*Repository) Pull ¶
func (repo *Repository) Pull(ref string, w io.Writer) (err error)
Pull gets all file paths of blobs that hold chunk keys in the provided ref and combines the chunks in them into their original file, fetching any chunks not currently available in the local store
func (*Repository) Push ¶
Push takes a list of chunk keys on reader 'r' and moves each chunk from the local storage to the remote store with name 'remote'. Prior to pushing the local index of the remote is updated so chunks are not uploaded twice.
func (*Repository) Scan ¶
func (repo *Repository) Scan(left, right string, w io.Writer) (err error)
Scan will traverse git objects between commit 'left' and 'right', it will look for blobs larger than 32 bytes that are also in the clean log. These blobs should contain keys that are written to writer 'w'
type S3Remote ¶
type S3Remote struct {
// contains filtered or unexported fields
}
func NewS3Remote ¶
func NewS3Remote(repo *Repository, remote, bucket, accessKey, secretKey string) (s3 *S3Remote, err error)
func (*S3Remote) ChunkReader ¶
func (s *S3Remote) ChunkReader(k K) (rc io.ReadCloser, err error)
ChunkReader returns a file handle that the chunk with the given key can be read from, the user is expected to close it when finished
func (*S3Remote) ChunkWriter ¶
func (s *S3Remote) ChunkWriter(k K) (wc io.WriteCloser, err error)
ChunkWriter returns a file handle to which a chunk with the given key can be written, the user is expected to close it when finished.
func (*S3Remote) ListChunks ¶
ListChunks will write all chunks in the bucket to writer w