pachyderm: github.com/pachyderm/pachyderm/src/server/pkg/storage/chunk

package chunk

import "github.com/pachyderm/pachyderm/src/server/pkg/storage/chunk"

Package Files

chunk.pb.go reader.go storage.go util.go writer.go

Constants

const (
    // MB is Megabytes.
    MB  = 1024 * 1024
    // WindowSize is the size of the rolling hash window.
    WindowSize = 64
)

Variables

var (
    ErrInvalidLengthChunk        = fmt.Errorf("proto: negative length found during unmarshaling")
    ErrIntOverflowChunk          = fmt.Errorf("proto: integer overflow")
    ErrUnexpectedEndOfGroupChunk = fmt.Errorf("proto: unexpected end of group")
)

func Cleanup

func Cleanup(objC obj.Client, chunks *Storage)

Cleanup cleans up a local chunk storage instance.

func RandSeq

func RandSeq(n int) []byte

RandSeq generates a random sequence of data (n is the number of bytes).

type Annotation

type Annotation struct {
    Offset      int64
    RefDataRefs []*DataRef
    NextDataRef *DataRef
    Meta        interface{}
}

Annotation is used to associate information with a set of bytes written into the chunk storage layer.
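A minimal sketch of how an annotation might be attached before writing a region of data; the helper name, the use of a file path as Meta, and the surrounding writer are illustrative, not part of the package:

package example

import (
    "github.com/pachyderm/pachyderm/src/server/pkg/storage/chunk"
)

// writeAnnotated is a hypothetical helper: it tags the bytes that follow
// with caller-defined metadata (here a file path) before writing them, so
// the WriterFunc callback can tell which chunks belong to which data.
func writeAnnotated(w *chunk.Writer, path string, data []byte) error {
    w.Annotate(&chunk.Annotation{
        Meta: path, // Meta is an opaque interface{}; any caller-defined value works
    })
    _, err := w.Write(data)
    return err
}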

type Chunk

type Chunk struct {
    Hash                 string   `protobuf:"bytes,1,opt,name=hash,proto3" json:"hash,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

func (*Chunk) Descriptor

func (*Chunk) Descriptor() ([]byte, []int)

func (*Chunk) GetHash

func (m *Chunk) GetHash() string

func (*Chunk) Marshal

func (m *Chunk) Marshal() (dAtA []byte, err error)

func (*Chunk) MarshalTo

func (m *Chunk) MarshalTo(dAtA []byte) (int, error)

func (*Chunk) MarshalToSizedBuffer

func (m *Chunk) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*Chunk) ProtoMessage

func (*Chunk) ProtoMessage()

func (*Chunk) Reset

func (m *Chunk) Reset()

func (*Chunk) Size

func (m *Chunk) Size() (n int)

func (*Chunk) String

func (m *Chunk) String() string

func (*Chunk) Unmarshal

func (m *Chunk) Unmarshal(dAtA []byte) error

func (*Chunk) XXX_DiscardUnknown

func (m *Chunk) XXX_DiscardUnknown()

func (*Chunk) XXX_Marshal

func (m *Chunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Chunk) XXX_Merge

func (m *Chunk) XXX_Merge(src proto.Message)

func (*Chunk) XXX_Size

func (m *Chunk) XXX_Size() int

func (*Chunk) XXX_Unmarshal

func (m *Chunk) XXX_Unmarshal(b []byte) error

type Copy

type Copy struct {
    // contains filtered or unexported fields
}

Copy is the basic data structure for representing a copy of data from a reader to a writer. The before/after fields hold the raw bytes that precede/follow the full chunks in the set of bytes represented by the copy.

type DataRef

type DataRef struct {
    // The chunk the referenced data is located in.
    Chunk *Chunk `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"`
    // The hash of the data being referenced.
    // This field is empty when it is equal to the chunk hash (the ref is the whole chunk).
    Hash string `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
    // The offset and size used for accessing the data within the chunk.
    OffsetBytes          int64    `protobuf:"varint,3,opt,name=offset_bytes,json=offsetBytes,proto3" json:"offset_bytes,omitempty"`
    SizeBytes            int64    `protobuf:"varint,4,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"`
    XXX_NoUnkeyedLiteral struct{} `json:"-"`
    XXX_unrecognized     []byte   `json:"-"`
    XXX_sizecache        int32    `json:"-"`
}

DataRef is a reference to data within a chunk.
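Since Hash is left empty when a reference covers a whole chunk, callers may want a small helper to resolve the effective hash of the referenced data; the helper below is illustrative:

package example

import "github.com/pachyderm/pachyderm/src/server/pkg/storage/chunk"

// refHash is an illustrative helper: per the field comments above, Hash is
// empty when the reference is the whole chunk, in which case the chunk's
// own hash identifies the data.
func refHash(ref *chunk.DataRef) string {
    if ref.Hash != "" {
        return ref.Hash
    }
    return ref.GetChunk().GetHash()
}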

func (*DataRef) Descriptor

func (*DataRef) Descriptor() ([]byte, []int)

func (*DataRef) GetChunk

func (m *DataRef) GetChunk() *Chunk

func (*DataRef) GetHash

func (m *DataRef) GetHash() string

func (*DataRef) GetOffsetBytes

func (m *DataRef) GetOffsetBytes() int64

func (*DataRef) GetSizeBytes

func (m *DataRef) GetSizeBytes() int64

func (*DataRef) Marshal

func (m *DataRef) Marshal() (dAtA []byte, err error)

func (*DataRef) MarshalTo

func (m *DataRef) MarshalTo(dAtA []byte) (int, error)

func (*DataRef) MarshalToSizedBuffer

func (m *DataRef) MarshalToSizedBuffer(dAtA []byte) (int, error)

func (*DataRef) ProtoMessage

func (*DataRef) ProtoMessage()

func (*DataRef) Reset

func (m *DataRef) Reset()

func (*DataRef) Size

func (m *DataRef) Size() (n int)

func (*DataRef) String

func (m *DataRef) String() string

func (*DataRef) Unmarshal

func (m *DataRef) Unmarshal(dAtA []byte) error

func (*DataRef) XXX_DiscardUnknown

func (m *DataRef) XXX_DiscardUnknown()

func (*DataRef) XXX_Marshal

func (m *DataRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DataRef) XXX_Merge

func (m *DataRef) XXX_Merge(src proto.Message)

func (*DataRef) XXX_Size

func (m *DataRef) XXX_Size() int

func (*DataRef) XXX_Unmarshal

func (m *DataRef) XXX_Unmarshal(b []byte) error

type Reader

type Reader struct {
    // contains filtered or unexported fields
}

Reader reads a set of DataRefs from chunk storage.

func (*Reader) Close

func (r *Reader) Close() error

Close closes the reader. Currently a no-op, but will be used when streaming is implemented.

func (*Reader) Len

func (r *Reader) Len() int64

Len returns the number of bytes left.

func (*Reader) NextRange

func (r *Reader) NextRange(dataRefs []*DataRef)

NextRange sets the next range for the reader.

func (*Reader) OnSplit

func (r *Reader) OnSplit(f func())

OnSplit registers a callback that is executed when a chunk split point is encountered. The callback only fires at split points found after WindowSize bytes have been read, which guarantees that the same split point will appear in the writer the data is being written to.
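An illustrative sketch of registering a split callback while draining a reader; it assumes the caller has already supplied data references (via NextRange or a ReaderFunc) and that Read returns io.EOF at the end of the stream:

package example

import (
    "io"
    "io/ioutil"

    "github.com/pachyderm/pachyderm/src/server/pkg/storage/chunk"
)

// countSplits is an illustrative helper: it registers an OnSplit callback
// and drains the reader, returning how many split points were observed.
func countSplits(r *chunk.Reader) (int, error) {
    var splits int
    r.OnSplit(func() { splits++ })
    if _, err := io.Copy(ioutil.Discard, r); err != nil {
        return 0, err
    }
    return splits, nil
}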

func (*Reader) Read

func (r *Reader) Read(data []byte) (int, error)

Read reads from the byte stream produced by the set of DataRefs.

func (*Reader) ReadCopy

func (r *Reader) ReadCopy(n ...int64) (*Copy, error)

ReadCopy reads copy data from the reader.

type ReaderFunc

type ReaderFunc func() ([]*DataRef, error)

ReaderFunc is a callback that returns the next set of data references to a reader.
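A sketch of a possible ReaderFunc built over pre-collected batches of data references; returning io.EOF when the batches are exhausted is an assumption about how the reader detects the end of the stream:

package example

import (
    "io"

    "github.com/pachyderm/pachyderm/src/server/pkg/storage/chunk"
)

// batchedReaderFunc is an illustrative ReaderFunc: each call hands the
// reader the next batch of data references. The io.EOF return when the
// batches run out is an assumption of this sketch.
func batchedReaderFunc(batches [][]*chunk.DataRef) chunk.ReaderFunc {
    return func() ([]*chunk.DataRef, error) {
        if len(batches) == 0 {
            return nil, io.EOF
        }
        next := batches[0]
        batches = batches[1:]
        return next, nil
    }
}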

type Storage

type Storage struct {
    // contains filtered or unexported fields
}

Storage is the abstraction that manages chunk storage.

func LocalStorage

func LocalStorage(tb testing.TB) (obj.Client, *Storage)

LocalStorage creates a local chunk storage instance. Useful for storage layer tests.
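A sketch of the intended test setup, pairing LocalStorage with Cleanup:

package chunk_test

import (
    "testing"

    "github.com/pachyderm/pachyderm/src/server/pkg/storage/chunk"
)

// TestExample sketches the setup/teardown pattern: LocalStorage provides a
// throwaway object client and chunk storage, and Cleanup tears them down.
func TestExample(t *testing.T) {
    objC, chunks := chunk.LocalStorage(t)
    defer chunk.Cleanup(objC, chunks)
    // ... exercise chunks (NewWriter, NewReader, List, ...) ...
}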

func NewStorage

func NewStorage(objC obj.Client, memoryLimit int64) *Storage

NewStorage creates a new Storage.
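A minimal sketch of wiring an object-storage client into chunk storage; the obj import path is inferred from the sibling storage packages, and the 100 MB memory limit is an arbitrary example value:

package example

import (
    "github.com/pachyderm/pachyderm/src/server/pkg/storage/chunk"
    "github.com/pachyderm/pachyderm/src/server/pkg/storage/obj"
)

// newChunkStorage is an illustrative constructor: it wraps an existing
// object-storage client with chunk storage, passing a 100 MB memory limit.
func newChunkStorage(objC obj.Client) *chunk.Storage {
    return chunk.NewStorage(objC, 100*chunk.MB)
}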

func (*Storage) Delete

func (s *Storage) Delete(ctx context.Context, hash string) error

Delete deletes a chunk in object storage.

func (*Storage) DeleteAll

func (s *Storage) DeleteAll(ctx context.Context) error

DeleteAll deletes all of the chunks in object storage.

func (*Storage) List

func (s *Storage) List(ctx context.Context, f func(string) error) error

List lists all of the chunks in object storage.

func (*Storage) NewReader

func (s *Storage) NewReader(ctx context.Context, f ...ReaderFunc) *Reader

NewReader creates an io.ReadCloser for a chunk. TODO (bryce): the whole chunk is held in memory right now, which could be a problem for concurrency, particularly the merge process; concurrency may need to be handled here (pass in multiple data refs).
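A sketch of reading a stream back from stored data references, assuming Read returns io.EOF once the supplied range is exhausted:

package example

import (
    "context"
    "io/ioutil"

    "github.com/pachyderm/pachyderm/src/server/pkg/storage/chunk"
)

// readAll is an illustrative helper: it creates a reader, points it at a
// set of data references (for example those recorded by a WriterFunc),
// and returns the reassembled byte stream.
func readAll(ctx context.Context, chunks *chunk.Storage, dataRefs []*chunk.DataRef) ([]byte, error) {
    r := chunks.NewReader(ctx)
    defer r.Close()
    r.NextRange(dataRefs)
    return ioutil.ReadAll(r)
}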

func (*Storage) NewWriter

func (s *Storage) NewWriter(ctx context.Context, averageBits int, f WriterFunc, seed int64) *Writer

NewWriter creates an io.WriteCloser for a stream of bytes to be chunked. Chunks are created based on the content, then hashed and deduplicated/uploaded to object storage. The WriterFunc callback is passed a data reference to each chunk and the annotations within it.
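A sketch of an end-to-end write that collects the data references produced for each chunk; the averageBits and seed values are arbitrary example choices:

package example

import (
    "context"

    "github.com/pachyderm/pachyderm/src/server/pkg/storage/chunk"
)

// writeAll is an illustrative helper: it chunks and uploads data, and
// collects the data references produced for each chunk so the stream can
// be read back later.
func writeAll(ctx context.Context, chunks *chunk.Storage, data []byte) ([]*chunk.DataRef, error) {
    var dataRefs []*chunk.DataRef
    cb := func(ref *chunk.DataRef, _ []*chunk.Annotation) error {
        dataRefs = append(dataRefs, ref)
        return nil
    }
    w := chunks.NewWriter(ctx, 23, cb, 1) // averageBits=23 and seed=1 are arbitrary
    if _, err := w.Write(data); err != nil {
        return nil, err
    }
    if err := w.Close(); err != nil {
        return nil, err
    }
    return dataRefs, nil
}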

type Writer

type Writer struct {
    // contains filtered or unexported fields
}

Writer splits a byte stream into content-defined chunks that are hashed and deduplicated/uploaded to object storage. Chunk split points are determined by a bit pattern in a rolling hash function (buzhash64 at https://github.com/chmduquesne/rollinghash). The byte stream is split into byte sets for parallel processing. Workers roll the hash function over these byte sets and execute the writer function on them. The workers are daisy-chained so that split points spanning byte sets can be resolved by shuffling bytes between workers in the chain, and so that the writer function is executed on the chunks in their sequential order within the byte stream.

func (*Writer) Annotate

func (w *Writer) Annotate(a *Annotation)

Annotate associates an annotation with the current byte set.

func (*Writer) AnnotatedBytesSize

func (w *Writer) AnnotatedBytesSize() int64

AnnotatedBytesSize returns the size of the bytes for the current annotation.

func (*Writer) ChunkCount

func (w *Writer) ChunkCount() int64

ChunkCount returns the number of chunks created/referenced by the writer.

func (*Writer) Close

func (w *Writer) Close() error

Close closes the writer.

func (*Writer) Copy

func (w *Writer) Copy(r *Reader, n ...int64) error

Copy does a cheap copy from a reader to a writer.
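A sketch of one plausible use, assuming that a cheap copy means full chunks are referenced rather than re-chunked and re-uploaded:

package example

import "github.com/pachyderm/pachyderm/src/server/pkg/storage/chunk"

// copyRange is an illustrative helper: it points the reader at an existing
// set of data references and copies them into the writer via Copy.
func copyRange(w *chunk.Writer, r *chunk.Reader, dataRefs []*chunk.DataRef) error {
    r.NextRange(dataRefs)
    return w.Copy(r)
}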

func (*Writer) Flush

func (w *Writer) Flush() error

Flush flushes the buffered data.

func (*Writer) Reset

func (w *Writer) Reset()

Reset resets the buffer and annotations.

func (*Writer) Write

func (w *Writer) Write(data []byte) (int, error)

Write rolls through the data written, calling the writer function when a chunk is found. Note: if making changes to this function, be wary of the performance implications (check performance with the chunker benchmarks before and after).

func (*Writer) WriteCopy

func (w *Writer) WriteCopy(c *Copy) error

WriteCopy writes copy data to the writer.

type WriterFunc

type WriterFunc func(*DataRef, []*Annotation) error

WriterFunc is a callback that returns a data reference to the next chunk and the annotations within the chunk.
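A sketch of a WriterFunc that indexes chunks by the annotations that reference them; treating Annotation.Meta as a string key is an assumption of this example:

package example

import "github.com/pachyderm/pachyderm/src/server/pkg/storage/chunk"

// indexByMeta is an illustrative WriterFunc: for every chunk it records
// which annotations (for example per-file metadata) reference that chunk.
func indexByMeta(index map[string][]*chunk.DataRef) chunk.WriterFunc {
    return func(ref *chunk.DataRef, annotations []*chunk.Annotation) error {
        for _, a := range annotations {
            if key, ok := a.Meta.(string); ok {
                index[key] = append(index[key], ref)
            }
        }
        return nil
    }
}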

Package chunk imports 20 packages and is imported by 2 packages. Updated 2019-10-19.