access

package
v0.0.0-...-303e327 Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: May 22, 2023 License: Apache-2.0 Imports: 29 Imported by: 0

Documentation

Index

Constants

View Source
const (
	// HashSize dummy hash size
	HashSize = 0

	// MaxLocationBlobs max blobs length in Location
	MaxLocationBlobs int = 4
	// MaxDeleteLocations max locations of delete request
	MaxDeleteLocations int = 1024
	// MaxBlobSize max blob size for allocation
	MaxBlobSize uint32 = 1 << 25 // 32MB
)

Variables

This section is empty.

Functions

func WithRequestID

func WithRequestID(ctx context.Context, rid interface{}) context.Context

WithRequestID traces the request id through the full life of the request. The second parameter rid could be one of the types below:

a string,
an interface { String() string },
an interface { TraceID() string },
an interface { RequestID() string },

Types

type API

type API interface {
	// Put object once if size is not greater than MaxSizePutOnce, otherwise put blobs one by one.
	// return a location and a map of hash summary bytes you expected.
	//
	// If PutArgs' body is of type *bytes.Buffer, *bytes.Reader, or *strings.Reader,
	// GetBody is populated, and then the once-put request can be retried.
	Put(ctx context.Context, args *PutArgs) (location Location, hashSumMap HashSumMap, err error)
	// Get object, range is supported.
	Get(ctx context.Context, args *GetArgs) (body io.ReadCloser, err error)
	// Delete all blobs in these locations.
	// return failed locations which have not yet been deleted if error is not nil.
	Delete(ctx context.Context, args *DeleteArgs) (failedLocations []Location, err error)
}

API is the access api for s3. To trace the request id, the ctx should preferably be created with WithRequestID(ctx, rid).

func New

func New(cfg Config) (API, error)

New returns an access API

type AllocArgs

type AllocArgs struct {
	Size            uint64            `json:"size"`
	BlobSize        uint32            `json:"blob_size"`
	AssignClusterID proto.ClusterID   `json:"assign_cluster_id"`
	CodeMode        codemode.CodeMode `json:"code_mode"`
}

AllocArgs for service /alloc

func (*AllocArgs) IsValid

func (args *AllocArgs) IsValid() bool

IsValid is valid alloc args

type AllocResp

type AllocResp struct {
	Location Location `json:"location"`
	Tokens   []string `json:"tokens"`
}

AllocResp is the alloc response result with tokens. If size mod blobsize == 0, the length of tokens equals the length of location blobs; otherwise there is an additional token for uploading the last blob.

type Blob

type Blob struct {
	Bid  proto.BlobID
	Vid  proto.Vid
	Size uint32
}

Blob is one piece of data in a location

Bid is the blob id. Vid is the volume the blob is in. Size is the real size of the blob.

type Config

type Config struct {
	// ConnMode rpc connection timeout setting
	ConnMode RPCConnectMode
	// ClientTimeoutMs the whole request and response timeout
	ClientTimeoutMs int64
	// BodyBandwidthMBPs reading body timeout, request or response
	//   timeout = ContentLength/BodyBandwidthMBPs + BodyBaseTimeoutMs
	BodyBandwidthMBPs float64
	// BodyBaseTimeoutMs base timeout for read body
	BodyBaseTimeoutMs int64

	// Consul is consul config for discovering service
	Consul ConsulConfig
	// ServiceIntervalS is interval seconds for discovering service
	ServiceIntervalS int
	// PriorityAddrs priority addrs of access service when retry
	PriorityAddrs []string
	// MaxSizePutOnce max size using once-put object interface
	MaxSizePutOnce int64
	// MaxPartRetry max retry times when putting one part, 0 means forever
	MaxPartRetry int
	// MaxHostRetry max retry hosts of access service
	MaxHostRetry int
	// PartConcurrence concurrence of put parts
	PartConcurrence int

	// rpc selector config
	// Failure retry interval, default value is -1, if FailRetryIntervalS < 0,
	// remove failed hosts will not work.
	FailRetryIntervalS int
	// Within MaxFailsPeriodS, if the number of failures is greater than or equal to MaxFails,
	// the host is considered disconnected.
	MaxFailsPeriodS int
	// HostTryTimes Number of host failure retries
	HostTryTimes int

	// RPCConfig user-defined rpc config
	// All connections will use the config if it's not nil
	// ConnMode will be ignored if rpc config is setting
	RPCConfig *rpc.Config

	// LogLevel client output logging level.
	LogLevel log.Level

	// Logger trace all logging to the logger if setting.
	// It is an io.WriteCloser that writes to the specified filename.
	// YOU should CLOSE it after you do not use the client anymore.
	Logger *Logger
}

Config access client config

type ConsulConfig

type ConsulConfig = api.Config

ConsulConfig is an alias of consul api.Config. Fixup: lets client and sdk use the same config type.

type DeleteArgs

type DeleteArgs struct {
	Locations []Location `json:"locations"`
}

DeleteArgs for service /delete

func (*DeleteArgs) IsValid

func (args *DeleteArgs) IsValid() bool

IsValid is valid delete args

type DeleteBlobArgs

type DeleteBlobArgs struct {
	ClusterID proto.ClusterID `json:"clusterid"`
	Vid       proto.Vid       `json:"volumeid"`
	BlobID    proto.BlobID    `json:"blobid"`
	Size      int64           `json:"size"`
	Token     string          `json:"token"`
}

DeleteBlobArgs for service /deleteblob

func (*DeleteBlobArgs) IsValid

func (args *DeleteBlobArgs) IsValid() bool

IsValid is valid delete blob args

type DeleteResp

type DeleteResp struct {
	FailedLocations []Location `json:"failed_locations,omitempty"`
}

DeleteResp delete response with failed locations

type GetArgs

type GetArgs struct {
	Location Location `json:"location"`
	Offset   uint64   `json:"offset"`
	ReadSize uint64   `json:"read_size"`
}

GetArgs for service /get

func (*GetArgs) IsValid

func (args *GetArgs) IsValid() bool

IsValid is valid get args

type HashAlgorithm

type HashAlgorithm uint8

HashAlgorithm hash.Hash algorithm when uploading data

const (
	HashAlgDummy  HashAlgorithm = 1 << iota
	HashAlgCRC32                // crc32 with IEEE
	HashAlgMD5                  // md5
	HashAlgSHA1                 // sha1
	HashAlgSHA256               // sha256
)

defined hash algorithm

func (HashAlgorithm) ToHashSumMap

func (alg HashAlgorithm) ToHashSumMap() HashSumMap

ToHashSumMap returns a new HashSumMap, decode from rpc url argument

func (HashAlgorithm) ToHasher

func (alg HashAlgorithm) ToHasher() hash.Hash

ToHasher returns a new hash.Hash computing the checksum. The value of the algorithm should be one of HashAlg*.

type HashSumMap

type HashSumMap map[HashAlgorithm][]byte

HashSumMap save checksum in rpc calls

func (HashSumMap) All

func (h HashSumMap) All() map[string]interface{}

All returns readable checksum

func (HashSumMap) GetSum

func (h HashSumMap) GetSum(key HashAlgorithm) (interface{}, bool)

GetSum get checksum value and ok via HashAlgorithm

HashAlgDummy  returns nil, bool
HashAlgCRC32  returns uint32, bool
HashAlgMD5    returns string(32), bool
HashAlgSHA1   returns string(40), bool
HashAlgSHA256 returns string(64), bool

func (HashSumMap) GetSumVal

func (h HashSumMap) GetSumVal(key HashAlgorithm) interface{}

GetSumVal get checksum only value via HashAlgorithm

func (HashSumMap) ToHashAlgorithm

func (h HashSumMap) ToHashAlgorithm() HashAlgorithm

ToHashAlgorithm returns HashAlgorithm, encode to rpc url argument

type HasherMap

type HasherMap map[HashAlgorithm]hash.Hash

HasherMap map hasher of HashAlgorithm

func (HasherMap) ToHashAlgorithm

func (h HasherMap) ToHashAlgorithm() HashAlgorithm

ToHashAlgorithm returns HashAlgorithm

func (HasherMap) ToWriter

func (h HasherMap) ToWriter() io.Writer

ToWriter returns io writer

type Location

type Location struct {
	ClusterID proto.ClusterID   `json:"cluster_id"`
	CodeMode  codemode.CodeMode `json:"code_mode"`
	Size      uint64            `json:"size"`
	BlobSize  uint32            `json:"blob_size"`
	Crc       uint32            `json:"crc"`
	Blobs     []SliceInfo       `json:"blobs"`
	// contains filtered or unexported fields
}

Location is a file location, 4 + 1 + 8 + 4 + 4 + len*16 bytes: | ClusterID(4) | CodeMode(1) | Size(8) | BlobSize(4) | Crc(4) | len*SliceInfo(16) |

ClusterID is which cluster the file is in. CodeMode is the ec encode mode, see the definitions in "common/lib/codemode". Size is the file size. BlobSize is every blob's size except the last one, whose size = (Size mod BlobSize). Crc is the checksum; change anything in the location and the crc will mismatch. Blobs is all blob information.

func DecodeLocation

func DecodeLocation(buf []byte) (Location, int, error)

DecodeLocation parses a location from buf. It returns the Location and the number of bytes read. Error is not nil when parsing failed.

func DecodeLocationFrom

func DecodeLocationFrom(s string) (Location, error)

DecodeLocationFrom decode location from hex string

func DecodeLocationFromBase64

func DecodeLocationFromBase64(s string) (Location, error)

DecodeLocationFromBase64 decode location from base64 string

func DecodeLocationFromHex

func DecodeLocationFromHex(s string) (Location, error)

DecodeLocationFromHex decode location from hex string

func (*Location) Base64String

func (loc *Location) Base64String() string

Base64String transfer location to base64 string

func (*Location) Copy

func (loc *Location) Copy() Location

Copy returns a new same Location

func (*Location) Decode

func (loc *Location) Decode(buf []byte) (int, error)

Decode parses a location from buf. It returns the number of bytes read. Error is not nil when parsing failed.

func (*Location) Encode

func (loc *Location) Encode() []byte

Encode transfers the Location to a byte slice. It returns a newly created buf.

(n) means max-n bytes
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|  field  | crc | clusterid  | codemode |    size     |  blobsize  |
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| n-bytes |  4  | uvarint(5) |    1     | uvarint(10) | uvarint(5) |
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        25   +  (5){len(blobs)}   +   len(Blobs) * 20
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
|  blobs  | minbid | vid | count |           ...                   |
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
| n-bytes |  (10)  | (5) |  (5)  | (20) | (20) |       ...         |
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

func (*Location) Encode2

func (loc *Location) Encode2(buf []byte) int

Encode2 transfers the Location into the supplied buf, which you may reuse yourself. It returns the number of bytes written. If the buffer is too small, Encode2 will panic.

func (*Location) HexString

func (loc *Location) HexString() string

HexString transfer location to hex string

func (*Location) Spread

func (loc *Location) Spread() []Blob

Spread location blobs to slice

func (*Location) ToString

func (loc *Location) ToString() string

ToString transfer location to hex string

type Logger

type Logger = lumberjack.Logger

Logger alias of lumberjack Logger See more at: https://github.com/natefinch/lumberjack

type PutArgs

type PutArgs struct {
	Size   int64         `json:"size"`
	Hashes HashAlgorithm `json:"hashes,omitempty"`
	Body   io.Reader     `json:"-"`
}

PutArgs for service /put. Hashes specifies how to calculate the checksum; e.g. HashAlgCRC32 | HashAlgMD5 equals 2 + 4 = 6.

func (*PutArgs) IsValid

func (args *PutArgs) IsValid() bool

IsValid is valid put args

type PutAtArgs

type PutAtArgs struct {
	ClusterID proto.ClusterID `json:"clusterid"`
	Vid       proto.Vid       `json:"volumeid"`
	BlobID    proto.BlobID    `json:"blobid"`
	Size      int64           `json:"size"`
	Hashes    HashAlgorithm   `json:"hashes,omitempty"`
	Token     string          `json:"token"`
	Body      io.Reader       `json:"-"`
}

PutAtArgs for service /putat

func (*PutAtArgs) IsValid

func (args *PutAtArgs) IsValid() bool

IsValid is valid putat args

type PutAtResp

type PutAtResp struct {
	HashSumMap HashSumMap `json:"hashsum"`
}

PutAtResp putat response result

type PutResp

type PutResp struct {
	Location   Location   `json:"location"`
	HashSumMap HashSumMap `json:"hashsum"`
}

PutResp put response result

type RPCConnectMode

type RPCConnectMode uint8

RPCConnectMode self-defined rpc client connection config setting

const (
	DefaultConnMode RPCConnectMode = iota
	QuickConnMode
	GeneralConnMode
	SlowConnMode
	NoLimitConnMode
)

timeout: [short - - - - - - - - -> long]

quick --> general --> default --> slow --> nolimit

speed: 40MB --> 20MB --> 10MB --> 4MB --> nolimit

type SignArgs

type SignArgs struct {
	Locations []Location `json:"locations"`
	Location  Location   `json:"location"`
}

SignArgs for service /sign. Locations are signed locations obtained from /alloc. Location is the to-be-signed location which you merged yourself.

func (*SignArgs) IsValid

func (args *SignArgs) IsValid() bool

IsValid is valid sign args

type SignResp

type SignResp struct {
	Location Location `json:"location"`
}

SignResp sign response location with crc

type SliceInfo

type SliceInfo struct {
	MinBid proto.BlobID `json:"min_bid"`
	Vid    proto.Vid    `json:"vid"`
	Count  uint32       `json:"count"`
	// contains filtered or unexported fields
}

SliceInfo blobs info, 8 + 4 + 4 bytes

MinBid is the first blob id. Vid is the volume all the blobs are in. Count is the number of consecutive blob ids; count=1 means just one blob.

blob ids = [MinBid, MinBid+count)

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL