internal

package
v0.0.0-...-86c6960
Published: Mar 28, 2023 License: Apache-2.0 Imports: 57 Imported by: 0

Documentation

Index

Constants

const (
	PG        = "PG"
	SQLSERVER = "SQLSERVER"
	MYSQL     = "MYSQL"
	REDIS     = "REDIS"
	FDB       = "FDB"
	MONGO     = "MONGO"
	GP        = "GP"

	DownloadConcurrencySetting   = "WALG_DOWNLOAD_CONCURRENCY"
	UploadConcurrencySetting     = "WALG_UPLOAD_CONCURRENCY"
	UploadDiskConcurrencySetting = "WALG_UPLOAD_DISK_CONCURRENCY"
	UploadQueueSetting           = "WALG_UPLOAD_QUEUE"
	SentinelUserDataSetting      = "WALG_SENTINEL_USER_DATA"
	PreventWalOverwriteSetting   = "WALG_PREVENT_WAL_OVERWRITE"
	UploadWalMetadata            = "WALG_UPLOAD_WAL_METADATA"
	DeltaMaxStepsSetting         = "WALG_DELTA_MAX_STEPS"
	DeltaOriginSetting           = "WALG_DELTA_ORIGIN"
	CompressionMethodSetting     = "WALG_COMPRESSION_METHOD"
	StoragePrefixSetting         = "WALG_STORAGE_PREFIX"
	DiskRateLimitSetting         = "WALG_DISK_RATE_LIMIT"
	NetworkRateLimitSetting      = "WALG_NETWORK_RATE_LIMIT"
	UseWalDeltaSetting           = "WALG_USE_WAL_DELTA"
	UseReverseUnpackSetting      = "WALG_USE_REVERSE_UNPACK"
	SkipRedundantTarsSetting     = "WALG_SKIP_REDUNDANT_TARS"
	VerifyPageChecksumsSetting   = "WALG_VERIFY_PAGE_CHECKSUMS"
	StoreAllCorruptBlocksSetting = "WALG_STORE_ALL_CORRUPT_BLOCKS"
	UseRatingComposerSetting     = "WALG_USE_RATING_COMPOSER"
	UseCopyComposerSetting       = "WALG_USE_COPY_COMPOSER"
	UseDatabaseComposerSetting   = "WALG_USE_DATABASE_COMPOSER"
	WithoutFilesMetadataSetting  = "WALG_WITHOUT_FILES_METADATA"
	DeltaFromNameSetting         = "WALG_DELTA_FROM_NAME"
	DeltaFromUserDataSetting     = "WALG_DELTA_FROM_USER_DATA"
	FetchTargetUserDataSetting   = "WALG_FETCH_TARGET_USER_DATA"
	LogLevelSetting              = "WALG_LOG_LEVEL"
	TarSizeThresholdSetting      = "WALG_TAR_SIZE_THRESHOLD"
	TarDisableFsyncSetting       = "WALG_TAR_DISABLE_FSYNC"
	CseKmsIDSetting              = "WALG_CSE_KMS_ID"
	CseKmsRegionSetting          = "WALG_CSE_KMS_REGION"
	LibsodiumKeySetting          = "WALG_LIBSODIUM_KEY"
	LibsodiumKeyPathSetting      = "WALG_LIBSODIUM_KEY_PATH"
	LibsodiumKeyTransform        = "WALG_LIBSODIUM_KEY_TRANSFORM"
	GpgKeyIDSetting              = "GPG_KEY_ID"
	PgpKeySetting                = "WALG_PGP_KEY"
	PgpKeyPathSetting            = "WALG_PGP_KEY_PATH"
	PgpKeyPassphraseSetting      = "WALG_PGP_KEY_PASSPHRASE"
	PgDataSetting                = "PGDATA"
	UserSetting                  = "USER" // TODO : do something with it
	PgPortSetting                = "PGPORT"
	PgUserSetting                = "PGUSER"
	PgHostSetting                = "PGHOST"
	PgPasswordSetting            = "PGPASSWORD"
	PgPassfileSetting            = "PGPASSFILE"
	PgDatabaseSetting            = "PGDATABASE"
	PgSslModeSetting             = "PGSSLMODE"
	PgSlotName                   = "WALG_SLOTNAME"
	PgWalSize                    = "WALG_PG_WAL_SIZE"
	TotalBgUploadedLimit         = "TOTAL_BG_UPLOADED_LIMIT"
	NameStreamCreateCmd          = "WALG_STREAM_CREATE_COMMAND"
	NameStreamRestoreCmd         = "WALG_STREAM_RESTORE_COMMAND"
	MaxDelayedSegmentsCount      = "WALG_INTEGRITY_MAX_DELAYED_WALS"
	PrefetchDir                  = "WALG_PREFETCH_DIR"
	PgReadyRename                = "PG_READY_RENAME"
	SerializerTypeSetting        = "WALG_SERIALIZER_TYPE"
	StreamSplitterPartitions     = "WALG_STREAM_SPLITTER_PARTITIONS"
	StreamSplitterBlockSize      = "WALG_STREAM_SPLITTER_BLOCK_SIZE"
	StreamSplitterMaxFileSize    = "WALG_STREAM_SPLITTER_MAX_FILE_SIZE"
	StatsdAddressSetting         = "WALG_STATSD_ADDRESS"
	PgAliveCheckInterval         = "WALG_ALIVE_CHECK_INTERVAL"
	PgStopBackupTimeout          = "WALG_STOP_BACKUP_TIMEOUT"

	ProfileSamplingRatio = "PROFILE_SAMPLING_RATIO"
	ProfileMode          = "PROFILE_MODE"
	ProfilePath          = "PROFILE_PATH"

	MongoDBUriSetting               = "MONGODB_URI"
	MongoDBLastWriteUpdateInterval  = "MONGODB_LAST_WRITE_UPDATE_INTERVAL"
	OplogArchiveAfterSize           = "OPLOG_ARCHIVE_AFTER_SIZE"
	OplogArchiveTimeoutInterval     = "OPLOG_ARCHIVE_TIMEOUT_INTERVAL"
	OplogPITRDiscoveryInterval      = "OPLOG_PITR_DISCOVERY_INTERVAL"
	OplogPushStatsEnabled           = "OPLOG_PUSH_STATS_ENABLED"
	OplogPushStatsLoggingInterval   = "OPLOG_PUSH_STATS_LOGGING_INTERVAL"
	OplogPushStatsUpdateInterval    = "OPLOG_PUSH_STATS_UPDATE_INTERVAL"
	OplogPushStatsExposeHTTP        = "OPLOG_PUSH_STATS_EXPOSE_HTTP"
	OplogPushWaitForBecomePrimary   = "OPLOG_PUSH_WAIT_FOR_BECOME_PRIMARY"
	OplogPushPrimaryCheckInterval   = "OPLOG_PUSH_PRIMARY_CHECK_INTERVAL"
	OplogReplayOplogAlwaysUpsert    = "OPLOG_REPLAY_OPLOG_ALWAYS_UPSERT"
	OplogReplayOplogApplicationMode = "OPLOG_REPLAY_OPLOG_APPLICATION_MODE"
	OplogReplayIgnoreErrorCodes     = "OPLOG_REPLAY_IGNORE_ERROR_CODES"

	MysqlDatasourceNameSetting     = "WALG_MYSQL_DATASOURCE_NAME"
	MysqlSslCaSetting              = "WALG_MYSQL_SSL_CA"
	MysqlBinlogReplayCmd           = "WALG_MYSQL_BINLOG_REPLAY_COMMAND"
	MysqlBinlogDstSetting          = "WALG_MYSQL_BINLOG_DST"
	MysqlBackupPrepareCmd          = "WALG_MYSQL_BACKUP_PREPARE_COMMAND"
	MysqlTakeBinlogsFromMaster     = "WALG_MYSQL_TAKE_BINLOGS_FROM_MASTER"
	MysqlCheckGTIDs                = "WALG_MYSQL_CHECK_GTIDS"
	MysqlBinlogServerHost          = "WALG_MYSQL_BINLOG_SERVER_HOST"
	MysqlBinlogServerPort          = "WALG_MYSQL_BINLOG_SERVER_PORT"
	MysqlBinlogServerUser          = "WALG_MYSQL_BINLOG_SERVER_USER"
	MysqlBinlogServerPassword      = "WALG_MYSQL_BINLOG_SERVER_PASSWORD"
	MysqlBinlogServerID            = "WALG_MYSQL_BINLOG_SERVER_ID"
	MysqlBinlogServerReplicaSource = "WALG_MYSQL_BINLOG_SERVER_REPLICA_SOURCE"
	MysqlBackupDownloadMaxRetry    = "WALG_BACKUP_DOWNLOAD_MAX_RETRY"

	RedisPassword = "WALG_REDIS_PASSWORD"

	GPLogsDirectory        = "WALG_GP_LOGS_DIR"
	GPSegContentID         = "WALG_GP_SEG_CONTENT_ID"
	GPSegmentsPollInterval = "WALG_GP_SEG_POLL_INTERVAL"
	GPSegmentsPollRetries  = "WALG_GP_SEG_POLL_RETRIES"
	GPSegmentsUpdInterval  = "WALG_GP_SEG_UPD_INTERVAL"
	GPSegmentStatesDir     = "WALG_GP_SEG_STATES_DIR"
	GPDeleteConcurrency    = "WALG_GP_DELETE_CONCURRENCY"
	GPAoSegSizeThreshold   = "WALG_GP_AOSEG_SIZE_THRESHOLD"

	GoMaxProcs = "GOMAXPROCS"

	HTTPListen       = "HTTP_LISTEN"
	HTTPExposePprof  = "HTTP_EXPOSE_PPROF"
	HTTPExposeExpVar = "HTTP_EXPOSE_EXPVAR"

	SQLServerBlobHostname     = "SQLSERVER_BLOB_HOSTNAME"
	SQLServerBlobCertFile     = "SQLSERVER_BLOB_CERT_FILE"
	SQLServerBlobKeyFile      = "SQLSERVER_BLOB_KEY_FILE"
	SQLServerBlobLockFile     = "SQLSERVER_BLOB_LOCK_FILE"
	SQLServerConnectionString = "SQLSERVER_CONNECTION_STRING"
	SQLServerDBConcurrency    = "SQLSERVER_DB_CONCURRENCY"
	SQLServerReuseProxy       = "SQLSERVER_REUSE_PROXY"

	EndpointSourceSetting = "S3_ENDPOINT_SOURCE"
	EndpointPortSetting   = "S3_ENDPOINT_PORT"

	AwsAccessKeyID     = "AWS_ACCESS_KEY_ID"
	AwsSecretAccessKey = "AWS_SECRET_ACCESS_KEY"
	AwsSessionToken    = "AWS_SESSION_TOKEN"

	YcKmsKeyIDSetting  = "YC_CSE_KMS_KEY_ID"
	YcSaKeyFileSetting = "YC_SERVICE_ACCOUNT_KEY_FILE"

	PgBackRestStanza = "PGBACKREST_STANZA"

	AzureStorageAccount   = "AZURE_STORAGE_ACCOUNT"
	AzureStorageAccessKey = "AZURE_STORAGE_ACCESS_KEY"
	AzureStorageSasToken  = "AZURE_STORAGE_SAS_TOKEN"
	AzureEnvironmentName  = "AZURE_ENVIRONMENT_NAME"

	GoogleApplicationCredentials = "GOOGLE_APPLICATION_CREDENTIALS"

	SwiftOsAuthURL    = "OS_AUTH_URL"
	SwiftOsUsername   = "OS_USERNAME"
	SwiftOsPassword   = "OS_PASSWORD"
	SwiftOsTenantName = "OS_TENANT_NAME"
	SwiftOsRegionName = "OS_REGION_NAME"

	SSHPort           = "SSH_PORT"
	SSHPassword       = "SSH_PASSWORD"
	SSHUsername       = "SSH_USERNAME"
	SSHPrivateKeyPath = "SSH_PRIVATE_KEY_PATH"

	SystemdNotifySocket = "NOTIFY_SOCKET"
)
const (
	DefaultDataBurstRateLimit = 8 * pgDefaultDatabasePageSize
	DefaultDataFolderPath     = "/tmp"
	WaleFileHost              = "file://localhost"
)
const (
	NoDeleteModifier = iota
	FullDeleteModifier
	FindFullDeleteModifier
	ForceDeleteModifier
	ConfirmFlag            = "confirm"
	DeleteShortDescription = "Clears old backups and WALs"

	DeleteRetainExamples = `` /* 321-byte string literal not displayed */

	DeleteBeforeExamples = `` /* 156-byte string literal not displayed */

	DeleteEverythingExamples = `` /* 154-byte string literal not displayed */

	DeleteTargetExamples = `` /* 420-byte string literal not displayed */

	DeleteEverythingUsageExample = "everything [FORCE]"
	DeleteRetainUsageExample     = "retain [FULL|FIND_FULL] backup_count"
	DeleteBeforeUsageExample     = "before [FIND_FULL] backup_name|timestamp"
	DeleteTargetUsageExample     = "target [FIND_FULL] backup_name | --target-user-data <data>"

	DeleteTargetUserDataFlag        = "target-user-data"
	DeleteTargetUserDataDescription = "delete storage backup which has the specified user data"
)
const (
	SplitMergeStreamBackup   = "SPLIT_MERGE_STREAM_BACKUP"
	SingleStreamStreamBackup = "STREAM_BACKUP"
)
const (
	StreamPrefix           = "stream_"
	StreamBackupNameLength = 23 // len(StreamPrefix) + len(utility.BackupTimeFormat)
)
const LatestString = "LATEST"
const MaxCorruptBlocksInFileDesc int = 10
const MinAllowedConcurrency = 1
const TarPartitionFolderName = "/tar_partitions/"

Variables

var (
	CfgFile string

	MongoDefaultSettings = map[string]string{
		OplogPushStatsLoggingInterval:  "30s",
		OplogPushStatsUpdateInterval:   "30s",
		OplogPushWaitForBecomePrimary:  "false",
		OplogPushPrimaryCheckInterval:  "30s",
		OplogArchiveTimeoutInterval:    "60s",
		OplogArchiveAfterSize:          "16777216",
		MongoDBLastWriteUpdateInterval: "3s",
		StreamSplitterBlockSize:        "1048576",
	}

	MysqlDefaultSettings = map[string]string{
		StreamSplitterBlockSize:     "1048576",
		MysqlBackupDownloadMaxRetry: "1",
	}

	SQLServerDefaultSettings = map[string]string{
		SQLServerDBConcurrency: "10",
	}

	PGDefaultSettings = map[string]string{
		PgWalSize:        "16",
		PgBackRestStanza: "main",
	}

	GPDefaultSettings = map[string]string{
		GPLogsDirectory:        "/var/log",
		PgWalSize:              "64",
		GPSegmentsPollInterval: "5m",
		GPSegmentsUpdInterval:  "10s",
		GPSegmentsPollRetries:  "5",
		GPSegmentStatesDir:     "/tmp",
		GPDeleteConcurrency:    "1",
		GPAoSegSizeThreshold:   "1048576",
	}

	AllowedSettings map[string]bool

	CommonAllowedSettings = map[string]bool{}/* 103 elements not displayed */

	PGAllowedSettings = map[string]bool{

		PgPortSetting:        true,
		PgUserSetting:        true,
		PgHostSetting:        true,
		PgDataSetting:        true,
		PgPasswordSetting:    true,
		PgPassfileSetting:    true,
		PgDatabaseSetting:    true,
		PgSslModeSetting:     true,
		PgSlotName:           true,
		PgWalSize:            true,
		PrefetchDir:          true,
		PgReadyRename:        true,
		PgBackRestStanza:     true,
		PgAliveCheckInterval: true,
		PgStopBackupTimeout:  true,
	}

	MongoAllowedSettings = map[string]bool{

		MongoDBUriSetting:              true,
		MongoDBLastWriteUpdateInterval: true,
		OplogArchiveTimeoutInterval:    true,
		OplogArchiveAfterSize:          true,
		OplogPushStatsEnabled:          true,
		OplogPushStatsLoggingInterval:  true,
		OplogPushStatsUpdateInterval:   true,
		OplogPushStatsExposeHTTP:       true,
		OplogPushWaitForBecomePrimary:  true,
		OplogPushPrimaryCheckInterval:  true,
		OplogPITRDiscoveryInterval:     true,
		StreamSplitterBlockSize:        true,
		StreamSplitterPartitions:       true,
	}

	SQLServerAllowedSettings = map[string]bool{

		SQLServerBlobHostname:     true,
		SQLServerBlobCertFile:     true,
		SQLServerBlobKeyFile:      true,
		SQLServerBlobLockFile:     true,
		SQLServerConnectionString: true,
		SQLServerDBConcurrency:    true,
		SQLServerReuseProxy:       true,
	}

	MysqlAllowedSettings = map[string]bool{

		MysqlDatasourceNameSetting:     true,
		MysqlSslCaSetting:              true,
		MysqlBinlogReplayCmd:           true,
		MysqlBinlogDstSetting:          true,
		MysqlBackupPrepareCmd:          true,
		MysqlTakeBinlogsFromMaster:     true,
		MysqlCheckGTIDs:                true,
		StreamSplitterPartitions:       true,
		StreamSplitterBlockSize:        true,
		StreamSplitterMaxFileSize:      true,
		MysqlBinlogServerHost:          true,
		MysqlBinlogServerPort:          true,
		MysqlBinlogServerUser:          true,
		MysqlBinlogServerPassword:      true,
		MysqlBinlogServerID:            true,
		MysqlBinlogServerReplicaSource: true,
		MysqlBackupDownloadMaxRetry:    true,
	}

	RedisAllowedSettings = map[string]bool{

		RedisPassword: true,
	}

	GPAllowedSettings = map[string]bool{
		GPLogsDirectory:        true,
		GPSegContentID:         true,
		GPSegmentsPollRetries:  true,
		GPSegmentsPollInterval: true,
		GPSegmentsUpdInterval:  true,
		GPSegmentStatesDir:     true,
		GPDeleteConcurrency:    true,
		GPAoSegSizeThreshold:   true,
	}

	RequiredSettings       = make(map[string]bool)
	HTTPSettingExposeFuncs = map[string]func(webserver.WebServer){
		HTTPExposePprof:          webserver.EnablePprofEndpoints,
		HTTPExposeExpVar:         webserver.EnableExpVarEndpoints,
		OplogPushStatsExposeHTTP: nil,
	}
	Turbo bool
)
var (
	WalgMetricsPrefix = "walg_"

	WalgMetrics = metrics{
				// contains filtered or unexported fields
	}
)
var DeprecatedExternalGpgMessage = fmt.Sprintf(
	`You are using deprecated functionality that uses an external gpg library.
It will be removed in next major version.
Please set GPG key using environment variables %s or %s.
`, PgpKeySetting, PgpKeyPathSetting)
var ErrorSizeTrackingDisabled = fmt.Errorf("size tracking disabled by DisableSizeTracking method")
var MaxExtractRetryWait = 5 * time.Minute
var MinExtractRetryWait = time.Minute
var StorageAdapters = []StorageAdapter{
	{"S3_PREFIX", s3.SettingList, s3.ConfigureFolder, nil},
	{"FILE_PREFIX", nil, fs.ConfigureFolder, preprocessFilePrefix},
	{"GS_PREFIX", gcs.SettingList, gcs.ConfigureFolder, nil},
	{"AZ_PREFIX", azure.SettingList, azure.ConfigureFolder, nil},
	{"SWIFT_PREFIX", swift.SettingList, swift.ConfigureFolder, nil},
	{"SSH_PREFIX", sh.SettingsList, sh.ConfigureFolder, nil},
}
var StringModifiers = []string{"FULL", "FIND_FULL"}
var StringModifiersDeleteEverything = []string{"FORCE"}

Functions

func AddConfigFlags

func AddConfigFlags(Cmd *cobra.Command, hiddenCfgFlagAnnotation string)

func AddTurboFlag

func AddTurboFlag(cmd *cobra.Command)

func AssertRequiredSettingsSet

func AssertRequiredSettingsSet() error

func CheckAllowedSettings

func CheckAllowedSettings(config *viper.Viper)

CheckAllowedSettings warns if any setting in the viper instance is not allowed

func CompressAndEncrypt

func CompressAndEncrypt(source io.Reader, compressor compression.Compressor, crypter crypto.Crypter) io.Reader

CompressAndEncrypt compresses input to a pipe reader. The output must be consumed or the pipe will block.
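
A minimal usage sketch, assuming the code lives inside the wal-g module (the internal package is not importable from outside) and that a compression method is configured; the initialization sequence shown is an assumption:

package main

import (
	"io"
	"os"
	"strings"

	"github.com/wal-g/wal-g/internal"
)

func main() {
	// Assumed initialization order: register settings, read config/env, apply.
	internal.ConfigureSettings(internal.PG)
	internal.InitConfig()
	internal.Configure()

	compressor, err := internal.ConfigureCompressor()
	if err != nil {
		panic(err) // e.g. WALG_COMPRESSION_METHOD is not set to a known method
	}
	// ConfigureCrypter returns nil when no encryption is configured.
	crypter := internal.ConfigureCrypter()

	// The returned pipe reader must be drained, or the writer side blocks.
	compressed := internal.CompressAndEncrypt(strings.NewReader("payload"), compressor, crypter)
	if _, err := io.Copy(os.Stdout, compressed); err != nil {
		panic(err)
	}
}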

func Configure

func Configure()

func ConfigureAndRunDefaultWebServer

func ConfigureAndRunDefaultWebServer() error

ConfigureAndRunDefaultWebServer configures and runs the web server

func ConfigureArchiveStatusManager

func ConfigureArchiveStatusManager() (fsutil.DataFolder, error)

TODO : unit tests

func ConfigureCompressor

func ConfigureCompressor() (compression.Compressor, error)

TODO : unit tests

func ConfigureCrypter

func ConfigureCrypter() crypto.Crypter

ConfigureCrypter uses environment variables to create and configure a crypter. If no configuration is found in the environment variables, it returns a `<nil>` value.

func ConfigureFolder

func ConfigureFolder() (storage.Folder, error)

TODO : unit tests

func ConfigureFolderForSpecificConfig

func ConfigureFolderForSpecificConfig(config *viper.Viper) (storage.Folder, error)

TODO: when multiple storage 'keys' are provided in the config, this function always returns only one concrete 'folder'. The chosen folder depends solely on the 'StorageAdapters' order

func ConfigureLimiters

func ConfigureLimiters()

TODO : unit tests

func ConfigureLogging

func ConfigureLogging() error

func ConfigurePGArchiveStatusManager

func ConfigurePGArchiveStatusManager() (fsutil.DataFolder, error)

func ConfigureSettings

func ConfigureSettings(currentType string)

nolint: gocyclo

func ConfigureStoragePrefix

func ConfigureStoragePrefix(folder storage.Folder) storage.Folder

func DecompressDecryptBytes

func DecompressDecryptBytes(archiveReader io.Reader, decompressor compression.Decompressor) (io.ReadCloser, error)

func DecryptAndDecompressTar

func DecryptAndDecompressTar(reader io.Reader, filePath string, crypter crypto.Crypter) (io.ReadCloser, error)

DecryptAndDecompressTar decrypts the file and checks its extension. If it is a tar, no decompression is needed. Otherwise it uses the corresponding decompressor; if none is found, an error is returned.
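
A hedged sketch of turning a stored archive back into plain tar bytes; the part name is hypothetical, and the import paths are assumed from the wal-g module layout:

package walgexample // must live inside the wal-g module to import internal

import (
	"io"

	"github.com/wal-g/wal-g/internal"
	"github.com/wal-g/wal-g/pkg/storages/storage"
)

// openTarPart downloads a hypothetical archive part and decrypts/decompresses
// it; the ".br" extension selects the matching decompressor.
func openTarPart(folder storage.Folder) (io.ReadCloser, error) {
	const partPath = "part_001.tar.br" // hypothetical object name
	raw, exists, err := internal.TryDownloadFile(folder, partPath)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, internal.NewFileNotExistError(partPath)
	}
	return internal.DecryptAndDecompressTar(raw, partPath, internal.ConfigureCrypter())
}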

func DecryptBytes

func DecryptBytes(archiveReader io.Reader) (io.Reader, error)

func DefaultHandleBackupList

func DefaultHandleBackupList(folder storage.Folder, pretty, json bool)

func DeleteArgsValidator

func DeleteArgsValidator(args, stringModifiers []string, minArgs int, maxArgs int) error

func DeleteBackups

func DeleteBackups(folder storage.Folder, backups []string) error

DeleteBackups purges the given backups' files. TODO: extract a BackupLayout abstraction and provide DataPath(), SentinelPath(), Exists() methods

func DeleteBeforeArgsValidator

func DeleteBeforeArgsValidator(cmd *cobra.Command, args []string) error

func DeleteEverythingArgsValidator

func DeleteEverythingArgsValidator(cmd *cobra.Command, args []string) error

func DeleteGarbage

func DeleteGarbage(folder storage.Folder, garbage []string) error

DeleteGarbage purges the given garbage keys

func DeleteRetainAfterArgsValidator

func DeleteRetainAfterArgsValidator(cmd *cobra.Command, args []string) error

func DeleteRetainArgsValidator

func DeleteRetainArgsValidator(cmd *cobra.Command, args []string) error

func DeleteTargetArgsValidator

func DeleteTargetArgsValidator(cmd *cobra.Command, args []string) error

func DownloadAndDecompressSplittedStream

func DownloadAndDecompressSplittedStream(backup Backup, blockSize int, extension string,
	writeCloser io.WriteCloser, maxDownloadRetry int) error

TODO : unit tests. DownloadAndDecompressSplittedStream downloads, decompresses, and writes the stream to the given writer

func DownloadAndDecompressStorageFile

func DownloadAndDecompressStorageFile(folder storage.Folder, fileName string) (io.ReadCloser, error)

TODO : unit tests

func DownloadAndDecompressStream

func DownloadAndDecompressStream(backup Backup, writeCloser io.WriteCloser) error

TODO : unit tests. DownloadAndDecompressStream downloads, decompresses, and writes the stream to the given writer

func DownloadFile

func DownloadFile(folder storage.Folder, filename, ext string, writeCloser io.WriteCloser) error

DownloadFile downloads, decompresses, and decrypts the requested file, writing the result to the given writer

func DownloadFileTo

func DownloadFileTo(folder storage.Folder, fileName string, dstPath string) error

TODO : unit tests. DownloadFileTo downloads a file and writes it to a local file

func ExtractAll

func ExtractAll(tarInterpreter TarInterpreter, files []ReaderMaker) error

ExtractAll handles all files passed in. Supported extensions are `.lzo`, `.lz4`, `.lzma`, and `.tar`; the `.nop` file type is used for testing purposes. Each file is extracted in its own goroutine, and ExtractAll waits for all goroutines to finish. Unsuccessful attempts are retried log2(MaxConcurrency) times, with the concurrency halved on each retry.
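
A sketch of driving ExtractAll with a single storage-backed reader; the archive path and target directory are hypothetical, and the zero values of FileTarInterpreter's unexported fields are assumed adequate here:

package walgexample // must live inside the wal-g module to import internal

import (
	"github.com/wal-g/wal-g/internal"
	"github.com/wal-g/wal-g/pkg/storages/storage"
)

func restorePart(folder storage.Folder) error {
	files := []internal.ReaderMaker{
		internal.NewStorageReaderMaker(folder, "tar_partitions/part_001.tar.br"),
	}
	// FileTarInterpreter writes extracted entries under DirectoryToSave.
	interpreter := &internal.FileTarInterpreter{DirectoryToSave: "/tmp/restore"}
	return internal.ExtractAll(interpreter, files)
}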

func ExtractAllWithSleeper

func ExtractAllWithSleeper(tarInterpreter TarInterpreter, files []ReaderMaker, sleeper Sleeper) error

func ExtractDeleteEverythingModifierFromArgs

func ExtractDeleteEverythingModifierFromArgs(args []string) int

ExtractDeleteEverythingModifierFromArgs extracts the delete modifier from the args of the "delete everything" command

func ExtractDeleteModifierFromArgs

func ExtractDeleteModifierFromArgs(args []string) (int, string)

ExtractDeleteModifierFromArgs extracts the delete modifier for the "delete retain"/"delete before" commands

func ExtractDeleteRetainAfterModifierFromArgs

func ExtractDeleteRetainAfterModifierFromArgs(args []string) (int, string, string)

ExtractDeleteRetainAfterModifierFromArgs extracts the args for the "delete retain --after" command

func ExtractDeleteTargetModifierFromArgs

func ExtractDeleteTargetModifierFromArgs(args []string) int

ExtractDeleteTargetModifierFromArgs extracts the delete modifier from the args of the "delete target" command

func FatalOnUnrecoverableMetadataError

func FatalOnUnrecoverableMetadataError(backupTime BackupTime, err error)

func FetchDto

func FetchDto(folder storage.Folder, dto interface{}, path string) error

FetchDto fetches data from the given path and deserializes it into the given object
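
For instance, a sentinel document can be fetched into a generic map; this sketch assumes the object exists at the path derived by SentinelNameFromBackup, and that the import paths match the wal-g module layout:

package walgexample // must live inside the wal-g module to import internal

import (
	"github.com/wal-g/wal-g/internal"
	"github.com/wal-g/wal-g/pkg/storages/storage"
)

func readSentinel(folder storage.Folder, backupName string) (map[string]interface{}, error) {
	var sentinel map[string]interface{}
	err := internal.FetchDto(folder, &sentinel, internal.SentinelNameFromBackup(backupName))
	return sentinel, err
}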

func FolderFromConfig

func FolderFromConfig(configFile string) (storage.Folder, error)

FolderFromConfig prefers config parameters over the current environment variables

func FolderSize

func FolderSize(folder storage.Folder, path string) (int64, error)

func FormatTime

func FormatTime(backupTime time.Time) string

func FormatTimeInner

func FormatTimeInner(backupTime time.Time, timeFormat string) string

func GetBackupSentinelObjects

func GetBackupSentinelObjects(folder storage.Folder) ([]storage.Object, error)

func GetBackupToCommandFetcher

func GetBackupToCommandFetcher(cmd *exec.Cmd) func(folder storage.Folder, backup Backup)

GetBackupToCommandFetcher returns a function that copies all bytes from the backup to cmd's stdin
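
A sketch of combining this with HandleBackupFetch; the consumer command is hypothetical:

package walgexample // must live inside the wal-g module to import internal

import (
	"os/exec"

	"github.com/wal-g/wal-g/internal"
	"github.com/wal-g/wal-g/pkg/storages/storage"
)

func fetchLatestIntoCommand(folder storage.Folder) {
	cmd := exec.Command("xbstream", "-x") // hypothetical consumer of the backup stream
	fetcher := internal.GetBackupToCommandFetcher(cmd)
	internal.HandleBackupFetch(folder, internal.NewLatestBackupSelector(), fetcher)
}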

func GetBoolSetting

func GetBoolSetting(setting string) (val bool, ok bool, err error)

func GetBoolSettingDefault

func GetBoolSettingDefault(setting string, def bool) (bool, error)

func GetCommandSetting

func GetCommandSetting(variableName string) (*exec.Cmd, error)

func GetCommandSettingContext

func GetCommandSettingContext(ctx context.Context, variableName string) (*exec.Cmd, error)

func GetDataFolderPath

func GetDataFolderPath() string

func GetDeltaConfig

func GetDeltaConfig() (maxDeltas int, fromFull bool)

TODO : unit tests

func GetDurationSetting

func GetDurationSetting(setting string) (time.Duration, error)

func GetGarbageFromPrefix

func GetGarbageFromPrefix(folders []storage.Folder, nonGarbage []BackupTime) []string

func GetLastDecompressor

func GetLastDecompressor() (compression.Decompressor, error)

func GetLatestBackupName

func GetLatestBackupName(folder storage.Folder) (string, error)

func GetLogsDstSettings

func GetLogsDstSettings(operationLogsDstEnvVariable string) (dstFolder string, err error)

TODO : unit tests. GetLogsDstSettings reads the logs destination folder from the environment variables

func GetMaxConcurrency

func GetMaxConcurrency(concurrencyType string) (int, error)

func GetMaxDownloadConcurrency

func GetMaxDownloadConcurrency() (int, error)

func GetMaxUploadConcurrency

func GetMaxUploadConcurrency() (int, error)

func GetMaxUploadDiskConcurrency

func GetMaxUploadDiskConcurrency() (int, error)

func GetOplogArchiveAfterSize

func GetOplogArchiveAfterSize() (int, error)

func GetOplogPITRDiscoveryIntervalSetting

func GetOplogPITRDiscoveryIntervalSetting() (*time.Duration, error)

func GetPartitionedBackupFileNames

func GetPartitionedBackupFileNames(backup Backup, decompressor compression.Decompressor) ([][]string, error)

func GetPartitionedSteamMultipartName

func GetPartitionedSteamMultipartName(backupName string, extension string, partIdx int, fileNumber int) string

func GetPartitionedStreamName

func GetPartitionedStreamName(backupName string, extension string, partIdx int) string

func GetPermanentBackups

func GetPermanentBackups(folder storage.Folder, metaFetcher GenericMetaFetcher) map[string]bool

func GetPgSlotName

func GetPgSlotName() (pgSlotName string)

GetPgSlotName reads the slot name from the environment

func GetRelativeArchiveDataFolderPath

func GetRelativeArchiveDataFolderPath() string

func GetRequiredSetting

func GetRequiredSetting(setting string) (string, error)

func GetSentinelUserData

func GetSentinelUserData() (interface{}, error)

func GetSetting

func GetSetting(key string) (value string, ok bool)

GetSetting extracts a setting by key if the key is set; otherwise it returns an empty string
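
A short sketch of the two typical lookups, a plain string setting and a boolean setting, assuming configuration has already been initialized:

package walgexample // must live inside the wal-g module to import internal

import (
	"fmt"

	"github.com/wal-g/wal-g/internal"
)

func printSettings() error {
	if dataDir, ok := internal.GetSetting(internal.PgDataSetting); ok {
		fmt.Println("PGDATA:", dataDir)
	}
	verify, isSet, err := internal.GetBoolSetting(internal.VerifyPageChecksumsSetting)
	if err != nil {
		return err
	}
	if isSet {
		fmt.Println("verify page checksums:", verify)
	}
	return nil
}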

func GetStreamName

func GetStreamName(backupName string, extension string) string

func HandleBackupFetch

func HandleBackupFetch(folder storage.Folder,
	targetBackupSelector BackupSelector,
	fetcher func(folder storage.Folder, backup Backup))

TODO : unit tests. HandleBackupFetch is invoked to perform wal-g backup-fetch

func HandleBackupList

func HandleBackupList(
	getBackupsFunc func() ([]BackupTime, error),
	writeBackupListFunc func([]BackupTime),
	logging Logging,
)

func HandleBackupMark

func HandleBackupMark(uploader *Uploader, backupName string, toPermanent bool, metaInteractor GenericMetaInteractor)

func InitConfig

func InitConfig()

InitConfig reads config file and ENV variables if set.

func IsPermanent

func IsPermanent(objectName string, permanentBackups map[string]bool, backupNameLength int) bool

IsPermanent is a generic function to determine if the storage object is permanent. It does not support permanent WALs or binlogs.

func MetadataNameFromBackup

func MetadataNameFromBackup(backupName string) string

func PackFileTo

func PackFileTo(tarBall TarBall, fileInfoHeader *tar.Header, fileContent io.Reader) (fileSize int64, err error)

func ParseTS

func ParseTS(endTSEnvVar string) (endTS *time.Time, err error)

func PrettyFormatTime

func PrettyFormatTime(backupTime time.Time) string

func PushMetrics

func PushMetrics()

func ReadConfigFromFile

func ReadConfigFromFile(config *viper.Viper, configFile string)

ReadConfigFromFile reads the config into the viper instance

func SentinelNameFromBackup

func SentinelNameFromBackup(backupName string) string

func SetDefaultValues

func SetDefaultValues(config *viper.Viper)

SetDefaultValues sets default settings on the viper instance

func SetGoMaxProcs

func SetGoMaxProcs(config *viper.Viper)

func SetLastDecompressor

func SetLastDecompressor(decompressor compression.Decompressor) error

func SortBackupTimeSlices

func SortBackupTimeSlices(backupTimes []BackupTime)

func SortTimedBackup

func SortTimedBackup(backups []TimedBackup)

func SplitPurgingBackups

func SplitPurgingBackups(backups []TimedBackup,
	retainCount *int,
	retainAfter *time.Time) (purge, retain map[string]bool, err error)

SplitPurgingBackups partitions backups into those to purge and those to retain; if no retention policy is given, all backups are retained
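
A sketch applying only a count-based policy (the count of seven is arbitrary); a nil retainAfter skips the time-based criterion:

package walgexample // must live inside the wal-g module to import internal

import "github.com/wal-g/wal-g/internal"

// planPurge keeps the newest seven backups and marks the rest for purging.
func planPurge(backups []internal.TimedBackup) (purge, retain map[string]bool, err error) {
	retainCount := 7
	return internal.SplitPurgingBackups(backups, &retainCount, nil)
}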

func StartReadingFile

func StartReadingFile(fileInfoHeader *tar.Header, info os.FileInfo, path string) (io.ReadSeekCloser, error)

TODO : unit tests

func StreamBackupToCommandStdin

func StreamBackupToCommandStdin(cmd *exec.Cmd, backup Backup) error

StreamBackupToCommandStdin downloads and decompresses the backup stream to cmd's stdin.

func StreamMetadataNameFromBackup

func StreamMetadataNameFromBackup(backupName string) string

func TryDownloadFile

func TryDownloadFile(folder storage.Folder, path string) (fileReader io.ReadCloser, exists bool, err error)

func UnmarshalSentinelUserData

func UnmarshalSentinelUserData(userDataStr string) (interface{}, error)

func UnwrapLatestModifier

func UnwrapLatestModifier(backupName string, folder storage.Folder) (string, error)

UnwrapLatestModifier checks whether LATEST is provided instead of backupName; if so, it is replaced with the name of the latest backup
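
A one-liner sketch resolving the LatestString placeholder to a concrete name:

package walgexample // must live inside the wal-g module to import internal

import (
	"github.com/wal-g/wal-g/internal"
	"github.com/wal-g/wal-g/pkg/storages/storage"
)

func latestBackupName(folder storage.Folder) (string, error) {
	// LatestString ("LATEST") is replaced with the newest backup's name;
	// any other backup name would pass through unchanged.
	return internal.UnwrapLatestModifier(internal.LatestString, folder)
}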

func UploadBackupStreamMetadata

func UploadBackupStreamMetadata(uploader UploaderProvider, metadata interface{}, backupName string) error

func UploadDto

func UploadDto(folder storage.Folder, dto interface{}, path string) error

UploadDto serializes the given object to JSON and puts it at the given path

func UploadSentinel

func UploadSentinel(uploader UploaderProvider, sentinelDto interface{}, backupName string) error

TODO : unit tests

func WriteAsJSON

func WriteAsJSON(data interface{}, output io.Writer, pretty bool) error

func WriteBackupList

func WriteBackupList(backups []BackupTime, output io.Writer)

func WritePrettyBackupList

func WritePrettyBackupList(backups []BackupTime, output io.Writer)

Types

type ArchiveNonExistenceError

type ArchiveNonExistenceError struct {
	// contains filtered or unexported fields
}

func (ArchiveNonExistenceError) Error

func (err ArchiveNonExistenceError) Error() string

type Backup

type Backup struct {
	Name string
	// base backup folder or catchup backup folder
	Folder storage.Folder
}

Backup provides basic functionality to fetch backup-related information from storage

WAL-G stores information about a single backup in the following files:

Sentinel file - contains useful information, such as backup start time, backup size, etc. See FetchSentinel, UploadSentinel.

Metadata file (only in Postgres) - Postgres sentinel files can be quite large (> 1GB), so the metadata file is useful for quickly fetching backup-related information. See FetchMetadata, UploadMetadata.
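
A sketch of the usual flow: construct a Backup, verify it exists, then decode its sentinel. The "basebackups_005" subfolder is an assumption about the storage layout:

package walgexample // must live inside the wal-g module to import internal

import (
	"github.com/wal-g/wal-g/internal"
	"github.com/wal-g/wal-g/pkg/storages/storage"
)

func inspectBackup(folder storage.Folder, name string) (map[string]interface{}, error) {
	backup := internal.NewBackup(folder.GetSubFolder("basebackups_005"), name)
	if err := backup.AssureExists(); err != nil {
		return nil, err
	}
	// The sentinel layout is engine-specific, so decode into a generic map here.
	var sentinel map[string]interface{}
	err := backup.FetchSentinel(&sentinel)
	return sentinel, err
}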

func GetBackupByName

func GetBackupByName(backupName, subfolder string, folder storage.Folder) (Backup, error)

func NewBackup

func NewBackup(folder storage.Folder, name string) Backup

func (*Backup) AssureExists

func (backup *Backup) AssureExists() error

AssureExists is similar to CheckExistence, but returns an error in two cases: 1. the backup does not exist; 2. it failed to check whether the backup exists

func (*Backup) CheckExistence

func (backup *Backup) CheckExistence() (bool, error)

func (*Backup) FetchMetadata

func (backup *Backup) FetchMetadata(metadataDto interface{}) error

TODO : unit tests

func (*Backup) FetchSentinel

func (backup *Backup) FetchSentinel(sentinelDto interface{}) error

TODO : unit tests

func (*Backup) SentinelExists

func (backup *Backup) SentinelExists() (bool, error)

SentinelExists checks that the sentinel file of the specified backup exists.

func (*Backup) UploadMetadata

func (backup *Backup) UploadMetadata(metadataDto interface{}) error

func (*Backup) UploadSentinel

func (backup *Backup) UploadSentinel(sentinelDto interface{}) error

type BackupFileDescription

type BackupFileDescription struct {
	IsIncremented bool // should never be both incremented and Skipped
	IsSkipped     bool
	MTime         time.Time
	CorruptBlocks *CorruptBlocksInfo `json:",omitempty"`
	UpdatesCount  uint64
}

func NewBackupFileDescription

func NewBackupFileDescription(isIncremented, isSkipped bool, modTime time.Time) *BackupFileDescription

func (*BackupFileDescription) SetCorruptBlocks

func (desc *BackupFileDescription) SetCorruptBlocks(corruptBlockNumbers []uint32, storeAllBlocks bool)

type BackupFileList

type BackupFileList map[string]BackupFileDescription

type BackupHasPermanentBackupInFutureError

type BackupHasPermanentBackupInFutureError struct {
	// contains filtered or unexported fields
}

type BackupMarkHandler

type BackupMarkHandler struct {
	// contains filtered or unexported fields
}

func NewBackupMarkHandler

func NewBackupMarkHandler(metaInteractor GenericMetaInteractor, storageRootFolder storage.Folder) BackupMarkHandler

func (*BackupMarkHandler) GetBackupsToMark

func (h *BackupMarkHandler) GetBackupsToMark(backupName string, toPermanent bool) ([]string, error)

GetBackupsToMark retrieves all previous permanent or impermanent backups, including the backup itself, any previous delta backups, and the initial full backup, in increasing order beginning from the full backup, returning the backups ready to be marked.

For example, when marking backups from impermanent to permanent, we retrieve all currently impermanent backups and return them as a slice.

func (*BackupMarkHandler) MarkBackup

func (h *BackupMarkHandler) MarkBackup(backupName string, toPermanent bool)

MarkBackup marks a backup as permanent or impermanent

type BackupNameSelector

type BackupNameSelector struct {
	// contains filtered or unexported fields
}

BackupNameSelector selects a backup by the provided backup name

func NewBackupNameSelector

func NewBackupNameSelector(backupName string, checkExistence bool) (BackupNameSelector, error)

func (BackupNameSelector) Select

func (s BackupNameSelector) Select(folder storage.Folder) (string, error)

TODO: unit tests

type BackupNonExistenceError

type BackupNonExistenceError struct {
	// contains filtered or unexported fields
}

func NewBackupNonExistenceError

func NewBackupNonExistenceError(backupName string) BackupNonExistenceError

func (BackupNonExistenceError) Error

func (err BackupNonExistenceError) Error() string

type BackupObject

type BackupObject interface {
	storage.Object
	GetBackupTime() time.Time
	GetBackupName() string

	// TODO: move increment info into separate struct (in backup.go)
	IsFullBackup() bool
	GetBaseBackupName() string
	GetIncrementFromName() string
}

BackupObject represents the backup sentinel object uploaded to storage

func FindBackupObjects

func FindBackupObjects(folder storage.Folder) ([]BackupObject, error)

func NewDefaultBackupObject

func NewDefaultBackupObject(object storage.Object) BackupObject

type BackupSelector

type BackupSelector interface {
	Select(folder storage.Folder) (string, error)
}

BackupSelector returns the name of the storage backup chosen according to the internal rules. It returns NoBackupsFoundError if no backups match the criteria.
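
A sketch choosing between the two simplest selectors; checkExistence=true asks the name selector to verify the backup before returning it:

package walgexample // must live inside the wal-g module to import internal

import (
	"github.com/wal-g/wal-g/internal"
	"github.com/wal-g/wal-g/pkg/storages/storage"
)

func chooseTarget(folder storage.Folder, name string) (string, error) {
	var selector internal.BackupSelector = internal.NewLatestBackupSelector()
	if name != "" {
		nameSelector, err := internal.NewBackupNameSelector(name, true)
		if err != nil {
			return "", err
		}
		selector = nameSelector
	}
	return selector.Select(folder)
}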

func CreateTargetDeleteBackupSelector

func CreateTargetDeleteBackupSelector(cmd *cobra.Command,
	args []string, targetUserData string, metaFetcher GenericMetaFetcher) (BackupSelector, error)

CreateTargetDeleteBackupSelector creates the BackupSelector that selects the backup to delete

func NewDeltaBaseSelector

func NewDeltaBaseSelector(
	targetBackupName, targetUserData string, metaFetcher GenericMetaFetcher) (BackupSelector, error)

NewDeltaBaseSelector returns the BackupSelector for the delta backup base, according to the provided flags

func NewTargetBackupSelector

func NewTargetBackupSelector(targetUserData, targetName string, metaFetcher GenericMetaFetcher) (BackupSelector, error)

type BackupStreamMetadata

type BackupStreamMetadata struct {
	Type        string `json:"type"`
	Partitions  uint   `json:"partitions,omitempty"`
	BlockSize   uint   `json:"block_size,omitempty"`
	Compression string `json:"compression,omitempty"`
}

type BackupTime

type BackupTime struct {
	BackupName  string    `json:"backup_name"`
	Time        time.Time `json:"time"`
	WalFileName string    `json:"wal_file_name"`
}

BackupTime is used to sort backups by latest modified time.

func GetBackupTimeSlices

func GetBackupTimeSlices(backups []storage.Object) []BackupTime

func GetBackups

func GetBackups(folder storage.Folder) (backups []BackupTime, err error)

TODO : unit tests. GetBackups receives backup descriptions and sorts them by time

func GetBackupsAndGarbage

func GetBackupsAndGarbage(folder storage.Folder) (backups []BackupTime, garbage []string, err error)

TODO : unit tests

type Bundle

type Bundle struct {
	Directory string
	Sentinel  *Sentinel

	TarBallComposer TarBallComposer
	TarBallQueue    *TarBallQueue

	Crypter crypto.Crypter

	TarSizeThreshold int64

	ExcludedFilenames map[string]utility.Empty

	FilesFilter FilesFilter
}

func NewBundle

func NewBundle(
	directory string, crypter crypto.Crypter,
	tarSizeThreshold int64, excludedFilenames map[string]utility.Empty) *Bundle

func (*Bundle) AddToBundle

func (bundle *Bundle) AddToBundle(path string, info os.FileInfo, err error) error

func (*Bundle) FinishComposing

func (bundle *Bundle) FinishComposing() (TarFileSets, error)

func (*Bundle) FinishQueue

func (bundle *Bundle) FinishQueue() error

func (*Bundle) GetFileRelPath

func (bundle *Bundle) GetFileRelPath(fileAbsPath string) string

func (*Bundle) SetupComposer

func (bundle *Bundle) SetupComposer(composerMaker TarBallComposerMaker) (err error)

func (*Bundle) StartQueue

func (bundle *Bundle) StartQueue(tarBallMaker TarBallMaker) error

type BundleFiles

type BundleFiles interface {
	AddSkippedFile(tarHeader *tar.Header, fileInfo os.FileInfo)
	AddFile(tarHeader *tar.Header, fileInfo os.FileInfo, isIncremented bool)
	AddFileDescription(name string, backupFileDescription BackupFileDescription)
	AddFileWithCorruptBlocks(tarHeader *tar.Header, fileInfo os.FileInfo, isIncremented bool,
		corruptedBlocks []uint32, storeAllBlocks bool)
	GetUnderlyingMap() *sync.Map
}

BundleFiles represents the files in the backup that is being created

type CachedDecompressor

type CachedDecompressor struct {
	FileExtension string
}

CachedDecompressor holds the file extension that identifies the decompressor

type CommonDirectoryDownloader

type CommonDirectoryDownloader struct {
	Folder     storage.Folder
	BackupName string
}

func (*CommonDirectoryDownloader) DownloadDirectory

func (downloader *CommonDirectoryDownloader) DownloadDirectory(pathToRestore string) error

type CommonDirectoryUploader

type CommonDirectoryUploader struct {
	// contains filtered or unexported fields
}

func NewCommonDirectoryUploader

func NewCommonDirectoryUploader(
	crypter crypto.Crypter, packer TarBallFilePacker,
	tarBallComposerMaker TarBallComposerMaker, tarSizeThreshold int64,
	excludedFiles map[string]utility.Empty, backupName string,
	uploader *Uploader) *CommonDirectoryUploader

func (*CommonDirectoryUploader) Upload

func (u *CommonDirectoryUploader) Upload(path string) TarFileSets

type CommonFilesFilter

type CommonFilesFilter struct{}

func (*CommonFilesFilter) ShouldUploadFile

func (*CommonFilesFilter) ShouldUploadFile(path string) bool

type ComposeFileInfo

type ComposeFileInfo struct {
	Path          string
	FileInfo      os.FileInfo
	WasInBase     bool
	Header        *tar.Header
	IsIncremented bool
}

func NewComposeFileInfo

func NewComposeFileInfo(path string, fileInfo os.FileInfo, wasInBase, isIncremented bool,
	header *tar.Header) *ComposeFileInfo

type ComposeRatingEvaluator

type ComposeRatingEvaluator interface {
	Evaluate(path string, updatesCount uint64, wasInBase bool) uint64
}

type CompressAndEncryptError

type CompressAndEncryptError struct {
	// contains filtered or unexported fields
}

CompressAndEncryptError is used to catch specific errors from CompressAndEncrypt when uploading to storage. The upload will not be retried if this error occurs.

func (CompressAndEncryptError) Error

func (err CompressAndEncryptError) Error() string

type CorruptBlocksInfo

type CorruptBlocksInfo struct {
	CorruptBlocksCount int
	SomeCorruptBlocks  []uint32
}

type DefaultBackupObject

type DefaultBackupObject struct {
	storage.Object
}

func (DefaultBackupObject) GetBackupName

func (o DefaultBackupObject) GetBackupName() string

func (DefaultBackupObject) GetBackupTime

func (o DefaultBackupObject) GetBackupTime() time.Time

func (DefaultBackupObject) GetBaseBackupName

func (o DefaultBackupObject) GetBaseBackupName() string

func (DefaultBackupObject) GetIncrementFromName

func (o DefaultBackupObject) GetIncrementFromName() string

func (DefaultBackupObject) IsFullBackup

func (o DefaultBackupObject) IsFullBackup() bool

type DefaultComposeRatingEvaluator

type DefaultComposeRatingEvaluator struct {
	// contains filtered or unexported fields
}

func NewDefaultComposeRatingEvaluator

func NewDefaultComposeRatingEvaluator(incrementFromFiles BackupFileList) *DefaultComposeRatingEvaluator

func (*DefaultComposeRatingEvaluator) Evaluate

func (evaluator *DefaultComposeRatingEvaluator) Evaluate(path string, updatesCount uint64, wasInBase bool) uint64

type DeleteHandler

type DeleteHandler struct {
	Folder storage.Folder
	// contains filtered or unexported fields
}

func NewDeleteHandler

func NewDeleteHandler(
	folder storage.Folder,
	backups []BackupObject,
	less func(object1, object2 storage.Object) bool,
	options ...DeleteHandlerOption,
) *DeleteHandler

func (*DeleteHandler) DeleteBeforeTarget

func (h *DeleteHandler) DeleteBeforeTarget(target BackupObject, confirmed bool) error

func (*DeleteHandler) DeleteBeforeTargetWhere

func (h *DeleteHandler) DeleteBeforeTargetWhere(target BackupObject, confirmed bool,
	objSelector func(object storage.Object) bool, folderFilter func(name string) bool) error

func (*DeleteHandler) DeleteEverything

func (h *DeleteHandler) DeleteEverything(confirmed bool)

func (*DeleteHandler) DeleteTarget

func (h *DeleteHandler) DeleteTarget(target BackupObject, confirmed, findFull bool,
	folderFilter func(name string) bool) error

func (*DeleteHandler) FindTargetBefore

func (h *DeleteHandler) FindTargetBefore(beforeStr string, modifier int) (BackupObject, error)

TODO: unit tests

func (*DeleteHandler) FindTargetBeforeName

func (h *DeleteHandler) FindTargetBeforeName(name string, modifier int) (BackupObject, error)

func (*DeleteHandler) FindTargetBeforeTime

func (h *DeleteHandler) FindTargetBeforeTime(timeLine time.Time, modifier int) (BackupObject, error)

TODO: unit tests

func (*DeleteHandler) FindTargetByName

func (h *DeleteHandler) FindTargetByName(bname string) (BackupObject, error)

TODO: unit tests

func (*DeleteHandler) FindTargetBySelector

func (h *DeleteHandler) FindTargetBySelector(targetSelector BackupSelector) (BackupObject, error)

TODO: unit tests

func (*DeleteHandler) FindTargetRetain

func (h *DeleteHandler) FindTargetRetain(retentionCount, modifier int) (BackupObject, error)

func (*DeleteHandler) FindTargetRetainAfter

func (h *DeleteHandler) FindTargetRetainAfter(retentionCount int, afterStr string, modifier int) (BackupObject, error)

TODO: unit tests

func (*DeleteHandler) FindTargetRetainAfterName

func (h *DeleteHandler) FindTargetRetainAfterName(
	retentionCount int, name string, modifier int) (BackupObject, error)

TODO: unit tests

func (*DeleteHandler) FindTargetRetainAfterTime

func (h *DeleteHandler) FindTargetRetainAfterTime(retentionCount int, timeLine time.Time, modifier int,
) (BackupObject, error)

TODO: unit tests

func (*DeleteHandler) HandleDeleteBefore

func (h *DeleteHandler) HandleDeleteBefore(args []string, confirmed bool)

func (*DeleteHandler) HandleDeleteEverything

func (h *DeleteHandler) HandleDeleteEverything(args []string, permanentBackups map[string]bool, confirmed bool)

func (*DeleteHandler) HandleDeleteRetain

func (h *DeleteHandler) HandleDeleteRetain(args []string, confirmed bool)

func (*DeleteHandler) HandleDeleteRetainAfter

func (h *DeleteHandler) HandleDeleteRetainAfter(args []string, confirmed bool)

func (*DeleteHandler) HandleDeleteTarget

func (h *DeleteHandler) HandleDeleteTarget(targetSelector BackupSelector, confirmed, findFull bool)

type DeleteHandlerOption

type DeleteHandlerOption func(h *DeleteHandler)

func IsPermanentFunc

func IsPermanentFunc(isPermanent func(storage.Object) bool) DeleteHandlerOption

type DevNullWriter

type DevNullWriter struct {
	io.WriteCloser
	// contains filtered or unexported fields
}

func (*DevNullWriter) Write

func (e *DevNullWriter) Write(p []byte) (int, error)

type DirectoryDownloader

type DirectoryDownloader interface {
	DownloadDirectory(pathToRestore string) error
}

func NewCommonDirectoryDownloader

func NewCommonDirectoryDownloader(folder storage.Folder, backupName string) DirectoryDownloader

type DirectoryIsNotEmptyError

type DirectoryIsNotEmptyError struct {
	// contains filtered or unexported fields
}

func NewDirectoryIsNotEmptyError

func NewDirectoryIsNotEmptyError(path string) DirectoryIsNotEmptyError

type DirectoryUploader

type DirectoryUploader interface {
	Upload(path string) TarFileSets
}

type DtoSerializer

type DtoSerializer interface {
	Marshal(dto interface{}) (io.Reader, error)
	Unmarshal(reader io.Reader, dto interface{}) error
}

func NewDtoSerializer

func NewDtoSerializer() (DtoSerializer, error)

TODO: unit test

type DtoSerializerType

type DtoSerializerType string
const (
	RegularJSONSerializer  DtoSerializerType = "json_default"
	StreamedJSONSerializer DtoSerializerType = "json_streamed"
)

type ErrWaiter

type ErrWaiter interface {
	Wait() error
}

type ErrorLogger

type ErrorLogger interface {
	FatalOnError(err error)
}

type ExponentialSleeper

type ExponentialSleeper struct {
	// contains filtered or unexported fields
}

func NewExponentialSleeper

func NewExponentialSleeper(startSleepDuration, sleepDurationBound time.Duration) *ExponentialSleeper

func (*ExponentialSleeper) Sleep

func (sleeper *ExponentialSleeper) Sleep()

type FileNotExistError

type FileNotExistError struct {
	// contains filtered or unexported fields
}

func NewFileNotExistError

func NewFileNotExistError(path string) FileNotExistError

func (FileNotExistError) Error

func (err FileNotExistError) Error() string

type FileTarInterpreter

type FileTarInterpreter struct {
	DirectoryToSave string
	// contains filtered or unexported fields
}

func (*FileTarInterpreter) Interpret

func (tarInterpreter *FileTarInterpreter) Interpret(reader io.Reader, fileInfo *tar.Header) error

type FileType

type FileType string
const (
	TarFileType     FileType = "TarFileType"
	RegularFileType FileType = "RegularFileType"
)

type FilesFilter

type FilesFilter interface {
	ShouldUploadFile(path string) bool
}

type GenericMetaFetcher

type GenericMetaFetcher interface {
	Fetch(backupName string, backupFolder storage.Folder) (GenericMetadata, error)
}

type GenericMetaInteractor

type GenericMetaInteractor interface {
	GenericMetaFetcher
	GenericMetaSetter
}

GenericMetaInteractor is a combination of GenericMetaFetcher and GenericMetaSetter. It can be useful when both are needed.

type GenericMetaSetter

type GenericMetaSetter interface {
	SetUserData(backupName string, backupFolder storage.Folder, userData interface{}) error
	SetIsPermanent(backupName string, backupFolder storage.Folder, isPermanent bool) error
}

type GenericMetadata

type GenericMetadata struct {
	BackupName       string
	UncompressedSize int64
	CompressedSize   int64
	Hostname         string
	StartTime        time.Time
	FinishTime       time.Time

	IsPermanent   bool
	IsIncremental bool

	// need to use separate fetcher
	// to avoid useless sentinel load (in Postgres)
	IncrementDetails IncrementDetailsFetcher

	UserData interface{}
}

GenericMetadata allows obtaining basic information about an existing backup in storage. It is useful when creating functionality that is common to all databases, for example backup-list or backup-mark.

To support GenericMetadata in a particular database, one should implement its own GenericMetaFetcher and GenericMetaSetter.
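
A toy GenericMetaFetcher sketch for a hypothetical engine without incremental backups; a real fetcher would decode its engine-specific metadata file instead of filling in placeholders:

package walgexample // must live inside the wal-g module to import internal

import (
	"time"

	"github.com/wal-g/wal-g/internal"
	"github.com/wal-g/wal-g/pkg/storages/storage"
)

type staticMetaFetcher struct{}

func (staticMetaFetcher) Fetch(backupName string, backupFolder storage.Folder) (internal.GenericMetadata, error) {
	return internal.GenericMetadata{
		BackupName: backupName,
		StartTime:  time.Now(), // placeholder; a real fetcher reads this from storage
		FinishTime: time.Now(),
		// Engines without increments can reuse the provided no-op fetcher.
		IncrementDetails: &internal.NopIncrementDetailsFetcher{},
	}, nil
}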

type IncrementDetails

type IncrementDetails struct {
	IncrementFrom     string
	IncrementFullName string
	IncrementCount    int
}

IncrementDetails is useful for fetching information about the dependencies of an incremental backup

type IncrementDetailsFetcher

type IncrementDetailsFetcher interface {
	Fetch() (isIncremental bool, details IncrementDetails, err error)
}

type InfoLogger

type InfoLogger interface {
	Println(v ...interface{})
}

type InvalidConcurrencyValueError

type InvalidConcurrencyValueError struct {
	// contains filtered or unexported fields
}

func (InvalidConcurrencyValueError) Error

type LatestBackupSelector

type LatestBackupSelector struct {
}

LatestBackupSelector selects the latest backup from storage

func NewLatestBackupSelector

func NewLatestBackupSelector() LatestBackupSelector

func (LatestBackupSelector) Select

func (s LatestBackupSelector) Select(folder storage.Folder) (string, error)

TODO: unit tests

type LazyCache

type LazyCache[K comparable, V any] struct {
	// contains filtered or unexported fields
}

func NewLazyCache

func NewLazyCache[K comparable, V any](load func(key K) (value V, err error)) *LazyCache[K, V]

func (*LazyCache[K, V]) Load

func (lazyCache *LazyCache[K, V]) Load(key K) (value V, exists bool, err error)

func (*LazyCache[K, V]) LoadExisting

func (lazyCache *LazyCache[K, V]) LoadExisting(key K) (value V, exists bool)

func (*LazyCache[K, V]) Range

func (lazyCache *LazyCache[K, V]) Range(reduce func(key K, value V) bool)

Range calls reduce sequentially for each key and value present in the cache. If reduce returns false, range stops the iteration.

func (*LazyCache[K, V]) Store

func (lazyCache *LazyCache[K, V]) Store(key K, value V)
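
A small sketch of memoizing a cheap stand-in for an expensive load; whether the second return value of Load reports a prior cache hit is an assumption from the signature:

package walgexample // must live inside the wal-g module to import internal

import (
	"strings"

	"github.com/wal-g/wal-g/internal"
)

func memoizedUpper() (string, error) {
	cache := internal.NewLazyCache(func(key string) (string, error) {
		return strings.ToUpper(key), nil // stand-in for an expensive load
	})
	// The first Load invokes the load function; later Loads hit the cache.
	value, _, err := cache.Load("walg")
	return value, err
}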

type LimitedFolder

type LimitedFolder struct {
	storage.Folder
	// contains filtered or unexported fields
}

func NewLimitedFolder

func NewLimitedFolder(folder storage.Folder, limiter *rate.Limiter) *LimitedFolder

func (*LimitedFolder) GetSubFolder

func (lf *LimitedFolder) GetSubFolder(subFolderRelativePath string) storage.Folder

func (*LimitedFolder) PutObject

func (lf *LimitedFolder) PutObject(name string, content io.Reader) error

func (*LimitedFolder) ReadObject

func (lf *LimitedFolder) ReadObject(objectRelativePath string) (io.ReadCloser, error)

type Logging

type Logging struct {
	InfoLogger  InfoLogger
	ErrorLogger ErrorLogger
}

type MetaConstructor

type MetaConstructor interface {
	Init() error
	Finalize(backupName string) error
	MetaInfo() interface{}
}

MetaConstructor is an interface that helps build meta-info about a backup and generate MetaInfo. See MongoMetaConstructor and RedisMetaConstructor.

type NOPTarBall

type NOPTarBall struct {
	// contains filtered or unexported fields
}

NOPTarBall mocks a tarball. Used for prefault logic.

func (*NOPTarBall) AddSize

func (tarBall *NOPTarBall) AddSize(i int64)

func (*NOPTarBall) AwaitUploads

func (tarBall *NOPTarBall) AwaitUploads()

func (*NOPTarBall) CloseTar

func (tarBall *NOPTarBall) CloseTar() error

func (*NOPTarBall) Name

func (tarBall *NOPTarBall) Name() string

func (*NOPTarBall) SetUp

func (tarBall *NOPTarBall) SetUp(crypter crypto.Crypter, params ...string)

func (*NOPTarBall) Size

func (tarBall *NOPTarBall) Size() int64

func (*NOPTarBall) TarWriter

func (tarBall *NOPTarBall) TarWriter() *tar.Writer

type NOPTarBallMaker

type NOPTarBallMaker struct {
	// contains filtered or unexported fields
}

NOPTarBallMaker creates a new NOPTarBall. Used for testing purposes.

func (*NOPTarBallMaker) Make

func (tarBallMaker *NOPTarBallMaker) Make(inheritState bool) TarBall

Make creates a new NOPTarBall.

type NoBackupsFoundError

type NoBackupsFoundError struct {
	// contains filtered or unexported fields
}

func NewNoBackupsFoundError

func NewNoBackupsFoundError() NoBackupsFoundError

func (NoBackupsFoundError) Error

func (err NoBackupsFoundError) Error() string

type NoFilesToExtractError

type NoFilesToExtractError struct {
	// contains filtered or unexported fields
}

func (NoFilesToExtractError) Error

func (err NoFilesToExtractError) Error() string

type NopBundleFiles

type NopBundleFiles struct {
}

func (*NopBundleFiles) AddFile

func (files *NopBundleFiles) AddFile(tarHeader *tar.Header, fileInfo os.FileInfo, isIncremented bool)

func (*NopBundleFiles) AddFileDescription

func (files *NopBundleFiles) AddFileDescription(name string, backupFileDescription BackupFileDescription)

func (*NopBundleFiles) AddFileWithCorruptBlocks

func (files *NopBundleFiles) AddFileWithCorruptBlocks(tarHeader *tar.Header, fileInfo os.FileInfo,
	isIncremented bool, corruptedBlocks []uint32, storeAllBlocks bool)

func (*NopBundleFiles) AddSkippedFile

func (files *NopBundleFiles) AddSkippedFile(tarHeader *tar.Header, fileInfo os.FileInfo)

func (*NopBundleFiles) GetUnderlyingMap

func (files *NopBundleFiles) GetUnderlyingMap() *sync.Map

type NopIncrementDetailsFetcher

type NopIncrementDetailsFetcher struct{}

NopIncrementDetailsFetcher is useful for databases without incremental backup support

func (*NopIncrementDetailsFetcher) Fetch

type NopTarFileSets

type NopTarFileSets struct {
}

func NewNopTarFileSets

func NewNopTarFileSets() *NopTarFileSets

func (*NopTarFileSets) AddFile

func (tarFileSets *NopTarFileSets) AddFile(name string, file string)

func (*NopTarFileSets) AddFiles

func (tarFileSets *NopTarFileSets) AddFiles(name string, files []string)

func (*NopTarFileSets) Get

func (tarFileSets *NopTarFileSets) Get() map[string][]string

type OldestNonPermanentSelector

type OldestNonPermanentSelector struct {
	// contains filtered or unexported fields
}

OldestNonPermanentSelector finds the oldest non-permanent backup available in storage.

func NewOldestNonPermanentSelector

func NewOldestNonPermanentSelector(metaFetcher GenericMetaFetcher) *OldestNonPermanentSelector

func (*OldestNonPermanentSelector) Select

func (s *OldestNonPermanentSelector) Select(folder storage.Folder) (string, error)

TODO: unit tests

type ProfileStopper

type ProfileStopper interface {
	Stop()
}

func Profile

func Profile() (ProfileStopper, error)

type ReaderMaker

type ReaderMaker interface {
	Reader() (io.ReadCloser, error)
	StoragePath() string
	LocalPath() string
	FileType() FileType
	Mode() int64
}

ReaderMaker is the generic interface used by extract. It allows for ease of handling different file formats.

type RegularBundleFiles

type RegularBundleFiles struct {
	sync.Map
}

func (*RegularBundleFiles) AddFile

func (files *RegularBundleFiles) AddFile(tarHeader *tar.Header, fileInfo os.FileInfo, isIncremented bool)

func (*RegularBundleFiles) AddFileDescription

func (files *RegularBundleFiles) AddFileDescription(name string, backupFileDescription BackupFileDescription)

func (*RegularBundleFiles) AddFileWithCorruptBlocks

func (files *RegularBundleFiles) AddFileWithCorruptBlocks(tarHeader *tar.Header, fileInfo os.FileInfo,
	isIncremented bool, corruptedBlocks []uint32, storeAllBlocks bool)

func (*RegularBundleFiles) AddSkippedFile

func (files *RegularBundleFiles) AddSkippedFile(tarHeader *tar.Header, fileInfo os.FileInfo)

func (*RegularBundleFiles) GetUnderlyingMap

func (files *RegularBundleFiles) GetUnderlyingMap() *sync.Map

type RegularJSON

type RegularJSON struct{}

func (RegularJSON) Marshal

func (r RegularJSON) Marshal(dto interface{}) (io.Reader, error)

func (RegularJSON) Unmarshal

func (r RegularJSON) Unmarshal(reader io.Reader, dto interface{}) error

type RegularTarBallComposer

type RegularTarBallComposer struct {
	// contains filtered or unexported fields
}

func NewRegularTarBallComposer

func NewRegularTarBallComposer(
	tarBallQueue *TarBallQueue,
	tarBallFilePacker TarBallFilePacker,
	files BundleFiles,
	tarFileSets TarFileSets,
	crypter crypto.Crypter,
) *RegularTarBallComposer

func (*RegularTarBallComposer) AddFile

func (c *RegularTarBallComposer) AddFile(info *ComposeFileInfo)

func (*RegularTarBallComposer) AddHeader

func (c *RegularTarBallComposer) AddHeader(header *tar.Header, fileInfo os.FileInfo) error

func (*RegularTarBallComposer) FinishComposing

func (c *RegularTarBallComposer) FinishComposing() (TarFileSets, error)

func (*RegularTarBallComposer) GetFiles

func (c *RegularTarBallComposer) GetFiles() BundleFiles

func (*RegularTarBallComposer) SkipFile

func (c *RegularTarBallComposer) SkipFile(tarHeader *tar.Header, fileInfo os.FileInfo)

type RegularTarBallComposerMaker

type RegularTarBallComposerMaker struct {
	// contains filtered or unexported fields
}

func NewRegularTarBallComposerMaker

func NewRegularTarBallComposerMaker(files BundleFiles, tarFileSets TarFileSets) *RegularTarBallComposerMaker

func (*RegularTarBallComposerMaker) Make

func (maker *RegularTarBallComposerMaker) Make(bundle *Bundle) (TarBallComposer, error)

type RegularTarBallFilePacker

type RegularTarBallFilePacker struct {
	// contains filtered or unexported fields
}

func NewRegularTarBallFilePacker

func NewRegularTarBallFilePacker(files BundleFiles) *RegularTarBallFilePacker

func (*RegularTarBallFilePacker) PackFileIntoTar

func (p *RegularTarBallFilePacker) PackFileIntoTar(cfi *ComposeFileInfo, tarBall TarBall) error

type RegularTarFileSets

type RegularTarFileSets map[string][]string

func NewRegularTarFileSets

func NewRegularTarFileSets() *RegularTarFileSets

func (*RegularTarFileSets) AddFile

func (tarFileSets *RegularTarFileSets) AddFile(name string, file string)

func (*RegularTarFileSets) AddFiles

func (tarFileSets *RegularTarFileSets) AddFiles(name string, files []string)

func (*RegularTarFileSets) Get

func (tarFileSets *RegularTarFileSets) Get() map[string][]string

type Sentinel

type Sentinel struct {
	Info os.FileInfo
	Path string
}

Sentinel is used to signal completion of a walked directory.

type SentinelMarshallingError

type SentinelMarshallingError struct {
	// contains filtered or unexported fields
}


func NewSentinelMarshallingError

func NewSentinelMarshallingError(sentinelName string, err error) SentinelMarshallingError

func (SentinelMarshallingError) Error

func (err SentinelMarshallingError) Error() string

type Sleeper

type Sleeper interface {
	Sleep()
}

type SplitStreamUploader

type SplitStreamUploader struct {
	*Uploader
	// contains filtered or unexported fields
}

SplitStreamUploader is an UploaderProvider implementation that splits an upload stream into blocks of blockSize bytes, then puts them into at most `partitions` streams that are compressed and pushed to storage.

func (*SplitStreamUploader) Clone

func (uploader *SplitStreamUploader) Clone() *SplitStreamUploader

func (*SplitStreamUploader) PushStream

func (uploader *SplitStreamUploader) PushStream(stream io.Reader) (string, error)

TODO : unit tests. Returns backup_prefix (note: individual partition names are built by adding a '_0000.br' or '_0000_0000.br' suffix).

type StorageAdapter

type StorageAdapter struct {
	// contains filtered or unexported fields
}

type StorageReaderMaker

type StorageReaderMaker struct {
	Folder storage.Folder

	StorageFileType FileType
	FileMode        int64
	// contains filtered or unexported fields
}

StorageReaderMaker creates readers for downloading from storage

func NewRegularFileStorageReaderMarker

func NewRegularFileStorageReaderMarker(folder storage.Folder, storagePath, localPath string, fileMode int64) *StorageReaderMaker

func NewStorageReaderMaker

func NewStorageReaderMaker(folder storage.Folder, relativePath string) *StorageReaderMaker

func (*StorageReaderMaker) FileType

func (readerMaker *StorageReaderMaker) FileType() FileType

func (*StorageReaderMaker) LocalPath

func (readerMaker *StorageReaderMaker) LocalPath() string

func (*StorageReaderMaker) Mode

func (readerMaker *StorageReaderMaker) Mode() int64

func (*StorageReaderMaker) Reader

func (readerMaker *StorageReaderMaker) Reader() (io.ReadCloser, error)

func (*StorageReaderMaker) StoragePath

func (readerMaker *StorageReaderMaker) StoragePath() string
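A sketch of streaming a single object out of storage through a reader maker; downloadToStdout is a hypothetical helper, and the storage import path is assumed to be WAL-G's pkg/storages/storage module:

package internal

import (
	"io"
	"os"

	"github.com/wal-g/wal-g/pkg/storages/storage"
)

func downloadToStdout(folder storage.Folder, relativePath string) error {
	readerMaker := NewStorageReaderMaker(folder, relativePath)

	// Reader opens the object in storage for streaming reads.
	reader, err := readerMaker.Reader()
	if err != nil {
		return err
	}
	defer reader.Close()

	_, err = io.Copy(os.Stdout, reader)
	return err
}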

type StorageTarBall

type StorageTarBall struct {
	// contains filtered or unexported fields
}

StorageTarBall represents a tar file that is going to be uploaded to storage.

func (*StorageTarBall) AddSize

func (tarBall *StorageTarBall) AddSize(i int64)

AddSize adds i to the total size.

func (*StorageTarBall) AwaitUploads

func (tarBall *StorageTarBall) AwaitUploads()

func (*StorageTarBall) CloseTar

func (tarBall *StorageTarBall) CloseTar() error

CloseTar closes the tar writer, flushing any unwritten data to the underlying writer before also closing the underlying writer.

func (*StorageTarBall) Name

func (tarBall *StorageTarBall) Name() string

func (*StorageTarBall) SetUp

func (tarBall *StorageTarBall) SetUp(crypter crypto.Crypter, names ...string)

SetUp creates a new tar writer and starts the upload to storage. The upload will block until the tar file is finished writing. If a name for the file is not given, a default name of the form `part_....tar.[Compressor file extension]` is used.

func (*StorageTarBall) Size

func (tarBall *StorageTarBall) Size() int64

Size returns the size accumulated in this tarball.

func (*StorageTarBall) TarWriter

func (tarBall *StorageTarBall) TarWriter() *tar.Writer

type StorageTarBallMaker

type StorageTarBallMaker struct {
	// contains filtered or unexported fields
}

StorageTarBallMaker creates tarballs that are uploaded to storage.

func NewStorageTarBallMaker

func NewStorageTarBallMaker(backupName string, uploader *Uploader) *StorageTarBallMaker

func (*StorageTarBallMaker) Make

func (tarBallMaker *StorageTarBallMaker) Make(dedicatedUploader bool) TarBall

Make returns a tarball with required storage fields.

type StreamFetcher

type StreamFetcher = func(backup Backup, writeCloser io.WriteCloser) error

func GetBackupStreamFetcher

func GetBackupStreamFetcher(backup Backup) (StreamFetcher, error)

type StreamedJSON

type StreamedJSON struct{}

func (StreamedJSON) Marshal

func (s StreamedJSON) Marshal(dto interface{}) (io.Reader, error)

func (StreamedJSON) Unmarshal

func (s StreamedJSON) Unmarshal(reader io.Reader, dto interface{}) error

type TarBall

type TarBall interface {
	SetUp(crypter crypto.Crypter, args ...string)
	CloseTar() error
	Size() int64
	AddSize(int64)
	TarWriter() *tar.Writer
	AwaitUploads()
	Name() string
}

A TarBall represents one tar file.
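A hedged sketch of the write path this interface implies: set up the tarball, write one entry through its tar.Writer, account for the bytes, then close and wait for uploads. Passing a nil crypter to skip encryption is an assumption of this sketch, and writeOneEntry is a hypothetical helper:

package internal

import (
	"archive/tar"
	"log"
)

func writeOneEntry(tarBall TarBall) {
	tarBall.SetUp(nil) // nil crypter: no encryption in this sketch

	payload := []byte("hello")
	header := &tar.Header{Name: "hello.txt", Mode: 0o644, Size: int64(len(payload))}

	writer := tarBall.TarWriter()
	if err := writer.WriteHeader(header); err != nil {
		log.Fatal(err)
	}
	if _, err := writer.Write(payload); err != nil {
		log.Fatal(err)
	}
	tarBall.AddSize(int64(len(payload)))

	if err := tarBall.CloseTar(); err != nil {
		log.Fatal(err)
	}
	tarBall.AwaitUploads()
}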

type TarBallComposer

type TarBallComposer interface {
	AddFile(info *ComposeFileInfo)
	AddHeader(header *tar.Header, fileInfo os.FileInfo) error
	SkipFile(tarHeader *tar.Header, fileInfo os.FileInfo)
	FinishComposing() (TarFileSets, error)
	GetFiles() BundleFiles
}

type TarBallComposerMaker

type TarBallComposerMaker interface {
	Make(bundle *Bundle) (TarBallComposer, error)
}

TarBallComposerMaker is used to make an instance of TarBallComposer

type TarBallFilePacker

type TarBallFilePacker interface {
	PackFileIntoTar(cfi *ComposeFileInfo, tarBall TarBall) error
}

type TarBallMaker

type TarBallMaker interface {
	Make(dedicatedUploader bool) TarBall
}

TarBallMaker is used to allow for flexible creation of different TarBalls.

func NewNopTarBallMaker

func NewNopTarBallMaker() TarBallMaker

type TarBallQueue

type TarBallQueue struct {
	TarSizeThreshold   int64
	AllTarballsSize    *int64
	TarBallMaker       TarBallMaker
	LastCreatedTarball TarBall
	// contains filtered or unexported fields
}

TarBallQueue is used to process multiple tarballs concurrently

func NewTarBallQueue

func NewTarBallQueue(tarSizeThreshold int64, tarBallMaker TarBallMaker) *TarBallQueue

func (*TarBallQueue) CheckSizeAndEnqueueBack

func (tarQueue *TarBallQueue) CheckSizeAndEnqueueBack(tarBall TarBall) error

func (*TarBallQueue) CloseTarball

func (tarQueue *TarBallQueue) CloseTarball(tarBall TarBall) error

func (*TarBallQueue) Deque

func (tarQueue *TarBallQueue) Deque() TarBall

func (*TarBallQueue) DequeCtx

func (tarQueue *TarBallQueue) DequeCtx(ctx context.Context) (TarBall, error)

DequeCtx returns a TarBall from the queue. If the context finishes before it can do so, it returns the result of ctx.Err().
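A sketch of bounding the wait with a deadline; the five-second timeout and the dequeWithTimeout helper are arbitrary illustrations:

package internal

import (
	"context"
	"time"
)

func dequeWithTimeout(queue *TarBallQueue) (TarBall, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// If the deadline elapses before a tarball is free,
	// DequeCtx returns ctx.Err().
	return queue.DequeCtx(ctx)
}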

func (*TarBallQueue) EnqueueBack

func (tarQueue *TarBallQueue) EnqueueBack(tarBall TarBall)

func (*TarBallQueue) FinishQueue

func (tarQueue *TarBallQueue) FinishQueue() error

func (*TarBallQueue) FinishTarBall

func (tarQueue *TarBallQueue) FinishTarBall(tarBall TarBall) error

func (*TarBallQueue) NewTarBall

func (tarQueue *TarBallQueue) NewTarBall(dedicatedUploader bool) TarBall

NewTarBall starts writing a new tarball.

func (*TarBallQueue) StartQueue

func (tarQueue *TarBallQueue) StartQueue() error
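A hedged sketch of the queue lifecycle implied by the methods above, using NewNopTarBallMaker so the sketch has no storage dependency; the 1 GiB size threshold is illustrative:

package internal

import "log"

func tarBallQueueSketch() {
	queue := NewTarBallQueue(1<<30, NewNopTarBallMaker())
	if err := queue.StartQueue(); err != nil {
		log.Fatal(err)
	}

	tarBall := queue.Deque() // take a tarball to write into
	// ... write entries via tarBall.TarWriter() ...
	if err := queue.FinishTarBall(tarBall); err != nil { // close and release it
		log.Fatal(err)
	}

	if err := queue.FinishQueue(); err != nil { // wait for all tarballs to finish
		log.Fatal(err)
	}
}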

type TarFileSets

type TarFileSets interface {
	AddFile(name string, file string)
	AddFiles(name string, files []string)
	Get() map[string][]string
}

type TarInterpreter

type TarInterpreter interface {
	Interpret(reader io.Reader, header *tar.Header) error
}

TarInterpreter behaves differently for different file types.

func NewFileTarInterpreter

func NewFileTarInterpreter(directoryToSave string) TarInterpreter
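A sketch of a custom TarInterpreter that merely counts entry bytes instead of writing files to disk; countingInterpreter is hypothetical and not part of the package:

package internal

import (
	"archive/tar"
	"io"
)

type countingInterpreter struct {
	totalBytes int64
}

// Interpret discards the entry body but records how many bytes it held.
func (c *countingInterpreter) Interpret(reader io.Reader, header *tar.Header) error {
	n, err := io.Copy(io.Discard, reader)
	c.totalBytes += n
	return err
}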

type TarSizeError

type TarSizeError struct {
	// contains filtered or unexported fields
}

type TimedBackup

type TimedBackup interface {
	Name() string
	StartTime() time.Time
	IsPermanent() bool
}

type UnconfiguredStorageError

type UnconfiguredStorageError struct {
	// contains filtered or unexported fields
}

func (UnconfiguredStorageError) Error

func (err UnconfiguredStorageError) Error() string

type UnknownCompressionMethodError

type UnknownCompressionMethodError struct {
	// contains filtered or unexported fields
}

func (UnknownCompressionMethodError) Error

type UnknownSerializerTypeError

type UnknownSerializerTypeError struct {
	// contains filtered or unexported fields
}

func NewUnknownSerializerTypeError

func NewUnknownSerializerTypeError(serializerType DtoSerializerType) UnknownSerializerTypeError

func (UnknownSerializerTypeError) Error

func (err UnknownSerializerTypeError) Error() string

type UnmarshallingError

type UnmarshallingError struct {
	// contains filtered or unexported fields
}

func (UnmarshallingError) Error

func (err UnmarshallingError) Error() string

type UnsetRequiredSettingError

type UnsetRequiredSettingError struct {
	// contains filtered or unexported fields
}

func NewUnsetRequiredSettingError

func NewUnsetRequiredSettingError(settingName string) UnsetRequiredSettingError

func (UnsetRequiredSettingError) Error

func (err UnsetRequiredSettingError) Error() string

type UnsupportedFileTypeError

type UnsupportedFileTypeError struct {
	// contains filtered or unexported fields
}

UnsupportedFileTypeError is used to signal file types that are unsupported by WAL-G.

func (UnsupportedFileTypeError) Error

func (err UnsupportedFileTypeError) Error() string

type UploadObject

type UploadObject struct {
	Path    string
	Content io.Reader
}

UploadObject bundles a storage path with the reader supplying its content.

type Uploader

type Uploader struct {
	UploadingFolder storage.Folder
	Compressor      compression.Compressor

	ArchiveStatusManager   asm.ArchiveStatusManager
	PGArchiveStatusManager asm.ArchiveStatusManager
	Failed                 atomic.Value
	// contains filtered or unexported fields
}

Uploader contains fields associated with uploading tarballs. Multiple tarballs can share one uploader.

func ConfigureUploader

func ConfigureUploader() (uploader *Uploader, err error)

ConfigureUploader connects to storage and creates an uploader. It makes sure that a valid session has started; if it is invalid, it returns the AWS error and nil values.

func NewUploader

func NewUploader(
	compressor compression.Compressor,
	uploadingLocation storage.Folder,
) *Uploader

FIXME: return UploaderProvider

func (*Uploader) ChangeDirectory

func (uploader *Uploader) ChangeDirectory(relativePath string)

func (*Uploader) Clone

func (uploader *Uploader) Clone() *Uploader

Clone creates a similar Uploader with a new WaitGroup.

func (*Uploader) Compression

func (uploader *Uploader) Compression() compression.Compressor

Compression returns the configured compressor.

func (*Uploader) DisableSizeTracking

func (uploader *Uploader) DisableSizeTracking()

DisableSizeTracking stops bandwidth tracking

func (*Uploader) Finish

func (uploader *Uploader) Finish()

Finish waits for all waiting parts to be uploaded. If an error occurs, prints alert to stderr.

func (*Uploader) Folder

func (uploader *Uploader) Folder() storage.Folder

func (*Uploader) PushStream

func (uploader *Uploader) PushStream(stream io.Reader) (string, error)

TODO : unit tests. PushStream compresses a stream and pushes it.

func (*Uploader) PushStreamToDestination

func (uploader *Uploader) PushStreamToDestination(stream io.Reader, dstPath string) error

TODO : unit tests. PushStreamToDestination compresses a stream and pushes it to the specified destination.

func (*Uploader) RawDataSize

func (uploader *Uploader) RawDataSize() (int64, error)

RawDataSize returns 0 and an error when size tracking is disabled (see DisableSizeTracking).

func (*Uploader) Upload

func (uploader *Uploader) Upload(path string, content io.Reader) error

TODO : unit tests
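A sketch of the configure → upload → finish flow; the object path and payload are illustrative, and ConfigureUploader is assumed to pick up storage settings from the environment variables listed among the package constants:

package internal

import (
	"bytes"
	"log"
)

func uploadSketch() {
	uploader, err := ConfigureUploader()
	if err != nil {
		log.Fatal(err)
	}

	content := bytes.NewReader([]byte(`{"hello":"wal-g"}`))
	if err := uploader.Upload("example/sentinel.json", content); err != nil {
		log.Fatal(err)
	}

	// Finish blocks until all queued parts are uploaded.
	uploader.Finish()
}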

func (*Uploader) UploadFile

func (uploader *Uploader) UploadFile(file ioextensions.NamedReader) error

TODO : unit tests. UploadFile compresses a file and uploads it.

func (*Uploader) UploadMultiple

func (uploader *Uploader) UploadMultiple(objects []UploadObject) error

UploadMultiple uploads multiple objects from the start of the slice, returning the first error if any. Note that this operation is not atomic. TODO : unit tests
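A sketch of batching several small objects; because the operation is not atomic, a returned error only means some prefix of the slice may already have been uploaded. Paths, payloads, and the helper name are illustrative:

package internal

import "strings"

func uploadManySketch(uploader *Uploader) error {
	objects := []UploadObject{
		{Path: "meta/first.json", Content: strings.NewReader(`{"n":1}`)},
		{Path: "meta/second.json", Content: strings.NewReader(`{"n":2}`)},
	}
	return uploader.UploadMultiple(objects)
}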

func (*Uploader) UploadedDataSize

func (uploader *Uploader) UploadedDataSize() (int64, error)

UploadedDataSize returns 0 and an error when size tracking is disabled (see DisableSizeTracking).

type UploaderProvider

type UploaderProvider interface {
	Upload(path string, content io.Reader) error
	UploadFile(file ioextensions.NamedReader) error
	PushStream(stream io.Reader) (string, error)
	PushStreamToDestination(stream io.Reader, dstPath string) error
	Compression() compression.Compressor
	DisableSizeTracking()
	UploadedDataSize() (int64, error)
	RawDataSize() (int64, error)
	ChangeDirectory(relativePath string)
	Folder() storage.Folder
}

func ConfigureSplitUploader

func ConfigureSplitUploader() (UploaderProvider, error)

func NewSplitStreamUploader

func NewSplitStreamUploader(
	uploader *Uploader,
	partitions int,
	blockSize int,
	maxFileSize int,
) UploaderProvider
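A sketch of wrapping a base Uploader into a split-stream uploader; the partition, block-size, and max-file-size numbers are arbitrary stand-ins for the WALG_STREAM_SPLITTER_* settings:

package internal

import "io"

func splitUploadSketch(base *Uploader, stream io.Reader) (string, error) {
	splitUploader := NewSplitStreamUploader(
		base,
		4,     // partitions: at most 4 parallel compressed streams
		1<<20, // blockSize: 1 MiB blocks distributed across partitions
		1<<30, // maxFileSize: illustrative cap for a single partition file
	)
	return splitUploader.PushStream(stream)
}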

type UserDataBackupSelector

type UserDataBackupSelector struct {
	// contains filtered or unexported fields
}

UserDataBackupSelector selects the backup that has the provided user data.

func NewUserDataBackupSelector

func NewUserDataBackupSelector(userDataRaw string, metaFetcher GenericMetaFetcher) (UserDataBackupSelector, error)

func (UserDataBackupSelector) Select

func (s UserDataBackupSelector) Select(folder storage.Folder) (string, error)

TODO: unit tests
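A sketch of selecting a backup by user data; the JSON payload is illustrative, and the GenericMetaFetcher must be supplied by the caller:

package internal

import (
	"log"

	"github.com/wal-g/wal-g/pkg/storages/storage"
)

func selectByUserData(folder storage.Folder, metaFetcher GenericMetaFetcher) {
	selector, err := NewUserDataBackupSelector(`{"purpose": "nightly"}`, metaFetcher)
	if err != nil {
		log.Fatal(err)
	}

	backupName, err := selector.Select(folder)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("selected backup: %s", backupName)
}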

type WrongTypeError

type WrongTypeError struct {
	// contains filtered or unexported fields
}

func NewWrongTypeError

func NewWrongTypeError(desiredType string) WrongTypeError

func (WrongTypeError) Error

func (err WrongTypeError) Error() string

Directories

Path	Synopsis
abool	Package abool provides atomic Boolean type for cleaner code and better performance.
lz4
databases
fdb
testtools
mock_internal	Package mock_internal is a generated GoMock package.
