greenplum

package
v0.0.0-...-e89a0ad Latest Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Mar 18, 2024 License: Apache-2.0 Imports: 38 Imported by: 0

Documentation

Index

Constants

View Source
const (
	AoStoragePath       = "aosegments"
	AoSegSuffix         = "_aoseg"
	AoSegDeltaDelimiter = "_D_"
)
View Source
const (
	BackupNamePrefix     = "backup_"
	BackupNameLength     = len(BackupNamePrefix) + len(utility.BackupTimeFormat)
	SegBackupLogPrefix   = "wal-g-log"
	SegBackupPushCmdName = "seg-backup-push"
)
View Source
const AOFilesMetadataName = "ao_files_metadata.json"
View Source
const MetadataDatetimeFormat = "%Y-%m-%dT%H:%M:%S.%fZ"
View Source
const PgHbaTemplate = `` /* 759-byte string literal not displayed */
View Source
const RestorePointSuffix = "_restore_point.json"
View Source
const SegmentsFolderPath = "segments_" + utility.VersionStr + "/"
View Source
const SignatureMagicNumber byte = 0x56

Variables

View Source
var IncrementFileHeader = []byte{'w', 'i', '1', SignatureMagicNumber}

IncrementFileHeader contains "wi" at the head, which stands for "wal-g increment", followed by the format version "1" and the signature magic number

Functions

func ApplyFileIncrement

func ApplyFileIncrement(fileName string, increment io.Reader, fsync bool) error

func ConfigureSegContentID

func ConfigureSegContentID(contentIDFlag string) (int, error)

func FindRestorePointBeforeTS

func FindRestorePointBeforeTS(timestampStr string, folder storage.Folder) (string, error)

FindRestorePointBeforeTS finds the restore point that was created before the provided timestamp and whose finish time is closest to that timestamp

func FormatCmdStateName

func FormatCmdStateName(contentID int, cmdName string) string

func FormatCmdStatePath

func FormatCmdStatePath(contentID int, cmdName string) string

func FormatSegmentBackupPath

func FormatSegmentBackupPath(contentID int) string

func FormatSegmentStateFolderPath

func FormatSegmentStateFolderPath(contentID int) string

func FormatSegmentStoragePrefix

func FormatSegmentStoragePrefix(contentID int) string

func FormatSegmentWalPath

func FormatSegmentWalPath(contentID int) string

func HandleDetailedBackupList

func HandleDetailedBackupList(folder storage.Folder, pretty, json bool)

func HandleRestorePointList

func HandleRestorePointList(folder storage.Folder, pretty, json bool)

func LoadStorageAoFiles

func LoadStorageAoFiles(baseBackupsFolder storage.Folder) (map[string]struct{}, error)

LoadStorageAoFiles loads the list of the AO/AOCS segment files that are referenced from previous backups

func NewGreenplumBackupFetcher

func NewGreenplumBackupFetcher(restoreCfgPath string, inPlaceRestore bool, logsDir string,
	fetchContentIds []int, mode BackupFetchMode, restorePoint string,
) func(folder storage.Folder, backup internal.Backup)

func NewIncrementalPageReader

func NewIncrementalPageReader(file io.ReadSeekCloser, eof, offset int64) (io.ReadCloser, error)

func NewSegBackupHandler

func NewSegBackupHandler(arguments postgres.BackupArguments) (*postgres.BackupHandler, error)

func ReadIncrementFileHeader

func ReadIncrementFileHeader(reader io.Reader) error

func RestorePointMetadataFileName

func RestorePointMetadataFileName(pointName string) string

func SetSegmentStoragePrefix

func SetSegmentStoragePrefix(contentID int)

func StripRightmostRestorePointName

func StripRightmostRestorePointName(path string) string

func ValidateMatch

func ValidateMatch(folder storage.Folder, backupName string, restorePoint string) error

ValidateMatch checks that the restore point is reachable from the provided backup

Types

type AOFilesMetadataDTO

type AOFilesMetadataDTO struct {
	Files BackupAOFiles
}

func NewAOFilesMetadataDTO

func NewAOFilesMetadataDTO() *AOFilesMetadataDTO

type AoRelFileMetadata

type AoRelFileMetadata struct {
	// contains filtered or unexported fields
}

func NewAoRelFileMetadata

func NewAoRelFileMetadata(relNameMd5 string, storageType RelStorageType, eof, modCount int64) AoRelFileMetadata

type AoRelFileStorageMap

type AoRelFileStorageMap map[walparser.BlockLocation]AoRelFileMetadata

AoRelFileStorageMap indicates the storage type for the relfile

func NewAoRelFileStorageMap

func NewAoRelFileStorageMap(queryRunner *GpQueryRunner) (AoRelFileStorageMap, error)

type AoStorageUploader

type AoStorageUploader struct {
	// contains filtered or unexported fields
}

func NewAoStorageUploader

func NewAoStorageUploader(uploader internal.Uploader, baseAoFiles BackupAOFiles,
	crypter crypto.Crypter, files internal.BundleFiles, isIncremental bool) *AoStorageUploader

func (*AoStorageUploader) AddFile

func (*AoStorageUploader) GetFiles

func (u *AoStorageUploader) GetFiles() *AOFilesMetadataDTO

type Backup

type Backup struct {
	internal.Backup
	SentinelDto *BackupSentinelDto // used for storage query caching
	// contains filtered or unexported fields
}

Backup contains information about a valid Greenplum backup generated and uploaded by WAL-G.

func ListStorageBackups

func ListStorageBackups(folder storage.Folder) ([]Backup, error)

ListStorageBackups returns the list of storage backups sorted by finish time (in ascending order)

func NewBackup

func NewBackup(rootFolder storage.Folder, name string) Backup

func (*Backup) GetSegmentBackup

func (backup *Backup) GetSegmentBackup(backupID string, contentID int) (SegBackup, error)

func (*Backup) GetSentinel

func (backup *Backup) GetSentinel() (BackupSentinelDto, error)

type BackupAOFileDesc

type BackupAOFileDesc struct {
	StoragePath   string         `json:"StoragePath"`
	IsSkipped     bool           `json:"IsSkipped"`
	IsIncremented bool           `json:"IsIncremented,omitempty"`
	MTime         time.Time      `json:"MTime"`
	StorageType   RelStorageType `json:"StorageType"`
	EOF           int64          `json:"EOF"`
	ModCount      int64          `json:"ModCount,omitempty"`
	Compressor    string         `json:"Compressor,omitempty"`
	FileMode      int64          `json:"FileMode"`
}

type BackupAOFiles

type BackupAOFiles map[string]BackupAOFileDesc

type BackupArguments

type BackupArguments struct {
	// contains filtered or unexported fields
}

BackupArguments holds all arguments parsed from the command line for this handler

func NewBackupArguments

func NewBackupArguments(isPermanent, isFull bool, userData interface{}, fwdArgs []SegmentFwdArg, logsDir string,
	segPollInterval time.Duration, segPollRetries int, deltaBaseSelector internal.BackupSelector) BackupArguments

NewBackupArguments creates a BackupArguments object to hold the arguments from the command line

type BackupDetail

type BackupDetail struct {
	Name         string
	RestorePoint *string     `json:"restore_point,omitempty"`
	UserData     interface{} `json:"user_data,omitempty"`

	StartTime        time.Time `json:"start_time"`
	FinishTime       time.Time `json:"finish_time"`
	DatetimeFormat   string    `json:"date_fmt,omitempty"`
	Hostname         string    `json:"hostname"`
	GpVersion        string    `json:"gp_version"`
	IsPermanent      bool      `json:"is_permanent"`
	SystemIdentifier *uint64   `json:"system_identifier,omitempty"`

	UncompressedSize int64 `json:"uncompressed_size"`
	CompressedSize   int64 `json:"compressed_size"`
	DataCatalogSize  int64 `json:"data_catalog_size"`

	IncrementFrom     *string `json:"increment_from,omitempty"`
	IncrementFullName *string `json:"increment_full_name,omitempty"`
	IncrementCount    *int    `json:"increment_count,omitempty"`
}

func MakeBackupDetails

func MakeBackupDetails(backups []Backup) []BackupDetail

func NewBackupDetail

func NewBackupDetail(backup Backup) BackupDetail

type BackupFetchMode

type BackupFetchMode string
const (
	DefaultFetchMode BackupFetchMode = "default"
	UnpackFetchMode  BackupFetchMode = "unpack"
	PrepareFetchMode BackupFetchMode = "prepare"
)

func NewBackupFetchMode

func NewBackupFetchMode(mode string) (BackupFetchMode, error)

type BackupHandler

type BackupHandler struct {
	// contains filtered or unexported fields
}

BackupHandler is the main struct which is handling the backup process

func NewBackupHandler

func NewBackupHandler(arguments BackupArguments) (bh *BackupHandler, err error)

NewBackupHandler returns a backup handler object, which can handle the backup

func (*BackupHandler) HandleBackupPush

func (bh *BackupHandler) HandleBackupPush()

HandleBackupPush handles the backup being read from filesystem and being pushed to the repository

type BackupObject

type BackupObject struct {
	internal.BackupObject
	// contains filtered or unexported fields
}

func (BackupObject) GetBaseBackupName

func (o BackupObject) GetBaseBackupName() string

func (BackupObject) GetIncrementFromName

func (o BackupObject) GetIncrementFromName() string

func (BackupObject) IsFullBackup

func (o BackupObject) IsFullBackup() bool

type BackupSentinelDto

type BackupSentinelDto struct {
	RestorePoint *string           `json:"restore_point,omitempty"`
	Segments     []SegmentMetadata `json:"segments,omitempty"`
	UserData     interface{}       `json:"user_data,omitempty"`

	StartTime        time.Time `json:"start_time"`
	FinishTime       time.Time `json:"finish_time"`
	DatetimeFormat   string    `json:"date_fmt,omitempty"`
	Hostname         string    `json:"hostname"`
	GpVersion        string    `json:"gp_version"`
	IsPermanent      bool      `json:"is_permanent"`
	SystemIdentifier *uint64   `json:"system_identifier"`

	UncompressedSize int64 `json:"uncompressed_size"`
	CompressedSize   int64 `json:"compressed_size"`
	DataCatalogSize  int64 `json:"data_catalog_size"`

	IncrementFrom     *string `json:"increment_from,omitempty"`
	IncrementFullName *string `json:"increment_full_name,omitempty"`
	IncrementCount    *int    `json:"increment_count,omitempty"`
}

BackupSentinelDto describes file structure of json sentinel

func NewBackupSentinelDto

func NewBackupSentinelDto(currBackupInfo *CurrBackupInfo, prevBackupInfo *PrevBackupInfo, restoreLSNs map[int]string, userData interface{},
	isPermanent bool) BackupSentinelDto

NewBackupSentinelDto returns new BackupSentinelDto instance

func (*BackupSentinelDto) IsIncremental

func (s *BackupSentinelDto) IsIncremental() (isIncremental bool)

func (*BackupSentinelDto) String

func (s *BackupSentinelDto) String() string

type BackupWorkers

type BackupWorkers struct {
	Uploader internal.Uploader
	Conn     *pgx.Conn
}

BackupWorkers holds the external objects that the handler uses to get the backup data / write the backup data

type ClusterRestoreConfig

type ClusterRestoreConfig struct {
	Segments map[int]SegmentRestoreConfig `json:"segments"`
}

ClusterRestoreConfig is used to describe the restored cluster

type CurrBackupInfo

type CurrBackupInfo struct {
	// contains filtered or unexported fields
}

CurrBackupInfo holds all information that is harvested during the backup process

type DeleteArgs

type DeleteArgs struct {
	Confirmed bool
	FindFull  bool
}

type DeleteHandler

type DeleteHandler struct {
	internal.DeleteHandler
	// contains filtered or unexported fields
}

func NewDeleteHandler

func NewDeleteHandler(folder storage.Folder, args DeleteArgs) (*DeleteHandler, error)

func (*DeleteHandler) DeleteBeforeTarget

func (h *DeleteHandler) DeleteBeforeTarget(target internal.BackupObject) error

func (*DeleteHandler) HandleDeleteBefore

func (h *DeleteHandler) HandleDeleteBefore(args []string)

func (*DeleteHandler) HandleDeleteEverything

func (h *DeleteHandler) HandleDeleteEverything(args []string)

func (*DeleteHandler) HandleDeleteGarbage

func (h *DeleteHandler) HandleDeleteGarbage(args []string) error

HandleDeleteGarbage deletes outdated WAL archives and leftover backup files

func (*DeleteHandler) HandleDeleteRetain

func (h *DeleteHandler) HandleDeleteRetain(args []string)

func (*DeleteHandler) HandleDeleteRetainAfter

func (h *DeleteHandler) HandleDeleteRetainAfter(args []string)

func (*DeleteHandler) HandleDeleteTarget

func (h *DeleteHandler) HandleDeleteTarget(targetSelector internal.BackupSelector)

type ExtractProviderImpl

type ExtractProviderImpl struct {
	FilesToExtractProviderImpl
}

func (ExtractProviderImpl) Get

func (t ExtractProviderImpl) Get(
	backup postgres.Backup,
	filesToUnwrap map[string]bool,
	skipRedundantTars bool,
	dbDataDir string,
	createNewIncrementalFiles bool,
) (postgres.IncrementalTarInterpreter, []internal.ReaderMaker, string, error)

type FetchHandler

type FetchHandler struct {
	// contains filtered or unexported fields
}

func NewFetchHandler

func NewFetchHandler(
	backup internal.Backup, sentinel BackupSentinelDto,
	segCfgMaker SegConfigMaker, logsDir string,
	fetchContentIds []int, mode BackupFetchMode,
	restorePoint string,
) *FetchHandler

nolint:gocritic

func (*FetchHandler) Fetch

func (fh *FetchHandler) Fetch() error

func (*FetchHandler) Prepare

func (fh *FetchHandler) Prepare() error

func (*FetchHandler) Unpack

func (fh *FetchHandler) Unpack()

type FilesToExtractProviderImpl

type FilesToExtractProviderImpl struct {
	postgres.FilesToExtractProviderImpl
}

func (FilesToExtractProviderImpl) Get

func (t FilesToExtractProviderImpl) Get(backup SegBackup, filesToUnwrap map[string]bool, skipRedundantTars bool) (
	tarsToExtract []internal.ReaderMaker, pgControlKey string, err error)

type GenericMetaFetcher

type GenericMetaFetcher struct{}

func NewGenericMetaFetcher

func NewGenericMetaFetcher() GenericMetaFetcher

func (GenericMetaFetcher) Fetch

func (mf GenericMetaFetcher) Fetch(backupName string, backupFolder storage.Folder) (internal.GenericMetadata, error)

TODO: Unit tests

type GenericMetaInteractor

type GenericMetaInteractor struct {
	GenericMetaFetcher
	GenericMetaSetter
}

func NewGenericMetaInteractor

func NewGenericMetaInteractor() GenericMetaInteractor

type GenericMetaSetter

type GenericMetaSetter struct{}

func NewGenericMetaSetter

func NewGenericMetaSetter() GenericMetaSetter

func (GenericMetaSetter) SetIsPermanent

func (ms GenericMetaSetter) SetIsPermanent(backupName string, backupFolder storage.Folder, isPermanent bool) error

TODO: Unit tests

func (GenericMetaSetter) SetUserData

func (ms GenericMetaSetter) SetUserData(backupName string, backupFolder storage.Folder, userData interface{}) error

TODO: Unit tests

type GpQueryRunner

type GpQueryRunner struct {
	*postgres.PgQueryRunner
}

GpQueryRunner is implementation for controlling Greenplum

func NewGpQueryRunner

func NewGpQueryRunner(conn *pgx.Conn) (*GpQueryRunner, error)

NewGpQueryRunner builds QueryRunner from available connection

func ToGpQueryRunner

func ToGpQueryRunner(queryRunner *postgres.PgQueryRunner) *GpQueryRunner

func (*GpQueryRunner) AbortBackup

func (queryRunner *GpQueryRunner) AbortBackup() (err error)

AbortBackup stops the backup process on all segments

func (*GpQueryRunner) CreateGreenplumRestorePoint

func (queryRunner *GpQueryRunner) CreateGreenplumRestorePoint(restorePointName string) (restoreLSNs map[int]string, err error)

CreateGreenplumRestorePoint creates a restore point

func (*GpQueryRunner) FetchAOStorageMetadata

func (queryRunner *GpQueryRunner) FetchAOStorageMetadata(dbInfo postgres.PgDatabaseInfo) (AoRelFileStorageMap, error)

FetchAOStorageMetadata queries the storage metadata for AO & AOCS tables (GreenplumDB)

func (*GpQueryRunner) GetGreenplumSegmentsInfo

func (queryRunner *GpQueryRunner) GetGreenplumSegmentsInfo(semVer semver.Version) (segments []cluster.SegConfig, err error)

GetGreenplumSegmentsInfo returns the information about segments

func (*GpQueryRunner) GetGreenplumVersion

func (queryRunner *GpQueryRunner) GetGreenplumVersion() (version string, err error)

GetGreenplumVersion returns version

func (*GpQueryRunner) IsInBackup

func (queryRunner *GpQueryRunner) IsInBackup() (isInBackupByContentID map[int]bool, err error)

IsInBackup checks whether there is a backup currently running

type GpTarBallComposer

type GpTarBallComposer struct {
	// contains filtered or unexported fields
}

func NewGpTarBallComposer

func NewGpTarBallComposer(
	tarBallQueue *internal.TarBallQueue, crypter crypto.Crypter, relStorageMap AoRelFileStorageMap,
	bundleFiles internal.BundleFiles, packer *postgres.TarBallFilePackerImpl, aoStorageUploader *AoStorageUploader,
	tarFileSets internal.TarFileSets, uploader internal.Uploader, backupName string,
) (*GpTarBallComposer, error)

func (*GpTarBallComposer) AddFile

func (c *GpTarBallComposer) AddFile(info *internal.ComposeFileInfo)

func (*GpTarBallComposer) AddHeader

func (c *GpTarBallComposer) AddHeader(fileInfoHeader *tar.Header, info os.FileInfo) error

func (*GpTarBallComposer) FinishComposing

func (c *GpTarBallComposer) FinishComposing() (internal.TarFileSets, error)

func (*GpTarBallComposer) GetFiles

func (c *GpTarBallComposer) GetFiles() internal.BundleFiles

func (*GpTarBallComposer) SkipFile

func (c *GpTarBallComposer) SkipFile(tarHeader *tar.Header, fileInfo os.FileInfo)

type GpTarBallComposerMaker

type GpTarBallComposerMaker struct {
	TarFileSets internal.TarFileSets
	// contains filtered or unexported fields
}

func NewGpTarBallComposerMaker

func NewGpTarBallComposerMaker(relStorageMap AoRelFileStorageMap, uploader internal.Uploader, backupName string,
) (*GpTarBallComposerMaker, error)

func (*GpTarBallComposerMaker) Make

type InPlaceSegMaker

type InPlaceSegMaker struct{}

func (*InPlaceSegMaker) Make

func (c *InPlaceSegMaker) Make(metadata SegmentMetadata) (cluster.SegConfig, error)

type IncrementalTarInterpreter

type IncrementalTarInterpreter struct {
	*postgres.FileTarInterpreter
	// contains filtered or unexported fields
}

func NewIncrementalTarInterpreter

func NewIncrementalTarInterpreter(dbDataDirectory string, sentinel postgres.BackupSentinelDto, filesMetadata postgres.FilesMetadataDto,
	aoFilesMetadata AOFilesMetadataDTO,
	filesToUnwrap map[string]bool, createNewIncrementalFiles bool) *IncrementalTarInterpreter

func (*IncrementalTarInterpreter) Interpret

func (i *IncrementalTarInterpreter) Interpret(reader io.Reader, header *tar.Header) error

type InvalidIncrementFileHeaderError

type InvalidIncrementFileHeaderError struct {
	// contains filtered or unexported fields
}

func (InvalidIncrementFileHeaderError) Error

type NoRestorePointsFoundError

type NoRestorePointsFoundError struct {
	// contains filtered or unexported fields
}

func NewNoRestorePointsFoundError

func NewNoRestorePointsFoundError() NoRestorePointsFoundError

type PgHbaMaker

type PgHbaMaker struct {
	// contains filtered or unexported fields
}

func NewPgHbaMaker

func NewPgHbaMaker(segments map[int][]*cluster.SegConfig) PgHbaMaker

func (PgHbaMaker) Make

func (m PgHbaMaker) Make() (string, error)

type PgSegmentSentinelDto

type PgSegmentSentinelDto struct {
	postgres.BackupSentinelDto
	BackupName string
}

PgSegmentSentinelDto is used during the initial fetching of the segment backup metadata

type PrevBackupInfo

type PrevBackupInfo struct {
	// contains filtered or unexported fields
}

type RecoveryConfigMaker

type RecoveryConfigMaker struct {
	// contains filtered or unexported fields
}

func NewRecoveryConfigMaker

func NewRecoveryConfigMaker(walgBinaryPath, cfgPath, recoveryTargetName string) RecoveryConfigMaker

func (RecoveryConfigMaker) Make

func (m RecoveryConfigMaker) Make(contentID int) string

type RelStorageType

type RelStorageType byte
const (
	AppendOptimized RelStorageType = 'a'
	ColumnOriented  RelStorageType = 'c'
)

type RestoreCfgSegMaker

type RestoreCfgSegMaker struct {
	// contains filtered or unexported fields
}

func (*RestoreCfgSegMaker) Make

type RestorePointBackupSelector

type RestorePointBackupSelector struct {
	// contains filtered or unexported fields
}

func NewRestorePointBackupSelector

func NewRestorePointBackupSelector(restorePoint string) *RestorePointBackupSelector

func (*RestorePointBackupSelector) Select

func (s *RestorePointBackupSelector) Select(folder storage.Folder) (string, error)

type RestorePointCreator

type RestorePointCreator struct {
	Uploader internal.Uploader
	Conn     *pgx.Conn
	// contains filtered or unexported fields
}

func NewRestorePointCreator

func NewRestorePointCreator(pointName string) (rpc *RestorePointCreator, err error)

NewRestorePointCreator returns a restore point creator

func (*RestorePointCreator) Create

func (rpc *RestorePointCreator) Create()

Create creates cluster-wide consistent restore point

type RestorePointMetadata

type RestorePointMetadata struct {
	Name             string         `json:"name"`
	StartTime        time.Time      `json:"start_time"`
	FinishTime       time.Time      `json:"finish_time"`
	Hostname         string         `json:"hostname"`
	GpVersion        string         `json:"gp_version"`
	SystemIdentifier *uint64        `json:"system_identifier"`
	LsnBySegment     map[int]string `json:"lsn_by_segment"`
}

func FetchRestorePointMetadata

func FetchRestorePointMetadata(folder storage.Folder, pointName string) (RestorePointMetadata, error)

func (*RestorePointMetadata) String

func (s *RestorePointMetadata) String() string

type RestorePointTime

type RestorePointTime struct {
	Name string    `json:"restore_point_name"`
	Time time.Time `json:"time"`
}

func GetRestorePoints

func GetRestorePoints(folder storage.Folder) (restorePoints []RestorePointTime, err error)

GetRestorePoints receives restore points descriptions and sorts them by time

func GetRestorePointsTimeSlices

func GetRestorePointsTimeSlices(restorePoints []storage.Object) []RestorePointTime

type SegBackup

type SegBackup struct {
	postgres.Backup

	AoFilesMetadataDto *AOFilesMetadataDTO
}

func NewSegBackup

func NewSegBackup(baseBackupFolder storage.Folder, name string) SegBackup

func ToGpSegBackup

func ToGpSegBackup(source postgres.Backup) (output SegBackup)

func (*SegBackup) LoadAoFilesMetadata

func (backup *SegBackup) LoadAoFilesMetadata() (*AOFilesMetadataDTO, error)

type SegCmdRunner

type SegCmdRunner struct {
	// contains filtered or unexported fields
}

func NewSegCmdRunner

func NewSegCmdRunner(contentID int, cmdName, cmdArgs string, updInterval time.Duration) *SegCmdRunner

func (*SegCmdRunner) Run

func (r *SegCmdRunner) Run()

type SegCmdState

type SegCmdState struct {
	TS     time.Time    `json:"ts"`
	Status SegCmdStatus `json:"status"`
}

type SegCmdStatus

type SegCmdStatus string
const (
	RunningCmdStatus     SegCmdStatus = "running"
	FailedCmdStatus      SegCmdStatus = "failed"
	SuccessCmdStatus     SegCmdStatus = "success"
	InterruptedCmdStatus SegCmdStatus = "interrupted"
)

type SegConfigMaker

type SegConfigMaker interface {
	Make(SegmentMetadata) (cluster.SegConfig, error)
}

func NewRestoreCfgSegMaker

func NewRestoreCfgSegMaker(restoreConfigReader io.Reader) (SegConfigMaker, error)

func NewSegConfigMaker

func NewSegConfigMaker(restoreCfgPath string, inPlaceRestore bool) (SegConfigMaker, error)

type SegDeleteBeforeHandler

type SegDeleteBeforeHandler struct {
	*postgres.DeleteHandler
	// contains filtered or unexported fields
}

func (SegDeleteBeforeHandler) Delete

func (h SegDeleteBeforeHandler) Delete(segBackup SegBackup) error

type SegDeleteHandler

type SegDeleteHandler interface {
	Delete(backup SegBackup) error
}

func NewSegDeleteHandler

func NewSegDeleteHandler(rootFolder storage.Folder, contentID int, args DeleteArgs, delType SegDeleteType,
) (SegDeleteHandler, error)

type SegDeleteTargetHandler

type SegDeleteTargetHandler struct {
	*postgres.DeleteHandler
	// contains filtered or unexported fields
}

func (SegDeleteTargetHandler) Delete

func (h SegDeleteTargetHandler) Delete(segBackup SegBackup) error

type SegDeleteType

type SegDeleteType int
const (
	SegDeleteBefore SegDeleteType = iota
	SegDeleteTarget
)

type SegDeltaBackupConfigurator

type SegDeltaBackupConfigurator struct {
	// contains filtered or unexported fields
}

func NewSegDeltaBackupConfigurator

func NewSegDeltaBackupConfigurator(deltaBaseSelector internal.BackupSelector) SegDeltaBackupConfigurator

func (SegDeltaBackupConfigurator) Configure

func (c SegDeltaBackupConfigurator) Configure(folder storage.Folder, isPermanent bool,
) (prevBackupInfo postgres.PrevBackupInfo, incrementCount int, err error)

type SegmentFwdArg

type SegmentFwdArg struct {
	Name  string
	Value string
}

SegmentFwdArg describes the specific WAL-G arguments that are going to be forwarded to the segments

type SegmentMetadata

type SegmentMetadata struct {
	DatabaseID int         `json:"db_id"`
	ContentID  int         `json:"content_id"`
	Role       SegmentRole `json:"role"`
	Port       int         `json:"port"`
	Hostname   string      `json:"hostname"`
	DataDir    string      `json:"data_dir"`

	BackupID        string `json:"backup_id"`
	BackupName      string `json:"backup_name"`
	RestorePointLSN string `json:"restore_point_lsn"`
}

func NewSegmentMetadata

func NewSegmentMetadata(backupID string, segCfg cluster.SegConfig, restoreLSN, backupName string) SegmentMetadata

func (SegmentMetadata) ToSegConfig

func (c SegmentMetadata) ToSegConfig() cluster.SegConfig

type SegmentRestoreConfig

type SegmentRestoreConfig struct {
	Hostname string `json:"hostname"`
	Port     int    `json:"port"`
	DataDir  string `json:"data_dir"`
}

type SegmentRole

type SegmentRole string
const (
	Primary SegmentRole = "p"
	Mirror  SegmentRole = "m"
)

type SegmentUserData

type SegmentUserData struct {
	ID string `json:"id"`
}

func NewSegmentUserData

func NewSegmentUserData() SegmentUserData

func NewSegmentUserDataFromID

func NewSegmentUserDataFromID(backupID string) SegmentUserData

func (SegmentUserData) QuotedString

func (d SegmentUserData) QuotedString() string

QuotedString will do json.Marshal-ing followed by quoting in order to escape special control characters in the resulting JSON so it can be transferred as the cmdline argument to a segment

func (SegmentUserData) String

func (d SegmentUserData) String() string

type UnexpectedTarDataError

type UnexpectedTarDataError struct {
	// contains filtered or unexported fields
}

func (UnexpectedTarDataError) Error

func (err UnexpectedTarDataError) Error() string

type UnknownIncrementFileHeaderError

type UnknownIncrementFileHeaderError struct {
	// contains filtered or unexported fields
}

func (UnknownIncrementFileHeaderError) Error

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL