Package osd

v0.0.0-...-3f1f3ec
Published: Feb 28, 2020 License: Apache-2.0 Imports: 33 Imported by: 0

Documentation

Overview

Package osd for the Ceph OSDs.

Package config provides methods for generating the Ceph config for a Ceph cluster, for producing a "ceph.conf"-compatible file from that config, and for generating Ceph command-line-compatible flags.

Index

Constants

View Source
const (
	// AppName is the "app" label on osd pods
	AppName = "rook-ceph-osd"
	// FailureDomainKey is the label key whose value is the failure domain of the OSD
	FailureDomainKey = "failure-domain"

	// OsdIdLabelKey is the OSD label key
	OsdIdLabelKey = "ceph-osd-id"
)
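These constants are typically combined into a label selector for OSD pods. Below is a minimal sketch (not part of this package) of listing the pods of one OSD with client-go; the import path for the osd package, and the pre-0.18 client-go List signature (which matches this package's Feb 2020 era), are assumptions:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
)

// listOSDPods prints the pods of a single OSD. Per the constants above,
// the "app" label carries osd.AppName and the ceph-osd-id label carries
// the OSD id.
func listOSDPods(clientset kubernetes.Interface, namespace string, osdID int) error {
	selector := fmt.Sprintf("app=%s,%s=%d", osd.AppName, osd.OsdIdLabelKey, osdID)
	// client-go before v0.18 takes no context argument here.
	pods, err := clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{LabelSelector: selector})
	if err != nil {
		return err
	}
	for i := range pods.Items {
		fmt.Println(pods.Items[i].Name)
	}
	return nil
}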
View Source
const (

	// CephDeviceSetLabelKey is the Rook device set label key
	CephDeviceSetLabelKey = "ceph.rook.io/DeviceSet"
	// CephSetIndexLabelKey is the Rook device set index label key
	CephSetIndexLabelKey = "ceph.rook.io/setIndex"
	// CephDeviceSetPVCIDLabelKey is the Rook PVC ID label key
	CephDeviceSetPVCIDLabelKey = "ceph.rook.io/DeviceSetPVCId"
	// OSDOverPVCLabelKey is the Rook PVC label key
	OSDOverPVCLabelKey = "ceph.rook.io/pvc"
)
View Source
const (
	OrchestrationStatusStarting      = "starting"
	OrchestrationStatusComputingDiff = "computingDiff"
	OrchestrationStatusOrchestrating = "orchestrating"
	OrchestrationStatusCompleted     = "completed"
	OrchestrationStatusFailed        = "failed"
)

Variables

View Source
var (

	// KubernetesTopologyLabels are the failure domain labels that can be specified as Kubernetes node labels, such as failure-domain.beta.kubernetes.io/zone.
	// These are all at the top layers of the CRUSH map.
	KubernetesTopologyLabels = []string{"zone", "region"}

	// CRUSHTopologyLabels are the node labels supported with the topology.rook.io prefix, such as topology.rook.io/rack.
	CRUSHTopologyLabels = []string{"chassis", "rack", "row", "pdu", "pod", "room", "datacenter"}

	// CRUSHMapLevelsOrdered is the list of supported failure domains in the CRUSH map, ordered from lowest to highest.
	CRUSHMapLevelsOrdered = append([]string{"host"}, append(CRUSHTopologyLabels, KubernetesTopologyLabels...)...)
)
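Evaluating the appends above gives the complete failure domain order; this standalone snippet reproduces it:

package main

import "fmt"

func main() {
	kubernetesTopologyLabels := []string{"zone", "region"}
	crushTopologyLabels := []string{"chassis", "rack", "row", "pdu", "pod", "room", "datacenter"}
	// "host" first, then the CRUSH levels, then the Kubernetes levels.
	crushMapLevelsOrdered := append([]string{"host"}, append(crushTopologyLabels, kubernetesTopologyLabels...)...)
	fmt.Println(crushMapLevelsOrdered)
	// Output: [host chassis rack row pdu pod room datacenter zone region]
}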

Functions

func ExtractRookTopologyFromLabels

func ExtractRookTopologyFromLabels(labels map[string]string) (map[string]string, []string)

ExtractRookTopologyFromLabels extracts rook topology from labels and returns a map from topology type to value, and an array of any invalid labels with a topology prefix.
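A hedged usage sketch follows; the import path and the exact keys of the returned map (e.g. "zone", "rack") are assumptions based on the topology label lists above:

package main

import (
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
)

func main() {
	labels := map[string]string{
		"failure-domain.beta.kubernetes.io/zone": "zone-a",
		"topology.rook.io/rack":                  "rack-2",
		"topology.rook.io/shelf":                 "shelf-1", // "shelf" is not a supported CRUSH level
	}
	topology, invalid := osd.ExtractRookTopologyFromLabels(labels)
	fmt.Println(topology) // expected shape: map[rack:rack-2 zone:zone-a]
	fmt.Println(invalid)  // topology-prefixed labels that are not supported
}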

func GetLocationWithNode

func GetLocationWithNode(clientset kubernetes.Interface, nodeName string, crushHostname string) (string, error)

func UpdateLocationWithNodeLabels

func UpdateLocationWithNodeLabels(location *[]string, nodeLabels map[string]string)

func UpdateNodeStatus

func UpdateNodeStatus(kv *k8sutil.ConfigMapKVStore, node string, status OrchestrationStatus) error
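A hedged sketch of recording a node's orchestration status; the existence and signature of k8sutil.NewConfigMapKVStore, and the import paths, are assumptions about rook's source layout:

package main

import (
	"k8s.io/client-go/kubernetes"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import paths
	"github.com/rook/rook/pkg/operator/k8sutil"
)

// recordStatus stores the orchestration status for one node in a
// ConfigMap-backed KV store, using the status constants defined above.
func recordStatus(clientset kubernetes.Interface, namespace string) error {
	// NewConfigMapKVStore's signature is an assumption.
	kv := k8sutil.NewConfigMapKVStore(namespace, clientset)
	status := osd.OrchestrationStatus{
		Status:  osd.OrchestrationStatusOrchestrating,
		Message: "provisioning OSDs on node-1",
	}
	return osd.UpdateNodeStatus(kv, "node-1", status)
}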

Types

type Cluster

type Cluster struct {
	Namespace string

	Keyring string

	DesiredStorage rookalpha.StorageScopeSpec // user-defined storage scope spec
	ValidStorage   rookalpha.StorageScopeSpec // valid subset of `DesiredStorage`, computed at runtime

	Network cephv1.NetworkSpec
	// contains filtered or unexported fields
}

Cluster keeps track of the OSDs

func New

func New(
	clusterInfo *cephconfig.ClusterInfo,
	context *clusterd.Context,
	namespace string,
	rookVersion string,
	cephVersion cephv1.CephVersionSpec,
	storageSpec rookalpha.StorageScopeSpec,
	dataDirHostPath string,
	placement rookalpha.Placement,
	annotations rookalpha.Annotations,
	network cephv1.NetworkSpec,
	resources v1.ResourceRequirements,
	prepareResources v1.ResourceRequirements,
	priorityClassName string,
	ownerRef metav1.OwnerReference,
	isUpgrade bool,
	skipUpgradeChecks bool,
	continueUpgradeAfterChecksEvenIfNotHealthy bool,
) *Cluster

New creates an instance of the OSD manager

func (*Cluster) Start

func (c *Cluster) Start() error

Start the OSD management
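A construction sketch covering New and Start together; every argument value below is a placeholder, and the rook/Kubernetes import paths are assumptions about the v1.x source layout:

package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1" // assumed paths
	rookalpha "github.com/rook/rook/pkg/apis/rook.io/v1alpha2"
	"github.com/rook/rook/pkg/clusterd"
	cephconfig "github.com/rook/rook/pkg/daemon/ceph/config"
	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
)

// startOSDs wires placeholder values into osd.New and starts the
// orchestration. Real callers pass values assembled by the operator.
func startOSDs(
	clusterInfo *cephconfig.ClusterInfo,
	ctx *clusterd.Context,
	storageSpec rookalpha.StorageScopeSpec,
	ownerRef metav1.OwnerReference,
) error {
	c := osd.New(
		clusterInfo,
		ctx,
		"rook-ceph",     // namespace
		"v1.2.0",        // rook version (placeholder)
		cephv1.CephVersionSpec{Image: "ceph/ceph:v14.2"}, // placeholder image
		storageSpec,
		"/var/lib/rook", // dataDirHostPath
		rookalpha.Placement{},
		rookalpha.Annotations{},
		cephv1.NetworkSpec{},
		v1.ResourceRequirements{}, // osd resources
		v1.ResourceRequirements{}, // prepare resources
		"",                        // priorityClassName
		ownerRef,
		false, // isUpgrade
		false, // skipUpgradeChecks
		false, // continueUpgradeAfterChecksEvenIfNotHealthy
	)
	return c.Start()
}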

type Monitor

type Monitor struct {
	// contains filtered or unexported fields
}

Monitor defines OSD process monitoring

func NewMonitor

func NewMonitor(context *clusterd.Context, clusterName string, removeOSDsIfOUTAndSafeToRemove bool, cephVersion cephver.CephVersion) *Monitor

NewMonitor instantiates OSD monitoring

func (*Monitor) Start

func (m *Monitor) Start(stopCh chan struct{})

Start runs the monitoring logic for OSD status at set intervals

func (*Monitor) Update

func (m *Monitor) Update(removeOSDsIfOUTAndSafeToRemove bool)

Update updates the removeOSDsIfOUTAndSafeToRemove setting
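A hedged sketch of running the monitor in the background; the clusterd context, cluster name, and CephVersion value are placeholders, and the import paths are assumptions:

package main

import (
	"github.com/rook/rook/pkg/clusterd"
	"github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed paths
	cephver "github.com/rook/rook/pkg/operator/ceph/version"
)

// runMonitor starts OSD status monitoring in a goroutine and returns a
// function that stops it via the stop channel.
func runMonitor(ctx *clusterd.Context, cephVersion cephver.CephVersion) func() {
	mon := osd.NewMonitor(ctx, "rook-ceph", false /* removeOSDsIfOUTAndSafeToRemove */, cephVersion)
	stopCh := make(chan struct{})
	go mon.Start(stopCh) // polls OSD status at the monitor's interval

	// The removal setting can be flipped later without restarting:
	mon.Update(true)

	return func() { close(stopCh) }
}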

type OSDInfo

type OSDInfo struct {
	ID                  int    `json:"id"`
	DataPath            string `json:"data-path"`
	Config              string `json:"conf"`
	Cluster             string `json:"cluster"`
	KeyringPath         string `json:"keyring-path"`
	UUID                string `json:"uuid"`
	Journal             string `json:"journal"`
	IsFileStore         bool   `json:"is-file-store"`
	IsDirectory         bool   `json:"is-directory"`
	DevicePartUUID      string `json:"device-part-uuid"`
	CephVolumeInitiated bool   `json:"ceph-volume-initiated"`
	// LVPath is the logical volume path for an OSD created by ceph-volume, in the format '/dev/<Volume Group>/<Logical Volume>'
	LVPath        string `json:"lv-path"`
	SkipLVRelease bool   `json:"skip-lv-release"`
	Location      string `json:"location"`
	LVBackedPV    bool   `json:"lv-backed-pv"`
}

OSDInfo represents all the properties of a given OSD
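The JSON tags above define the serialized form used when passing OSD properties around; a quick sketch of what one OSD serializes to (field values are made up, import path assumed):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd" // assumed import path
)

func main() {
	info := osd.OSDInfo{
		ID:                  3,
		UUID:                "0d6e4e8a-5f4c-4c91-8e3f-0a1b2c3d4e5f", // placeholder
		Cluster:             "ceph",
		DataPath:            "/var/lib/ceph/osd/ceph-3",
		LVPath:              "/dev/ceph-vg/osd-data-3", // placeholder VG/LV names
		CephVolumeInitiated: true,
	}
	b, _ := json.Marshal(info)
	fmt.Println(string(b))
	// Keys follow the struct tags, e.g. "id":3, "data-path":"/var/lib/ceph/osd/ceph-3",
	// "ceph-volume-initiated":true; fields left unset serialize as zero values.
}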

type OrchestrationStatus

type OrchestrationStatus struct {
	OSDs         []OSDInfo `json:"osds"`
	Status       string    `json:"status"`
	PvcBackedOSD bool      `json:"pvc-backed-osd"`
	Message      string    `json:"message"`
}

OrchestrationStatus represents the status of an OSD orchestration

Directories

Path	Synopsis
config	Package config for OSD config managed by the operator
