osd

package v1.3.9
Published: Jul 17, 2020 License: Apache-2.0 Imports: 31 Imported by: 0

Documentation

Overview

Package osd for the Ceph OSDs.

The config subpackage provides methods for generating the Ceph config for a Ceph cluster and for producing a "ceph.conf"-compatible file from the config, as well as Ceph command line-compatible flags.

Index

Constants

const (
	// AppName is the "app" label on osd pods
	AppName = "rook-ceph-osd"
	// FailureDomainKey is the label key whose value is the failure domain of the OSD
	FailureDomainKey = "failure-domain"

	// OsdIdLabelKey is the OSD label key
	OsdIdLabelKey = "ceph-osd-id"
)
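As a hypothetical usage sketch (not from the package's own examples), these label constants can be combined into a Kubernetes label selector string for the pods of a single OSD:

package main

import (
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
)

func main() {
	// Select every OSD pod in the cluster, then narrow to OSD 3 by its ID label.
	allOSDs := fmt.Sprintf("app=%s", osd.AppName)
	oneOSD := fmt.Sprintf("%s,%s=%d", allOSDs, osd.OsdIdLabelKey, 3)
	fmt.Println(allOSDs) // app=rook-ceph-osd
	fmt.Println(oneOSD)  // app=rook-ceph-osd,ceph-osd-id=3
}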
const (
	CrushDeviceClassVarName = "ROOK_OSD_CRUSH_DEVICE_CLASS"

	// CephDeviceSetLabelKey is the Rook device set label key
	CephDeviceSetLabelKey = "ceph.rook.io/DeviceSet"
	// CephSetIndexLabelKey is the Rook label key index
	CephSetIndexLabelKey = "ceph.rook.io/setIndex"
	// CephDeviceSetPVCIDLabelKey is the Rook PVC ID label key
	CephDeviceSetPVCIDLabelKey = "ceph.rook.io/DeviceSetPVCId"
	// OSDOverPVCLabelKey is the Rook PVC label key
	OSDOverPVCLabelKey = "ceph.rook.io/pvc"
)
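A hypothetical sketch of the labels a PVC-backed OSD pod might carry under these keys; the device set name, index, and PVC name are illustrative values only:

package main

import (
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
)

func main() {
	// Illustrative values only; real values come from the CephCluster device sets.
	labels := map[string]string{
		osd.CephDeviceSetLabelKey:      "set1",
		osd.CephSetIndexLabelKey:       "0",
		osd.CephDeviceSetPVCIDLabelKey: "set1-0-data-abc",
		osd.OSDOverPVCLabelKey:         "set1-0-data-abc",
	}
	fmt.Println(labels)
}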
const (
	OrchestrationStatusStarting      = "starting"
	OrchestrationStatusComputingDiff = "computingDiff"
	OrchestrationStatusOrchestrating = "orchestrating"
	OrchestrationStatusCompleted     = "completed"
	OrchestrationStatusFailed        = "failed"
)
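A hedged sketch of how a caller might branch on these status values; the status would normally be read back from the per-node orchestration status ConfigMap:

package main

import (
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
)

// report is a hypothetical helper that interprets an orchestration status value.
func report(status string) string {
	switch status {
	case osd.OrchestrationStatusCompleted:
		return "OSDs are ready"
	case osd.OrchestrationStatusFailed:
		return "orchestration failed"
	default:
		return "orchestration still in progress" // starting, computingDiff, or orchestrating
	}
}

func main() {
	fmt.Println(report(osd.OrchestrationStatusOrchestrating))
}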

Variables

var (

	// The topology labels that can be specified as standard Kubernetes node labels, such as topology.kubernetes.io/zone
	// These are all at the top layers of the CRUSH map.
	KubernetesTopologyLabels = []string{"zone", "region"}

	// The node labels that are supported with the topology.rook.io prefix such as topology.rook.io/rack
	CRUSHTopologyLabels = []string{"chassis", "rack", "row", "pdu", "pod", "room", "datacenter"}

	// The list of supported failure domains in the CRUSH map, ordered from lowest to highest
	CRUSHMapLevelsOrdered = append([]string{"host"}, append(CRUSHTopologyLabels, KubernetesTopologyLabels...)...)
)
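For illustration, printing the computed ordering shows the full failure-domain hierarchy; this only reads the exported variables above:

package main

import (
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
)

func main() {
	// Prints the failure domains from lowest to highest:
	// [host chassis rack row pdu pod room datacenter zone region]
	fmt.Println(osd.CRUSHMapLevelsOrdered)
}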

Functions

func ExtractOSDTopologyFromLabels added in v1.3.9

func ExtractOSDTopologyFromLabels(labels map[string]string) map[string]string

ExtractOSDTopologyFromLabels extracts the Rook topology from node labels and returns a map from topology type to value
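A hedged example (the label values are illustrative; which keys are recognized follows the KubernetesTopologyLabels and CRUSHTopologyLabels variables above):

package main

import (
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
)

func main() {
	nodeLabels := map[string]string{
		"topology.kubernetes.io/region": "us-east-1",
		"topology.kubernetes.io/zone":   "us-east-1a",
		"topology.rook.io/rack":         "rack2",
	}
	topology := osd.ExtractOSDTopologyFromLabels(nodeLabels)
	fmt.Println(topology) // e.g. map[rack:rack2 region:us-east-1 zone:us-east-1a]
}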

func GetLocationWithNode added in v1.3.9

func GetLocationWithNode(clientset kubernetes.Interface, nodeName string, crushHostname string) (string, error)
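GetLocationWithNode carries no doc comment, but a hedged sketch of calling it against a fake clientset might look like this (the exact location string returned is the package's concern):

package main

import (
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// A fake node with an illustrative topology label.
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{
		Name:   "node1",
		Labels: map[string]string{"topology.kubernetes.io/zone": "us-east-1a"},
	}}
	clientset := fake.NewSimpleClientset(node)
	location, err := osd.GetLocationWithNode(clientset, "node1", "node1")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(location) // e.g. a CRUSH location like "root=default host=node1 zone=us-east-1a"
}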

func PrivilegedContext added in v1.3.9

func PrivilegedContext() *v1.SecurityContext

PrivilegedContext returns a privileged Pod security context
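For example (a minimal sketch; the container name and image are illustrative):

package main

import (
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
	v1 "k8s.io/api/core/v1"
)

func main() {
	container := v1.Container{
		Name:            "osd",
		Image:           "ceph/ceph:v14", // illustrative image
		SecurityContext: osd.PrivilegedContext(),
	}
	// Expected: true (assuming the returned context sets Privileged).
	fmt.Println(*container.SecurityContext.Privileged)
}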

func UpdateLocationWithNodeLabels added in v1.3.9

func UpdateLocationWithNodeLabels(location *[]string, nodeLabels map[string]string)
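A hedged sketch of folding node labels into a CRUSH location; the starting location and labels are illustrative, and the exact entries appended are the package's concern:

package main

import (
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
)

func main() {
	location := []string{"root=default", "host=node1"}
	nodeLabels := map[string]string{
		"topology.kubernetes.io/zone": "us-east-1a",
	}
	osd.UpdateLocationWithNodeLabels(&location, nodeLabels)
	fmt.Println(location) // e.g. [root=default host=node1 zone=us-east-1a]
}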

func UpdateNodeStatus

func UpdateNodeStatus(kv *k8sutil.ConfigMapKVStore, node string, status OrchestrationStatus)
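A hedged sketch of recording a node's orchestration status; it assumes k8sutil.NewConfigMapKVStore and a fake clientset, neither of which is part of this package:

package main

import (
	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
	"github.com/rook/rook/pkg/operator/k8sutil"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Assumed constructor from the rook k8sutil package.
	kv := k8sutil.NewConfigMapKVStore("rook-ceph", fake.NewSimpleClientset(), metav1.OwnerReference{})
	status := osd.OrchestrationStatus{Status: osd.OrchestrationStatusStarting}
	osd.UpdateNodeStatus(kv, "node1", status)
}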

Types

type Cluster

type Cluster struct {
	Namespace string

	Keyring string

	DesiredStorage rookv1.StorageScopeSpec // user-defined storage scope spec
	ValidStorage   rookv1.StorageScopeSpec // valid subset of `DesiredStorage`, computed at runtime

	Network cephv1.NetworkSpec
	// contains filtered or unexported fields
}

Cluster keeps track of the OSDs

func New

func New(
	clusterInfo *cephconfig.ClusterInfo,
	context *clusterd.Context,
	namespace string,
	rookVersion string,
	cephVersion cephv1.CephVersionSpec,
	storageSpec rookv1.StorageScopeSpec,
	dataDirHostPath string,
	placement rookv1.Placement,
	annotations rookv1.Annotations,
	network cephv1.NetworkSpec,
	resources v1.ResourceRequirements,
	prepareResources v1.ResourceRequirements,
	priorityClassName string,
	ownerRef metav1.OwnerReference,
	skipUpgradeChecks bool,
	continueUpgradeAfterChecksEvenIfNotHealthy bool,
	healthCheck cephv1.CephClusterHealthCheckSpec,
) *Cluster

New creates an instance of the OSD manager

func (*Cluster) Start

func (c *Cluster) Start() error

Start begins management of the OSDs
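A hedged sketch of wiring the manager together; clusterInfo, context, storageSpec, and ownerRef come from the operator's cluster controller and are assumed here, as are import paths following the rook v1.3 layout:

package example

import (
	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
	rookv1 "github.com/rook/rook/pkg/apis/rook.io/v1"
	"github.com/rook/rook/pkg/clusterd"
	cephconfig "github.com/rook/rook/pkg/daemon/ceph/config"
	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// startOSDs is a hypothetical helper; its arguments are produced elsewhere in the operator.
func startOSDs(
	clusterInfo *cephconfig.ClusterInfo,
	context *clusterd.Context,
	storageSpec rookv1.StorageScopeSpec,
	ownerRef metav1.OwnerReference,
) error {
	c := osd.New(
		clusterInfo,
		context,
		"rook-ceph", // namespace
		"v1.3.9",    // rook version
		cephv1.CephVersionSpec{Image: "ceph/ceph:v14.2.10"}, // illustrative image
		storageSpec,
		"/var/lib/rook", // dataDirHostPath
		rookv1.Placement{},
		rookv1.Annotations{},
		cephv1.NetworkSpec{},
		v1.ResourceRequirements{}, // osd resources
		v1.ResourceRequirements{}, // prepare resources
		"",                        // priorityClassName
		ownerRef,
		false, // skipUpgradeChecks
		false, // continueUpgradeAfterChecksEvenIfNotHealthy
		cephv1.CephClusterHealthCheckSpec{},
	)
	return c.Start()
}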

type OSDHealthMonitor added in v1.3.9

type OSDHealthMonitor struct {
	// contains filtered or unexported fields
}

OSDHealthMonitor defines OSD process monitoring

func NewOSDHealthMonitor added in v1.3.9

func NewOSDHealthMonitor(context *clusterd.Context, namespace string, removeOSDsIfOUTAndSafeToRemove bool, healthCheck cephv1.CephClusterHealthCheckSpec) *OSDHealthMonitor

NewOSDHealthMonitor instantiates OSD monitoring

func (*OSDHealthMonitor) Start added in v1.3.9

func (m *OSDHealthMonitor) Start(stopCh chan struct{})

Start runs the OSD status monitoring logic at regular intervals
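A hedged sketch of running the monitor in the background; the clusterd.Context is assumed to be wired elsewhere in the operator:

package example

import (
	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
	"github.com/rook/rook/pkg/clusterd"
	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
)

// monitorOSDs is a hypothetical helper showing the monitor lifecycle.
func monitorOSDs(context *clusterd.Context, stopCh chan struct{}) {
	monitor := osd.NewOSDHealthMonitor(context, "rook-ceph", false, cephv1.CephClusterHealthCheckSpec{})
	go monitor.Start(stopCh)
	// Closing stopCh is assumed to end the monitoring loop, per the usual stop-channel pattern.
}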

func (*OSDHealthMonitor) Update added in v1.3.9

func (m *OSDHealthMonitor) Update(removeOSDsIfOUTAndSafeToRemove bool)

Update updates the removeOSDsIfOUTAndSafeToRemove setting

type OSDInfo

type OSDInfo struct {
	ID             int    `json:"id"`
	Cluster        string `json:"cluster"`
	UUID           string `json:"uuid"`
	DevicePartUUID string `json:"device-part-uuid"`
	// BlockPath is the logical volume path for an OSD created by ceph-volume, in the format '/dev/<Volume Group>/<Logical Volume>', or simply the device path (e.g. /dev/vdb) if block mode is used
	BlockPath     string `json:"lv-path"`
	MetadataPath  string `json:"metadata-path"`
	SkipLVRelease bool   `json:"skip-lv-release"`
	Location      string `json:"location"`
	LVBackedPV    bool   `json:"lv-backed-pv"`
	CVMode        string `json:"lv-mode"`
	Store         string `json:"store"`
}

OSDInfo represents all the properties of a given OSD
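For illustration, the struct tags above produce JSON keys like "lv-path" and "lv-mode" when an OSDInfo is marshaled; the field values here are invented:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
)

func main() {
	info := osd.OSDInfo{
		ID:        3,
		Cluster:   "ceph",
		UUID:      "f0caf143-0000-0000-0000-000000000000", // illustrative
		BlockPath: "/dev/ceph-vg/osd-block-lv",            // illustrative
		CVMode:    "lvm",
		Store:     "bluestore",
	}
	b, _ := json.Marshal(info)
	fmt.Println(string(b)) // note the "lv-path" and "lv-mode" keys from the struct tags
}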

type OrchestrationStatus

type OrchestrationStatus struct {
	OSDs         []OSDInfo `json:"osds"`
	Status       string    `json:"status"`
	PvcBackedOSD bool      `json:"pvc-backed-osd"`
	Message      string    `json:"message"`
}

OrchestrationStatus represents the status of an OSD orchestration
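In the reverse direction, a hedged sketch of decoding a status as it might be stored in a per-node status ConfigMap (the JSON value is illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/rook/rook/pkg/operator/ceph/cluster/osd"
)

func main() {
	raw := `{"osds":[{"id":3,"cluster":"ceph"}],"status":"completed","pvc-backed-osd":false,"message":""}`
	var status osd.OrchestrationStatus
	if err := json.Unmarshal([]byte(raw), &status); err != nil {
		panic(err)
	}
	fmt.Println(status.Status == osd.OrchestrationStatusCompleted) // true
}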

Directories

Path	Synopsis
config	Package config for OSD config managed by the operator
