package extension

v1.4.1
Published: Feb 26, 2024 License: Apache-2.0 Imports: 15 Imported by: 2

Documentation

Index

Constants

const (
	DomainPrefix = "koordinator.sh/"
	// ResourceDomainPrefix is the prefix "kubernetes.io/" used by particular extended resources (e.g. batch resources)
	ResourceDomainPrefix = corev1.ResourceDefaultNamespacePrefix
	// SchedulingDomainPrefix represents the scheduling domain prefix
	SchedulingDomainPrefix = "scheduling.koordinator.sh"
	// NodeDomainPrefix represents the node domain prefix
	NodeDomainPrefix = "node.koordinator.sh"
	PodDomainPrefix  = "pod.koordinator.sh"

	LabelPodQoS      = DomainPrefix + "qosClass"
	LabelPodPriority = DomainPrefix + "priority"
	// LabelPodPriorityClass is used to adjust Pods that are already running and have a PriorityClass set, so that
	// Koordinator can be deployed smoothly into a running cluster. If you don't have a running Pod with a
	// PriorityClass set, don't set this label.
	LabelPodPriorityClass = DomainPrefix + "priority-class"

	LabelManagedBy = "app.kubernetes.io/managed-by"

	// LabelPodMutatingUpdate is a label key that pods with `pod.koordinator.sh/mutating-update=true` will
	// be mutated by Koordinator webhook when updating.
	LabelPodMutatingUpdate = PodDomainPrefix + "/mutating-update"
)
const (
	AnnotationGangPrefix = "gang.scheduling.koordinator.sh"
	// AnnotationGangName specifies the name of the gang
	AnnotationGangName = AnnotationGangPrefix + "/name"

	// AnnotationGangMinNum specifies the minimum number of gang members required for the gang to be executed
	AnnotationGangMinNum = AnnotationGangPrefix + "/min-available"

	// AnnotationGangWaitTime specifies the gang's max wait time in the Permit stage
	AnnotationGangWaitTime = AnnotationGangPrefix + "/waiting-time"

	// AnnotationGangTotalNum specifies the total number of children of the gang.
	// If not specified, it defaults to the value of AnnotationGangMinNum.
	AnnotationGangTotalNum = AnnotationGangPrefix + "/total-number"

	// AnnotationGangMode defines the gang scheduling behavior when scheduling fails.
	// Supported values are GangModeStrict and GangModeNonStrict; the default is GangModeStrict.
	AnnotationGangMode = AnnotationGangPrefix + "/mode"

	// AnnotationGangGroups defines which gangs are bundled together as a group.
	// The gangs go to bind only when all gangs in the group meet their conditions.
	AnnotationGangGroups = AnnotationGangPrefix + "/groups"

	// AnnotationGangTimeout indicates that the entire gang could not be scheduled because it timed out.
	// The annotation is added by the scheduler when the gang times out.
	AnnotationGangTimeout = AnnotationGangPrefix + "/timeout"

	GangModeStrict    = "Strict"
	GangModeNonStrict = "NonStrict"

	// AnnotationGangMatchPolicy defines which pod statuses gang scheduling takes into account.
	// Supported values are GangMatchPolicyOnlyWaiting, GangMatchPolicyWaitingAndRunning and GangMatchPolicyOnceSatisfied; the default is GangMatchPolicyOnceSatisfied.
	AnnotationGangMatchPolicy        = AnnotationGangPrefix + "/match-policy"
	GangMatchPolicyOnlyWaiting       = "only-waiting"
	GangMatchPolicyWaitingAndRunning = "waiting-and-running"
	GangMatchPolicyOnceSatisfied     = "once-satisfied"

	// AnnotationAliasGangMatchPolicy defines the same match policy under a different prefix.
	// The duplicate definition exists only for compatibility.
	AnnotationAliasGangMatchPolicy = "pod-group.scheduling.sigs.k8s.io/match-policy"
)
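For illustration, the sketch below (assuming the import path github.com/koordinator-sh/koordinator/apis/extension; the gang name, size and mode are hypothetical) marks a Pod as a gang member via these annotations and reads the values back with GetGangName and GetMinNum documented under Functions:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/koordinator-sh/koordinator/apis/extension"
)

func main() {
	// Mark a pod as a member of gang "demo-gang" that needs at least 3 members
	// and should use NonStrict mode when scheduling fails.
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "demo",
			Annotations: map[string]string{
				extension.AnnotationGangName:   "demo-gang",
				extension.AnnotationGangMinNum: "3",
				extension.AnnotationGangMode:   extension.GangModeNonStrict,
			},
		},
	}

	// Read the gang attributes back with the helpers documented below.
	fmt.Println(extension.GetGangName(pod)) // demo-gang
	if minNum, err := extension.GetMinNum(pod); err == nil {
		fmt.Println(minNum) // 3
	}
}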
const (
	// Deprecated: kubernetes-sigs/scheduler-plugins/lightweight-coscheduling
	LabelLightweightCoschedulingPodGroupName = "pod-group.scheduling.sigs.k8s.io/name"
	// Deprecated: kubernetes-sigs/scheduler-plugins/lightweight-coscheduling
	LabelLightweightCoschedulingPodGroupMinAvailable = "pod-group.scheduling.sigs.k8s.io/min-available"
)
const (
	// AnnotationCPUNormalizationRatio denotes the cpu normalization ratio of the node.
	AnnotationCPUNormalizationRatio = NodeDomainPrefix + "/cpu-normalization-ratio"

	// LabelCPUNormalizationEnabled indicates whether the cpu normalization is enabled on the node.
	// If both the label and the node-level CPUNormalizationStrategy are set, the label overrides the strategy.
	LabelCPUNormalizationEnabled = NodeDomainPrefix + "/cpu-normalization-enabled"

	// AnnotationCPUBasicInfo denotes the basic CPU info of the node.
	AnnotationCPUBasicInfo = NodeDomainPrefix + "/cpu-basic-info"

	// NormalizationRatioDiffEpsilon is the min difference between two cpu normalization ratios.
	NormalizationRatioDiffEpsilon = 0.01
)
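A minimal sketch of using these keys together with the Get/Set helpers listed under Functions (same assumed import path as above; the ratio value is illustrative):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/koordinator-sh/koordinator/apis/extension"
)

func main() {
	node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1", Annotations: map[string]string{}}}

	// Enable normalization via the label and record a ratio in the annotation.
	node.Labels = map[string]string{extension.LabelCPUNormalizationEnabled: "true"}
	extension.SetCPUNormalizationRatio(node, 1.25)

	if ratio, err := extension.GetCPUNormalizationRatio(node); err == nil {
		fmt.Println(ratio) // 1.25
	}
	if enabled, err := extension.GetCPUNormalizationEnabled(node); err == nil && enabled != nil {
		fmt.Println(*enabled) // true
	}
}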
const (
	// Deprecated: because of the limitation of extended resource naming
	KoordBatchCPU corev1.ResourceName = DomainPrefix + "batch-cpu"
	// Deprecated: because of the limitation of extended resource naming
	KoordBatchMemory corev1.ResourceName = DomainPrefix + "batch-memory"

	// Deprecated: Device extension resource names should use the prefix `koordinator.sh`
	DeprecatedKoordRDMA corev1.ResourceName = ResourceDomainPrefix + "rdma"
	// Deprecated: Device extension resource names should use the prefix `koordinator.sh`
	DeprecatedKoordFPGA corev1.ResourceName = ResourceDomainPrefix + "fpga"
	// Deprecated: Device extension resource names should use the prefix `koordinator.sh`
	DeprecatedKoordGPU corev1.ResourceName = ResourceDomainPrefix + "gpu"
	// Deprecated: Device extension resource names should use the prefix `koordinator.sh`
	DeprecatedGPUCore corev1.ResourceName = ResourceDomainPrefix + "gpu-core"
	// Deprecated: Device extension resource names should use the prefix `koordinator.sh`
	DeprecatedGPUMemory corev1.ResourceName = ResourceDomainPrefix + "gpu-memory"
	// Deprecated: Device extension resource names should use the prefix `koordinator.sh`
	DeprecatedGPUMemoryRatio corev1.ResourceName = ResourceDomainPrefix + "gpu-memory-ratio"
)
const (
	// Deprecated: Device extension resource names should use the prefix `koordinator.sh`
	DeprecatedGPUDriver string = ResourceDomainPrefix + "gpu-driver"
	// Deprecated: Device extension resource names should use the prefix `koordinator.sh`
	DeprecatedGPUModel string = ResourceDomainPrefix + "gpu-model"
)
const (
	// AnnotationDeviceAllocated represents the device allocated by the pod
	AnnotationDeviceAllocated = SchedulingDomainPrefix + "/device-allocated"
	// AnnotationDeviceAllocateHint guides the scheduler in selecting and allocating specialized hardware resources
	AnnotationDeviceAllocateHint = SchedulingDomainPrefix + "/device-allocate-hint"
	// AnnotationDeviceJointAllocate guides the scheduler joint-allocates devices
	AnnotationDeviceJointAllocate = SchedulingDomainPrefix + "/device-joint-allocate"
)
const (
	ResourceNvidiaGPU      corev1.ResourceName = "nvidia.com/gpu"
	ResourceHygonDCU       corev1.ResourceName = "dcu.com/gpu"
	ResourceRDMA           corev1.ResourceName = DomainPrefix + "rdma"
	ResourceFPGA           corev1.ResourceName = DomainPrefix + "fpga"
	ResourceGPU            corev1.ResourceName = DomainPrefix + "gpu"
	ResourceGPUShared      corev1.ResourceName = DomainPrefix + "gpu.shared"
	ResourceGPUCore        corev1.ResourceName = DomainPrefix + "gpu-core"
	ResourceGPUMemory      corev1.ResourceName = DomainPrefix + "gpu-memory"
	ResourceGPUMemoryRatio corev1.ResourceName = DomainPrefix + "gpu-memory-ratio"
)
const (
	LabelGPUModel         string = NodeDomainPrefix + "/gpu-model"
	LabelGPUDriverVersion string = NodeDomainPrefix + "/gpu-driver-version"
)
const (
	SystemQuotaName                 = "koordinator-system-quota"
	RootQuotaName                   = "koordinator-root-quota"
	DefaultQuotaName                = "koordinator-default-quota"
	QuotaKoordinatorPrefix          = "quota.scheduling.koordinator.sh"
	LabelQuotaIsParent              = QuotaKoordinatorPrefix + "/is-parent"
	LabelQuotaParent                = QuotaKoordinatorPrefix + "/parent"
	LabelAllowLentResource          = QuotaKoordinatorPrefix + "/allow-lent-resource"
	LabelQuotaName                  = QuotaKoordinatorPrefix + "/name"
	LabelQuotaProfile               = QuotaKoordinatorPrefix + "/profile"
	LabelQuotaIsRoot                = QuotaKoordinatorPrefix + "/is-root"
	LabelQuotaTreeID                = QuotaKoordinatorPrefix + "/tree-id"
	LabelQuotaIgnoreDefaultTree     = QuotaKoordinatorPrefix + "/ignore-default-tree"
	LabelPreemptible                = QuotaKoordinatorPrefix + "/preemptible"
	LabelAllowForceUpdate           = QuotaKoordinatorPrefix + "/allow-force-update"
	AnnotationSharedWeight          = QuotaKoordinatorPrefix + "/shared-weight"
	AnnotationRuntime               = QuotaKoordinatorPrefix + "/runtime"
	AnnotationRequest               = QuotaKoordinatorPrefix + "/request"
	AnnotationChildRequest          = QuotaKoordinatorPrefix + "/child-request"
	AnnotationResourceKeys          = QuotaKoordinatorPrefix + "/resource-keys"
	AnnotationTotalResource         = QuotaKoordinatorPrefix + "/total-resource"
	AnnotationQuotaNamespaces       = QuotaKoordinatorPrefix + "/namespaces"
	AnnotationGuaranteed            = QuotaKoordinatorPrefix + "/guaranteed"
	AnnotationAllocated             = QuotaKoordinatorPrefix + "/allocated"
	AnnotationNonPreemptibleRequest = QuotaKoordinatorPrefix + "/non-preemptible-request"
	AnnotationNonPreemptibleUsed    = QuotaKoordinatorPrefix + "/non-preemptible-used"
)

RootQuotaName is the root (head) of the quota tree.

const (
	// AnnotationNodeColocationStrategy denotes the annotation key of the node colocation strategy.
	// The value is the ColocationStrategy. It takes precedence over the ColocationStrategy in the slo-controller-config.
	// Illegal values are ignored.
	AnnotationNodeColocationStrategy = NodeDomainPrefix + "/colocation-strategy"

	// LabelCPUReclaimRatio denotes the CPU reclaim ratio of a node. The value is a float number.
	// It takes precedence over the CPUReclaimThresholdPercent in the slo-controller-config and the node annotations.
	// Illegal values are ignored.
	LabelCPUReclaimRatio = NodeDomainPrefix + "/cpu-reclaim-ratio"
	// LabelMemoryReclaimRatio denotes the memory reclaim ratio of a node. The value is a float number.
	// It takes precedence over the MemoryReclaimThresholdPercent in the slo-controller-config and the node annotations.
	// Illegal values are ignored.
	LabelMemoryReclaimRatio = NodeDomainPrefix + "/memory-reclaim-ratio"
)
const (
	// AnnotationNodeResourceAmplificationRatio denotes the resource amplification ratio of the node.
	AnnotationNodeResourceAmplificationRatio = NodeDomainPrefix + "/resource-amplification-ratio"

	// AnnotationNodeRawAllocatable denotes the un-amplified raw allocatable of the node.
	AnnotationNodeRawAllocatable = NodeDomainPrefix + "/raw-allocatable"
)
const (
	// AnnotationResourceSpec represents resource allocation API defined by Koordinator.
	// The user specifies the desired CPU orchestration policy by setting the annotation.
	AnnotationResourceSpec = SchedulingDomainPrefix + "/resource-spec"
	// AnnotationResourceStatus represents resource allocation result.
	// koord-scheduler patches the Pod with this annotation before binding it to a node.
	AnnotationResourceStatus = SchedulingDomainPrefix + "/resource-status"
)

Defines the pod-level annotations and labels.
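A hedged round-trip sketch of these annotations via SetResourceSpec and GetResourceSpec (documented below); the chosen bind policy is only illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/koordinator-sh/koordinator/apis/extension"
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Annotations: map[string]string{}}}

	// Prefer packing logical CPUs onto full physical cores.
	spec := &extension.ResourceSpec{
		PreferredCPUBindPolicy: extension.CPUBindPolicyFullPCPUs,
	}
	if err := extension.SetResourceSpec(pod, spec); err != nil {
		panic(err)
	}

	// Inspect the raw annotation written by SetResourceSpec.
	fmt.Println(pod.Annotations[extension.AnnotationResourceSpec])

	got, err := extension.GetResourceSpec(pod.Annotations)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.PreferredCPUBindPolicy) // FullPCPUs
}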

const (
	// AnnotationNodeCPUTopology describes the detailed CPU topology.
	AnnotationNodeCPUTopology = NodeDomainPrefix + "/cpu-topology"
	// AnnotationNodeCPUAllocs describes the CPU allocations of K8s Guaranteed Pods on the node.
	AnnotationNodeCPUAllocs = NodeDomainPrefix + "/pod-cpu-allocs"
	// AnnotationNodeCPUSharedPools describes the CPU Shared Pool defined by Koordinator.
	// The shared pool is mainly used by Koordinator LS Pods or K8s Burstable Pods.
	AnnotationNodeCPUSharedPools = NodeDomainPrefix + "/cpu-shared-pools"
	// AnnotationNodeBECPUSharedPools describes the CPU Shared Pool defined by Koordinator.
	// The shared pool is mainly used by Koordinator BE Pods or K8s BestEffort Pods.
	AnnotationNodeBECPUSharedPools = NodeDomainPrefix + "/be-cpu-shared-pools"

	// LabelNodeCPUBindPolicy constrains how logical CPUs are bound when scheduling.
	LabelNodeCPUBindPolicy = NodeDomainPrefix + "/cpu-bind-policy"
	// LabelNodeNUMAAllocateStrategy indicates how to choose among satisfying NUMA Nodes when scheduling.
	LabelNodeNUMAAllocateStrategy = NodeDomainPrefix + "/numa-allocate-strategy"

	// LabelNUMATopologyPolicy represents how to align resource allocation according to the NUMA topology
	LabelNUMATopologyPolicy = NodeDomainPrefix + "/numa-topology-policy"
)

Defines the node-level annotations and labels.

const (
	NodeNUMAAllocateStrategyLeastAllocated = NUMALeastAllocated
	NodeNUMAAllocateStrategyMostAllocated  = NUMAMostAllocated
)
const (
	// AnnotationKubeletCPUManagerPolicy describes the cpu manager policy options of kubelet
	AnnotationKubeletCPUManagerPolicy = "kubelet.koordinator.sh/cpu-manager-policy"

	KubeletCPUManagerPolicyStatic                         = "static"
	KubeletCPUManagerPolicyNone                           = "none"
	KubeletCPUManagerPolicyFullPCPUsOnlyOption            = "full-pcpus-only"
	KubeletCPUManagerPolicyDistributeCPUsAcrossNUMAOption = "distribute-cpus-across-numa"
)
const (
	// LabelPodOperatingMode describes the mode of operation for Pod.
	LabelPodOperatingMode = SchedulingDomainPrefix + "/operating-mode"

	// AnnotationReservationOwners indicates the owner specification which can allocate reserved resources
	AnnotationReservationOwners = SchedulingDomainPrefix + "/reservation-owners"

	// AnnotationReservationCurrentOwner indicates current resource owners which allocated the reservation resources.
	AnnotationReservationCurrentOwner = SchedulingDomainPrefix + "/reservation-current-owner"
)
const (
	// LabelReservationOrder controls the preference logic for Reservation.
	// A Reservation with a lower order is preferred over one with a higher order.
	// If the order is 0, the Reservation is selected according to its capacity score.
	LabelReservationOrder = SchedulingDomainPrefix + "/reservation-order"

	// AnnotationReservationAllocated represents the reservation allocated by the pod.
	AnnotationReservationAllocated = SchedulingDomainPrefix + "/reservation-allocated"

	// AnnotationReservationAffinity represents the constraints for a Pod to select a Reservation
	AnnotationReservationAffinity = SchedulingDomainPrefix + "/reservation-affinity"
)
const (
	BatchCPU    corev1.ResourceName = ResourceDomainPrefix + "batch-cpu"
	BatchMemory corev1.ResourceName = ResourceDomainPrefix + "batch-memory"
	MidCPU      corev1.ResourceName = ResourceDomainPrefix + "mid-cpu"
	MidMemory   corev1.ResourceName = ResourceDomainPrefix + "mid-memory"
)
const (
	// AnnotationCustomUsageThresholds represents the user-defined resource utilization threshold.
	// For specific value definitions, see CustomUsageThresholds
	AnnotationCustomUsageThresholds = SchedulingDomainPrefix + "/usage-thresholds"
)
const (
	// AnnotationEvictionCost indicates the eviction cost. It can be set to an int32 value.
	// Although the K8s community has [Pod Deletion Cost #2255](https://github.com/kubernetes/enhancements/issues/2255),
	// it is not a general mechanism. To avoid conflicts with components that use `Pod Deletion Cost`,
	// users can individually mark the eviction cost for Pods.
	// The implicit eviction cost for pods that don't set the annotation is 0; negative values are permitted.
	// If the cost is set to `math.MaxInt32`, the Pod will not be evicted.
	// Pods with a lower eviction cost are preferred to be evicted before pods with a higher eviction cost.
	// If a batch of Pods to be evicted have the same priority, they are sorted by cost,
	// and the Pod with the smallest cost is evicted first.
	AnnotationEvictionCost = SchedulingDomainPrefix + "/eviction-cost"
)
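A small sketch of setting the annotation and parsing it with GetEvictionCost (assuming the import path github.com/koordinator-sh/koordinator/apis/extension); the cost value is illustrative:

package main

import (
	"fmt"

	"github.com/koordinator-sh/koordinator/apis/extension"
)

func main() {
	// Prefer evicting this pod before others by giving it a low (negative) cost.
	annotations := map[string]string{
		extension.AnnotationEvictionCost: "-100",
	}

	cost, err := extension.GetEvictionCost(annotations)
	fmt.Println(cost, err) // -100 <nil>
}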
const (
	// AnnotationExtendedResourceSpec specifies the resource requirements of extended resources for internal usage.
	// It annotates the requests/limits of extended resources and can be used by runtime proxy and koordlet that
	// cannot get the original pod spec in CRI requests.
	AnnotationExtendedResourceSpec = NodeDomainPrefix + "/extended-resource-spec"
)
const (
	AnnotationNodeReservation = NodeDomainPrefix + "/reservation"
)
const (
	AnnotationNodeSystemQOSResource = NodeDomainPrefix + "/system-qos-resource"
)
const (
	AnnotationSkipUpdateResource = "config.koordinator.sh/skip-update-resources"
)
const (
	// AnnotationSoftEviction indicates a custom eviction. It can be set to "true".
	AnnotationSoftEviction = SchedulingDomainPrefix + "/soft-eviction"
)

Variables

var (
	PriorityProdValueMax int32 = 9999
	PriorityProdValueMin int32 = 9000

	PriorityMidValueMax int32 = 7999
	PriorityMidValueMin int32 = 7000

	PriorityBatchValueMax int32 = 5999
	PriorityBatchValueMin int32 = 5000

	PriorityFreeValueMax int32 = 3999
	PriorityFreeValueMin int32 = 3000
)

Koordinator priorities are defined as variables so that different priority ranges can be customized.
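A hedged sketch of reading a pod's sub-priority from the koordinator.sh/priority label with GetPodSubPriority and checking it against the Prod range variables; the label value is illustrative:

package main

import (
	"fmt"

	"github.com/koordinator-sh/koordinator/apis/extension"
)

func main() {
	// Sub-priority is carried by the koordinator.sh/priority label.
	labels := map[string]string{extension.LabelPodPriority: "9100"}

	subPriority, err := extension.GetPodSubPriority(labels)
	if err != nil {
		panic(err)
	}

	// Check whether the value falls into the Prod range [9000, 9999].
	inProdRange := subPriority >= extension.PriorityProdValueMin && subPriority <= extension.PriorityProdValueMax
	fmt.Println(subPriority, inProdRange) // 9100 true
}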

var DefaultPriorityClass = PriorityNone

DefaultPriorityClass is the default PriorityClass used when a pod does not specify a Koordinator PriorityClass.

var QoSClassForGuaranteed = QoSLSR

QoSClassForGuaranteed indicates the QoSClass that a Guaranteed Pod without a Koordinator QoSClass specified is regarded as by default. TODO: add component options to customize it.

Functions

func Amplify added in v1.4.0

func Amplify(origin int64, ratio Ratio) int64

func AmplifyResourceList added in v1.4.0

func AmplifyResourceList(requests corev1.ResourceList, amplificationRatios map[corev1.ResourceName]Ratio, resourceNames ...corev1.ResourceName)

func GetAllocated added in v1.4.0

func GetAllocated(quota *v1alpha1.ElasticQuota) (corev1.ResourceList, error)

func GetAnnotationQuotaNamespaces added in v1.4.0

func GetAnnotationQuotaNamespaces(quota *v1alpha1.ElasticQuota) []string

func GetCPUNormalizationEnabled added in v1.4.0

func GetCPUNormalizationEnabled(node *corev1.Node) (*bool, error)

func GetCPUNormalizationRatio added in v1.4.0

func GetCPUNormalizationRatio(node *corev1.Node) (float64, error)

GetCPUNormalizationRatio gets the cpu normalization ratio from the node. It returns -1 without an error when the cpu normalization annotation is missing.

func GetChildRequest added in v1.4.0

func GetChildRequest(quota *v1alpha1.ElasticQuota) (corev1.ResourceList, error)

func GetEvictionCost added in v1.1.1

func GetEvictionCost(annotations map[string]string) (int32, error)

func GetGangMatchPolicy added in v1.3.0

func GetGangMatchPolicy(pod *corev1.Pod) string

func GetGangName added in v1.0.0

func GetGangName(pod *corev1.Pod) string

func GetGuaranteed added in v1.4.0

func GetGuaranteed(quota *v1alpha1.ElasticQuota) (corev1.ResourceList, error)

func GetKubeQosClass added in v1.3.0

func GetKubeQosClass(pod *corev1.Pod) corev1.PodQOSClass

func GetMinNum added in v1.0.0

func GetMinNum(pod *corev1.Pod) (int, error)

func GetNodeRawAllocatable added in v1.4.0

func GetNodeRawAllocatable(annotations map[string]string) (corev1.ResourceList, error)

GetNodeRawAllocatable gets the raw allocatable of node from annotations.

func GetNodeResourceAmplificationRatios added in v1.4.0

func GetNodeResourceAmplificationRatios(annotations map[string]string) (map[corev1.ResourceName]Ratio, error)

GetNodeResourceAmplificationRatios gets the resource amplification ratios of node from annotations.

func GetNonPreemptibleRequest added in v1.4.0

func GetNonPreemptibleRequest(quota *v1alpha1.ElasticQuota) (corev1.ResourceList, error)

func GetNonPreemptibleUsed added in v1.4.0

func GetNonPreemptibleUsed(quota *v1alpha1.ElasticQuota) (corev1.ResourceList, error)

func GetParentQuotaName added in v0.7.0

func GetParentQuotaName(quota *v1alpha1.ElasticQuota) string

func GetPodSubPriority added in v0.7.0

func GetPodSubPriority(labels map[string]string) (int32, error)

GetPodSubPriority gets the pod's Koordinator sub-priority from its labels.

func GetQuotaName added in v1.0.0

func GetQuotaName(pod *corev1.Pod) string

func GetQuotaTreeID added in v1.4.0

func GetQuotaTreeID(quota *v1alpha1.ElasticQuota) string

func GetRequest added in v1.4.0

func GetRequest(quota *v1alpha1.ElasticQuota) (corev1.ResourceList, error)

func GetReservationCurrentOwner added in v1.3.0

func GetReservationCurrentOwner(annotations map[string]string) (*corev1.ObjectReference, error)

func GetReservationOwners added in v1.3.0

func GetReservationOwners(annotations map[string]string) ([]schedulingv1alpha1.ReservationOwner, error)

func GetReservedCPUs added in v1.2.0

func GetReservedCPUs(annotations map[string]string) (reservedCPUs string, numReservedCPUs int)

func GetRuntime added in v1.4.0

func GetRuntime(quota *v1alpha1.ElasticQuota) (corev1.ResourceList, error)

func GetSharedWeight added in v0.7.0

func GetSharedWeight(quota *v1alpha1.ElasticQuota) corev1.ResourceList

func HasNodeRawAllocatable added in v1.4.0

func HasNodeRawAllocatable(annotations map[string]string) bool

HasNodeRawAllocatable checks if the node has raw allocatable annotation.

func IsAllowForceUpdate added in v1.4.0

func IsAllowForceUpdate(quota *v1alpha1.ElasticQuota) bool

func IsAllowLentResource added in v0.7.0

func IsAllowLentResource(quota *v1alpha1.ElasticQuota) bool

func IsCPUNormalizationRatioDifferent added in v1.4.0

func IsCPUNormalizationRatioDifferent(old, new float64) bool

func IsForbiddenModify added in v0.7.0

func IsForbiddenModify(quota *v1alpha1.ElasticQuota) (bool, error)

func IsParentQuota added in v0.7.0

func IsParentQuota(quota *v1alpha1.ElasticQuota) bool

func IsPodNonPreemptible added in v1.4.0

func IsPodNonPreemptible(pod *corev1.Pod) bool

func IsReservationAllocateOnce added in v1.3.0

func IsReservationAllocateOnce(r *schedulingv1alpha1.Reservation) bool

func IsReservationOperatingMode added in v1.3.0

func IsReservationOperatingMode(pod *corev1.Pod) bool

func IsTreeRootQuota added in v1.4.0

func IsTreeRootQuota(quota *v1alpha1.ElasticQuota) bool

func RemoveReservationCurrentOwner added in v1.3.0

func RemoveReservationCurrentOwner(annotations map[string]string)

func SetCPUBasicInfo added in v1.4.0

func SetCPUBasicInfo(annotations map[string]string, info *CPUBasicInfo) bool

SetCPUBasicInfo sets the cpu basic info at the node-level annotations. It returns true if the annotation changes.

func SetCPUNormalizationRatio added in v1.4.0

func SetCPUNormalizationRatio(node *corev1.Node, ratio float64) bool

SetCPUNormalizationRatio sets the node annotation according to the cpu-normalization-ratio. It returns true if the annotation value changes. NOTE: The ratio will be converted to a string with precision 2, e.g. 3.1415926 -> 3.14.

func SetDeviceAllocateHints added in v1.4.0

func SetDeviceAllocateHints(obj metav1.Object, hint DeviceAllocateHints) error

func SetDeviceAllocations added in v0.7.0

func SetDeviceAllocations(obj metav1.Object, allocations DeviceAllocations) error

func SetDeviceJointAllocate added in v1.4.0

func SetDeviceJointAllocate(obj metav1.Object, jointAllocate *DeviceJointAllocate) error

func SetExtendedResourceSpec added in v1.1.0

func SetExtendedResourceSpec(pod *corev1.Pod, spec *ExtendedResourceSpec) error

func SetNodeNUMATopologyPolicy added in v1.4.0

func SetNodeNUMATopologyPolicy(obj metav1.Object, policy NUMATopologyPolicy)

func SetNodeRawAllocatable added in v1.4.0

func SetNodeRawAllocatable(node *corev1.Node, allocatable corev1.ResourceList)

SetNodeRawAllocatable sets the node annotation according to the raw allocatable.

func SetNodeResourceAmplificationRatio added in v1.4.0

func SetNodeResourceAmplificationRatio(node *corev1.Node, resource corev1.ResourceName, ratio Ratio) (bool, error)

SetNodeResourceAmplificationRatio sets the amplification ratio of a specific resource of the node. It returns true if the ratio changes. NOTE: The ratio will be converted to a string with precision 2, e.g. 3.1415926 -> 3.14.

func SetNodeResourceAmplificationRatios added in v1.4.0

func SetNodeResourceAmplificationRatios(node *corev1.Node, ratios map[corev1.ResourceName]Ratio)

SetNodeResourceAmplificationRatios sets the node annotation according to the resource amplification ratios. NOTE: The ratio will be converted to a string with precision 2, e.g. 3.1415926 -> 3.14.
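A sketch combining the amplification helpers (assuming the import path github.com/koordinator-sh/koordinator/apis/extension, and assuming Amplify scales the origin value by the given ratio as its name suggests); the ratio and quantities are illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/koordinator-sh/koordinator/apis/extension"
)

func main() {
	node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1", Annotations: map[string]string{}}}

	// Record a 1.5x CPU amplification ratio on the node annotation.
	extension.SetNodeResourceAmplificationRatios(node, map[corev1.ResourceName]extension.Ratio{
		corev1.ResourceCPU: 1.5,
	})

	ratios, err := extension.GetNodeResourceAmplificationRatios(node.Annotations)
	if err != nil {
		panic(err)
	}
	fmt.Println(ratios[corev1.ResourceCPU]) // 1.5

	// Amplify a raw quantity (e.g. CPU milli-cores) by the recorded ratio.
	fmt.Println(extension.Amplify(4000, ratios[corev1.ResourceCPU])) // expected 6000
}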

func SetReservationAffinity added in v1.4.0

func SetReservationAffinity(obj metav1.Object, affinity *ReservationAffinity) error

func SetReservationAllocated added in v0.6.0

func SetReservationAllocated(pod *corev1.Pod, r metav1.Object)

func SetReservationCurrentOwner added in v1.3.0

func SetReservationCurrentOwner(annotations map[string]string, owner *corev1.ObjectReference) error

func SetReservationOwners added in v1.4.0

func SetReservationOwners(obj metav1.Object, owners []schedulingv1alpha1.ReservationOwner) error

func SetResourceSpec added in v1.3.0

func SetResourceSpec(obj metav1.Object, spec *ResourceSpec) error

func SetResourceStatus added in v0.7.0

func SetResourceStatus(obj metav1.Object, status *ResourceStatus) error

func ShouldSkipUpdateResource added in v1.3.0

func ShouldSkipUpdateResource(profile *configv1alpha1.ClusterColocationProfile) bool

func TranslateResourceNameByPriorityClass added in v0.4.0

func TranslateResourceNameByPriorityClass(priorityClass PriorityClass, defaultResourceName corev1.ResourceName) corev1.ResourceName

TranslateResourceNameByPriorityClass translates the defaultResourceName to an extended resource name according to the PriorityClass.
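A hedged example of the expected translation: based on the BatchCPU/BatchMemory constants above, plain cpu/memory names are assumed to map to the batch extended resources for the koord-batch PriorityClass and to stay unchanged otherwise.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"

	"github.com/koordinator-sh/koordinator/apis/extension"
)

func main() {
	// Expected to print the batch extended resource names, e.g. kubernetes.io/batch-cpu.
	fmt.Println(extension.TranslateResourceNameByPriorityClass(extension.PriorityBatch, corev1.ResourceCPU))
	fmt.Println(extension.TranslateResourceNameByPriorityClass(extension.PriorityBatch, corev1.ResourceMemory))

	// For koord-prod pods the default resource name is expected to be returned unchanged.
	fmt.Println(extension.TranslateResourceNameByPriorityClass(extension.PriorityProd, corev1.ResourceCPU))
}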

Types

type AggregationType added in v1.3.0

type AggregationType string
const (
	// max is not recommended since it may introduce outliers
	AVG AggregationType = "avg"
	P99 AggregationType = "p99"
	P95 AggregationType = "p95"
	P90 AggregationType = "p90"
	P50 AggregationType = "p50"
)

type CPUBasicInfo added in v1.4.0

type CPUBasicInfo struct {
	CPUModel           string `json:"cpuModel,omitempty"`
	HyperThreadEnabled bool   `json:"hyperThreadEnabled,omitempty"`
	TurboEnabled       bool   `json:"turboEnabled,omitempty"`
	CatL3CbmMask       string `json:"catL3CbmMask,omitempty"`
	VendorID           string `json:"vendorID,omitempty"`
}

CPUBasicInfo describes the cpu basic features and status.

func GetCPUBasicInfo added in v1.4.0

func GetCPUBasicInfo(annotations map[string]string) (*CPUBasicInfo, error)

GetCPUBasicInfo gets the cpu basic info from the node-level annotations. It returns nil info without an error when the cpu basic info annotation is missing.

func (*CPUBasicInfo) Key added in v1.4.0

func (c *CPUBasicInfo) Key() string

type CPUBindPolicy added in v0.5.0

type CPUBindPolicy string

CPUBindPolicy defines the CPU binding policy

const (
	// CPUBindPolicyDefault performs the default bind policy specified in the koord-scheduler configuration
	CPUBindPolicyDefault CPUBindPolicy = "Default"
	// CPUBindPolicyFullPCPUs favors cpuset allocations that pack logical CPUs onto few physical cores
	CPUBindPolicyFullPCPUs CPUBindPolicy = "FullPCPUs"
	// CPUBindPolicySpreadByPCPUs favors cpuset allocations that spread logical CPUs evenly across physical cores
	CPUBindPolicySpreadByPCPUs CPUBindPolicy = "SpreadByPCPUs"
	// CPUBindPolicyConstrainedBurst constrains the CPU Shared Pool range of Burstable Pods
	CPUBindPolicyConstrainedBurst CPUBindPolicy = "ConstrainedBurst"
)

type CPUExclusivePolicy added in v0.6.0

type CPUExclusivePolicy string
const (
	// CPUExclusivePolicyNone does not perform any exclusive policy
	CPUExclusivePolicyNone CPUExclusivePolicy = "None"
	// CPUExclusivePolicyPCPULevel represents mutual exclusion in the physical core dimension
	CPUExclusivePolicyPCPULevel CPUExclusivePolicy = "PCPULevel"
	// CPUExclusivePolicyNUMANodeLevel indicates mutual exclusion in the NUMA topology dimension
	CPUExclusivePolicyNUMANodeLevel CPUExclusivePolicy = "NUMANodeLevel"
)

type CPUInfo added in v0.5.0

type CPUInfo struct {
	ID     int32 `json:"id"`
	Core   int32 `json:"core"`
	Socket int32 `json:"socket"`
	Node   int32 `json:"node"`
}

type CPUSharedPool added in v0.5.0

type CPUSharedPool struct {
	Socket int32  `json:"socket"`
	Node   int32  `json:"node"`
	CPUSet string `json:"cpuset,omitempty"`
}

func GetNodeBECPUSharePools added in v1.4.0

func GetNodeBECPUSharePools(nodeTopoAnnotations map[string]string) ([]CPUSharedPool, error)

func GetNodeCPUSharePools added in v0.5.0

func GetNodeCPUSharePools(nodeTopoAnnotations map[string]string) ([]CPUSharedPool, error)

type CPUTopology added in v0.5.0

type CPUTopology struct {
	Detail []CPUInfo `json:"detail,omitempty"`
}

func GetCPUTopology added in v0.5.0

func GetCPUTopology(annotations map[string]string) (*CPUTopology, error)

type CustomAggregatedUsage added in v1.1.0

type CustomAggregatedUsage struct {
	// UsageThresholds indicates the resource utilization threshold of the machine based on percentile statistics
	UsageThresholds map[corev1.ResourceName]int64 `json:"usageThresholds,omitempty"`
	// UsageAggregationType indicates the percentile type of the machine's utilization when filtering
	UsageAggregationType AggregationType `json:"usageAggregationType,omitempty"`
	// UsageAggregatedDuration indicates the statistical period of the percentile of the machine's utilization when filtering
	UsageAggregatedDuration *metav1.Duration `json:"usageAggregatedDuration,omitempty"`
}

type CustomUsageThresholds added in v0.4.0

type CustomUsageThresholds struct {
	// UsageThresholds indicates the resource utilization threshold of the whole machine.
	UsageThresholds map[corev1.ResourceName]int64 `json:"usageThresholds,omitempty"`
	// ProdUsageThresholds indicates the resource utilization threshold of Prod Pods compared to the whole machine
	ProdUsageThresholds map[corev1.ResourceName]int64 `json:"prodUsageThresholds,omitempty"`
	// AggregatedUsage supports resource utilization filtering and scoring based on percentile statistics
	AggregatedUsage *CustomAggregatedUsage `json:"aggregatedUsage,omitempty"`
}

CustomUsageThresholds supports user-defined node resource utilization thresholds.

func GetCustomUsageThresholds added in v0.4.0

func GetCustomUsageThresholds(node *corev1.Node) (*CustomUsageThresholds, error)

type DeviceAllocateHints added in v1.4.0

type DeviceAllocateHints map[schedulingv1alpha1.DeviceType]*DeviceHint

func GetDeviceAllocateHints added in v1.4.0

func GetDeviceAllocateHints(annotations map[string]string) (DeviceAllocateHints, error)

type DeviceAllocateStrategy added in v1.4.0

type DeviceAllocateStrategy string
const (
	ApplyForAllDeviceAllocateStrategy DeviceAllocateStrategy = "ApplyForAll"
	RequestsAsCountAllocateStrategy   DeviceAllocateStrategy = "RequestsAsCount"
)

type DeviceAllocation added in v0.7.0

type DeviceAllocation struct {
	Minor     int32                      `json:"minor"`
	Resources corev1.ResourceList        `json:"resources"`
	Extension *DeviceAllocationExtension `json:"extension,omitempty"`
}

type DeviceAllocationExtension added in v1.4.0

type DeviceAllocationExtension struct {
	VirtualFunctions []VirtualFunction `json:"vfs,omitempty"`
}

type DeviceAllocations added in v0.7.0

type DeviceAllocations map[schedulingv1alpha1.DeviceType][]*DeviceAllocation

DeviceAllocations is injected into the Pod as an annotation during the PreBind stage.

{
  "gpu": [
    {
      "minor": 0,
      "resources": {
        "koordinator.sh/gpu-core": 100,
        "koordinator.sh/gpu-memory-ratio": 100,
        "koordinator.sh/gpu-memory": "16Gi"
      }
    },
    {
      "minor": 1,
      "resources": {
        "koordinator.sh/gpu-core": 100,
        "koordinator.sh/gpu-memory-ratio": 100,
        "koordinator.sh/gpu-memory": "16Gi"
      }
    }
  ]
}

func GetDeviceAllocations added in v0.7.0

func GetDeviceAllocations(podAnnotations map[string]string) (DeviceAllocations, error)
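A sketch of writing and reading back device allocations, assuming the GPU DeviceType constant from the apis/scheduling/v1alpha1 package and the import paths shown; the allocation mirrors the JSON example above:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/koordinator-sh/koordinator/apis/extension"
	schedulingv1alpha1 "github.com/koordinator-sh/koordinator/apis/scheduling/v1alpha1"
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Annotations: map[string]string{}}}

	// Record that GPU minor 0 was fully allocated to this pod.
	allocations := extension.DeviceAllocations{
		schedulingv1alpha1.GPU: []*extension.DeviceAllocation{
			{
				Minor: 0,
				Resources: corev1.ResourceList{
					extension.ResourceGPUCore:        resource.MustParse("100"),
					extension.ResourceGPUMemoryRatio: resource.MustParse("100"),
					extension.ResourceGPUMemory:      resource.MustParse("16Gi"),
				},
			},
		},
	}
	if err := extension.SetDeviceAllocations(pod, allocations); err != nil {
		panic(err)
	}

	got, err := extension.GetDeviceAllocations(pod.Annotations)
	if err != nil {
		panic(err)
	}
	fmt.Println(got[schedulingv1alpha1.GPU][0].Minor) // 0
}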

type DeviceExclusivePolicy added in v1.4.0

type DeviceExclusivePolicy string
const (
	// DeviceLevelDeviceExclusivePolicy represents mutual exclusion in the device instance dimension
	DeviceLevelDeviceExclusivePolicy DeviceExclusivePolicy = "DeviceLevel"
	// PCIExpressLevelDeviceExclusivePolicy represents mutual exclusion in the PCIe dimension
	PCIExpressLevelDeviceExclusivePolicy DeviceExclusivePolicy = "PCIeLevel"
)

type DeviceHint added in v1.4.0

type DeviceHint struct {
	// Selector selects devices by label selector.
	Selector *metav1.LabelSelector `json:"selector,omitempty"`
	// VFSelector selects VFs by label selector.
	// If VFSelector is specified, the scheduler allocates VFs from PFs that satisfy the VFSelector.
	VFSelector *metav1.LabelSelector `json:"vfSelector,omitempty"`
	// AllocateStrategy controls the allocation strategy
	AllocateStrategy DeviceAllocateStrategy `json:"allocateStrategy,omitempty"`
	// ExclusivePolicy indicates the exclusive policy.
	ExclusivePolicy DeviceExclusivePolicy `json:"exclusivePolicy,omitempty"`
}

type DeviceJointAllocate added in v1.4.0

type DeviceJointAllocate struct {
	// DeviceTypes indicates that the specified types of devices are grouped and allocated according to topology.
	DeviceTypes []schedulingv1alpha1.DeviceType `json:"deviceTypes,omitempty"`
	// RequiredScope specifies the allocation scope required for the joint allocation of devices.
	// It defines the granularity at which devices should be joint-allocated, e.g. in the same PCIe.
	RequiredScope DeviceJointAllocateScope `json:"requiredScope,omitempty"`
}

func GetDeviceJointAllocate added in v1.4.0

func GetDeviceJointAllocate(annotations map[string]string) (*DeviceJointAllocate, error)

type DeviceJointAllocateScope added in v1.4.0

type DeviceJointAllocateScope string
const (
	SamePCIeDeviceJointAllocateScope DeviceJointAllocateScope = "SamePCIe"
)

type ExtendedResourceContainerSpec added in v1.1.0

type ExtendedResourceContainerSpec struct {
	Limits   corev1.ResourceList `json:"limits,omitempty"`
	Requests corev1.ResourceList `json:"requests,omitempty"`
}

type ExtendedResourceSpec added in v1.1.0

type ExtendedResourceSpec struct {
	Containers map[string]ExtendedResourceContainerSpec `json:"containers,omitempty"`
}

func GetExtendedResourceSpec added in v1.1.0

func GetExtendedResourceSpec(annotations map[string]string) (*ExtendedResourceSpec, error)

GetExtendedResourceSpec parses ExtendedResourceSpec from annotations
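A hedged round-trip example with SetExtendedResourceSpec and GetExtendedResourceSpec (assuming the import path github.com/koordinator-sh/koordinator/apis/extension); the container name and quantities are illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/koordinator-sh/koordinator/apis/extension"
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Annotations: map[string]string{}}}

	// Record the batch resource requests of the "main" container so that components
	// without access to the original pod spec can still read them.
	spec := &extension.ExtendedResourceSpec{
		Containers: map[string]extension.ExtendedResourceContainerSpec{
			"main": {
				Requests: corev1.ResourceList{
					extension.BatchCPU:    resource.MustParse("1000"),
					extension.BatchMemory: resource.MustParse("2Gi"),
				},
			},
		},
	}
	if err := extension.SetExtendedResourceSpec(pod, spec); err != nil {
		panic(err)
	}

	got, err := extension.GetExtendedResourceSpec(pod.Annotations)
	if err != nil {
		panic(err)
	}
	cpu := got.Containers["main"].Requests[extension.BatchCPU]
	fmt.Println(cpu.String())
}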

type KubeletCPUManagerPolicy added in v0.6.0

type KubeletCPUManagerPolicy struct {
	Policy       string            `json:"policy,omitempty"`
	Options      map[string]string `json:"options,omitempty"`
	ReservedCPUs string            `json:"reservedCPUs,omitempty"`
}

func GetKubeletCPUManagerPolicy added in v0.6.0

func GetKubeletCPUManagerPolicy(annotations map[string]string) (*KubeletCPUManagerPolicy, error)

type NUMAAllocateStrategy added in v1.0.0

type NUMAAllocateStrategy string

NUMAAllocateStrategy indicates how to choose among satisfying NUMA Nodes

const (
	// NUMAMostAllocated indicates allocating from the NUMA Node with the least amount of available resources.
	NUMAMostAllocated NUMAAllocateStrategy = "MostAllocated"
	// NUMALeastAllocated indicates allocating from the NUMA Node with the most amount of available resources.
	NUMALeastAllocated NUMAAllocateStrategy = "LeastAllocated"
	// NUMADistributeEvenly indicates distributing CPUs evenly across NUMA Nodes.
	NUMADistributeEvenly NUMAAllocateStrategy = "DistributeEvenly"
)

type NUMANodeResource added in v1.4.0

type NUMANodeResource struct {
	Node      int32               `json:"node"`
	Resources corev1.ResourceList `json:"resources,omitempty"`
}

type NUMATopologyPolicy added in v1.4.0

type NUMATopologyPolicy string
const (
	NUMATopologyPolicyNone           NUMATopologyPolicy = ""
	NUMATopologyPolicyBestEffort     NUMATopologyPolicy = "BestEffort"
	NUMATopologyPolicyRestricted     NUMATopologyPolicy = "Restricted"
	NUMATopologyPolicySingleNUMANode NUMATopologyPolicy = "SingleNUMANode"
)

func GetNodeNUMATopologyPolicy added in v1.4.0

func GetNodeNUMATopologyPolicy(labels map[string]string) NUMATopologyPolicy

type NodeCPUBindPolicy added in v1.4.0

type NodeCPUBindPolicy string
const (
	// NodeCPUBindPolicyNone does not enforce any bind policy
	NodeCPUBindPolicyNone NodeCPUBindPolicy = "None"
	// NodeCPUBindPolicyFullPCPUsOnly requires the scheduler to allocate full physical cores.
	// Equivalent to kubelet CPU manager policy option full-pcpus-only=true.
	NodeCPUBindPolicyFullPCPUsOnly NodeCPUBindPolicy = "FullPCPUsOnly"
	// NodeCPUBindPolicySpreadByPCPUs requires the scheduler to spread logical CPUs evenly across physical cores
	NodeCPUBindPolicySpreadByPCPUs NodeCPUBindPolicy = "SpreadByPCPUs"
)

func GetNodeCPUBindPolicy added in v1.1.0

func GetNodeCPUBindPolicy(nodeLabels map[string]string, kubeletCPUPolicy *KubeletCPUManagerPolicy) NodeCPUBindPolicy

type NodeReservation added in v1.2.0

type NodeReservation struct {
	// Resources specifies the resources to be reserved, e.g. {"cpu":"1C", "memory":"2Gi"}
	Resources corev1.ResourceList `json:"resources,omitempty"`
	// ReservedCPUs specifies the CPUs to be reserved, e.g. 1-6 or 2,4,6,8
	ReservedCPUs string `json:"reservedCPUs,omitempty"`
	// ApplyPolicy indicates how the reserved resources take effect.
	ApplyPolicy NodeReservationApplyPolicy `json:"applyPolicy,omitempty"`
}

NodeReservation describes the resources reserved via the node annotation. The annotation declares the resources to be reserved like this:

 annotations:
   node.koordinator.sh/reservation: >-
     {"reservedCPUs":"0-5"}

func GetNodeReservation added in v1.3.0

func GetNodeReservation(annotations map[string]string) (*NodeReservation, error)
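A small example of parsing the node reservation annotation shown above with GetNodeReservation (assuming the import path github.com/koordinator-sh/koordinator/apis/extension):

package main

import (
	"fmt"

	"github.com/koordinator-sh/koordinator/apis/extension"
)

func main() {
	// The same declaration as the annotation example above, as it appears on a node.
	annotations := map[string]string{
		extension.AnnotationNodeReservation: `{"reservedCPUs":"0-5"}`,
	}

	reservation, err := extension.GetNodeReservation(annotations)
	if err != nil {
		panic(err)
	}
	fmt.Println(reservation.ReservedCPUs) // 0-5
}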

type NodeReservationApplyPolicy added in v1.3.0

type NodeReservationApplyPolicy string
const (
	// NodeReservationApplyPolicyDefault will affect the total amount of schedulable resources of the node and reserve CPU Cores.
	// For example, NodeInfo.Allocatable will be modified in the scheduler to deduct the amount of reserved resources
	NodeReservationApplyPolicyDefault NodeReservationApplyPolicy = "Default"
	// NodeReservationApplyPolicyReservedCPUsOnly means that only CPU Cores are reserved, but it will
	// not affect the total amount of schedulable resources of the node.
	// The total amount of schedulable resources is taken into effect by the kubelet's reservation mechanism.
	// But Koordinator needs to exclude the reserved CPUs when allocating CPU cores
	NodeReservationApplyPolicyReservedCPUsOnly NodeReservationApplyPolicy = "ReservedCPUsOnly"
)

type PodCPUAlloc added in v0.5.0

type PodCPUAlloc struct {
	Namespace        string    `json:"namespace,omitempty"`
	Name             string    `json:"name,omitempty"`
	UID              types.UID `json:"uid,omitempty"`
	CPUSet           string    `json:"cpuset,omitempty"`
	ManagedByKubelet bool      `json:"managedByKubelet,omitempty"`
}

type PodCPUAllocs added in v0.5.0

type PodCPUAllocs []PodCPUAlloc

func GetPodCPUAllocs added in v0.5.0

func GetPodCPUAllocs(annotations map[string]string) (PodCPUAllocs, error)

type PodOperatingMode added in v1.3.0

type PodOperatingMode string
const (
	// RunnablePodOperatingMode represents the original pod behavior, it is the default mode where the
	// pod’s containers are executed by Kubelet when the pod is assigned a node.
	RunnablePodOperatingMode PodOperatingMode = "Runnable"

	// ReservationPodOperatingMode means the pod represents a scheduling and resource reservation unit
	ReservationPodOperatingMode PodOperatingMode = "Reservation"
)

type PriorityClass

type PriorityClass string
const (
	PriorityProd  PriorityClass = "koord-prod"
	PriorityMid   PriorityClass = "koord-mid"
	PriorityBatch PriorityClass = "koord-batch"
	PriorityFree  PriorityClass = "koord-free"
	PriorityNone  PriorityClass = ""
)

https://koordinator.sh/docs/architecture/priority/

func GetPodPriorityClassByName added in v1.3.0

func GetPodPriorityClassByName(priorityClass string) PriorityClass

func GetPodPriorityClassRaw added in v1.3.0

func GetPodPriorityClassRaw(pod *corev1.Pod) PriorityClass

func GetPodPriorityClassWithDefault added in v1.3.0

func GetPodPriorityClassWithDefault(pod *corev1.Pod) PriorityClass

GetPodPriorityClassWithDefault gets the pod's PriorityClass with the default config.

func GetPodPriorityClassWithQoS added in v1.3.0

func GetPodPriorityClassWithQoS(qos QoSClass) PriorityClass

GetPodPriorityClassWithQoS returns the default PriorityClass according to its QoSClass when the pod does not specify a PriorityClass explicitly. Note that this is only a derivation of the default value, and the reverse is not true. For example, PriorityMid can also be combined with QoSLS.

type QoSClass

type QoSClass string
const (
	QoSLSE    QoSClass = "LSE"
	QoSLSR    QoSClass = "LSR"
	QoSLS     QoSClass = "LS"
	QoSBE     QoSClass = "BE"
	QoSSystem QoSClass = "SYSTEM"
	QoSNone   QoSClass = ""
)

https://koordinator.sh/docs/architecture/qos/

func GetPodQoSClassByName added in v0.7.0

func GetPodQoSClassByName(qos string) QoSClass

func GetPodQoSClassRaw added in v1.3.0

func GetPodQoSClassRaw(pod *corev1.Pod) QoSClass

func GetPodQoSClassWithDefault added in v1.3.0

func GetPodQoSClassWithDefault(pod *corev1.Pod) QoSClass

GetPodQoSClassWithDefault gets the pod's QoSClass with the default config.

func GetPodQoSClassWithKubeQoS added in v1.3.0

func GetPodQoSClassWithKubeQoS(kubeQOS corev1.PodQOSClass) QoSClass

GetPodQoSClassWithKubeQoS returns the default QoSClass according to its kubernetes QoSClass when the pod does not specify a koordinator QoSClass explicitly. https://koordinator.sh/docs/architecture/qos#koordinator-qos-vs-kubernetes-qos

func GetQoSClassByAttrs added in v0.6.0

func GetQoSClassByAttrs(labels, annotations map[string]string) QoSClass
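A short sketch of resolving a pod's Koordinator QoS class from its labels with the helpers above (assuming the import path github.com/koordinator-sh/koordinator/apis/extension); the label value is illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/koordinator-sh/koordinator/apis/extension"
)

func main() {
	// A pod labeled with the Koordinator QoS class "BE".
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "demo",
			Labels: map[string]string{extension.LabelPodQoS: string(extension.QoSBE)},
		},
	}

	fmt.Println(extension.GetPodQoSClassRaw(pod))         // BE
	fmt.Println(extension.GetPodQoSClassWithDefault(pod)) // BE
	fmt.Println(extension.GetQoSClassByAttrs(pod.Labels, pod.Annotations))
}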

type Ratio added in v1.4.0

type Ratio float64

Ratio is a float64 wrapper that is always JSON-marshalled with precision 2.

func GetNodeResourceAmplificationRatio added in v1.4.0

func GetNodeResourceAmplificationRatio(annotations map[string]string, resource corev1.ResourceName) (Ratio, error)

GetNodeResourceAmplificationRatio gets the amplification ratio of a specific resource of node from annotations. It returns -1 without an error when the amplification ratio is not set for this resource.

func (Ratio) MarshalJSON added in v1.4.0

func (f Ratio) MarshalJSON() ([]byte, error)

type ReservationAffinity added in v1.3.0

type ReservationAffinity struct {
	// If the affinity requirements specified by this field are not met at
	// scheduling time, the pod will not be scheduled onto the node.
	// If the affinity requirements specified by this field cease to be met
	// at some point during pod execution (e.g. due to an update), the system
	// may or may not try to eventually evict the pod from its node.
	RequiredDuringSchedulingIgnoredDuringExecution *ReservationAffinitySelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
	// ReservationSelector is a selector which must be true for the pod to fit on a reservation.
	// Selector which must match a reservation's labels for the pod to be scheduled on that node.
	ReservationSelector map[string]string `json:"reservationSelector,omitempty"`
}

ReservationAffinity represents the constraints of Pod selection Reservation

func GetReservationAffinity added in v1.3.0

func GetReservationAffinity(annotations map[string]string) (*ReservationAffinity, error)
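A hedged sketch of attaching a ReservationAffinity to a pod with SetReservationAffinity (listed under Functions) and reading it back; the selector is illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/koordinator-sh/koordinator/apis/extension"
)

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Annotations: map[string]string{}}}

	// Require the pod to be placed only on reservations labeled team=infra.
	affinity := &extension.ReservationAffinity{
		ReservationSelector: map[string]string{"team": "infra"},
	}
	if err := extension.SetReservationAffinity(pod, affinity); err != nil {
		panic(err)
	}

	got, err := extension.GetReservationAffinity(pod.Annotations)
	if err != nil {
		panic(err)
	}
	fmt.Println(got.ReservationSelector) // map[team:infra]
}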

type ReservationAffinitySelector added in v1.3.0

type ReservationAffinitySelector struct {
	// Required. A list of reservation selector terms. The terms are ORed.
	// Reuse corev1.NodeSelectorTerm to avoid defining too many repeated definitions.
	ReservationSelectorTerms []corev1.NodeSelectorTerm `json:"reservationSelectorTerms,omitempty"`
}

ReservationAffinitySelector represents the union of the results of one or more label queries over a set of reservations; that is, it represents the OR of the selectors represented by the reservation selector terms.

type ReservationAllocated added in v0.6.0

type ReservationAllocated struct {
	Name string    `json:"name,omitempty"`
	UID  types.UID `json:"uid,omitempty"`
}

func GetReservationAllocated added in v0.6.0

func GetReservationAllocated(pod *corev1.Pod) (*ReservationAllocated, error)

type ResourceSpec added in v0.5.0

type ResourceSpec struct {
	// RequiredCPUBindPolicy indicates that the CPU is allocated strictly
	// according to the specified CPUBindPolicy, otherwise the scheduling fails
	RequiredCPUBindPolicy CPUBindPolicy `json:"requiredCPUBindPolicy,omitempty"`
	// PreferredCPUBindPolicy represents best-effort CPU bind policy.
	PreferredCPUBindPolicy CPUBindPolicy `json:"preferredCPUBindPolicy,omitempty"`
	// PreferredCPUExclusivePolicy represents best-effort CPU exclusive policy.
	PreferredCPUExclusivePolicy CPUExclusivePolicy `json:"preferredCPUExclusivePolicy,omitempty"`
}

ResourceSpec describes extra attributes of the resource requirements.

func GetResourceSpec added in v0.5.0

func GetResourceSpec(annotations map[string]string) (*ResourceSpec, error)

GetResourceSpec parses ResourceSpec from annotations

type ResourceStatus added in v0.5.0

type ResourceStatus struct {
	// CPUSet represents the allocated CPUs. It is a Linux CPU list formatted string.
	// When an LSE/LSR Pod is requested, koord-scheduler will update the field.
	CPUSet string `json:"cpuset,omitempty"`
	// NUMANodeResources indicates that the Pod is constrained to run on the specified NUMA Node.
	NUMANodeResources []NUMANodeResource `json:"numaNodeResources,omitempty"`
}

ResourceStatus describes resource allocation result, such as how to bind CPU.

func GetResourceStatus added in v0.5.0

func GetResourceStatus(annotations map[string]string) (*ResourceStatus, error)

GetResourceStatus parses ResourceStatus from annotations

type SoftEvictionSpec added in v1.2.0

type SoftEvictionSpec struct {
	// Timestamp indicates the time when the custom eviction occurs. It can be set with second precision.
	Timestamp *metav1.Time `json:"timestamp,omitempty"`
	// DeleteOptions indicates the options to delete the pod.
	DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty"`
	// Initiator indicates the initiator of the eviction.
	Initiator string `json:"initiator,omitempty"`
	// Reason indicates reason for eviction.
	Reason string `json:"reason,omitempty"`
}

func GetSoftEvictionSpec added in v1.2.0

func GetSoftEvictionSpec(annotations map[string]string) (*SoftEvictionSpec, error)

type SystemQOSResource added in v1.3.0

type SystemQOSResource struct {
	// CPU cores used for System QoS Pods, format should follow Linux CPU list
	// See: http://man7.org/linux/man-pages/man7/cpuset.7.html#FORMATS
	CPUSet string `json:"cpuset,omitempty"`
	// whether the CPU cores for System QoS are exclusive (default = true), meaning they cannot be used by other pods (LS/LSR/BE)
	CPUSetExclusive *bool `json:"cpusetExclusive,omitempty"`
}

func GetSystemQOSResource added in v1.3.0

func GetSystemQOSResource(anno map[string]string) (*SystemQOSResource, error)

func (*SystemQOSResource) IsCPUSetExclusive added in v1.3.0

func (r *SystemQOSResource) IsCPUSetExclusive() bool

type VirtualFunction added in v1.4.0

type VirtualFunction struct {
	Minor int    `json:"minor,omitempty"`
	BusID string `json:"busID,omitempty"`
}
