Documentation ¶
Index ¶
Constants ¶
const ( CgroupV1 = "v1" CgroupV2 = "v2" )
const variables indicate cgroup versions
const ( NodeAnnotationCPUOvercommitRatioKey = "katalyst.kubewharf.io/cpu_overcommit_ratio" NodeAnnotationMemoryOvercommitRatioKey = "katalyst.kubewharf.io/memory_overcommit_ratio" NodeAnnotationRealtimeCPUOvercommitRatioKey = "katalyst.kubewharf.io/realtime_cpu_overcommit_ratio" NodeAnnotationRealtimeMemoryOvercommitRatioKey = "katalyst.kubewharf.io/realtime_memory_overcommit_ratio" NodeAnnotationOriginalCapacityCPUKey = "katalyst.kubewharf.io/original_capacity_cpu" NodeAnnotationOriginalCapacityMemoryKey = "katalyst.kubewharf.io/original_capacity_memory" NodeAnnotationOriginalAllocatableCPUKey = "katalyst.kubewharf.io/original_allocatable_cpu" NodeAnnotationOriginalAllocatableMemoryKey = "katalyst.kubewharf.io/original_allocatable_memory" NodeAnnotationOvercommitCapacityCPUKey = "katalyst.kubewharf.io/overcommit_capacity_cpu" NodeAnnotationOvercommitAllocatableCPUKey = "katalyst.kubewharf.io/overcommit_allocatable_cpu" NodeAnnotationOvercommitCapacityMemoryKey = "katalyst.kubewharf.io/overcommit_capacity_memory" NodeAnnotationOvercommitAllocatableMemoryKey = "katalyst.kubewharf.io/overcommit_allocatable_memory" )
const variables for node annotations about overcommit ratio
const ( NodeOvercommitSelectorKey = "katalyst.kubewharf.io/overcommit_node_pool" DefaultNodeCPUOvercommitRatio = "1" DefaultNodeMemoryOvercommitRatio = "1" )
const variables for matching up with node labels about overcommit
const ( // KCNRAnnotationGuaranteedCPUs sum of pod guaranteed cpus in node KCNRAnnotationGuaranteedCPUs = "katalyst.kubewharf.io/guaranteed_cpus" KCNRAnnotationCPUManager = "katalyst.kubewharf.io/overcommit_cpu_manager" KCNRAnnotationMemoryManager = "katalyst.kubewharf.io/overcommit_memory_manager" CPUManagerOff KCNRAnnotationCPUManagerPolicy = "none" CPUManagerPolicyNone KCNRAnnotationCPUManagerPolicy = "none" MemoryManagerOff KCNRAnnotationMemoryManagerPolicy = "None" MemoryManagerPolicyNone KCNRAnnotationMemoryManagerPolicy = "None" )
const ( PodAnnotationInplaceUpdateResourcesKey = "pod.kubernetes.io/resizeResources" PodAnnotationInplaceUpdateResizePolicyKey = "pod.kubernetes.io/resizePolicy" PodAnnotationInplaceUpdateResizePolicyRestart = "Restart" )
const variables for pod annotations about vpa in-place resource update.
const ( PodAnnotationQoSLevelKey = "katalyst.kubewharf.io/qos_level" PodAnnotationQoSLevelReclaimedCores = string(QoSLevelReclaimedCores) PodAnnotationQoSLevelDedicatedCores = string(QoSLevelDedicatedCores) PodAnnotationQoSLevelSystemCores = string(QoSLevelSystemCores) )
const variables for pod annotations about qos level
const ( PodAnnotationMemoryEnhancementKey = "katalyst.kubewharf.io/memory_enhancement" // PodAnnotationMemoryEnhancementRssOverUseThreshold provides a mechanism to enable // the ability of overcommit for memory, and we will rely on this enhancement to ensure // memory protection if rss usage exceeds requests (based on this given ratio) PodAnnotationMemoryEnhancementRssOverUseThreshold = "rss_overuse_threshold" // PodAnnotationMemoryEnhancementNumaBinding provides a mechanism to enable numa-binding // for workloads to achieve better running performance. // // With PodAnnotationMemoryEnhancementNumaBinding but without PodAnnotationMemoryEnhancementNumaExclusive, // we have several constraints below: // 1. different workloads may still share the same numa // - these workloads may still have contentions on memory bandwidth // 2. the request for pod can be settled in a single numa node // - this is to avoid complicated cross numa memory capacity/bandwidth control // // todo: this enhancement is only supported for dedicated-cores now, // the community intends to support shared-cores in the near future. PodAnnotationMemoryEnhancementNumaBinding = "numa_binding" PodAnnotationMemoryEnhancementNumaBindingEnable = "true" // PodAnnotationMemoryEnhancementNumaExclusive provides a mechanism to enable numa-exclusive // for A SINGLE Pod to avoid contention on memory bandwidth and so on. // // - this enhancement is only supported for dedicated-cores, for now and foreseeable future PodAnnotationMemoryEnhancementNumaExclusive = "numa_exclusive" PodAnnotationMemoryEnhancementNumaExclusiveEnable = "true" // PodAnnotationMemoryEnhancementOOMPriority provides a mechanism to specify // the OOM priority for pods. Higher priority values indicate a higher likelihood // of surviving OOM events. 
// // For different QoS levels, the acceptable value ranges are as follows: // - reclaimed_cores: [-100, 0) // - shared_cores: [0, 100) // - dedicated_cores: [100, 200) // - system_cores: [200, 300) // Additionally, there are two predefined values for any pod: // - -300: Indicates that the OOM priority is ignored, and the pod does not // participate in priority comparison. // - 300: Indicates that the OOM priority is set to the highest level, the pod // will never be terminated due to OOM events from the perspective of OOM enhancement PodAnnotationMemoryEnhancementOOMPriority = "oom_priority" )
const variables for pod annotations about qos level enhancement in memory
const ( PodAnnotationCPUEnhancementKey = "katalyst.kubewharf.io/cpu_enhancement" // PodAnnotationCPUEnhancementCPUSet provides a mechanism to separate cpuset into // several orthogonal pools to avoid cpu contentions for different types of workloads, // i.e. spark batch, flink streaming, web service may fall into three pools. // and, each individual pod should be put into only one pool. // // - this enhancement is only supported for shared-cores, for now and foreseeable future // - all pods will be settled in `default` pool if not specified PodAnnotationCPUEnhancementCPUSet = "cpuset_pool" // PodAnnotationCPUEnhancementSuppressionToleranceRate provides a mechanism to ensure // the quality for reclaimed resources. since reclaimed resources will always change // dynamically according to running states of non-reclaimed services, it may reach // a point that the resource contention is still tolerable for non-reclaimed services, // but the reclaimed services run too slowly and would rather be killed and rescheduled. // in this case, the workload can use this enhancement to trigger eviction. // // - this enhancement is only supported for shared-cores, for now and foreseeable future PodAnnotationCPUEnhancementSuppressionToleranceRate = "suppression_tolerance_rate" )
const variables for pod annotations about qos level enhancement in cpu
const ( PodAnnotationNetworkEnhancementKey = "katalyst.kubewharf.io/network_enhancement" // PodAnnotationNetworkEnhancementNamespaceType provides a mechanism to select nic in different namespaces // - PodAnnotationNetworkEnhancementNamespaceTypeHost // - only select nic device in host namespace // - admit failed if not possible // - PodAnnotationNetworkEnhancementNamespaceTypeHostPrefer // - prefer to select nic device in host namespace // - also accept nic device in non-host namespace if not possible // - PodAnnotationNetworkEnhancementNamespaceTypeNotHost // - only select nic device in non-host namespace // - admit failed if not possible // - PodAnnotationNetworkEnhancementNamespaceTypeNotHostPrefer // - prefer to select nic device in non-host namespace // - also accept nic device in host namespace if not possible PodAnnotationNetworkEnhancementNamespaceType = "namespace_type" PodAnnotationNetworkEnhancementNamespaceTypeHost = "host_ns" PodAnnotationNetworkEnhancementNamespaceTypeHostPrefer = "host_ns_preferred" PodAnnotationNetworkEnhancementNamespaceTypeNotHost = "anti_host_ns" PodAnnotationNetworkEnhancementNamespaceTypeNotHostPrefer = "anti_host_ns_preferred" // PodAnnotationNetworkEnhancementAffinityRestricted sets as true to indicate // we must ensure the numa affinity for nic devices, and we should admit failed if not possible PodAnnotationNetworkEnhancementAffinityRestricted = "topology_affinity_restricted" PodAnnotationNetworkEnhancementAffinityRestrictedTrue = "true" )
const variables for pod annotations about qos level enhancement in network
const ( // BalancedAllocation strategy favors nodes with balanced resource usage rate BalancedAllocation kubeschedulerconfig.ScoringStrategyType = "BalancedAllocation" // LeastNUMANodes strategy favors nodes which requires least amount of NUMA nodes to satisfy resource requests for given pod LeastNUMANodes kubeschedulerconfig.ScoringStrategyType = "LeastNUMANodes" )
const variables for node resource topology scoring strategy
const ( ReclaimedResourceMilliCPU v1.ResourceName = "resource.katalyst.kubewharf.io/reclaimed_millicpu" ReclaimedResourceMemory v1.ResourceName = "resource.katalyst.kubewharf.io/reclaimed_memory" )
const variables for resource names of reclaimed resource
const ( ResourceNetBandwidth v1.ResourceName = "resource.katalyst.kubewharf.io/net_bandwidth" ResourceMemoryBandwidth v1.ResourceName = "resource.katalyst.kubewharf.io/memory_bandwidth" )
const variables for resource names of guaranteed resource
const ( // ResourceAnnotationKeyResourceIdentifier nominates the key to override the default name // field in pod-resource-server (for qrm-related protocols); if the name field can't be // guaranteed to be unique in some cases, we can rely on this annotation to get unique keys // (to replace with the default name) ResourceAnnotationKeyResourceIdentifier = "katalyst.kubewharf.io/resource_identifier" // ResourceAnnotationKeyNICNetNSName nominates the key indicating net namespace name of the NIC ResourceAnnotationKeyNICNetNSName = "katalyst.kubewharf.io/netns_name" )
const variables for resource attributes of resources
const ( // WorkloadAnnotationSPDEnableKey provides a mechanism for white list when enabling spd, // if it's set as false, we should not maintain spd CR or calculate service profiling automatically. WorkloadAnnotationSPDEnableKey = "spd.katalyst.kubewharf.io/enable" WorkloadAnnotationSPDEnabled = "true" )
const variables for workload annotations about spd.
const ( // SPDAnnotationBaselineSentinelKey and SPDAnnotationExtendedBaselineSentinelKey are // updated by the SPD controller. They represent the sentinel pod among all pods managed // by this SPD. Agents or controllers can use these keys to determine if a pod falls within // the baseline by comparing it with the pod's createTime and podName. SPDAnnotationBaselineSentinelKey = "spd.katalyst.kubewharf.io/baselineSentinel" SPDAnnotationExtendedBaselineSentinelKey = "spd.katalyst.kubewharf.io/extendedBaselineSentinel" SPDBaselinePercentMax = 100 SPDBaselinePercentMin = 0 )
const variables for spd.
const ( // WorkloadAnnotationVPAEnabledKey disabled for a workload means that // we won't apply the recommended resources for pods belonging to this workload; // however, we may still perform the calculation logic and update the status if a vpa // CR is created for this workload WorkloadAnnotationVPAEnabledKey = "vpa.katalyst.kubewharf.io/enable" WorkloadAnnotationVPAEnabled = "true" WorkloadAnnotationVPANameKey = "vpa.katalyst.kubewharf.io/name" // WorkloadAnnotationVPASelectorKey is pod label selector for non-native workload WorkloadAnnotationVPASelectorKey = "vpa.katalyst.kubewharf.io/selector" )
const variables for workload annotations about vpa.
const ( VPAAnnotationVPARecNameKey = "vpa.katalyst.kubewharf.io/recName" VPAAnnotationWorkloadRetentionPolicyKey = "vpa.katalyst.kubewharf.io/retentionPolicy" VPAAnnotationWorkloadRetentionPolicyRetain = "retain" VPAAnnotationWorkloadRetentionPolicyDelete = "delete" )
const variables for workload annotations about vpaRec.
const (
PodAnnotationNetClassKey = "katalyst.kubewharf.io/net_class_id"
)
PodAnnotationNetClassKey is a const variable for pod annotation about net class.
const ( // PodAnnotationSPDNameKey is used to maintain corresponding spdName in pod // annotation to make metaServer to target its spd more conveniently. PodAnnotationSPDNameKey = "spd.katalyst.kubewharf.io/name" )
const (
// SPDAggMetricNameMemoryBandwidth is per core memory bandwidth
SPDAggMetricNameMemoryBandwidth = "memory_bandwidth"
)
metric names for aggregate metric
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type KCNRAnnotationCPUManagerPolicy ¶ added in v0.5.0
type KCNRAnnotationCPUManagerPolicy string
type KCNRAnnotationMemoryManagerPolicy ¶ added in v0.5.0
type KCNRAnnotationMemoryManagerPolicy string
type QRMPhase ¶ added in v0.4.0
type QRMPhase int
QRMPhase is the phase of each rpc call in qrm plugin
type ResourcePluginPolicyName ¶ added in v0.4.0
type ResourcePluginPolicyName string
ResourcePluginPolicyName is a string type for QosResourceManager plugin policy
const ( // ResourcePluginPolicyNameDynamic is the name of the dynamic policy. ResourcePluginPolicyNameDynamic ResourcePluginPolicyName = "dynamic" // ResourcePluginPolicyNameNative is the name of the native policy. ResourcePluginPolicyNameNative ResourcePluginPolicyName = "native" // ResourcePluginPolicyNameStatic is the name of the static policy. ResourcePluginPolicyNameStatic ResourcePluginPolicyName = "static" )
const variables for QRM plugin policy name