kubernetes: k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates Index | Examples | Files

package predicates

import "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"



Package Files

error.go metadata.go predicates.go utils.go


var (
    // The predicate name tries to be consistent with the predicate names used in DefaultAlgorithmProvider defined in
    // defaults.go (which tend to be stable for backward compatibility)
    ErrDiskConflict              = newPredicateFailureError("NoDiskConflict")
    ErrVolumeZoneConflict        = newPredicateFailureError("NoVolumeZoneConflict")
    ErrNodeSelectorNotMatch      = newPredicateFailureError("MatchNodeSelector")
    ErrPodAffinityNotMatch       = newPredicateFailureError("MatchInterPodAffinity")
    ErrTaintsTolerationsNotMatch = newPredicateFailureError("PodToleratesNodeTaints")
    ErrPodNotMatchHostName       = newPredicateFailureError("HostName")
    ErrPodNotFitsHostPorts       = newPredicateFailureError("PodFitsHostPorts")
    ErrNodeLabelPresenceViolated = newPredicateFailureError("CheckNodeLabelPresence")
    ErrServiceAffinityViolated   = newPredicateFailureError("CheckServiceAffinity")
    ErrMaxVolumeCountExceeded    = newPredicateFailureError("MaxVolumeCount")
    ErrNodeUnderMemoryPressure   = newPredicateFailureError("NodeUnderMemoryPressure")
    ErrNodeUnderDiskPressure     = newPredicateFailureError("NodeUnderDiskPressure")
    // ErrFakePredicate is used for test only. The fake predicates returning false also returns error
    // as ErrFakePredicate.
    ErrFakePredicate = newPredicateFailureError("FakePredicateError")

func AddUnsetLabelsToMap Uses

func AddUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet labels.Set)

AddUnsetLabelsToMap backfills missing values with values we find in a map.

func CheckNodeDiskPressurePredicate Uses

func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)

CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node reporting disk pressure condition.

func CheckNodeMemoryPressurePredicate Uses

func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)

CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node reporting memory pressure condition.

func CreateSelectorFromLabels Uses

func CreateSelectorFromLabels(aL map[string]string) labels.Selector

CreateSelectorFromLabels is used to define a selector that corresponds to the keys in a map.

func EssentialPredicates Uses

func EssentialPredicates(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)

EssentialPredicates are the predicates that all pods, including critical pods, need.

func FilterPodsByNamespace Uses

func FilterPodsByNamespace(pods []*v1.Pod, ns string) []*v1.Pod

FilterPodsByNamespace filters pods outside a namespace from the given list.

func FindLabelsInSet Uses

func FindLabelsInSet(labelsToKeep []string, selector labels.Set) map[string]string

FindLabelsInSet gets as many key/value pairs as possible out of a label set.

ExampleUtils is a https://blog.golang.org/examples styled unit test.


labelSubset := labels.Set{}
labelSubset["label1"] = "value1"
labelSubset["label2"] = "value2"
// Lets make believe that these pods are on the cluster.
// Utility functions will inspect their labels, filter them, and so on.
nsPods := []*v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "pod1",
            Namespace: "ns1",
            Labels: map[string]string{
                "label1": "wontSeeThis",
                "label2": "wontSeeThis",
                "label3": "will_see_this",
    },  // first pod which will be used via the utilities
        ObjectMeta: metav1.ObjectMeta{
            Name:      "pod2",
            Namespace: "ns1",

        ObjectMeta: metav1.ObjectMeta{
            Name: "pod3ThatWeWontSee",
fmt.Println(FindLabelsInSet([]string{"label1", "label2", "label3"}, nsPods[0].ObjectMeta.Labels)["label3"])
AddUnsetLabelsToMap(labelSubset, []string{"label1", "label2", "label3"}, nsPods[0].ObjectMeta.Labels)

for _, pod := range FilterPodsByNamespace(nsPods, "ns1") {
    fmt.Print(pod.Name, ",")



func GeneralPredicates Uses

func GeneralPredicates(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)

func GetResourceRequest Uses

func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource

Returns a *schedulercache.Resource that covers the largest width in each resource dimension. Because init-containers run sequentially, we collect the max in each dimension iteratively. In contrast, we sum the resource vectors for regular containers since they run simultaneously.



    CPU: 2
    Memory: 1G
    CPU: 2
    Memory: 3G
    CPU: 2
    Memory: 1G
    CPU: 1
    Memory: 1G

Result: CPU: 3, Memory: 3G

func GetUsedPorts Uses

func GetUsedPorts(pods ...*v1.Pod) map[int]bool

func NewMaxPDVolumeCountPredicate Uses

func NewMaxPDVolumeCountPredicate(filter VolumeFilter, maxVolumes int, pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate

NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the number of volumes which match a filter that it requests, and those that are already present. The maximum number is configurable to accommodate different systems.

The predicate looks for both volumes used directly, as well as PVC volumes that are backed by relevant volume types, counts the number of unique volumes, and rejects the new pod if it would place the total count over the maximum.

func NewNodeLabelPredicate Uses

func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicate

func NewPodAffinityPredicate Uses

func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) algorithm.FitPredicate

func NewPredicateMetadataFactory Uses

func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.MetadataProducer

func NewServiceAffinityPredicate Uses

func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister algorithm.ServiceLister, nodeInfo NodeInfo, labels []string) (algorithm.FitPredicate, PredicateMetadataModifier)

func NewVolumeZonePredicate Uses

func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolumeClaimInfo) algorithm.FitPredicate

VolumeZonePredicate evaluates if a pod can fit due to the volumes it requests, given that some volumes may have zone scheduling constraints. The requirement is that any volume zone-labels must match the equivalent zone-labels on the node. It is OK for the node to have more zone-label constraints (for example, a hypothetical replicated volume might allow region-wide access)

Currently this is only supported with PersistentVolumeClaims, and looks to the labels only on the bound PersistentVolume.

Working with volumes declared inline in the pod specification (i.e. not using a PersistentVolume) is likely to be harder, as it would require determining the zone of a volume during scheduling, and that is likely to require calling out to the cloud provider. It seems that we are moving away from inline volume declarations anyway.

func NoDiskConflict Uses

func NoDiskConflict(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)

NoDiskConflict evaluates if a pod can fit due to the volumes it requests, and those that are already mounted. If there is already a volume mounted on that node, another pod that uses the same volume can't be scheduled there. This is GCE, Amazon EBS, and Ceph RBD specific for now: - GCE PD allows multiple mounts as long as they're all read-only - AWS EBS forbids any two pods mounting the same volume ID - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image. - ISCSI forbids if any two pods share at least same IQN, LUN and Target TODO: migrate this into some per-volume specific code?

func PodFitsHost Uses

func PodFitsHost(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)

func PodFitsHostPorts Uses

func PodFitsHostPorts(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)

func PodFitsResources Uses

func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)

func PodSelectorMatches Uses

func PodSelectorMatches(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)

func PodToleratesNodeTaints Uses

func PodToleratesNodeTaints(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)

func RegisterPredicatePrecomputation Uses

func RegisterPredicatePrecomputation(predicateName string, precomp PredicateMetadataModifier)

type CachedNodeInfo Uses

type CachedNodeInfo struct {

func (*CachedNodeInfo) GetNodeInfo Uses

func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error)

GetNodeInfo returns cached data for the node 'id'.

type CachedPersistentVolumeClaimInfo Uses

type CachedPersistentVolumeClaimInfo struct {

CachedPersistentVolumeClaimInfo implements PersistentVolumeClaimInfo

func (*CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo Uses

func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error)

GetPersistentVolumeClaimInfo fetches the claim in specified namespace with specified name

type CachedPersistentVolumeInfo Uses

type CachedPersistentVolumeInfo struct {

CachedPersistentVolumeInfo implements PersistentVolumeInfo

func (*CachedPersistentVolumeInfo) GetPersistentVolumeInfo Uses

func (c *CachedPersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error)

type FailureReason Uses

type FailureReason struct {
    // contains filtered or unexported fields

func NewFailureReason Uses

func NewFailureReason(msg string) *FailureReason

func (*FailureReason) GetReason Uses

func (e *FailureReason) GetReason() string

type InsufficientResourceError Uses

type InsufficientResourceError struct {
    // resourceName is the name of the resource that is insufficient
    ResourceName v1.ResourceName
    // contains filtered or unexported fields

InsufficientResourceError is an error type that indicates what kind of resource limit is hit and caused the unfitting failure.

func NewInsufficientResourceError Uses

func NewInsufficientResourceError(resourceName v1.ResourceName, requested, used, capacity int64) *InsufficientResourceError

func (*InsufficientResourceError) Error Uses

func (e *InsufficientResourceError) Error() string

func (*InsufficientResourceError) GetInsufficientAmount Uses

func (e *InsufficientResourceError) GetInsufficientAmount() int64

func (*InsufficientResourceError) GetReason Uses

func (e *InsufficientResourceError) GetReason() string

type MaxPDVolumeCountChecker Uses

type MaxPDVolumeCountChecker struct {
    // contains filtered or unexported fields

type NodeInfo Uses

type NodeInfo interface {
    GetNodeInfo(nodeID string) (*v1.Node, error)

Other types for predicate functions...

type NodeLabelChecker Uses

type NodeLabelChecker struct {
    // contains filtered or unexported fields

func (*NodeLabelChecker) CheckNodeLabelPresence Uses

func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)

CheckNodeLabelPresence checks whether all of the specified labels exist on a node or not, regardless of their value. If "presence" is false, then it returns false if any of the requested labels matches any of the node's labels, otherwise it returns true. If "presence" is true, then it returns false if any of the requested labels does not match any of the node's labels, otherwise it returns true.

Consider the cases where the nodes are placed in regions/zones/racks and these are identified by labels. In some cases, it is required that only nodes that are part of ANY of the defined regions/zones/racks be selected.

Alternately, eliminating nodes that have a certain label, regardless of value, is also useful. A node may have a label with "retiring" as key and the date as the value, and it may be desirable to avoid scheduling new pods on this node.

type PersistentVolumeClaimInfo Uses

type PersistentVolumeClaimInfo interface {
    GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error)

type PersistentVolumeInfo Uses

type PersistentVolumeInfo interface {
    GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error)

type PodAffinityChecker Uses

type PodAffinityChecker struct {
    // contains filtered or unexported fields

func (*PodAffinityChecker) InterPodAffinityMatches Uses

func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error)

type PredicateFailureError Uses

type PredicateFailureError struct {
    PredicateName string

func (*PredicateFailureError) Error Uses

func (e *PredicateFailureError) Error() string

func (*PredicateFailureError) GetReason Uses

func (e *PredicateFailureError) GetReason() string

type PredicateMetadataFactory Uses

type PredicateMetadataFactory struct {
    // contains filtered or unexported fields

func (*PredicateMetadataFactory) GetMetadata Uses

func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulercache.NodeInfo) interface{}

GetMetadata returns the predicateMetadata used which will be used by various predicates.

type PredicateMetadataModifier Uses

type PredicateMetadataModifier func(pm *predicateMetadata)

predicatePrecomputations: Helper types/variables...

type ServiceAffinity Uses

type ServiceAffinity struct {
    // contains filtered or unexported fields

type VolumeFilter Uses

type VolumeFilter struct {
    // Filter normal volumes
    FilterVolume           func(vol *v1.Volume) (id string, relevant bool)
    FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool)

VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps

var AzureDiskVolumeFilter VolumeFilter = VolumeFilter{
    FilterVolume: func(vol *v1.Volume) (string, bool) {
        if vol.AzureDisk != nil {
            return vol.AzureDisk.DiskName, true
        return "", false

    FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
        if pv.Spec.AzureDisk != nil {
            return pv.Spec.AzureDisk.DiskName, true
        return "", false

AzureDiskVolumeFilter is a VolumeFilter for filtering Azure Disk Volumes

var EBSVolumeFilter VolumeFilter = VolumeFilter{
    FilterVolume: func(vol *v1.Volume) (string, bool) {
        if vol.AWSElasticBlockStore != nil {
            return vol.AWSElasticBlockStore.VolumeID, true
        return "", false

    FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
        if pv.Spec.AWSElasticBlockStore != nil {
            return pv.Spec.AWSElasticBlockStore.VolumeID, true
        return "", false

EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes

var GCEPDVolumeFilter VolumeFilter = VolumeFilter{
    FilterVolume: func(vol *v1.Volume) (string, bool) {
        if vol.GCEPersistentDisk != nil {
            return vol.GCEPersistentDisk.PDName, true
        return "", false

    FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
        if pv.Spec.GCEPersistentDisk != nil {
            return pv.Spec.GCEPersistentDisk.PDName, true
        return "", false

GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes

type VolumeZoneChecker Uses

type VolumeZoneChecker struct {
    // contains filtered or unexported fields

Package predicates imports 19 packages (graph) and is imported by 137 packages. Updated 2017-05-22. Refresh now. Tools for package owners.