Documentation ¶
Index ¶
- Constants
- type PodScheduleResult
- type PodScheduler
- type RunningUnitInfo
- type SchedulerHooks
- type SchedulingUnitInfo
- func (s *SchedulingUnitInfo) FinishUnitTraceContext(name string, fields ...attribute.KeyValue)
- func (s *SchedulingUnitInfo) Reset()
- func (s *SchedulingUnitInfo) SetUnitTraceContextFields(name string, fields ...attribute.KeyValue)
- func (s *SchedulingUnitInfo) SetUnitTraceContextTags(name string, tags ...attribute.KeyValue)
- func (s *SchedulingUnitInfo) StartUnitTraceContext(parentSpanName, name string, options ...trace.SpanOption)
- type UnitPreemptionResult
- type UnitResult
- type UnitScheduler
- type UnitSchedulingResult
Constants ¶
View Source
const ( // ReturnAction means scheduler will record the result and stop scheduling ReturnAction = "RecordAndReturn" // ContinueAction means scheduler will record the result and continue scheduling ContinueAction = "RecordAndContinue" // SchedulingAction is the event action used in scheduling process SchedulingAction = "Scheduling" // PreemptingAction is the event action used in preempting process PreemptingAction = "Preempting" )
Variables ¶
This section is empty.
Functions ¶
This section is empty.
Types ¶
type PodScheduleResult ¶
type PodScheduleResult struct { // Number of nodes scheduler evaluated on one pod scheduled NumberOfEvaluatedNodes int // Number of feasible nodes on one pod scheduled NumberOfFeasibleNodes int // Name of the scheduler suggest host SuggestedHost string // other feasible nodes, besides SuggestedHost, // we will only fill this with direct schedulable nodes at the first stage // TODO: this may be useful if we don't want to cache scheduling results in scheduler cache // figure out if we can re-use the scheduling results in scheduling main workflow (per unit, per node group) OtherFeasibleNodes []string // NominatedNode stores the nominated node information for preemption NominatedNode *framework.NominatedNode Victims *framework.Victims // ATTENTION: We reserve this field to take into account the possibility of modifying the original data (SchedulingUnitInfo.NodeToStatusMapByTemplate) // in the event of pod failure at any stage. FilteredNodesStatuses framework.NodeToStatusMap }
PodScheduleResult represents the result of one pod scheduled or preempted. It will contain the final selected Node, along with the selected intermediate information or nominated node information.
type PodScheduler ¶
type PodScheduler interface { GetFrameworkForPod(*v1.Pod) (framework.SchedulerFramework, error) SetFrameworkForPod(framework.SchedulerFramework) GetPreemptionFrameworkForPod(*v1.Pod) framework.SchedulerPreemptionFramework SetPreemptionFrameworkForPod(framework.SchedulerPreemptionFramework) ScheduleInSpecificNodeGroup(ctx context.Context, scheduleFramework framework.SchedulerFramework, unitState, commonPreemptionState, podState *framework.CycleState, pod *v1.Pod, nodeGroup framework.NodeGroup, request *framework.UnitSchedulingRequest, nodeToStatus framework.NodeToStatusMap) (podScheduleResult PodScheduleResult, err error) PreemptInSpecificNodeGroup(ctx context.Context, scheduleFramework framework.SchedulerFramework, preemptFramework framework.SchedulerPreemptionFramework, unitState, commonPreemptionState, podState *framework.CycleState, pod *v1.Pod, nodeGroup framework.NodeGroup, nodeToStatus framework.NodeToStatusMap, cachedNominatedNodes *framework.CachedNominatedNodes) (podScheduleResult PodScheduleResult, err error) DisablePreemption() bool Close() }
PodScheduler is the interface that handles scheduling work.
type RunningUnitInfo ¶
type RunningUnitInfo struct { QueuedPodInfo *framework.QueuedPodInfo Trace tracing.SchedulingTrace // clonedPod is used to store those changes to the original pods in the workflow // e.g. span initialization, reservation info, preemption info ... // this will be cloned at the beginning of the workflow // TODO: for those unschedulable pods, this deep copy cost is unnecessary ClonedPod *v1.Pod NodeToPlace string Victims *framework.Victims }
type SchedulerHooks ¶
type SchedulerHooks interface { PodScheduler() PodScheduler EventRecorder() events.EventRecorder BootstrapSchedulePod(ctx context.Context, pod *v1.Pod, podTrace tracing.SchedulingTrace, nodeGroup string) (string, framework.SchedulerFramework, framework.SchedulerPreemptionFramework, *framework.CycleState, error) ReservePod(ctx context.Context, clonedPod *v1.Pod, scheduleResult PodScheduleResult) (string, error) }
TODO: revisit this.
type SchedulingUnitInfo ¶
type SchedulingUnitInfo struct { UnitKey string MinMember int AllMember int // everScheduled indicates whether we have ever scheduled some instances of this unit // if unit is a pod group, this means whether min member instances have been scheduled EverScheduled bool // key is running unit key DispatchedPods map[string]*RunningUnitInfo QueuedUnitInfo *framework.QueuedUnitInfo // if true, we need to reset the dispatched pods of this unit to pending state // and let the dispatcher re-dispatch them to another scheduler instance DispatchToAnotherScheduler bool SchedulingSuccessfully bool UnitCycleState *framework.CycleState // ATTENTION: The following fields will be RESET during scheduling. // So we don't need to care about them during initialization. NotScheduledPodKeysByTemplate map[string]sets.String ScheduledIndex int NodeToStatusMapByTemplate framework.NodeToStatusMapByTemplate }
func (*SchedulingUnitInfo) FinishUnitTraceContext ¶
func (s *SchedulingUnitInfo) FinishUnitTraceContext(name string, fields ...attribute.KeyValue)
FinishUnitTraceContext finishes trace context of each RunningUnitInfo
func (*SchedulingUnitInfo) Reset ¶
func (s *SchedulingUnitInfo) Reset()
Reset cleans up some fields at the beginning of NodeGroup Scheduling and Preempting.
func (*SchedulingUnitInfo) SetUnitTraceContextFields ¶
func (s *SchedulingUnitInfo) SetUnitTraceContextFields(name string, fields ...attribute.KeyValue)
func (*SchedulingUnitInfo) SetUnitTraceContextTags ¶
func (s *SchedulingUnitInfo) SetUnitTraceContextTags(name string, tags ...attribute.KeyValue)
func (*SchedulingUnitInfo) StartUnitTraceContext ¶
func (s *SchedulingUnitInfo) StartUnitTraceContext(parentSpanName, name string, options ...trace.SpanOption)
StartUnitTraceContext starts trace context for each RunningUnitInfo
type UnitPreemptionResult ¶
type UnitPreemptionResult struct { // Details is used to describe the detail of each attempted Pod. Details *interpretabity.UnitSchedulingDetails SuccessfulPods []string FailedPods []string }
func (*UnitPreemptionResult) Marshal ¶
func (usr *UnitPreemptionResult) Marshal() string
type UnitResult ¶
type UnitResult struct { Successfully bool // Details is used to describe the detail of each attempted Pod. Details *interpretabity.UnitSchedulingDetails SuccessfulPods []string FailedPods []string }
func NewUnitResult ¶
func NewUnitResult(schedulingResult bool, allPods int) *UnitResult
NewUnitResult returns a new UnitResult.
func TransferToUnitResult ¶
func TransferToUnitResult(unitInfo *SchedulingUnitInfo, details *interpretabity.UnitSchedulingDetails, successfulPods []string, failedPods []string) *UnitResult
type UnitScheduler ¶
type UnitSchedulingResult ¶
type UnitSchedulingResult struct { // Details is used to describe the detail of each attempted Pod. Details *interpretabity.UnitSchedulingDetails // TODO: store NodeToStatus etc. SuccessfulPods []string FailedPods []string }
func (*UnitSchedulingResult) Marshal ¶
func (usr *UnitSchedulingResult) Marshal() string
Click to show internal directories.
Click to hide internal directories.