extendertest

package
v0.69.0
Published: Apr 19, 2023 License: Apache-2.0 Imports: 27 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

This section is empty.

Functions

func DynamicAllocationSparkPods

func DynamicAllocationSparkPods(sparkApplicationID string, minExecutors int, maxExecutors int) []v1.Pod

DynamicAllocationSparkPods returns a list of pods corresponding to a Spark Application with 1 driver and maxExecutors executors, with the proper dynamic allocation annotations set for the given min and max executor counts
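
For illustration, a minimal sketch of using this helper in a test. The import path is an assumption based on the module layout and may differ:

	package extendertest_test

	import (
		"testing"

		// Assumed import path for this package; adjust to the actual module layout.
		"github.com/palantir/k8s-spark-scheduler/internal/extender/extendertest"
	)

	func TestDynamicAllocationPodCounts(t *testing.T) {
		// One driver pod plus maxExecutors executor pods, annotated with the
		// min and max executor counts used for dynamic allocation.
		pods := extendertest.DynamicAllocationSparkPods("spark-app-1", 1, 5)
		if len(pods) != 6 {
			t.Fatalf("expected 1 driver + 5 executors, got %d pods", len(pods))
		}
	}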

func DynamicAllocationSparkPodsWithSizes added in v0.55.0

func DynamicAllocationSparkPodsWithSizes(
	sparkApplicationID string,
	minExecutors int,
	maxExecutors int,
	driverMem, driverCPU, executorMem, executorCPU string,
) []v1.Pod

DynamicAllocationSparkPodsWithSizes behaves like DynamicAllocationSparkPods except that it also allows the caller to configure the driver and executor memory and CPU requests

func NewNode

func NewNode(name string, zone string) v1.Node

NewNode creates a new dummy node with the given name in the given zone
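
A short sketch of building a two-zone test topology with NewNode; the node and zone names are arbitrary:

	nodeA := extendertest.NewNode("node-a", "zone-a")
	nodeB := extendertest.NewNode("node-b", "zone-b")
	// The node names are what the Harness scheduling helpers take as their
	// nodeNames argument.
	nodeNames := []string{nodeA.Name, nodeB.Name}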

func StaticAllocationSparkPods

func StaticAllocationSparkPods(sparkApplicationID string, numExecutors int) []v1.Pod

StaticAllocationSparkPods returns a list of pods corresponding to a Spark Application with 1 driver and numExecutors executors, with the proper static allocation annotations set

func StaticAllocationSparkPodsWithExecutorGPUs added in v0.2.0

func StaticAllocationSparkPodsWithExecutorGPUs(sparkApplicationID string, numExecutors int) []v1.Pod

StaticAllocationSparkPodsWithExecutorGPUs returns a list of pods corresponding to a Spark Application with 1 driver and numExecutors executors, with the proper static allocation annotations set; the executors additionally request one GPU

func StaticAllocationSparkPodsWithSizes added in v0.55.0

func StaticAllocationSparkPodsWithSizes(
	sparkApplicationID string,
	numExecutors int,
	driverMem, driverCPU, executorMem, executorCPU string,
) []v1.Pod

StaticAllocationSparkPodsWithSizes behaves like StaticAllocationSparkPods except that it also allows the caller to configure the driver and executor memory and CPU requests

Types

type Harness

type Harness struct {
	Extender                 *extender.SparkSchedulerExtender
	UnschedulablePodMarker   *extender.UnschedulablePodMarker
	PodStore                 cache.Store
	NodeStore                cache.Store
	ResourceReservationCache *sscache.ResourceReservationCache
	SoftReservationStore     *sscache.SoftReservationStore
	Ctx                      context.Context
}

Harness bundles a SparkSchedulerExtender with in-memory k8s stores for use in tests

func NewTestExtender

func NewTestExtender(binpackAlgo string, objects ...runtime.Object) (*Harness, error)

NewTestExtender returns a new extender test harness that uses the given binpack algorithm and is initialized with the provided k8s objects
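
A hedged sketch of constructing a harness and scheduling a static-allocation application against it. The binpack algorithm name is a placeholder (the supported values are defined by the extender package), the node is passed as a pointer because the v1 types satisfy runtime.Object through their pointer receivers, and the example assumes the dummy node created by NewNode is large enough for the default pod sizes:

	func TestStaticAllocationSchedules(t *testing.T) {
		node := extendertest.NewNode("node-1", "zone-1")

		// "tightly-pack" is an assumed algorithm name; check the extender
		// package for the binpack algorithms it actually registers.
		harness, err := extendertest.NewTestExtender("tightly-pack", &node)
		if err != nil {
			t.Fatalf("failed to create test harness: %v", err)
		}

		nodeNames := []string{node.Name}
		for _, pod := range extendertest.StaticAllocationSparkPods("spark-app-1", 2) {
			harness.AssertSuccessfulSchedule(t, pod, nodeNames, "expected driver and executors to fit on node-1")
		}
	}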

func (*Harness) AssertFailedSchedule

func (h *Harness) AssertFailedSchedule(t *testing.T, pod v1.Pod, nodeNames []string, errorDetails string)

AssertFailedSchedule tries to schedule the provided pod and fails the test if scheduling succeeds

func (*Harness) AssertSuccessfulSchedule

func (h *Harness) AssertSuccessfulSchedule(t *testing.T, pod v1.Pod, nodeNames []string, errorDetails string)

AssertSuccessfulSchedule tries to schedule the provided pod and fails the test if scheduling does not succeed

func (*Harness) AssertSuccessfulScheduleOnNode added in v0.55.0

func (h *Harness) AssertSuccessfulScheduleOnNode(t *testing.T, pod v1.Pod, nodeNames []string, expectedNode string, errorDetails string)

AssertSuccessfulScheduleOnNode tries to schedule the provided pod and fails the test if the pod is not scheduled on the expected node
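
Continuing the sketch from NewTestExtender above (harness, nodeNames, and a pods slice from one of the pod helpers), and assuming the driver is the first pod in that slice, the Assert* helpers wrap Schedule with test failure handling; oversizedPod below is a hypothetical pod whose requests exceed the node's capacity:

	driver := pods[0]
	harness.AssertSuccessfulScheduleOnNode(t, driver, nodeNames, "node-1", "driver should land on node-1")

	// oversizedPod is a hypothetical pod that cannot fit on any candidate
	// node, so the predicate is expected to reject it.
	harness.AssertFailedSchedule(t, oversizedPod, nodeNames, "oversized pod should not schedule")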

func (*Harness) Schedule

func (h *Harness) Schedule(t *testing.T, pod v1.Pod, nodeNames []string) *schedulerapi.ExtenderFilterResult

Schedule calls the extender's Predicate method for the given pod and node names and returns the extender's filter result
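
Schedule can also be called directly when a test wants to inspect the raw response instead of asserting on it; Error and NodeNames are fields of the scheduler extender API's ExtenderFilterResult:

	result := harness.Schedule(t, pods[0], nodeNames)
	if result.Error != "" {
		t.Fatalf("predicate failed: %s", result.Error)
	}
	if result.NodeNames != nil {
		t.Logf("candidate nodes: %v", *result.NodeNames)
	}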

func (*Harness) TerminatePod

func (h *Harness) TerminatePod(pod v1.Pod) error

TerminatePod terminates an existing pod
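
Finally, a sketch of using TerminatePod to free capacity before rescheduling. It assumes pods[1] is an executor from the earlier slice and that the extender lets a new executor reuse the terminated executor's reservation; replacementExecutor is a hypothetical pod standing in for that new executor:

	// Terminate one executor from the previous sketch...
	if err := harness.TerminatePod(pods[1]); err != nil {
		t.Fatalf("failed to terminate executor: %v", err)
	}
	// ...and expect a replacement executor to schedule into the freed space.
	harness.AssertSuccessfulSchedule(t, replacementExecutor, nodeNames, "replacement executor should fit after termination")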
