dataproc

package
v0.0.0-...-e165f0f Latest
Warning

This package is not in the latest version of its module.

Go to latest
Published: Sep 1, 2020 License: Apache-2.0 Imports: 13 Imported by: 0

Documentation

Index

Constants

This section is empty.

Variables

View Source
var ClusterOperationStatus_State_name = map[int32]string{
	0: "UNKNOWN",
	1: "PENDING",
	2: "RUNNING",
	3: "DONE",
}
View Source
var ClusterOperationStatus_State_value = map[string]int32{
	"UNKNOWN": 0,
	"PENDING": 1,
	"RUNNING": 2,
	"DONE":    3,
}
View Source
var ClusterStatus_State_name = map[int32]string{
	0: "UNKNOWN",
	1: "CREATING",
	2: "RUNNING",
	3: "ERROR",
	4: "DELETING",
	5: "UPDATING",
	6: "STOPPING",
	7: "STOPPED",
	8: "STARTING",
}
View Source
var ClusterStatus_State_value = map[string]int32{
	"UNKNOWN":  0,
	"CREATING": 1,
	"RUNNING":  2,
	"ERROR":    3,
	"DELETING": 4,
	"UPDATING": 5,
	"STOPPING": 6,
	"STOPPED":  7,
	"STARTING": 8,
}
View Source
var ClusterStatus_Substate_name = map[int32]string{
	0: "UNSPECIFIED",
	1: "UNHEALTHY",
	2: "STALE_STATUS",
}
View Source
var ClusterStatus_Substate_value = map[string]int32{
	"UNSPECIFIED":  0,
	"UNHEALTHY":    1,
	"STALE_STATUS": 2,
}
View Source
var Component_name = map[int32]string{
	0:  "COMPONENT_UNSPECIFIED",
	5:  "ANACONDA",
	13: "DOCKER",
	9:  "DRUID",
	14: "FLINK",
	3:  "HIVE_WEBHCAT",
	1:  "JUPYTER",
	7:  "KERBEROS",
	6:  "PRESTO",
	12: "RANGER",
	10: "SOLR",
	4:  "ZEPPELIN",
	8:  "ZOOKEEPER",
}
View Source
var Component_value = map[string]int32{
	"COMPONENT_UNSPECIFIED": 0,
	"ANACONDA":              5,
	"DOCKER":                13,
	"DRUID":                 9,
	"FLINK":                 14,
	"HIVE_WEBHCAT":          3,
	"JUPYTER":               1,
	"KERBEROS":              7,
	"PRESTO":                6,
	"RANGER":                12,
	"SOLR":                  10,
	"ZEPPELIN":              4,
	"ZOOKEEPER":             8,
}
View Source
var JobStatus_State_name = map[int32]string{
	0: "STATE_UNSPECIFIED",
	1: "PENDING",
	8: "SETUP_DONE",
	2: "RUNNING",
	3: "CANCEL_PENDING",
	7: "CANCEL_STARTED",
	4: "CANCELLED",
	5: "DONE",
	6: "ERROR",
	9: "ATTEMPT_FAILURE",
}
View Source
var JobStatus_State_value = map[string]int32{
	"STATE_UNSPECIFIED": 0,
	"PENDING":           1,
	"SETUP_DONE":        8,
	"RUNNING":           2,
	"CANCEL_PENDING":    3,
	"CANCEL_STARTED":    7,
	"CANCELLED":         4,
	"DONE":              5,
	"ERROR":             6,
	"ATTEMPT_FAILURE":   9,
}
View Source
var JobStatus_Substate_name = map[int32]string{
	0: "UNSPECIFIED",
	1: "SUBMITTED",
	2: "QUEUED",
	3: "STALE_STATUS",
}
View Source
var JobStatus_Substate_value = map[string]int32{
	"UNSPECIFIED":  0,
	"SUBMITTED":    1,
	"QUEUED":       2,
	"STALE_STATUS": 3,
}
View Source
var ListJobsRequest_JobStateMatcher_name = map[int32]string{
	0: "ALL",
	1: "ACTIVE",
	2: "NON_ACTIVE",
}
View Source
var ListJobsRequest_JobStateMatcher_value = map[string]int32{
	"ALL":        0,
	"ACTIVE":     1,
	"NON_ACTIVE": 2,
}
View Source
var LoggingConfig_Level_name = map[int32]string{
	0: "LEVEL_UNSPECIFIED",
	1: "ALL",
	2: "TRACE",
	3: "DEBUG",
	4: "INFO",
	5: "WARN",
	6: "ERROR",
	7: "FATAL",
	8: "OFF",
}
View Source
var LoggingConfig_Level_value = map[string]int32{
	"LEVEL_UNSPECIFIED": 0,
	"ALL":               1,
	"TRACE":             2,
	"DEBUG":             3,
	"INFO":              4,
	"WARN":              5,
	"ERROR":             6,
	"FATAL":             7,
	"OFF":               8,
}
View Source
var ReservationAffinity_Type_name = map[int32]string{
	0: "TYPE_UNSPECIFIED",
	1: "NO_RESERVATION",
	2: "ANY_RESERVATION",
	3: "SPECIFIC_RESERVATION",
}
View Source
var ReservationAffinity_Type_value = map[string]int32{
	"TYPE_UNSPECIFIED":     0,
	"NO_RESERVATION":       1,
	"ANY_RESERVATION":      2,
	"SPECIFIC_RESERVATION": 3,
}
View Source
var WorkflowMetadata_State_name = map[int32]string{
	0: "UNKNOWN",
	1: "PENDING",
	2: "RUNNING",
	3: "DONE",
}
View Source
var WorkflowMetadata_State_value = map[string]int32{
	"UNKNOWN": 0,
	"PENDING": 1,
	"RUNNING": 2,
	"DONE":    3,
}
View Source
var WorkflowNode_NodeState_name = map[int32]string{
	0: "NODE_STATUS_UNSPECIFIED",
	1: "BLOCKED",
	2: "RUNNABLE",
	3: "RUNNING",
	4: "COMPLETED",
	5: "FAILED",
}
View Source
var WorkflowNode_NodeState_value = map[string]int32{
	"NODE_STATUS_UNSPECIFIED": 0,
	"BLOCKED":                 1,
	"RUNNABLE":                2,
	"RUNNING":                 3,
	"COMPLETED":               4,
	"FAILED":                  5,
}
View Source
var YarnApplication_State_name = map[int32]string{
	0: "STATE_UNSPECIFIED",
	1: "NEW",
	2: "NEW_SAVING",
	3: "SUBMITTED",
	4: "ACCEPTED",
	5: "RUNNING",
	6: "FINISHED",
	7: "FAILED",
	8: "KILLED",
}
View Source
var YarnApplication_State_value = map[string]int32{
	"STATE_UNSPECIFIED": 0,
	"NEW":               1,
	"NEW_SAVING":        2,
	"SUBMITTED":         3,
	"ACCEPTED":          4,
	"RUNNING":           5,
	"FINISHED":          6,
	"FAILED":            7,
	"KILLED":            8,
}

Functions

func RegisterAutoscalingPolicyServiceServer

func RegisterAutoscalingPolicyServiceServer(s *grpc.Server, srv AutoscalingPolicyServiceServer)

func RegisterClusterControllerServer

func RegisterClusterControllerServer(s *grpc.Server, srv ClusterControllerServer)

func RegisterJobControllerServer

func RegisterJobControllerServer(s *grpc.Server, srv JobControllerServer)

func RegisterWorkflowTemplateServiceServer

func RegisterWorkflowTemplateServiceServer(s *grpc.Server, srv WorkflowTemplateServiceServer)

Types

type AcceleratorConfig

type AcceleratorConfig struct {
	// Full URL, partial URI, or short name of the accelerator type resource to
	// expose to this instance. See
	// [Compute Engine
	// AcceleratorTypes](https://cloud.google.com/compute/docs/reference/beta/acceleratorTypes)
	//
	// Examples
	// * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `nvidia-tesla-k80`
	//
	// **Auto Zone Exception**: If you are using the Dataproc
	// [Auto Zone
	// Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
	// feature, you must use the short name of the accelerator type
	// resource, for example, `nvidia-tesla-k80`.
	AcceleratorTypeUri string `protobuf:"bytes,1,opt,name=accelerator_type_uri,json=acceleratorTypeUri,proto3" json:"accelerator_type_uri,omitempty"`
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount     int32    `protobuf:"varint,2,opt,name=accelerator_count,json=acceleratorCount,proto3" json:"accelerator_count,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Specifies the type and number of accelerator cards attached to the instances of an instance group (see [GPUs on Compute Engine](https://cloud.google.com/compute/docs/gpus/)).

func (*AcceleratorConfig) Descriptor

func (*AcceleratorConfig) Descriptor() ([]byte, []int)

func (*AcceleratorConfig) GetAcceleratorCount

func (m *AcceleratorConfig) GetAcceleratorCount() int32

func (*AcceleratorConfig) GetAcceleratorTypeUri

func (m *AcceleratorConfig) GetAcceleratorTypeUri() string

func (*AcceleratorConfig) ProtoMessage

func (*AcceleratorConfig) ProtoMessage()

func (*AcceleratorConfig) Reset

func (m *AcceleratorConfig) Reset()

func (*AcceleratorConfig) String

func (m *AcceleratorConfig) String() string

func (*AcceleratorConfig) XXX_DiscardUnknown

func (m *AcceleratorConfig) XXX_DiscardUnknown()

func (*AcceleratorConfig) XXX_Marshal

func (m *AcceleratorConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AcceleratorConfig) XXX_Merge

func (m *AcceleratorConfig) XXX_Merge(src proto.Message)

func (*AcceleratorConfig) XXX_Size

func (m *AcceleratorConfig) XXX_Size() int

func (*AcceleratorConfig) XXX_Unmarshal

func (m *AcceleratorConfig) XXX_Unmarshal(b []byte) error

type AutoscalingConfig

type AutoscalingConfig struct {
	// Optional. The autoscaling policy used by the cluster.
	//
	// Only resource names including projectid and location (region) are valid.
	// Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`
	// * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`
	//
	// Note that the policy must be in the same project and Dataproc region.
	PolicyUri            string   `protobuf:"bytes,1,opt,name=policy_uri,json=policyUri,proto3" json:"policy_uri,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Autoscaling Policy config associated with the cluster.

func (*AutoscalingConfig) Descriptor

func (*AutoscalingConfig) Descriptor() ([]byte, []int)

func (*AutoscalingConfig) GetPolicyUri

func (m *AutoscalingConfig) GetPolicyUri() string

func (*AutoscalingConfig) ProtoMessage

func (*AutoscalingConfig) ProtoMessage()

func (*AutoscalingConfig) Reset

func (m *AutoscalingConfig) Reset()

func (*AutoscalingConfig) String

func (m *AutoscalingConfig) String() string

func (*AutoscalingConfig) XXX_DiscardUnknown

func (m *AutoscalingConfig) XXX_DiscardUnknown()

func (*AutoscalingConfig) XXX_Marshal

func (m *AutoscalingConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AutoscalingConfig) XXX_Merge

func (m *AutoscalingConfig) XXX_Merge(src proto.Message)

func (*AutoscalingConfig) XXX_Size

func (m *AutoscalingConfig) XXX_Size() int

func (*AutoscalingConfig) XXX_Unmarshal

func (m *AutoscalingConfig) XXX_Unmarshal(b []byte) error

type AutoscalingPolicy

type AutoscalingPolicy struct {
	// Required. The policy id.
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). Cannot begin or end with underscore
	// or hyphen. Must consist of between 3 and 50 characters.
	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
	// Output only. The "resource name" of the autoscaling policy, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.autoscalingPolicies`, the resource name of the
	//   policy has the following format:
	//   `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`
	//
	// * For `projects.locations.autoscalingPolicies`, the resource name of the
	//   policy has the following format:
	//   `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`
	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
	// Required. Autoscaling algorithm for policy.
	//
	// Types that are valid to be assigned to Algorithm:
	//	*AutoscalingPolicy_BasicAlgorithm
	Algorithm isAutoscalingPolicy_Algorithm `protobuf_oneof:"algorithm"`
	// Required. Describes how the autoscaler will operate for primary workers.
	WorkerConfig *InstanceGroupAutoscalingPolicyConfig `protobuf:"bytes,4,opt,name=worker_config,json=workerConfig,proto3" json:"worker_config,omitempty"`
	// Optional. Describes how the autoscaler will operate for secondary workers.
	SecondaryWorkerConfig *InstanceGroupAutoscalingPolicyConfig `` /* 126-byte string literal not displayed */
	XXX_NoUnkeyedLiteral  struct{}                              `json:"-"`
	XXX_unrecognized      []byte                                `json:"-"`
	XXX_sizecache         int32                                 `json:"-"`
}

Describes an autoscaling policy for Dataproc cluster autoscaler.

func (*AutoscalingPolicy) Descriptor

func (*AutoscalingPolicy) Descriptor() ([]byte, []int)

func (*AutoscalingPolicy) GetAlgorithm

func (m *AutoscalingPolicy) GetAlgorithm() isAutoscalingPolicy_Algorithm

func (*AutoscalingPolicy) GetBasicAlgorithm

func (m *AutoscalingPolicy) GetBasicAlgorithm() *BasicAutoscalingAlgorithm

func (*AutoscalingPolicy) GetId

func (m *AutoscalingPolicy) GetId() string

func (*AutoscalingPolicy) GetName

func (m *AutoscalingPolicy) GetName() string

func (*AutoscalingPolicy) GetSecondaryWorkerConfig

func (m *AutoscalingPolicy) GetSecondaryWorkerConfig() *InstanceGroupAutoscalingPolicyConfig

func (*AutoscalingPolicy) GetWorkerConfig

func (*AutoscalingPolicy) ProtoMessage

func (*AutoscalingPolicy) ProtoMessage()

func (*AutoscalingPolicy) Reset

func (m *AutoscalingPolicy) Reset()

func (*AutoscalingPolicy) String

func (m *AutoscalingPolicy) String() string

func (*AutoscalingPolicy) XXX_DiscardUnknown

func (m *AutoscalingPolicy) XXX_DiscardUnknown()

func (*AutoscalingPolicy) XXX_Marshal

func (m *AutoscalingPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*AutoscalingPolicy) XXX_Merge

func (m *AutoscalingPolicy) XXX_Merge(src proto.Message)

func (*AutoscalingPolicy) XXX_OneofWrappers

func (*AutoscalingPolicy) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*AutoscalingPolicy) XXX_Size

func (m *AutoscalingPolicy) XXX_Size() int

func (*AutoscalingPolicy) XXX_Unmarshal

func (m *AutoscalingPolicy) XXX_Unmarshal(b []byte) error

type AutoscalingPolicyServiceClient

type AutoscalingPolicyServiceClient interface {
	// Creates new autoscaling policy.
	CreateAutoscalingPolicy(ctx context.Context, in *CreateAutoscalingPolicyRequest, opts ...grpc.CallOption) (*AutoscalingPolicy, error)
	// Updates (replaces) autoscaling policy.
	//
	// Disabled check for update_mask, because all updates will be full
	// replacements.
	UpdateAutoscalingPolicy(ctx context.Context, in *UpdateAutoscalingPolicyRequest, opts ...grpc.CallOption) (*AutoscalingPolicy, error)
	// Retrieves autoscaling policy.
	GetAutoscalingPolicy(ctx context.Context, in *GetAutoscalingPolicyRequest, opts ...grpc.CallOption) (*AutoscalingPolicy, error)
	// Lists autoscaling policies in the project.
	ListAutoscalingPolicies(ctx context.Context, in *ListAutoscalingPoliciesRequest, opts ...grpc.CallOption) (*ListAutoscalingPoliciesResponse, error)
	// Deletes an autoscaling policy. It is an error to delete an autoscaling
	// policy that is in use by one or more clusters.
	DeleteAutoscalingPolicy(ctx context.Context, in *DeleteAutoscalingPolicyRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}

AutoscalingPolicyServiceClient is the client API for AutoscalingPolicyService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

type AutoscalingPolicyServiceServer

type AutoscalingPolicyServiceServer interface {
	// Creates new autoscaling policy.
	CreateAutoscalingPolicy(context.Context, *CreateAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
	// Updates (replaces) autoscaling policy.
	//
	// Disabled check for update_mask, because all updates will be full
	// replacements.
	UpdateAutoscalingPolicy(context.Context, *UpdateAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
	// Retrieves autoscaling policy.
	GetAutoscalingPolicy(context.Context, *GetAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
	// Lists autoscaling policies in the project.
	ListAutoscalingPolicies(context.Context, *ListAutoscalingPoliciesRequest) (*ListAutoscalingPoliciesResponse, error)
	// Deletes an autoscaling policy. It is an error to delete an autoscaling
	// policy that is in use by one or more clusters.
	DeleteAutoscalingPolicy(context.Context, *DeleteAutoscalingPolicyRequest) (*empty.Empty, error)
}

AutoscalingPolicyServiceServer is the server API for AutoscalingPolicyService service.

type AutoscalingPolicy_BasicAlgorithm

type AutoscalingPolicy_BasicAlgorithm struct {
	BasicAlgorithm *BasicAutoscalingAlgorithm `protobuf:"bytes,3,opt,name=basic_algorithm,json=basicAlgorithm,proto3,oneof"`
}

type BasicAutoscalingAlgorithm

type BasicAutoscalingAlgorithm struct {
	// Required. YARN autoscaling configuration.
	YarnConfig *BasicYarnAutoscalingConfig `protobuf:"bytes,1,opt,name=yarn_config,json=yarnConfig,proto3" json:"yarn_config,omitempty"`
	// Optional. Duration between scaling events. A scaling period starts after
	// the update operation from the previous event has completed.
	//
	// Bounds: [2m, 1d]. Default: 2m.
	CooldownPeriod       *duration.Duration `protobuf:"bytes,2,opt,name=cooldown_period,json=cooldownPeriod,proto3" json:"cooldown_period,omitempty"`
	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
	XXX_unrecognized     []byte             `json:"-"`
	XXX_sizecache        int32              `json:"-"`
}

Basic algorithm for autoscaling.

func (*BasicAutoscalingAlgorithm) Descriptor

func (*BasicAutoscalingAlgorithm) Descriptor() ([]byte, []int)

func (*BasicAutoscalingAlgorithm) GetCooldownPeriod

func (m *BasicAutoscalingAlgorithm) GetCooldownPeriod() *duration.Duration

func (*BasicAutoscalingAlgorithm) GetYarnConfig

func (*BasicAutoscalingAlgorithm) ProtoMessage

func (*BasicAutoscalingAlgorithm) ProtoMessage()

func (*BasicAutoscalingAlgorithm) Reset

func (m *BasicAutoscalingAlgorithm) Reset()

func (*BasicAutoscalingAlgorithm) String

func (m *BasicAutoscalingAlgorithm) String() string

func (*BasicAutoscalingAlgorithm) XXX_DiscardUnknown

func (m *BasicAutoscalingAlgorithm) XXX_DiscardUnknown()

func (*BasicAutoscalingAlgorithm) XXX_Marshal

func (m *BasicAutoscalingAlgorithm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*BasicAutoscalingAlgorithm) XXX_Merge

func (m *BasicAutoscalingAlgorithm) XXX_Merge(src proto.Message)

func (*BasicAutoscalingAlgorithm) XXX_Size

func (m *BasicAutoscalingAlgorithm) XXX_Size() int

func (*BasicAutoscalingAlgorithm) XXX_Unmarshal

func (m *BasicAutoscalingAlgorithm) XXX_Unmarshal(b []byte) error

type BasicYarnAutoscalingConfig

type BasicYarnAutoscalingConfig struct {
	// Required. Timeout for YARN graceful decommissioning of Node Managers.
	// Specifies the duration to wait for jobs to complete before forcefully
	// removing workers (and potentially interrupting jobs). Only applicable to
	// downscaling operations.
	//
	// Bounds: [0s, 1d].
	GracefulDecommissionTimeout *duration.Duration `` /* 144-byte string literal not displayed */
	// Required. Fraction of average pending memory in the last cooldown period
	// for which to add workers. A scale-up factor of 1.0 will result in scaling
	// up so that there is no pending memory remaining after the update (more
	// aggressive scaling). A scale-up factor closer to 0 will result in a smaller
	// magnitude of scaling up (less aggressive scaling).
	//
	// Bounds: [0.0, 1.0].
	ScaleUpFactor float64 `protobuf:"fixed64,1,opt,name=scale_up_factor,json=scaleUpFactor,proto3" json:"scale_up_factor,omitempty"`
	// Required. Fraction of average pending memory in the last cooldown period
	// for which to remove workers. A scale-down factor of 1 will result in
	// scaling down so that there is no available memory remaining after the
	// update (more aggressive scaling). A scale-down factor of 0 disables
	// removing workers, which can be beneficial for autoscaling a single job.
	//
	// Bounds: [0.0, 1.0].
	ScaleDownFactor float64 `protobuf:"fixed64,2,opt,name=scale_down_factor,json=scaleDownFactor,proto3" json:"scale_down_factor,omitempty"`
	// Optional. Minimum scale-up threshold as a fraction of total cluster size
	// before scaling occurs. For example, in a 20-worker cluster, a threshold of
	// 0.1 means the autoscaler must recommend at least a 2-worker scale-up for
	// the cluster to scale. A threshold of 0 means the autoscaler will scale up
	// on any recommended change.
	//
	// Bounds: [0.0, 1.0]. Default: 0.0.
	ScaleUpMinWorkerFraction float64 `` /* 141-byte string literal not displayed */
	// Optional. Minimum scale-down threshold as a fraction of total cluster size
	// before scaling occurs. For example, in a 20-worker cluster, a threshold of
	// 0.1 means the autoscaler must recommend at least a 2 worker scale-down for
	// the cluster to scale. A threshold of 0 means the autoscaler will scale down
	// on any recommended change.
	//
	// Bounds: [0.0, 1.0]. Default: 0.0.
	ScaleDownMinWorkerFraction float64  `` /* 147-byte string literal not displayed */
	XXX_NoUnkeyedLiteral       struct{} `json:"-"`
	XXX_unrecognized           []byte   `json:"-"`
	XXX_sizecache              int32    `json:"-"`
}

Basic autoscaling configurations for YARN.

func (*BasicYarnAutoscalingConfig) Descriptor

func (*BasicYarnAutoscalingConfig) Descriptor() ([]byte, []int)

func (*BasicYarnAutoscalingConfig) GetGracefulDecommissionTimeout

func (m *BasicYarnAutoscalingConfig) GetGracefulDecommissionTimeout() *duration.Duration

func (*BasicYarnAutoscalingConfig) GetScaleDownFactor

func (m *BasicYarnAutoscalingConfig) GetScaleDownFactor() float64

func (*BasicYarnAutoscalingConfig) GetScaleDownMinWorkerFraction

func (m *BasicYarnAutoscalingConfig) GetScaleDownMinWorkerFraction() float64

func (*BasicYarnAutoscalingConfig) GetScaleUpFactor

func (m *BasicYarnAutoscalingConfig) GetScaleUpFactor() float64

func (*BasicYarnAutoscalingConfig) GetScaleUpMinWorkerFraction

func (m *BasicYarnAutoscalingConfig) GetScaleUpMinWorkerFraction() float64

func (*BasicYarnAutoscalingConfig) ProtoMessage

func (*BasicYarnAutoscalingConfig) ProtoMessage()

func (*BasicYarnAutoscalingConfig) Reset

func (m *BasicYarnAutoscalingConfig) Reset()

func (*BasicYarnAutoscalingConfig) String

func (m *BasicYarnAutoscalingConfig) String() string

func (*BasicYarnAutoscalingConfig) XXX_DiscardUnknown

func (m *BasicYarnAutoscalingConfig) XXX_DiscardUnknown()

func (*BasicYarnAutoscalingConfig) XXX_Marshal

func (m *BasicYarnAutoscalingConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*BasicYarnAutoscalingConfig) XXX_Merge

func (m *BasicYarnAutoscalingConfig) XXX_Merge(src proto.Message)

func (*BasicYarnAutoscalingConfig) XXX_Size

func (m *BasicYarnAutoscalingConfig) XXX_Size() int

func (*BasicYarnAutoscalingConfig) XXX_Unmarshal

func (m *BasicYarnAutoscalingConfig) XXX_Unmarshal(b []byte) error

type CancelJobRequest

type CancelJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId                string   `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to cancel a job.

func (*CancelJobRequest) Descriptor

func (*CancelJobRequest) Descriptor() ([]byte, []int)

func (*CancelJobRequest) GetJobId

func (m *CancelJobRequest) GetJobId() string

func (*CancelJobRequest) GetProjectId

func (m *CancelJobRequest) GetProjectId() string

func (*CancelJobRequest) GetRegion

func (m *CancelJobRequest) GetRegion() string

func (*CancelJobRequest) ProtoMessage

func (*CancelJobRequest) ProtoMessage()

func (*CancelJobRequest) Reset

func (m *CancelJobRequest) Reset()

func (*CancelJobRequest) String

func (m *CancelJobRequest) String() string

func (*CancelJobRequest) XXX_DiscardUnknown

func (m *CancelJobRequest) XXX_DiscardUnknown()

func (*CancelJobRequest) XXX_Marshal

func (m *CancelJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CancelJobRequest) XXX_Merge

func (m *CancelJobRequest) XXX_Merge(src proto.Message)

func (*CancelJobRequest) XXX_Size

func (m *CancelJobRequest) XXX_Size() int

func (*CancelJobRequest) XXX_Unmarshal

func (m *CancelJobRequest) XXX_Unmarshal(b []byte) error

type Cluster

type Cluster struct {
	// Required. The Google Cloud Platform project ID that the cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The cluster name. Cluster names within a project must be
	// unique. Names of deleted clusters can be reused.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Required. The cluster config. Note that Dataproc may set
	// default values, and values may change when clusters are updated.
	Config *ClusterConfig `protobuf:"bytes,3,opt,name=config,proto3" json:"config,omitempty"`
	// Optional. The labels to associate with this cluster.
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to [RFC
	// 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
	// associated with a cluster.
	Labels map[string]string `` /* 153-byte string literal not displayed */
	// Output only. Cluster status.
	Status *ClusterStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
	// Output only. The previous cluster status.
	StatusHistory []*ClusterStatus `protobuf:"bytes,7,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
	// Output only. A cluster UUID (Unique Universal Identifier). Dataproc
	// generates this value when it creates the cluster.
	ClusterUuid string `protobuf:"bytes,6,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
	//
	// **Beta Feature**: This report is available for testing purposes only. It
	// may be changed before final release.
	Metrics              *ClusterMetrics `protobuf:"bytes,9,opt,name=metrics,proto3" json:"metrics,omitempty"`
	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
	XXX_unrecognized     []byte          `json:"-"`
	XXX_sizecache        int32           `json:"-"`
}

Describes the identifying information, config, and status of a cluster of Compute Engine instances.

func (*Cluster) Descriptor

func (*Cluster) Descriptor() ([]byte, []int)

func (*Cluster) GetClusterName

func (m *Cluster) GetClusterName() string

func (*Cluster) GetClusterUuid

func (m *Cluster) GetClusterUuid() string

func (*Cluster) GetConfig

func (m *Cluster) GetConfig() *ClusterConfig

func (*Cluster) GetLabels

func (m *Cluster) GetLabels() map[string]string

func (*Cluster) GetMetrics

func (m *Cluster) GetMetrics() *ClusterMetrics

func (*Cluster) GetProjectId

func (m *Cluster) GetProjectId() string

func (*Cluster) GetStatus

func (m *Cluster) GetStatus() *ClusterStatus

func (*Cluster) GetStatusHistory

func (m *Cluster) GetStatusHistory() []*ClusterStatus

func (*Cluster) ProtoMessage

func (*Cluster) ProtoMessage()

func (*Cluster) Reset

func (m *Cluster) Reset()

func (*Cluster) String

func (m *Cluster) String() string

func (*Cluster) XXX_DiscardUnknown

func (m *Cluster) XXX_DiscardUnknown()

func (*Cluster) XXX_Marshal

func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Cluster) XXX_Merge

func (m *Cluster) XXX_Merge(src proto.Message)

func (*Cluster) XXX_Size

func (m *Cluster) XXX_Size() int

func (*Cluster) XXX_Unmarshal

func (m *Cluster) XXX_Unmarshal(b []byte) error

type ClusterConfig

type ClusterConfig struct {
	// Optional. A Cloud Storage bucket used to stage job
	// dependencies, config files, and job driver console output.
	// If you do not specify a staging bucket, Cloud
	// Dataproc will determine a Cloud Storage location (US,
	// ASIA, or EU) for your cluster's staging bucket according to the
	// Compute Engine zone where your cluster is deployed, and then create
	// and manage this project-level, per-location bucket (see
	// [Dataproc staging
	// bucket](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
	ConfigBucket string `protobuf:"bytes,1,opt,name=config_bucket,json=configBucket,proto3" json:"config_bucket,omitempty"`
	// Optional. The shared Compute Engine config settings for
	// all instances in a cluster.
	GceClusterConfig *GceClusterConfig `protobuf:"bytes,8,opt,name=gce_cluster_config,json=gceClusterConfig,proto3" json:"gce_cluster_config,omitempty"`
	// Optional. The Compute Engine config settings for
	// the master instance in a cluster.
	MasterConfig *InstanceGroupConfig `protobuf:"bytes,9,opt,name=master_config,json=masterConfig,proto3" json:"master_config,omitempty"`
	// Optional. The Compute Engine config settings for
	// worker instances in a cluster.
	WorkerConfig *InstanceGroupConfig `protobuf:"bytes,10,opt,name=worker_config,json=workerConfig,proto3" json:"worker_config,omitempty"`
	// Optional. The Compute Engine config settings for
	// additional worker instances in a cluster.
	SecondaryWorkerConfig *InstanceGroupConfig `` /* 127-byte string literal not displayed */
	// Optional. The config settings for software inside the cluster.
	SoftwareConfig *SoftwareConfig `protobuf:"bytes,13,opt,name=software_config,json=softwareConfig,proto3" json:"software_config,omitempty"`
	// Optional. The config setting for auto delete cluster schedule.
	LifecycleConfig *LifecycleConfig `protobuf:"bytes,14,opt,name=lifecycle_config,json=lifecycleConfig,proto3" json:"lifecycle_config,omitempty"`
	// Optional. Commands to execute on each node after config is
	// completed. By default, executables are run on master and all worker nodes.
	// You can test a node's <code>role</code> metadata to run an executable on
	// a master or worker node, as shown below using `curl` (you can also use
	// `wget`):
	//
	//     ROLE=$(curl -H Metadata-Flavor:Google
	//     http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
	//     if [[ "${ROLE}" == 'Master' ]]; then
	//       ... master specific actions ...
	//     else
	//       ... worker specific actions ...
	//     fi
	InitializationActions []*NodeInitializationAction `protobuf:"bytes,11,rep,name=initialization_actions,json=initializationActions,proto3" json:"initialization_actions,omitempty"`
	// Optional. Encryption settings for the cluster.
	EncryptionConfig *EncryptionConfig `protobuf:"bytes,15,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
	// Optional. Autoscaling config for the policy associated with the cluster.
	// Cluster does not autoscale if this field is unset.
	AutoscalingConfig *AutoscalingConfig `protobuf:"bytes,16,opt,name=autoscaling_config,json=autoscalingConfig,proto3" json:"autoscaling_config,omitempty"`
	// Optional. Port/endpoint configuration for this cluster
	EndpointConfig *EndpointConfig `protobuf:"bytes,17,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"`
	// Optional. Security related configuration.
	SecurityConfig *SecurityConfig `protobuf:"bytes,18,opt,name=security_config,json=securityConfig,proto3" json:"security_config,omitempty"`
	// Optional. The Kubernetes Engine config for Dataproc clusters deployed to Kubernetes.
	// Setting this is considered mutually exclusive with Compute Engine-based
	// options such as `gce_cluster_config`, `master_config`, `worker_config`,
	// `secondary_worker_config`, and `autoscaling_config`.
	GkeClusterConfig     *GkeClusterConfig `protobuf:"bytes,19,opt,name=gke_cluster_config,json=gkeClusterConfig,proto3" json:"gke_cluster_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

The cluster config.

func (*ClusterConfig) Descriptor

func (*ClusterConfig) Descriptor() ([]byte, []int)

func (*ClusterConfig) GetAutoscalingConfig

func (m *ClusterConfig) GetAutoscalingConfig() *AutoscalingConfig

func (*ClusterConfig) GetConfigBucket

func (m *ClusterConfig) GetConfigBucket() string

func (*ClusterConfig) GetEncryptionConfig

func (m *ClusterConfig) GetEncryptionConfig() *EncryptionConfig

func (*ClusterConfig) GetEndpointConfig

func (m *ClusterConfig) GetEndpointConfig() *EndpointConfig

func (*ClusterConfig) GetGceClusterConfig

func (m *ClusterConfig) GetGceClusterConfig() *GceClusterConfig

func (*ClusterConfig) GetGkeClusterConfig

func (m *ClusterConfig) GetGkeClusterConfig() *GkeClusterConfig

func (*ClusterConfig) GetInitializationActions

func (m *ClusterConfig) GetInitializationActions() []*NodeInitializationAction

func (*ClusterConfig) GetLifecycleConfig

func (m *ClusterConfig) GetLifecycleConfig() *LifecycleConfig

func (*ClusterConfig) GetMasterConfig

func (m *ClusterConfig) GetMasterConfig() *InstanceGroupConfig

func (*ClusterConfig) GetSecondaryWorkerConfig

func (m *ClusterConfig) GetSecondaryWorkerConfig() *InstanceGroupConfig

func (*ClusterConfig) GetSecurityConfig

func (m *ClusterConfig) GetSecurityConfig() *SecurityConfig

func (*ClusterConfig) GetSoftwareConfig

func (m *ClusterConfig) GetSoftwareConfig() *SoftwareConfig

func (*ClusterConfig) GetWorkerConfig

func (m *ClusterConfig) GetWorkerConfig() *InstanceGroupConfig

func (*ClusterConfig) ProtoMessage

func (*ClusterConfig) ProtoMessage()

func (*ClusterConfig) Reset

func (m *ClusterConfig) Reset()

func (*ClusterConfig) String

func (m *ClusterConfig) String() string

func (*ClusterConfig) XXX_DiscardUnknown

func (m *ClusterConfig) XXX_DiscardUnknown()

func (*ClusterConfig) XXX_Marshal

func (m *ClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterConfig) XXX_Merge

func (m *ClusterConfig) XXX_Merge(src proto.Message)

func (*ClusterConfig) XXX_Size

func (m *ClusterConfig) XXX_Size() int

func (*ClusterConfig) XXX_Unmarshal

func (m *ClusterConfig) XXX_Unmarshal(b []byte) error

type ClusterControllerClient

type ClusterControllerClient interface {
	// Creates a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
	CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Updates a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
	UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Deletes a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
	DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Gets the resource representation for a cluster in a project.
	GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error)
	// Lists all regions/{region}/clusters in a project alphabetically.
	ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
	// Gets cluster diagnostic information. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
	// After the operation completes,
	// [Operation.response][google.longrunning.Operation.response]
	// contains
	// [Empty][google.protobuf.Empty].
	DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
}

ClusterControllerClient is the client API for ClusterController service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

type ClusterControllerServer

type ClusterControllerServer interface {
	// Creates a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
	CreateCluster(context.Context, *CreateClusterRequest) (*longrunning.Operation, error)
	// Updates a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
	UpdateCluster(context.Context, *UpdateClusterRequest) (*longrunning.Operation, error)
	// Deletes a cluster in a project. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
	DeleteCluster(context.Context, *DeleteClusterRequest) (*longrunning.Operation, error)
	// Gets the resource representation for a cluster in a project.
	GetCluster(context.Context, *GetClusterRequest) (*Cluster, error)
	// Lists all regions/{region}/clusters in a project alphabetically.
	ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
	// Gets cluster diagnostic information. The returned
	// [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#clusteroperationmetadata).
	// After the operation completes,
	// [Operation.response][google.longrunning.Operation.response]
	// contains
	// [Empty][google.protobuf.Empty].
	DiagnoseCluster(context.Context, *DiagnoseClusterRequest) (*longrunning.Operation, error)
}

ClusterControllerServer is the server API for ClusterController service.

type ClusterMetrics

type ClusterMetrics struct {
	// The HDFS metrics.
	HdfsMetrics map[string]int64 `` /* 183-byte string literal not displayed */
	// The YARN metrics.
	YarnMetrics          map[string]int64 `` /* 183-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
	XXX_unrecognized     []byte           `json:"-"`
	XXX_sizecache        int32            `json:"-"`
}

Contains cluster daemon metrics, such as HDFS and YARN stats.

**Beta Feature**: This report is available for testing purposes only. It may be changed before final release.

func (*ClusterMetrics) Descriptor

func (*ClusterMetrics) Descriptor() ([]byte, []int)

func (*ClusterMetrics) GetHdfsMetrics

func (m *ClusterMetrics) GetHdfsMetrics() map[string]int64

func (*ClusterMetrics) GetYarnMetrics

func (m *ClusterMetrics) GetYarnMetrics() map[string]int64

func (*ClusterMetrics) ProtoMessage

func (*ClusterMetrics) ProtoMessage()

func (*ClusterMetrics) Reset

func (m *ClusterMetrics) Reset()

func (*ClusterMetrics) String

func (m *ClusterMetrics) String() string

func (*ClusterMetrics) XXX_DiscardUnknown

func (m *ClusterMetrics) XXX_DiscardUnknown()

func (*ClusterMetrics) XXX_Marshal

func (m *ClusterMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterMetrics) XXX_Merge

func (m *ClusterMetrics) XXX_Merge(src proto.Message)

func (*ClusterMetrics) XXX_Size

func (m *ClusterMetrics) XXX_Size() int

func (*ClusterMetrics) XXX_Unmarshal

func (m *ClusterMetrics) XXX_Unmarshal(b []byte) error

type ClusterOperation

type ClusterOperation struct {
	// Output only. The id of the cluster operation.
	OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
	// Output only. Error, if operation failed.
	Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
	// Output only. Indicates the operation is done.
	Done                 bool     `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The cluster operation triggered by a workflow.

func (*ClusterOperation) Descriptor

func (*ClusterOperation) Descriptor() ([]byte, []int)

func (*ClusterOperation) GetDone

func (m *ClusterOperation) GetDone() bool

func (*ClusterOperation) GetError

func (m *ClusterOperation) GetError() string

func (*ClusterOperation) GetOperationId

func (m *ClusterOperation) GetOperationId() string

func (*ClusterOperation) ProtoMessage

func (*ClusterOperation) ProtoMessage()

func (*ClusterOperation) Reset

func (m *ClusterOperation) Reset()

func (*ClusterOperation) String

func (m *ClusterOperation) String() string

func (*ClusterOperation) XXX_DiscardUnknown

func (m *ClusterOperation) XXX_DiscardUnknown()

func (*ClusterOperation) XXX_Marshal

func (m *ClusterOperation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterOperation) XXX_Merge

func (m *ClusterOperation) XXX_Merge(src proto.Message)

func (*ClusterOperation) XXX_Size

func (m *ClusterOperation) XXX_Size() int

func (*ClusterOperation) XXX_Unmarshal

func (m *ClusterOperation) XXX_Unmarshal(b []byte) error

type ClusterOperationMetadata

type ClusterOperationMetadata struct {
	// Output only. Name of the cluster for the operation.
	ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Output only. Cluster UUID for the operation.
	ClusterUuid string `protobuf:"bytes,8,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Output only. Current operation status.
	Status *ClusterOperationStatus `protobuf:"bytes,9,opt,name=status,proto3" json:"status,omitempty"`
	// Output only. The previous operation status.
	StatusHistory []*ClusterOperationStatus `protobuf:"bytes,10,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
	// Output only. The operation type.
	OperationType string `protobuf:"bytes,11,opt,name=operation_type,json=operationType,proto3" json:"operation_type,omitempty"`
	// Output only. Short description of operation.
	Description string `protobuf:"bytes,12,opt,name=description,proto3" json:"description,omitempty"`
	// Output only. Labels associated with the operation
	Labels map[string]string `` /* 154-byte string literal not displayed */
	// Output only. Warnings encountered during operation execution.
	Warnings             []string `protobuf:"bytes,14,rep,name=warnings,proto3" json:"warnings,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Metadata describing the operation.

func (*ClusterOperationMetadata) Descriptor

func (*ClusterOperationMetadata) Descriptor() ([]byte, []int)

func (*ClusterOperationMetadata) GetClusterName

func (m *ClusterOperationMetadata) GetClusterName() string

func (*ClusterOperationMetadata) GetClusterUuid

func (m *ClusterOperationMetadata) GetClusterUuid() string

func (*ClusterOperationMetadata) GetDescription

func (m *ClusterOperationMetadata) GetDescription() string

func (*ClusterOperationMetadata) GetLabels

func (m *ClusterOperationMetadata) GetLabels() map[string]string

func (*ClusterOperationMetadata) GetOperationType

func (m *ClusterOperationMetadata) GetOperationType() string

func (*ClusterOperationMetadata) GetStatus

func (*ClusterOperationMetadata) GetStatusHistory

func (m *ClusterOperationMetadata) GetStatusHistory() []*ClusterOperationStatus

func (*ClusterOperationMetadata) GetWarnings

func (m *ClusterOperationMetadata) GetWarnings() []string

func (*ClusterOperationMetadata) ProtoMessage

func (*ClusterOperationMetadata) ProtoMessage()

func (*ClusterOperationMetadata) Reset

func (m *ClusterOperationMetadata) Reset()

func (*ClusterOperationMetadata) String

func (m *ClusterOperationMetadata) String() string

func (*ClusterOperationMetadata) XXX_DiscardUnknown

func (m *ClusterOperationMetadata) XXX_DiscardUnknown()

func (*ClusterOperationMetadata) XXX_Marshal

func (m *ClusterOperationMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterOperationMetadata) XXX_Merge

func (m *ClusterOperationMetadata) XXX_Merge(src proto.Message)

func (*ClusterOperationMetadata) XXX_Size

func (m *ClusterOperationMetadata) XXX_Size() int

func (*ClusterOperationMetadata) XXX_Unmarshal

func (m *ClusterOperationMetadata) XXX_Unmarshal(b []byte) error

type ClusterOperationStatus

type ClusterOperationStatus struct {
	// Output only. A message containing the operation state.
	State ClusterOperationStatus_State `` /* 128-byte string literal not displayed */
	// Output only. A message containing the detailed operation state.
	InnerState string `protobuf:"bytes,2,opt,name=inner_state,json=innerState,proto3" json:"inner_state,omitempty"`
	// Output only. A message containing any operation metadata details.
	Details string `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"`
	// Output only. The time this state was entered.
	StateStartTime       *timestamp.Timestamp `protobuf:"bytes,4,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

The status of the operation.

func (*ClusterOperationStatus) Descriptor

func (*ClusterOperationStatus) Descriptor() ([]byte, []int)

func (*ClusterOperationStatus) GetDetails

func (m *ClusterOperationStatus) GetDetails() string

func (*ClusterOperationStatus) GetInnerState

func (m *ClusterOperationStatus) GetInnerState() string

func (*ClusterOperationStatus) GetState

func (*ClusterOperationStatus) GetStateStartTime

func (m *ClusterOperationStatus) GetStateStartTime() *timestamp.Timestamp

func (*ClusterOperationStatus) ProtoMessage

func (*ClusterOperationStatus) ProtoMessage()

func (*ClusterOperationStatus) Reset

func (m *ClusterOperationStatus) Reset()

func (*ClusterOperationStatus) String

func (m *ClusterOperationStatus) String() string

func (*ClusterOperationStatus) XXX_DiscardUnknown

func (m *ClusterOperationStatus) XXX_DiscardUnknown()

func (*ClusterOperationStatus) XXX_Marshal

func (m *ClusterOperationStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterOperationStatus) XXX_Merge

func (m *ClusterOperationStatus) XXX_Merge(src proto.Message)

func (*ClusterOperationStatus) XXX_Size

func (m *ClusterOperationStatus) XXX_Size() int

func (*ClusterOperationStatus) XXX_Unmarshal

func (m *ClusterOperationStatus) XXX_Unmarshal(b []byte) error

type ClusterOperationStatus_State

type ClusterOperationStatus_State int32

The operation state.

const (
	// Unused.
	ClusterOperationStatus_UNKNOWN ClusterOperationStatus_State = 0
	// The operation has been created.
	ClusterOperationStatus_PENDING ClusterOperationStatus_State = 1
	// The operation is running.
	ClusterOperationStatus_RUNNING ClusterOperationStatus_State = 2
	// The operation is done; either cancelled or completed.
	ClusterOperationStatus_DONE ClusterOperationStatus_State = 3
)

func (ClusterOperationStatus_State) EnumDescriptor

func (ClusterOperationStatus_State) EnumDescriptor() ([]byte, []int)

func (ClusterOperationStatus_State) String

type ClusterSelector

type ClusterSelector struct {
	// Optional. The zone where workflow process executes. This parameter does not
	// affect the selection of the cluster.
	//
	// If unspecified, the zone of the first cluster matching the selector
	// is used.
	Zone string `protobuf:"bytes,1,opt,name=zone,proto3" json:"zone,omitempty"`
	// Required. The cluster labels. Cluster must have all labels
	// to match.
	ClusterLabels        map[string]string `` /* 188-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

A selector that chooses target cluster for jobs based on metadata.

func (*ClusterSelector) Descriptor

func (*ClusterSelector) Descriptor() ([]byte, []int)

func (*ClusterSelector) GetClusterLabels

func (m *ClusterSelector) GetClusterLabels() map[string]string

func (*ClusterSelector) GetZone

func (m *ClusterSelector) GetZone() string

func (*ClusterSelector) ProtoMessage

func (*ClusterSelector) ProtoMessage()

func (*ClusterSelector) Reset

func (m *ClusterSelector) Reset()

func (*ClusterSelector) String

func (m *ClusterSelector) String() string

func (*ClusterSelector) XXX_DiscardUnknown

func (m *ClusterSelector) XXX_DiscardUnknown()

func (*ClusterSelector) XXX_Marshal

func (m *ClusterSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterSelector) XXX_Merge

func (m *ClusterSelector) XXX_Merge(src proto.Message)

func (*ClusterSelector) XXX_Size

func (m *ClusterSelector) XXX_Size() int

func (*ClusterSelector) XXX_Unmarshal

func (m *ClusterSelector) XXX_Unmarshal(b []byte) error

type ClusterStatus

type ClusterStatus struct {
	// Output only. The cluster's state.
	State ClusterStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.ClusterStatus_State" json:"state,omitempty"`
	// Output only. Optional details of cluster's state.
	Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
	// Output only. Time when this state was entered (see JSON representation of
	// [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
	StateStartTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// Output only. Additional state information that includes
	// status reported by the agent.
	Substate             ClusterStatus_Substate `` /* 128-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
	XXX_unrecognized     []byte                 `json:"-"`
	XXX_sizecache        int32                  `json:"-"`
}

The status of a cluster and its instances.

func (*ClusterStatus) Descriptor

func (*ClusterStatus) Descriptor() ([]byte, []int)

func (*ClusterStatus) GetDetail

func (m *ClusterStatus) GetDetail() string

func (*ClusterStatus) GetState

func (m *ClusterStatus) GetState() ClusterStatus_State

func (*ClusterStatus) GetStateStartTime

func (m *ClusterStatus) GetStateStartTime() *timestamp.Timestamp

func (*ClusterStatus) GetSubstate

func (m *ClusterStatus) GetSubstate() ClusterStatus_Substate

func (*ClusterStatus) ProtoMessage

func (*ClusterStatus) ProtoMessage()

func (*ClusterStatus) Reset

func (m *ClusterStatus) Reset()

func (*ClusterStatus) String

func (m *ClusterStatus) String() string

func (*ClusterStatus) XXX_DiscardUnknown

func (m *ClusterStatus) XXX_DiscardUnknown()

func (*ClusterStatus) XXX_Marshal

func (m *ClusterStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ClusterStatus) XXX_Merge

func (m *ClusterStatus) XXX_Merge(src proto.Message)

func (*ClusterStatus) XXX_Size

func (m *ClusterStatus) XXX_Size() int

func (*ClusterStatus) XXX_Unmarshal

func (m *ClusterStatus) XXX_Unmarshal(b []byte) error

type ClusterStatus_State

type ClusterStatus_State int32

The cluster state.

const (
	// The cluster state is unknown.
	ClusterStatus_UNKNOWN ClusterStatus_State = 0
	// The cluster is being created and set up. It is not ready for use.
	ClusterStatus_CREATING ClusterStatus_State = 1
	// The cluster is currently running and healthy. It is ready for use.
	ClusterStatus_RUNNING ClusterStatus_State = 2
	// The cluster encountered an error. It is not ready for use.
	ClusterStatus_ERROR ClusterStatus_State = 3
	// The cluster is being deleted. It cannot be used.
	ClusterStatus_DELETING ClusterStatus_State = 4
	// The cluster is being updated. It continues to accept and process jobs.
	ClusterStatus_UPDATING ClusterStatus_State = 5
	// The cluster is being stopped. It cannot be used.
	ClusterStatus_STOPPING ClusterStatus_State = 6
	// The cluster is currently stopped. It is not ready for use.
	ClusterStatus_STOPPED ClusterStatus_State = 7
	// The cluster is being started. It is not ready for use.
	ClusterStatus_STARTING ClusterStatus_State = 8
)

func (ClusterStatus_State) EnumDescriptor

func (ClusterStatus_State) EnumDescriptor() ([]byte, []int)

func (ClusterStatus_State) String

func (x ClusterStatus_State) String() string

type ClusterStatus_Substate

type ClusterStatus_Substate int32

The cluster substate.

const (
	// The cluster substate is unknown.
	ClusterStatus_UNSPECIFIED ClusterStatus_Substate = 0
	// The cluster is known to be in an unhealthy state
	// (for example, critical daemons are not running or HDFS capacity is
	// exhausted).
	//
	// Applies to RUNNING state.
	ClusterStatus_UNHEALTHY ClusterStatus_Substate = 1
	// The agent-reported status is out of date (may occur if
	// Dataproc loses communication with Agent).
	//
	// Applies to RUNNING state.
	ClusterStatus_STALE_STATUS ClusterStatus_Substate = 2
)

func (ClusterStatus_Substate) EnumDescriptor

func (ClusterStatus_Substate) EnumDescriptor() ([]byte, []int)

func (ClusterStatus_Substate) String

func (x ClusterStatus_Substate) String() string

type Component

type Component int32

Cluster components that can be activated.

const (
	// Unspecified component.
	Component_COMPONENT_UNSPECIFIED Component = 0
	// The Anaconda python distribution.
	Component_ANACONDA Component = 5
	// Docker
	Component_DOCKER Component = 13
	// The Druid query engine.
	Component_DRUID Component = 9
	// Flink
	Component_FLINK Component = 14
	// The Hive Web HCatalog (the REST service for accessing HCatalog).
	Component_HIVE_WEBHCAT Component = 3
	// The Jupyter Notebook.
	Component_JUPYTER Component = 1
	// The Kerberos security feature.
	Component_KERBEROS Component = 7
	// The Presto query engine.
	Component_PRESTO Component = 6
	// The Ranger service.
	Component_RANGER Component = 12
	// The Solr service.
	Component_SOLR Component = 10
	// The Zeppelin notebook.
	Component_ZEPPELIN Component = 4
	// The Zookeeper service.
	Component_ZOOKEEPER Component = 8
)

func (Component) EnumDescriptor

func (Component) EnumDescriptor() ([]byte, []int)

func (Component) String

func (x Component) String() string

type CreateAutoscalingPolicyRequest

type CreateAutoscalingPolicyRequest struct {
	// Required. The "resource name" of the region or location, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.autoscalingPolicies.create`, the resource name
	//   has the following format:
	//   `projects/{project_id}/regions/{region}`
	//
	// * For `projects.locations.autoscalingPolicies.create`, the resource name
	//   has the following format:
	//   `projects/{project_id}/locations/{location}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The autoscaling policy to create.
	Policy               *AutoscalingPolicy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"`
	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
	XXX_unrecognized     []byte             `json:"-"`
	XXX_sizecache        int32              `json:"-"`
}

A request to create an autoscaling policy.

func (*CreateAutoscalingPolicyRequest) Descriptor

func (*CreateAutoscalingPolicyRequest) Descriptor() ([]byte, []int)

func (*CreateAutoscalingPolicyRequest) GetParent

func (m *CreateAutoscalingPolicyRequest) GetParent() string

func (*CreateAutoscalingPolicyRequest) GetPolicy

func (*CreateAutoscalingPolicyRequest) ProtoMessage

func (*CreateAutoscalingPolicyRequest) ProtoMessage()

func (*CreateAutoscalingPolicyRequest) Reset

func (m *CreateAutoscalingPolicyRequest) Reset()

func (*CreateAutoscalingPolicyRequest) String

func (*CreateAutoscalingPolicyRequest) XXX_DiscardUnknown

func (m *CreateAutoscalingPolicyRequest) XXX_DiscardUnknown()

func (*CreateAutoscalingPolicyRequest) XXX_Marshal

func (m *CreateAutoscalingPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CreateAutoscalingPolicyRequest) XXX_Merge

func (m *CreateAutoscalingPolicyRequest) XXX_Merge(src proto.Message)

func (*CreateAutoscalingPolicyRequest) XXX_Size

func (m *CreateAutoscalingPolicyRequest) XXX_Size() int

func (*CreateAutoscalingPolicyRequest) XXX_Unmarshal

func (m *CreateAutoscalingPolicyRequest) XXX_Unmarshal(b []byte) error

type CreateClusterRequest

type CreateClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster to create.
	Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"`
	// Optional. A unique id used to identify the request. If the server
	// receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same
	// id, then the second request will be ignored and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
	// is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId            string   `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to create a cluster.

func (*CreateClusterRequest) Descriptor

func (*CreateClusterRequest) Descriptor() ([]byte, []int)

func (*CreateClusterRequest) GetCluster

func (m *CreateClusterRequest) GetCluster() *Cluster

func (*CreateClusterRequest) GetProjectId

func (m *CreateClusterRequest) GetProjectId() string

func (*CreateClusterRequest) GetRegion

func (m *CreateClusterRequest) GetRegion() string

func (*CreateClusterRequest) GetRequestId

func (m *CreateClusterRequest) GetRequestId() string

func (*CreateClusterRequest) ProtoMessage

func (*CreateClusterRequest) ProtoMessage()

func (*CreateClusterRequest) Reset

func (m *CreateClusterRequest) Reset()

func (*CreateClusterRequest) String

func (m *CreateClusterRequest) String() string

func (*CreateClusterRequest) XXX_DiscardUnknown

func (m *CreateClusterRequest) XXX_DiscardUnknown()

func (*CreateClusterRequest) XXX_Marshal

func (m *CreateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CreateClusterRequest) XXX_Merge

func (m *CreateClusterRequest) XXX_Merge(src proto.Message)

func (*CreateClusterRequest) XXX_Size

func (m *CreateClusterRequest) XXX_Size() int

func (*CreateClusterRequest) XXX_Unmarshal

func (m *CreateClusterRequest) XXX_Unmarshal(b []byte) error

type CreateWorkflowTemplateRequest

type CreateWorkflowTemplateRequest struct {
	// Required. The resource name of the region or location, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.workflowTemplates.create`, the resource name of the
	//   region has the following format:
	//   `projects/{project_id}/regions/{region}`
	//
	// * For `projects.locations.workflowTemplates.create`, the resource name of
	//   the location has the following format:
	//   `projects/{project_id}/locations/{location}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The Dataproc workflow template to create.
	Template             *WorkflowTemplate `protobuf:"bytes,2,opt,name=template,proto3" json:"template,omitempty"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

A request to create a workflow template.

func (*CreateWorkflowTemplateRequest) Descriptor

func (*CreateWorkflowTemplateRequest) Descriptor() ([]byte, []int)

func (*CreateWorkflowTemplateRequest) GetParent

func (m *CreateWorkflowTemplateRequest) GetParent() string

func (*CreateWorkflowTemplateRequest) GetTemplate

func (*CreateWorkflowTemplateRequest) ProtoMessage

func (*CreateWorkflowTemplateRequest) ProtoMessage()

func (*CreateWorkflowTemplateRequest) Reset

func (m *CreateWorkflowTemplateRequest) Reset()

func (*CreateWorkflowTemplateRequest) String

func (*CreateWorkflowTemplateRequest) XXX_DiscardUnknown

func (m *CreateWorkflowTemplateRequest) XXX_DiscardUnknown()

func (*CreateWorkflowTemplateRequest) XXX_Marshal

func (m *CreateWorkflowTemplateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*CreateWorkflowTemplateRequest) XXX_Merge

func (m *CreateWorkflowTemplateRequest) XXX_Merge(src proto.Message)

func (*CreateWorkflowTemplateRequest) XXX_Size

func (m *CreateWorkflowTemplateRequest) XXX_Size() int

func (*CreateWorkflowTemplateRequest) XXX_Unmarshal

func (m *CreateWorkflowTemplateRequest) XXX_Unmarshal(b []byte) error

type DeleteAutoscalingPolicyRequest

type DeleteAutoscalingPolicyRequest struct {
	// Required. The "resource name" of the autoscaling policy, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.autoscalingPolicies.delete`, the resource name
	//   of the policy has the following format:
	//   `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`
	//
	// * For `projects.locations.autoscalingPolicies.delete`, the resource name
	//   of the policy has the following format:
	//   `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`
	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to delete an autoscaling policy.

Autoscaling policies in use by one or more clusters will not be deleted.

func (*DeleteAutoscalingPolicyRequest) Descriptor

func (*DeleteAutoscalingPolicyRequest) Descriptor() ([]byte, []int)

func (*DeleteAutoscalingPolicyRequest) GetName

func (*DeleteAutoscalingPolicyRequest) ProtoMessage

func (*DeleteAutoscalingPolicyRequest) ProtoMessage()

func (*DeleteAutoscalingPolicyRequest) Reset

func (m *DeleteAutoscalingPolicyRequest) Reset()

func (*DeleteAutoscalingPolicyRequest) String

func (*DeleteAutoscalingPolicyRequest) XXX_DiscardUnknown

func (m *DeleteAutoscalingPolicyRequest) XXX_DiscardUnknown()

func (*DeleteAutoscalingPolicyRequest) XXX_Marshal

func (m *DeleteAutoscalingPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DeleteAutoscalingPolicyRequest) XXX_Merge

func (m *DeleteAutoscalingPolicyRequest) XXX_Merge(src proto.Message)

func (*DeleteAutoscalingPolicyRequest) XXX_Size

func (m *DeleteAutoscalingPolicyRequest) XXX_Size() int

func (*DeleteAutoscalingPolicyRequest) XXX_Unmarshal

func (m *DeleteAutoscalingPolicyRequest) XXX_Unmarshal(b []byte) error

type DeleteClusterRequest

type DeleteClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Optional. Specifying the `cluster_uuid` means the RPC should fail
	// (with error NOT_FOUND) if cluster with specified UUID does not exist.
	ClusterUuid string `protobuf:"bytes,4,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Optional. A unique id used to identify the request. If the server
	// receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests  with the same
	// id, then the second request will be ignored and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
	// backend is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId            string   `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to delete a cluster.

func (*DeleteClusterRequest) Descriptor

func (*DeleteClusterRequest) Descriptor() ([]byte, []int)

func (*DeleteClusterRequest) GetClusterName

func (m *DeleteClusterRequest) GetClusterName() string

func (*DeleteClusterRequest) GetClusterUuid

func (m *DeleteClusterRequest) GetClusterUuid() string

func (*DeleteClusterRequest) GetProjectId

func (m *DeleteClusterRequest) GetProjectId() string

func (*DeleteClusterRequest) GetRegion

func (m *DeleteClusterRequest) GetRegion() string

func (*DeleteClusterRequest) GetRequestId

func (m *DeleteClusterRequest) GetRequestId() string

func (*DeleteClusterRequest) ProtoMessage

func (*DeleteClusterRequest) ProtoMessage()

func (*DeleteClusterRequest) Reset

func (m *DeleteClusterRequest) Reset()

func (*DeleteClusterRequest) String

func (m *DeleteClusterRequest) String() string

func (*DeleteClusterRequest) XXX_DiscardUnknown

func (m *DeleteClusterRequest) XXX_DiscardUnknown()

func (*DeleteClusterRequest) XXX_Marshal

func (m *DeleteClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DeleteClusterRequest) XXX_Merge

func (m *DeleteClusterRequest) XXX_Merge(src proto.Message)

func (*DeleteClusterRequest) XXX_Size

func (m *DeleteClusterRequest) XXX_Size() int

func (*DeleteClusterRequest) XXX_Unmarshal

func (m *DeleteClusterRequest) XXX_Unmarshal(b []byte) error

type DeleteJobRequest

type DeleteJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId                string   `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to delete a job.

func (*DeleteJobRequest) Descriptor

func (*DeleteJobRequest) Descriptor() ([]byte, []int)

func (*DeleteJobRequest) GetJobId

func (m *DeleteJobRequest) GetJobId() string

func (*DeleteJobRequest) GetProjectId

func (m *DeleteJobRequest) GetProjectId() string

func (*DeleteJobRequest) GetRegion

func (m *DeleteJobRequest) GetRegion() string

func (*DeleteJobRequest) ProtoMessage

func (*DeleteJobRequest) ProtoMessage()

func (*DeleteJobRequest) Reset

func (m *DeleteJobRequest) Reset()

func (*DeleteJobRequest) String

func (m *DeleteJobRequest) String() string

func (*DeleteJobRequest) XXX_DiscardUnknown

func (m *DeleteJobRequest) XXX_DiscardUnknown()

func (*DeleteJobRequest) XXX_Marshal

func (m *DeleteJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DeleteJobRequest) XXX_Merge

func (m *DeleteJobRequest) XXX_Merge(src proto.Message)

func (*DeleteJobRequest) XXX_Size

func (m *DeleteJobRequest) XXX_Size() int

func (*DeleteJobRequest) XXX_Unmarshal

func (m *DeleteJobRequest) XXX_Unmarshal(b []byte) error

type DeleteWorkflowTemplateRequest

type DeleteWorkflowTemplateRequest struct {
	// Required. The resource name of the workflow template, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.workflowTemplates.delete`, the resource name
	//   of the template has the following format:
	//   `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
	//
	// * For `projects.locations.workflowTemplates.delete`, the resource name
	//   of the template has the following format:
	//   `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Optional. The version of workflow template to delete. If specified,
	// will only delete the template if the current server version matches
	// specified version.
	Version              int32    `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to delete a workflow template.

Currently started workflows will remain running.

func (*DeleteWorkflowTemplateRequest) Descriptor

func (*DeleteWorkflowTemplateRequest) Descriptor() ([]byte, []int)

func (*DeleteWorkflowTemplateRequest) GetName

func (*DeleteWorkflowTemplateRequest) GetVersion

func (m *DeleteWorkflowTemplateRequest) GetVersion() int32

func (*DeleteWorkflowTemplateRequest) ProtoMessage

func (*DeleteWorkflowTemplateRequest) ProtoMessage()

func (*DeleteWorkflowTemplateRequest) Reset

func (m *DeleteWorkflowTemplateRequest) Reset()

func (*DeleteWorkflowTemplateRequest) String

func (*DeleteWorkflowTemplateRequest) XXX_DiscardUnknown

func (m *DeleteWorkflowTemplateRequest) XXX_DiscardUnknown()

func (*DeleteWorkflowTemplateRequest) XXX_Marshal

func (m *DeleteWorkflowTemplateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DeleteWorkflowTemplateRequest) XXX_Merge

func (m *DeleteWorkflowTemplateRequest) XXX_Merge(src proto.Message)

func (*DeleteWorkflowTemplateRequest) XXX_Size

func (m *DeleteWorkflowTemplateRequest) XXX_Size() int

func (*DeleteWorkflowTemplateRequest) XXX_Unmarshal

func (m *DeleteWorkflowTemplateRequest) XXX_Unmarshal(b []byte) error

type DiagnoseClusterRequest

type DiagnoseClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName          string   `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to collect cluster diagnostic information.

func (*DiagnoseClusterRequest) Descriptor

func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int)

func (*DiagnoseClusterRequest) GetClusterName

func (m *DiagnoseClusterRequest) GetClusterName() string

func (*DiagnoseClusterRequest) GetProjectId

func (m *DiagnoseClusterRequest) GetProjectId() string

func (*DiagnoseClusterRequest) GetRegion

func (m *DiagnoseClusterRequest) GetRegion() string

func (*DiagnoseClusterRequest) ProtoMessage

func (*DiagnoseClusterRequest) ProtoMessage()

func (*DiagnoseClusterRequest) Reset

func (m *DiagnoseClusterRequest) Reset()

func (*DiagnoseClusterRequest) String

func (m *DiagnoseClusterRequest) String() string

func (*DiagnoseClusterRequest) XXX_DiscardUnknown

func (m *DiagnoseClusterRequest) XXX_DiscardUnknown()

func (*DiagnoseClusterRequest) XXX_Marshal

func (m *DiagnoseClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DiagnoseClusterRequest) XXX_Merge

func (m *DiagnoseClusterRequest) XXX_Merge(src proto.Message)

func (*DiagnoseClusterRequest) XXX_Size

func (m *DiagnoseClusterRequest) XXX_Size() int

func (*DiagnoseClusterRequest) XXX_Unmarshal

func (m *DiagnoseClusterRequest) XXX_Unmarshal(b []byte) error

type DiagnoseClusterResults

type DiagnoseClusterResults struct {
	// Output only. The Cloud Storage URI of the diagnostic output.
	// The output report is a plain text file with a summary of collected
	// diagnostics.
	OutputUri            string   `protobuf:"bytes,1,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The location of diagnostic output.

func (*DiagnoseClusterResults) Descriptor

func (*DiagnoseClusterResults) Descriptor() ([]byte, []int)

func (*DiagnoseClusterResults) GetOutputUri

func (m *DiagnoseClusterResults) GetOutputUri() string

func (*DiagnoseClusterResults) ProtoMessage

func (*DiagnoseClusterResults) ProtoMessage()

func (*DiagnoseClusterResults) Reset

func (m *DiagnoseClusterResults) Reset()

func (*DiagnoseClusterResults) String

func (m *DiagnoseClusterResults) String() string

func (*DiagnoseClusterResults) XXX_DiscardUnknown

func (m *DiagnoseClusterResults) XXX_DiscardUnknown()

func (*DiagnoseClusterResults) XXX_Marshal

func (m *DiagnoseClusterResults) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DiagnoseClusterResults) XXX_Merge

func (m *DiagnoseClusterResults) XXX_Merge(src proto.Message)

func (*DiagnoseClusterResults) XXX_Size

func (m *DiagnoseClusterResults) XXX_Size() int

func (*DiagnoseClusterResults) XXX_Unmarshal

func (m *DiagnoseClusterResults) XXX_Unmarshal(b []byte) error

type DiskConfig

type DiskConfig struct {
	// Optional. Type of the boot disk (default is "pd-standard").
	// Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
	// "pd-standard" (Persistent Disk Hard Disk Drive).
	BootDiskType string `protobuf:"bytes,3,opt,name=boot_disk_type,json=bootDiskType,proto3" json:"boot_disk_type,omitempty"`
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb int32 `protobuf:"varint,1,opt,name=boot_disk_size_gb,json=bootDiskSizeGb,proto3" json:"boot_disk_size_gb,omitempty"`
	// Optional. Number of attached SSDs, from 0 to 4 (default is 0).
	// If SSDs are not attached, the boot disk is used to store runtime logs and
	// [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
	// If one or more SSDs are attached, this runtime bulk
	// data is spread across them, and the boot disk contains only basic
	// config and installed binaries.
	NumLocalSsds         int32    `protobuf:"varint,2,opt,name=num_local_ssds,json=numLocalSsds,proto3" json:"num_local_ssds,omitempty"`
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Specifies the config of disk options for a group of VM instances.

func (*DiskConfig) Descriptor

func (*DiskConfig) Descriptor() ([]byte, []int)

func (*DiskConfig) GetBootDiskSizeGb

func (m *DiskConfig) GetBootDiskSizeGb() int32

func (*DiskConfig) GetBootDiskType

func (m *DiskConfig) GetBootDiskType() string

func (*DiskConfig) GetNumLocalSsds

func (m *DiskConfig) GetNumLocalSsds() int32

func (*DiskConfig) ProtoMessage

func (*DiskConfig) ProtoMessage()

func (*DiskConfig) Reset

func (m *DiskConfig) Reset()

func (*DiskConfig) String

func (m *DiskConfig) String() string

func (*DiskConfig) XXX_DiscardUnknown

func (m *DiskConfig) XXX_DiscardUnknown()

func (*DiskConfig) XXX_Marshal

func (m *DiskConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*DiskConfig) XXX_Merge

func (m *DiskConfig) XXX_Merge(src proto.Message)

func (*DiskConfig) XXX_Size

func (m *DiskConfig) XXX_Size() int

func (*DiskConfig) XXX_Unmarshal

func (m *DiskConfig) XXX_Unmarshal(b []byte) error

type EncryptionConfig

type EncryptionConfig struct {
	// Optional. The Cloud KMS key name to use for PD disk encryption for all
	// instances in the cluster.
	GcePdKmsKeyName      string   `protobuf:"bytes,1,opt,name=gce_pd_kms_key_name,json=gcePdKmsKeyName,proto3" json:"gce_pd_kms_key_name,omitempty"`
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Encryption settings for the cluster.

func (*EncryptionConfig) Descriptor

func (*EncryptionConfig) Descriptor() ([]byte, []int)

func (*EncryptionConfig) GetGcePdKmsKeyName

func (m *EncryptionConfig) GetGcePdKmsKeyName() string

func (*EncryptionConfig) ProtoMessage

func (*EncryptionConfig) ProtoMessage()

func (*EncryptionConfig) Reset

func (m *EncryptionConfig) Reset()

func (*EncryptionConfig) String

func (m *EncryptionConfig) String() string

func (*EncryptionConfig) XXX_DiscardUnknown

func (m *EncryptionConfig) XXX_DiscardUnknown()

func (*EncryptionConfig) XXX_Marshal

func (m *EncryptionConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*EncryptionConfig) XXX_Merge

func (m *EncryptionConfig) XXX_Merge(src proto.Message)

func (*EncryptionConfig) XXX_Size

func (m *EncryptionConfig) XXX_Size() int

func (*EncryptionConfig) XXX_Unmarshal

func (m *EncryptionConfig) XXX_Unmarshal(b []byte) error

type EndpointConfig

type EndpointConfig struct {
	// Output only. The map of port descriptions to URLs. Will only be populated
	// if enable_http_port_access is true.
	// NOTE(review): the protobuf struct tags below were elided by the doc
	// generator ("not displayed"); see the generated .pb.go for the full tags.
	HttpPorts map[string]string `` /* 176-byte string literal not displayed */
	// Optional. If true, enable http access to specific ports on the cluster
	// from external sources. Defaults to false.
	EnableHttpPortAccess bool     `` /* 126-byte string literal not displayed */
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Endpoint config for this cluster.

func (*EndpointConfig) Descriptor

func (*EndpointConfig) Descriptor() ([]byte, []int)

func (*EndpointConfig) GetEnableHttpPortAccess

func (m *EndpointConfig) GetEnableHttpPortAccess() bool

func (*EndpointConfig) GetHttpPorts

func (m *EndpointConfig) GetHttpPorts() map[string]string

func (*EndpointConfig) ProtoMessage

func (*EndpointConfig) ProtoMessage()

func (*EndpointConfig) Reset

func (m *EndpointConfig) Reset()

func (*EndpointConfig) String

func (m *EndpointConfig) String() string

func (*EndpointConfig) XXX_DiscardUnknown

func (m *EndpointConfig) XXX_DiscardUnknown()

func (*EndpointConfig) XXX_Marshal

func (m *EndpointConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*EndpointConfig) XXX_Merge

func (m *EndpointConfig) XXX_Merge(src proto.Message)

func (*EndpointConfig) XXX_Size

func (m *EndpointConfig) XXX_Size() int

func (*EndpointConfig) XXX_Unmarshal

func (m *EndpointConfig) XXX_Unmarshal(b []byte) error

type GceClusterConfig

type GceClusterConfig struct {
	// Optional. The zone where the Compute Engine cluster will be located.
	// On a create request, it is required in the "global" region. If omitted
	// in a non-global Dataproc region, the service will pick a zone in the
	// corresponding Compute Engine region. On a get request, zone will always be
	// present.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
	// * `projects/[project_id]/zones/[zone]`
	// * `us-central1-f`
	ZoneUri string `protobuf:"bytes,1,opt,name=zone_uri,json=zoneUri,proto3" json:"zone_uri,omitempty"`
	// Optional. The Compute Engine network to be used for machine
	// communications. Cannot be specified with subnetwork_uri. If neither
	// `network_uri` nor `subnetwork_uri` is specified, the "default" network of
	// the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
	// [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for
	// more information).
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
	// * `projects/[project_id]/regions/global/default`
	// * `default`
	NetworkUri string `protobuf:"bytes,2,opt,name=network_uri,json=networkUri,proto3" json:"network_uri,omitempty"`
	// Optional. The Compute Engine subnetwork to be used for machine
	// communications. Cannot be specified with network_uri.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/subnetworks/sub0`
	// * `projects/[project_id]/regions/us-east1/subnetworks/sub0`
	// * `sub0`
	SubnetworkUri string `protobuf:"bytes,6,opt,name=subnetwork_uri,json=subnetworkUri,proto3" json:"subnetwork_uri,omitempty"`
	// Optional. If true, all instances in the cluster will only have internal IP
	// addresses. By default, clusters are not restricted to internal IP
	// addresses, and will have ephemeral external IP addresses assigned to each
	// instance. This `internal_ip_only` restriction can only be enabled for
	// subnetwork enabled networks, and all off-cluster dependencies must be
	// configured to be accessible without external IP addresses.
	InternalIpOnly bool `protobuf:"varint,7,opt,name=internal_ip_only,json=internalIpOnly,proto3" json:"internal_ip_only,omitempty"`
	// Optional. The [Dataproc service
	// account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_cloud_dataproc)
	// (also see [VM Data Plane
	// identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity))
	// used by Dataproc cluster VM instances to access Google Cloud Platform
	// services.
	//
	// If not specified, the
	// [Compute Engine default service
	// account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account)
	// is used.
	ServiceAccount string `protobuf:"bytes,8,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"`
	// Optional. The URIs of service account scopes to be included in
	// Compute Engine instances. The following base set of scopes is always
	// included:
	//
	// * https://www.googleapis.com/auth/cloud.useraccounts.readonly
	// * https://www.googleapis.com/auth/devstorage.read_write
	// * https://www.googleapis.com/auth/logging.write
	//
	// If no scopes are specified, the following defaults are also provided:
	//
	// * https://www.googleapis.com/auth/bigquery
	// * https://www.googleapis.com/auth/bigtable.admin.table
	// * https://www.googleapis.com/auth/bigtable.data
	// * https://www.googleapis.com/auth/devstorage.full_control
	ServiceAccountScopes []string `protobuf:"bytes,3,rep,name=service_account_scopes,json=serviceAccountScopes,proto3" json:"service_account_scopes,omitempty"`
	// The Compute Engine tags to add to all instances (see [Tagging
	// instances](https://cloud.google.com/compute/docs/label-or-tag-resources#tags)).
	Tags []string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty"`
	// The Compute Engine metadata entries to add to all instances (see
	// [Project and instance
	// metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	// NOTE(review): the full protobuf struct tag was elided by the doc
	// generator ("not displayed"); see the generated .pb.go for the full tag.
	Metadata map[string]string `` /* 157-byte string literal not displayed */
	// Optional. Reservation Affinity for consuming Zonal reservation.
	ReservationAffinity  *ReservationAffinity `protobuf:"bytes,11,opt,name=reservation_affinity,json=reservationAffinity,proto3" json:"reservation_affinity,omitempty"`
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.

func (*GceClusterConfig) Descriptor

func (*GceClusterConfig) Descriptor() ([]byte, []int)

func (*GceClusterConfig) GetInternalIpOnly

func (m *GceClusterConfig) GetInternalIpOnly() bool

func (*GceClusterConfig) GetMetadata

func (m *GceClusterConfig) GetMetadata() map[string]string

func (*GceClusterConfig) GetNetworkUri

func (m *GceClusterConfig) GetNetworkUri() string

func (*GceClusterConfig) GetReservationAffinity

func (m *GceClusterConfig) GetReservationAffinity() *ReservationAffinity

func (*GceClusterConfig) GetServiceAccount

func (m *GceClusterConfig) GetServiceAccount() string

func (*GceClusterConfig) GetServiceAccountScopes

func (m *GceClusterConfig) GetServiceAccountScopes() []string

func (*GceClusterConfig) GetSubnetworkUri

func (m *GceClusterConfig) GetSubnetworkUri() string

func (*GceClusterConfig) GetTags

func (m *GceClusterConfig) GetTags() []string

func (*GceClusterConfig) GetZoneUri

func (m *GceClusterConfig) GetZoneUri() string

func (*GceClusterConfig) ProtoMessage

func (*GceClusterConfig) ProtoMessage()

func (*GceClusterConfig) Reset

func (m *GceClusterConfig) Reset()

func (*GceClusterConfig) String

func (m *GceClusterConfig) String() string

func (*GceClusterConfig) XXX_DiscardUnknown

func (m *GceClusterConfig) XXX_DiscardUnknown()

func (*GceClusterConfig) XXX_Marshal

func (m *GceClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GceClusterConfig) XXX_Merge

func (m *GceClusterConfig) XXX_Merge(src proto.Message)

func (*GceClusterConfig) XXX_Size

func (m *GceClusterConfig) XXX_Size() int

func (*GceClusterConfig) XXX_Unmarshal

func (m *GceClusterConfig) XXX_Unmarshal(b []byte) error

type GetAutoscalingPolicyRequest

type GetAutoscalingPolicyRequest struct {
	// Required. The "resource name" of the autoscaling policy, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.autoscalingPolicies.get`, the resource name
	//   of the policy has the following format:
	//   `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`
	//
	// * For `projects.locations.autoscalingPolicies.get`, the resource name
	//   of the policy has the following format:
	//   `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`
	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to fetch an autoscaling policy.

func (*GetAutoscalingPolicyRequest) Descriptor

func (*GetAutoscalingPolicyRequest) Descriptor() ([]byte, []int)

func (*GetAutoscalingPolicyRequest) GetName

func (m *GetAutoscalingPolicyRequest) GetName() string

func (*GetAutoscalingPolicyRequest) ProtoMessage

func (*GetAutoscalingPolicyRequest) ProtoMessage()

func (*GetAutoscalingPolicyRequest) Reset

func (m *GetAutoscalingPolicyRequest) Reset()

func (*GetAutoscalingPolicyRequest) String

func (m *GetAutoscalingPolicyRequest) String() string

func (*GetAutoscalingPolicyRequest) XXX_DiscardUnknown

func (m *GetAutoscalingPolicyRequest) XXX_DiscardUnknown()

func (*GetAutoscalingPolicyRequest) XXX_Marshal

func (m *GetAutoscalingPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GetAutoscalingPolicyRequest) XXX_Merge

func (m *GetAutoscalingPolicyRequest) XXX_Merge(src proto.Message)

func (*GetAutoscalingPolicyRequest) XXX_Size

func (m *GetAutoscalingPolicyRequest) XXX_Size() int

func (*GetAutoscalingPolicyRequest) XXX_Unmarshal

func (m *GetAutoscalingPolicyRequest) XXX_Unmarshal(b []byte) error

type GetClusterRequest

type GetClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName          string   `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Request to get the resource representation for a cluster in a project.

func (*GetClusterRequest) Descriptor

func (*GetClusterRequest) Descriptor() ([]byte, []int)

func (*GetClusterRequest) GetClusterName

func (m *GetClusterRequest) GetClusterName() string

func (*GetClusterRequest) GetProjectId

func (m *GetClusterRequest) GetProjectId() string

func (*GetClusterRequest) GetRegion

func (m *GetClusterRequest) GetRegion() string

func (*GetClusterRequest) ProtoMessage

func (*GetClusterRequest) ProtoMessage()

func (*GetClusterRequest) Reset

func (m *GetClusterRequest) Reset()

func (*GetClusterRequest) String

func (m *GetClusterRequest) String() string

func (*GetClusterRequest) XXX_DiscardUnknown

func (m *GetClusterRequest) XXX_DiscardUnknown()

func (*GetClusterRequest) XXX_Marshal

func (m *GetClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GetClusterRequest) XXX_Merge

func (m *GetClusterRequest) XXX_Merge(src proto.Message)

func (*GetClusterRequest) XXX_Size

func (m *GetClusterRequest) XXX_Size() int

func (*GetClusterRequest) XXX_Unmarshal

func (m *GetClusterRequest) XXX_Unmarshal(b []byte) error

type GetJobRequest

type GetJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId                string   `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to get the resource representation for a job in a project.

func (*GetJobRequest) Descriptor

func (*GetJobRequest) Descriptor() ([]byte, []int)

func (*GetJobRequest) GetJobId

func (m *GetJobRequest) GetJobId() string

func (*GetJobRequest) GetProjectId

func (m *GetJobRequest) GetProjectId() string

func (*GetJobRequest) GetRegion

func (m *GetJobRequest) GetRegion() string

func (*GetJobRequest) ProtoMessage

func (*GetJobRequest) ProtoMessage()

func (*GetJobRequest) Reset

func (m *GetJobRequest) Reset()

func (*GetJobRequest) String

func (m *GetJobRequest) String() string

func (*GetJobRequest) XXX_DiscardUnknown

func (m *GetJobRequest) XXX_DiscardUnknown()

func (*GetJobRequest) XXX_Marshal

func (m *GetJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GetJobRequest) XXX_Merge

func (m *GetJobRequest) XXX_Merge(src proto.Message)

func (*GetJobRequest) XXX_Size

func (m *GetJobRequest) XXX_Size() int

func (*GetJobRequest) XXX_Unmarshal

func (m *GetJobRequest) XXX_Unmarshal(b []byte) error

type GetWorkflowTemplateRequest

type GetWorkflowTemplateRequest struct {
	// Required. The resource name of the workflow template, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.workflowTemplates.get`, the resource name of the
	//   template has the following format:
	//   `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
	//
	// * For `projects.locations.workflowTemplates.get`, the resource name of the
	//   template has the following format:
	//   `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Optional. The version of workflow template to retrieve. Only previously
	// instantiated versions can be retrieved.
	//
	// If unspecified, retrieves the current version.
	Version              int32    `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to fetch a workflow template.

func (*GetWorkflowTemplateRequest) Descriptor

func (*GetWorkflowTemplateRequest) Descriptor() ([]byte, []int)

func (*GetWorkflowTemplateRequest) GetName

func (m *GetWorkflowTemplateRequest) GetName() string

func (*GetWorkflowTemplateRequest) GetVersion

func (m *GetWorkflowTemplateRequest) GetVersion() int32

func (*GetWorkflowTemplateRequest) ProtoMessage

func (*GetWorkflowTemplateRequest) ProtoMessage()

func (*GetWorkflowTemplateRequest) Reset

func (m *GetWorkflowTemplateRequest) Reset()

func (*GetWorkflowTemplateRequest) String

func (m *GetWorkflowTemplateRequest) String() string

func (*GetWorkflowTemplateRequest) XXX_DiscardUnknown

func (m *GetWorkflowTemplateRequest) XXX_DiscardUnknown()

func (*GetWorkflowTemplateRequest) XXX_Marshal

func (m *GetWorkflowTemplateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GetWorkflowTemplateRequest) XXX_Merge

func (m *GetWorkflowTemplateRequest) XXX_Merge(src proto.Message)

func (*GetWorkflowTemplateRequest) XXX_Size

func (m *GetWorkflowTemplateRequest) XXX_Size() int

func (*GetWorkflowTemplateRequest) XXX_Unmarshal

func (m *GetWorkflowTemplateRequest) XXX_Unmarshal(b []byte) error

type GkeClusterConfig

type GkeClusterConfig struct {
	// Optional. A target for the deployment.
	// NOTE(review): the full protobuf struct tag was elided by the doc
	// generator ("not displayed"); see the generated .pb.go for the full tag.
	NamespacedGkeDeploymentTarget *GkeClusterConfig_NamespacedGkeDeploymentTarget `` /* 152-byte string literal not displayed */
	// XXX_* fields are internal protobuf-runtime bookkeeping; do not access directly.
	XXX_NoUnkeyedLiteral          struct{}                                        `json:"-"`
	XXX_unrecognized              []byte                                          `json:"-"`
	XXX_sizecache                 int32                                           `json:"-"`
}

The GKE config for this cluster.

func (*GkeClusterConfig) Descriptor

func (*GkeClusterConfig) Descriptor() ([]byte, []int)

func (*GkeClusterConfig) GetNamespacedGkeDeploymentTarget

func (m *GkeClusterConfig) GetNamespacedGkeDeploymentTarget() *GkeClusterConfig_NamespacedGkeDeploymentTarget

func (*GkeClusterConfig) ProtoMessage

func (*GkeClusterConfig) ProtoMessage()

func (*GkeClusterConfig) Reset

func (m *GkeClusterConfig) Reset()

func (*GkeClusterConfig) String

func (m *GkeClusterConfig) String() string

func (*GkeClusterConfig) XXX_DiscardUnknown

func (m *GkeClusterConfig) XXX_DiscardUnknown()

func (*GkeClusterConfig) XXX_Marshal

func (m *GkeClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GkeClusterConfig) XXX_Merge

func (m *GkeClusterConfig) XXX_Merge(src proto.Message)

func (*GkeClusterConfig) XXX_Size

func (m *GkeClusterConfig) XXX_Size() int

func (*GkeClusterConfig) XXX_Unmarshal

func (m *GkeClusterConfig) XXX_Unmarshal(b []byte) error

type GkeClusterConfig_NamespacedGkeDeploymentTarget

type GkeClusterConfig_NamespacedGkeDeploymentTarget struct {
	// Optional. The target GKE cluster to deploy to.
	// Format: 'projects/{project}/locations/{location}/clusters/{cluster_id}'
	TargetGkeCluster string `protobuf:"bytes,1,opt,name=target_gke_cluster,json=targetGkeCluster,proto3" json:"target_gke_cluster,omitempty"`
	// Optional. A namespace within the GKE cluster to deploy into.
	ClusterNamespace     string   `protobuf:"bytes,2,opt,name=cluster_namespace,json=clusterNamespace,proto3" json:"cluster_namespace,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A full, namespace-isolated deployment target for an existing GKE cluster.

func (*GkeClusterConfig_NamespacedGkeDeploymentTarget) Descriptor

func (*GkeClusterConfig_NamespacedGkeDeploymentTarget) GetClusterNamespace

func (m *GkeClusterConfig_NamespacedGkeDeploymentTarget) GetClusterNamespace() string

func (*GkeClusterConfig_NamespacedGkeDeploymentTarget) GetTargetGkeCluster

func (m *GkeClusterConfig_NamespacedGkeDeploymentTarget) GetTargetGkeCluster() string

func (*GkeClusterConfig_NamespacedGkeDeploymentTarget) ProtoMessage

func (*GkeClusterConfig_NamespacedGkeDeploymentTarget) Reset

func (*GkeClusterConfig_NamespacedGkeDeploymentTarget) String

func (*GkeClusterConfig_NamespacedGkeDeploymentTarget) XXX_DiscardUnknown

func (m *GkeClusterConfig_NamespacedGkeDeploymentTarget) XXX_DiscardUnknown()

func (*GkeClusterConfig_NamespacedGkeDeploymentTarget) XXX_Marshal

func (m *GkeClusterConfig_NamespacedGkeDeploymentTarget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*GkeClusterConfig_NamespacedGkeDeploymentTarget) XXX_Merge

func (*GkeClusterConfig_NamespacedGkeDeploymentTarget) XXX_Size

func (*GkeClusterConfig_NamespacedGkeDeploymentTarget) XXX_Unmarshal

type HadoopJob

type HadoopJob struct {
	// Required. Indicates the location of the driver's main class. Specify
	// either the jar file that contains the main class or the main class name.
	// To specify both, add the jar file to `jar_file_uris`, and then specify
	// the main class name in this property.
	//
	// Types that are valid to be assigned to Driver:
	//	*HadoopJob_MainJarFileUri
	//	*HadoopJob_MainClass
	Driver isHadoopJob_Driver `protobuf_oneof:"driver"`
	// Optional. The arguments to pass to the driver. Do not
	// include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
	// job properties, since a collision may occur that causes an incorrect job
	// submission.
	Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
	// Optional. Jar file URIs to add to the CLASSPATHs of the
	// Hadoop driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
	// to the working directory of Hadoop drivers and distributed tasks. Useful
	// for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory of
	// Hadoop drivers and tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, or .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure Hadoop.
	// Properties that conflict with values set by the Dataproc API may be
	// overwritten. Can include properties set in /etc/hadoop/conf/*-site and
	// classes in user code.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

A Dataproc job for running [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html).

func (*HadoopJob) Descriptor

func (*HadoopJob) Descriptor() ([]byte, []int)

func (*HadoopJob) GetArchiveUris

func (m *HadoopJob) GetArchiveUris() []string

func (*HadoopJob) GetArgs

func (m *HadoopJob) GetArgs() []string

func (*HadoopJob) GetDriver

func (m *HadoopJob) GetDriver() isHadoopJob_Driver

func (*HadoopJob) GetFileUris

func (m *HadoopJob) GetFileUris() []string

func (*HadoopJob) GetJarFileUris

func (m *HadoopJob) GetJarFileUris() []string

func (*HadoopJob) GetLoggingConfig

func (m *HadoopJob) GetLoggingConfig() *LoggingConfig

func (*HadoopJob) GetMainClass

func (m *HadoopJob) GetMainClass() string

func (*HadoopJob) GetMainJarFileUri

func (m *HadoopJob) GetMainJarFileUri() string

func (*HadoopJob) GetProperties

func (m *HadoopJob) GetProperties() map[string]string

func (*HadoopJob) ProtoMessage

func (*HadoopJob) ProtoMessage()

func (*HadoopJob) Reset

func (m *HadoopJob) Reset()

func (*HadoopJob) String

func (m *HadoopJob) String() string

func (*HadoopJob) XXX_DiscardUnknown

func (m *HadoopJob) XXX_DiscardUnknown()

func (*HadoopJob) XXX_Marshal

func (m *HadoopJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*HadoopJob) XXX_Merge

func (m *HadoopJob) XXX_Merge(src proto.Message)

func (*HadoopJob) XXX_OneofWrappers

func (*HadoopJob) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*HadoopJob) XXX_Size

func (m *HadoopJob) XXX_Size() int

func (*HadoopJob) XXX_Unmarshal

func (m *HadoopJob) XXX_Unmarshal(b []byte) error

type HadoopJob_MainClass

type HadoopJob_MainClass struct {
	MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"`
}

type HadoopJob_MainJarFileUri

type HadoopJob_MainJarFileUri struct {
	MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"`
}

type HiveJob

type HiveJob struct {
	// Required. The sequence of Hive queries to execute, specified as either
	// an HCFS file URI or a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*HiveJob_QueryFileUri
	//	*HiveJob_QueryList
	Queries isHiveJob_Queries `protobuf_oneof:"queries"`
	// Optional. Whether to continue executing queries if a query fails.
	// The default value is `false`. Setting to `true` can be useful when
	// executing independent parallel queries.
	ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`
	// Optional. Mapping of query variable names to values (equivalent to the
	// Hive command: `SET name="value";`).
	ScriptVariables map[string]string `` /* 194-byte string literal not displayed */
	// Optional. A mapping of property names and values, used to configure Hive.
	// Properties that conflict with values set by the Dataproc API may be
	// overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
	// /etc/hive/conf/hive-site.xml, and classes in user code.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of the
	// Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
	// and UDFs.
	JarFileUris          []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A Dataproc job for running [Apache Hive](https://hive.apache.org/) queries on YARN.

func (*HiveJob) Descriptor

func (*HiveJob) Descriptor() ([]byte, []int)

func (*HiveJob) GetContinueOnFailure

func (m *HiveJob) GetContinueOnFailure() bool

func (*HiveJob) GetJarFileUris

func (m *HiveJob) GetJarFileUris() []string

func (*HiveJob) GetProperties

func (m *HiveJob) GetProperties() map[string]string

func (*HiveJob) GetQueries

func (m *HiveJob) GetQueries() isHiveJob_Queries

func (*HiveJob) GetQueryFileUri

func (m *HiveJob) GetQueryFileUri() string

func (*HiveJob) GetQueryList

func (m *HiveJob) GetQueryList() *QueryList

func (*HiveJob) GetScriptVariables

func (m *HiveJob) GetScriptVariables() map[string]string

func (*HiveJob) ProtoMessage

func (*HiveJob) ProtoMessage()

func (*HiveJob) Reset

func (m *HiveJob) Reset()

func (*HiveJob) String

func (m *HiveJob) String() string

func (*HiveJob) XXX_DiscardUnknown

func (m *HiveJob) XXX_DiscardUnknown()

func (*HiveJob) XXX_Marshal

func (m *HiveJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*HiveJob) XXX_Merge

func (m *HiveJob) XXX_Merge(src proto.Message)

func (*HiveJob) XXX_OneofWrappers

func (*HiveJob) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*HiveJob) XXX_Size

func (m *HiveJob) XXX_Size() int

func (*HiveJob) XXX_Unmarshal

func (m *HiveJob) XXX_Unmarshal(b []byte) error

type HiveJob_QueryFileUri

type HiveJob_QueryFileUri struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

type HiveJob_QueryList

type HiveJob_QueryList struct {
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

type InstanceGroupAutoscalingPolicyConfig

type InstanceGroupAutoscalingPolicyConfig struct {
	// Optional. Minimum number of instances for this group.
	//
	// Primary workers - Bounds: [2, max_instances]. Default: 2.
	// Secondary workers - Bounds: [0, max_instances]. Default: 0.
	MinInstances int32 `protobuf:"varint,1,opt,name=min_instances,json=minInstances,proto3" json:"min_instances,omitempty"`
	// Optional. Maximum number of instances for this group. Required for primary
	// workers. Note that by default, clusters will not use secondary workers.
	// Required for secondary workers if the minimum secondary instances is set.
	//
	// Primary workers - Bounds: [min_instances, ). Required.
	// Secondary workers - Bounds: [min_instances, ). Default: 0.
	MaxInstances int32 `protobuf:"varint,2,opt,name=max_instances,json=maxInstances,proto3" json:"max_instances,omitempty"`
	// Optional. Weight for the instance group, which is used to determine the
	// fraction of total workers in the cluster from this instance group.
	// For example, if primary workers have weight 2, and secondary workers have
	// weight 1, the cluster will have approximately 2 primary workers for each
	// secondary worker.
	//
	// The cluster may not reach the specified balance if constrained
	// by min/max bounds or other autoscaling settings. For example, if
	// `max_instances` for secondary workers is 0, then only primary workers will
	// be added. The cluster can also be out of balance when created.
	//
	// If weight is not set on any instance group, the cluster will default to
	// equal weight for all groups: the cluster will attempt to maintain an equal
	// number of workers in each group within the configured size bounds for each
	// group. If weight is set for one group only, the cluster will default to
	// zero weight on the unset group. For example if weight is set only on
	// primary workers, the cluster will use primary workers only and no
	// secondary workers.
	Weight               int32    `protobuf:"varint,3,opt,name=weight,proto3" json:"weight,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Configuration for the size bounds of an instance group, including its proportional size to other groups.

func (*InstanceGroupAutoscalingPolicyConfig) Descriptor

func (*InstanceGroupAutoscalingPolicyConfig) Descriptor() ([]byte, []int)

func (*InstanceGroupAutoscalingPolicyConfig) GetMaxInstances

func (m *InstanceGroupAutoscalingPolicyConfig) GetMaxInstances() int32

func (*InstanceGroupAutoscalingPolicyConfig) GetMinInstances

func (m *InstanceGroupAutoscalingPolicyConfig) GetMinInstances() int32

func (*InstanceGroupAutoscalingPolicyConfig) GetWeight

func (*InstanceGroupAutoscalingPolicyConfig) ProtoMessage

func (*InstanceGroupAutoscalingPolicyConfig) ProtoMessage()

func (*InstanceGroupAutoscalingPolicyConfig) Reset

func (*InstanceGroupAutoscalingPolicyConfig) String

func (*InstanceGroupAutoscalingPolicyConfig) XXX_DiscardUnknown

func (m *InstanceGroupAutoscalingPolicyConfig) XXX_DiscardUnknown()

func (*InstanceGroupAutoscalingPolicyConfig) XXX_Marshal

func (m *InstanceGroupAutoscalingPolicyConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InstanceGroupAutoscalingPolicyConfig) XXX_Merge

func (*InstanceGroupAutoscalingPolicyConfig) XXX_Size

func (*InstanceGroupAutoscalingPolicyConfig) XXX_Unmarshal

func (m *InstanceGroupAutoscalingPolicyConfig) XXX_Unmarshal(b []byte) error

type InstanceGroupConfig

type InstanceGroupConfig struct {
	// Optional. The number of VM instances in the instance group.
	// For master instance groups, must be set to 1.
	NumInstances int32 `protobuf:"varint,1,opt,name=num_instances,json=numInstances,proto3" json:"num_instances,omitempty"`
	// Output only. The list of instance names. Dataproc derives the names
	// from `cluster_name`, `num_instances`, and the instance group.
	InstanceNames []string `protobuf:"bytes,2,rep,name=instance_names,json=instanceNames,proto3" json:"instance_names,omitempty"`
	// Optional. The Compute Engine image resource used for cluster instances.
	//
	// The URI can represent an image or image family.
	//
	// Image examples:
	//
	// * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/[image-id]`
	// * `projects/[project_id]/global/images/[image-id]`
	// * `image-id`
	//
	// Image family examples. Dataproc will use the most recent
	// image from the family:
	//
	// * `https://www.googleapis.com/compute/beta/projects/[project_id]/global/images/family/[custom-image-family-name]`
	// * `projects/[project_id]/global/images/family/[custom-image-family-name]`
	//
	// If the URI is unspecified, it will be inferred from
	// `SoftwareConfig.image_version` or the system default.
	ImageUri string `protobuf:"bytes,3,opt,name=image_uri,json=imageUri,proto3" json:"image_uri,omitempty"`
	// Optional. The Compute Engine machine type used for cluster instances.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
	// * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
	// * `n1-standard-2`
	//
	// **Auto Zone Exception**: If you are using the Dataproc
	// [Auto Zone
	// Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
	// feature, you must use the short name of the machine type
	// resource, for example, `n1-standard-2`.
	MachineTypeUri string `protobuf:"bytes,4,opt,name=machine_type_uri,json=machineTypeUri,proto3" json:"machine_type_uri,omitempty"`
	// Optional. Disk option config settings.
	DiskConfig *DiskConfig `protobuf:"bytes,5,opt,name=disk_config,json=diskConfig,proto3" json:"disk_config,omitempty"`
	// Output only. Specifies that this instance group contains preemptible
	// instances.
	IsPreemptible bool `protobuf:"varint,6,opt,name=is_preemptible,json=isPreemptible,proto3" json:"is_preemptible,omitempty"`
	// Output only. The config for Compute Engine Instance Group
	// Manager that manages this group.
	// This is only used for preemptible instance groups.
	ManagedGroupConfig *ManagedGroupConfig `protobuf:"bytes,7,opt,name=managed_group_config,json=managedGroupConfig,proto3" json:"managed_group_config,omitempty"`
	// Optional. The Compute Engine accelerator configuration for these
	// instances.
	Accelerators []*AcceleratorConfig `protobuf:"bytes,8,rep,name=accelerators,proto3" json:"accelerators,omitempty"`
	// Specifies the minimum cpu platform for the Instance Group.
	// See [Dataproc -> Minimum CPU
	// Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
	MinCpuPlatform       string   `protobuf:"bytes,9,opt,name=min_cpu_platform,json=minCpuPlatform,proto3" json:"min_cpu_platform,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The config settings for Compute Engine resources in an instance group, such as a master or worker group.

func (*InstanceGroupConfig) Descriptor

func (*InstanceGroupConfig) Descriptor() ([]byte, []int)

func (*InstanceGroupConfig) GetAccelerators

func (m *InstanceGroupConfig) GetAccelerators() []*AcceleratorConfig

func (*InstanceGroupConfig) GetDiskConfig

func (m *InstanceGroupConfig) GetDiskConfig() *DiskConfig

func (*InstanceGroupConfig) GetImageUri

func (m *InstanceGroupConfig) GetImageUri() string

func (*InstanceGroupConfig) GetInstanceNames

func (m *InstanceGroupConfig) GetInstanceNames() []string

func (*InstanceGroupConfig) GetIsPreemptible

func (m *InstanceGroupConfig) GetIsPreemptible() bool

func (*InstanceGroupConfig) GetMachineTypeUri

func (m *InstanceGroupConfig) GetMachineTypeUri() string

func (*InstanceGroupConfig) GetManagedGroupConfig

func (m *InstanceGroupConfig) GetManagedGroupConfig() *ManagedGroupConfig

func (*InstanceGroupConfig) GetMinCpuPlatform

func (m *InstanceGroupConfig) GetMinCpuPlatform() string

func (*InstanceGroupConfig) GetNumInstances

func (m *InstanceGroupConfig) GetNumInstances() int32

func (*InstanceGroupConfig) ProtoMessage

func (*InstanceGroupConfig) ProtoMessage()

func (*InstanceGroupConfig) Reset

func (m *InstanceGroupConfig) Reset()

func (*InstanceGroupConfig) String

func (m *InstanceGroupConfig) String() string

func (*InstanceGroupConfig) XXX_DiscardUnknown

func (m *InstanceGroupConfig) XXX_DiscardUnknown()

func (*InstanceGroupConfig) XXX_Marshal

func (m *InstanceGroupConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InstanceGroupConfig) XXX_Merge

func (m *InstanceGroupConfig) XXX_Merge(src proto.Message)

func (*InstanceGroupConfig) XXX_Size

func (m *InstanceGroupConfig) XXX_Size() int

func (*InstanceGroupConfig) XXX_Unmarshal

func (m *InstanceGroupConfig) XXX_Unmarshal(b []byte) error

type InstantiateInlineWorkflowTemplateRequest

type InstantiateInlineWorkflowTemplateRequest struct {
	// Required. The resource name of the region or location, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.workflowTemplates.instantiateInline`, the resource
	//   name of the region has the following format:
	//   `projects/{project_id}/regions/{region}`
	//
	// * For `projects.locations.workflowTemplates.instantiateInline`, the
	//   resource name of the location has the following format:
	//   `projects/{project_id}/locations/{location}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Required. The workflow template to instantiate.
	Template *WorkflowTemplate `protobuf:"bytes,2,opt,name=template,proto3" json:"template,omitempty"`
	// Deprecated. Please use `request_id` field instead.
	InstanceId string `protobuf:"bytes,3,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
	// Optional. A tag that prevents multiple concurrent workflow
	// instances with the same tag from running. This mitigates risk of
	// concurrent instances started due to retries.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The tag must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId            string   `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to instantiate an inline workflow template.

func (*InstantiateInlineWorkflowTemplateRequest) Descriptor

func (*InstantiateInlineWorkflowTemplateRequest) Descriptor() ([]byte, []int)

func (*InstantiateInlineWorkflowTemplateRequest) GetInstanceId

func (*InstantiateInlineWorkflowTemplateRequest) GetParent

func (*InstantiateInlineWorkflowTemplateRequest) GetRequestId

func (*InstantiateInlineWorkflowTemplateRequest) GetTemplate

func (*InstantiateInlineWorkflowTemplateRequest) ProtoMessage

func (*InstantiateInlineWorkflowTemplateRequest) Reset

func (*InstantiateInlineWorkflowTemplateRequest) String

func (*InstantiateInlineWorkflowTemplateRequest) XXX_DiscardUnknown

func (m *InstantiateInlineWorkflowTemplateRequest) XXX_DiscardUnknown()

func (*InstantiateInlineWorkflowTemplateRequest) XXX_Marshal

func (m *InstantiateInlineWorkflowTemplateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InstantiateInlineWorkflowTemplateRequest) XXX_Merge

func (*InstantiateInlineWorkflowTemplateRequest) XXX_Size

func (*InstantiateInlineWorkflowTemplateRequest) XXX_Unmarshal

func (m *InstantiateInlineWorkflowTemplateRequest) XXX_Unmarshal(b []byte) error

type InstantiateWorkflowTemplateRequest

type InstantiateWorkflowTemplateRequest struct {
	// Required. The resource name of the workflow template, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.workflowTemplates.instantiate`, the resource name
	//   of the template has the following format:
	//   `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
	//
	// * For `projects.locations.workflowTemplates.instantiate`, the resource name
	//   of the template has the following format:
	//   `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Optional. The version of workflow template to instantiate. If specified,
	// the workflow will be instantiated only if the current version of
	// the workflow template has the supplied version.
	//
	// This option cannot be used to instantiate a previous version of
	// workflow template.
	Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	// Deprecated. Please use `request_id` field instead.
	InstanceId string `protobuf:"bytes,3,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"` // Deprecated: Do not use.
	// Optional. A tag that prevents multiple concurrent workflow
	// instances with the same tag from running. This mitigates risk of
	// concurrent instances started due to retries.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The tag must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// Optional. Map from parameter names to values that should be used for those
	// parameters. Values may not exceed 100 characters.
	Parameters           map[string]string `` /* 161-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

A request to instantiate a workflow template.

func (*InstantiateWorkflowTemplateRequest) Descriptor

func (*InstantiateWorkflowTemplateRequest) Descriptor() ([]byte, []int)

func (*InstantiateWorkflowTemplateRequest) GetInstanceId deprecated

func (m *InstantiateWorkflowTemplateRequest) GetInstanceId() string

Deprecated: Do not use.

func (*InstantiateWorkflowTemplateRequest) GetName

func (*InstantiateWorkflowTemplateRequest) GetParameters

func (m *InstantiateWorkflowTemplateRequest) GetParameters() map[string]string

func (*InstantiateWorkflowTemplateRequest) GetRequestId

func (m *InstantiateWorkflowTemplateRequest) GetRequestId() string

func (*InstantiateWorkflowTemplateRequest) GetVersion

func (m *InstantiateWorkflowTemplateRequest) GetVersion() int32

func (*InstantiateWorkflowTemplateRequest) ProtoMessage

func (*InstantiateWorkflowTemplateRequest) ProtoMessage()

func (*InstantiateWorkflowTemplateRequest) Reset

func (*InstantiateWorkflowTemplateRequest) String

func (*InstantiateWorkflowTemplateRequest) XXX_DiscardUnknown

func (m *InstantiateWorkflowTemplateRequest) XXX_DiscardUnknown()

func (*InstantiateWorkflowTemplateRequest) XXX_Marshal

func (m *InstantiateWorkflowTemplateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*InstantiateWorkflowTemplateRequest) XXX_Merge

func (*InstantiateWorkflowTemplateRequest) XXX_Size

func (*InstantiateWorkflowTemplateRequest) XXX_Unmarshal

func (m *InstantiateWorkflowTemplateRequest) XXX_Unmarshal(b []byte) error

type Job

type Job struct {
	// Optional. The fully qualified reference to the job, which can be used to
	// obtain the equivalent REST path of the job resource. If this property
	// is not specified when a job is created, the server generates a
	// <code>job_id</code>.
	Reference *JobReference `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"`
	// Required. Job information, including how, when, and where to
	// run the job.
	Placement *JobPlacement `protobuf:"bytes,2,opt,name=placement,proto3" json:"placement,omitempty"`
	// Required. The application/framework-specific portion of the job.
	//
	// Types that are valid to be assigned to TypeJob:
	//	*Job_HadoopJob
	//	*Job_SparkJob
	//	*Job_PysparkJob
	//	*Job_HiveJob
	//	*Job_PigJob
	//	*Job_SparkRJob
	//	*Job_SparkSqlJob
	//	*Job_PrestoJob
	TypeJob isJob_TypeJob `protobuf_oneof:"type_job"`
	// Output only. The job status. Additional application-specific
	// status information may be contained in the <code>type_job</code>
	// and <code>yarn_applications</code> fields.
	Status *JobStatus `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"`
	// Output only. The previous job status.
	StatusHistory []*JobStatus `protobuf:"bytes,13,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
	// Output only. The collection of YARN applications spun up by this job.
	//
	// **Beta** Feature: This report is available for testing purposes only. It
	// may be changed before final release.
	YarnApplications []*YarnApplication `protobuf:"bytes,9,rep,name=yarn_applications,json=yarnApplications,proto3" json:"yarn_applications,omitempty"`
	// Output only. The email address of the user submitting the job. For jobs
	// submitted on the cluster, the address is <code>username@hostname</code>.
	SubmittedBy string `protobuf:"bytes,10,opt,name=submitted_by,json=submittedBy,proto3" json:"submitted_by,omitempty"`
	// Output only. A URI pointing to the location of the stdout of the job's
	// driver program.
	DriverOutputResourceUri string `` /* 135-byte string literal not displayed */
	// Output only. If present, the location of miscellaneous control files
	// which may be used as part of job setup and handling. If not present,
	// control files may be placed in the same location as
	// `driver_output_resource_uri`.
	DriverControlFilesUri string `` /* 129-byte string literal not displayed */
	// Optional. The labels to associate with this job.
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to [RFC
	// 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
	// associated with a job.
	Labels map[string]string `` /* 154-byte string literal not displayed */
	// Optional. Job scheduling configuration.
	Scheduling *JobScheduling `protobuf:"bytes,20,opt,name=scheduling,proto3" json:"scheduling,omitempty"`
	// Output only. A UUID that uniquely identifies a job within the project
	// over time. This is in contrast to a user-settable reference.job_id that
	// may be reused over time.
	JobUuid string `protobuf:"bytes,22,opt,name=job_uuid,json=jobUuid,proto3" json:"job_uuid,omitempty"`
	// Output only. Indicates whether the job is completed. If the value is `false`,
	// the job is still in progress. If `true`, the job is completed, and
	// `status.state` field will indicate if it was successful, failed,
	// or cancelled.
	Done                 bool     `protobuf:"varint,24,opt,name=done,proto3" json:"done,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A Dataproc job resource.

func (*Job) Descriptor

func (*Job) Descriptor() ([]byte, []int)

func (*Job) GetDone

func (m *Job) GetDone() bool

func (*Job) GetDriverControlFilesUri

func (m *Job) GetDriverControlFilesUri() string

func (*Job) GetDriverOutputResourceUri

func (m *Job) GetDriverOutputResourceUri() string

func (*Job) GetHadoopJob

func (m *Job) GetHadoopJob() *HadoopJob

func (*Job) GetHiveJob

func (m *Job) GetHiveJob() *HiveJob

func (*Job) GetJobUuid

func (m *Job) GetJobUuid() string

func (*Job) GetLabels

func (m *Job) GetLabels() map[string]string

func (*Job) GetPigJob

func (m *Job) GetPigJob() *PigJob

func (*Job) GetPlacement

func (m *Job) GetPlacement() *JobPlacement

func (*Job) GetPrestoJob

func (m *Job) GetPrestoJob() *PrestoJob

func (*Job) GetPysparkJob

func (m *Job) GetPysparkJob() *PySparkJob

func (*Job) GetReference

func (m *Job) GetReference() *JobReference

func (*Job) GetScheduling

func (m *Job) GetScheduling() *JobScheduling

func (*Job) GetSparkJob

func (m *Job) GetSparkJob() *SparkJob

func (*Job) GetSparkRJob

func (m *Job) GetSparkRJob() *SparkRJob

func (*Job) GetSparkSqlJob

func (m *Job) GetSparkSqlJob() *SparkSqlJob

func (*Job) GetStatus

func (m *Job) GetStatus() *JobStatus

func (*Job) GetStatusHistory

func (m *Job) GetStatusHistory() []*JobStatus

func (*Job) GetSubmittedBy

func (m *Job) GetSubmittedBy() string

func (*Job) GetTypeJob

func (m *Job) GetTypeJob() isJob_TypeJob

func (*Job) GetYarnApplications

func (m *Job) GetYarnApplications() []*YarnApplication

func (*Job) ProtoMessage

func (*Job) ProtoMessage()

func (*Job) Reset

func (m *Job) Reset()

func (*Job) String

func (m *Job) String() string

func (*Job) XXX_DiscardUnknown

func (m *Job) XXX_DiscardUnknown()

func (*Job) XXX_Marshal

func (m *Job) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*Job) XXX_Merge

func (m *Job) XXX_Merge(src proto.Message)

func (*Job) XXX_OneofWrappers

func (*Job) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*Job) XXX_Size

func (m *Job) XXX_Size() int

func (*Job) XXX_Unmarshal

func (m *Job) XXX_Unmarshal(b []byte) error

type JobControllerClient

// JobControllerClient is the client API for the JobController service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please
// refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type JobControllerClient interface {
	// Submits a job to a cluster.
	SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Submits a job to a cluster, returning a long-running Operation.
	SubmitJobAsOperation(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Gets the resource representation for a job in a project.
	GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Lists regions/{region}/jobs in a project.
	ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error)
	// Updates a job in a project.
	UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Starts a job cancellation request. To access the job resource
	// after cancellation, call
	// [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
	// or
	// [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
	CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error)
	// Deletes the job from the project. If the job is active, the delete fails,
	// and the response returns `FAILED_PRECONDITION`.
	DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}

JobControllerClient is the client API for JobController service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

type JobControllerServer

// JobControllerServer is the server API for the JobController service.
// Implementations must mirror the client-side JobControllerClient methods.
type JobControllerServer interface {
	// Submits a job to a cluster.
	SubmitJob(context.Context, *SubmitJobRequest) (*Job, error)
	// Submits a job to a cluster, returning a long-running Operation.
	SubmitJobAsOperation(context.Context, *SubmitJobRequest) (*longrunning.Operation, error)
	// Gets the resource representation for a job in a project.
	GetJob(context.Context, *GetJobRequest) (*Job, error)
	// Lists regions/{region}/jobs in a project.
	ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error)
	// Updates a job in a project.
	UpdateJob(context.Context, *UpdateJobRequest) (*Job, error)
	// Starts a job cancellation request. To access the job resource
	// after cancellation, call
	// [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list)
	// or
	// [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get).
	CancelJob(context.Context, *CancelJobRequest) (*Job, error)
	// Deletes the job from the project. If the job is active, the delete fails,
	// and the response returns `FAILED_PRECONDITION`.
	DeleteJob(context.Context, *DeleteJobRequest) (*empty.Empty, error)
}

JobControllerServer is the server API for JobController service.

type JobMetadata

// JobMetadata is job Operation metadata: the job id, its most recent
// status, the operation type, and the submission time.
type JobMetadata struct {
	// Output only. The job id.
	JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// Output only. Most recent job status.
	Status *JobStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
	// Output only. Operation type.
	OperationType string `protobuf:"bytes,3,opt,name=operation_type,json=operationType,proto3" json:"operation_type,omitempty"`
	// Output only. Job submission time.
	StartTime            *timestamp.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// XXX_* fields are internal bookkeeping for the protobuf runtime.
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

Job Operation metadata.

func (*JobMetadata) Descriptor

func (*JobMetadata) Descriptor() ([]byte, []int)

func (*JobMetadata) GetJobId

func (m *JobMetadata) GetJobId() string

func (*JobMetadata) GetOperationType

func (m *JobMetadata) GetOperationType() string

func (*JobMetadata) GetStartTime

func (m *JobMetadata) GetStartTime() *timestamp.Timestamp

func (*JobMetadata) GetStatus

func (m *JobMetadata) GetStatus() *JobStatus

func (*JobMetadata) ProtoMessage

func (*JobMetadata) ProtoMessage()

func (*JobMetadata) Reset

func (m *JobMetadata) Reset()

func (*JobMetadata) String

func (m *JobMetadata) String() string

func (*JobMetadata) XXX_DiscardUnknown

func (m *JobMetadata) XXX_DiscardUnknown()

func (*JobMetadata) XXX_Marshal

func (m *JobMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JobMetadata) XXX_Merge

func (m *JobMetadata) XXX_Merge(src proto.Message)

func (*JobMetadata) XXX_Size

func (m *JobMetadata) XXX_Size() int

func (*JobMetadata) XXX_Unmarshal

func (m *JobMetadata) XXX_Unmarshal(b []byte) error

type JobPlacement

// JobPlacement describes where a Dataproc job runs: the target cluster
// by name, plus the service-generated cluster UUID once submitted.
type JobPlacement struct {
	// Required. The name of the cluster where the job will be submitted.
	ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Output only. A cluster UUID generated by the Dataproc service when
	// the job is submitted.
	ClusterUuid          string   `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// XXX_* fields are internal bookkeeping for the protobuf runtime.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Dataproc job placement configuration: the cluster a job is submitted to, identified by name and service-generated UUID.

func (*JobPlacement) Descriptor

func (*JobPlacement) Descriptor() ([]byte, []int)

func (*JobPlacement) GetClusterName

func (m *JobPlacement) GetClusterName() string

func (*JobPlacement) GetClusterUuid

func (m *JobPlacement) GetClusterUuid() string

func (*JobPlacement) ProtoMessage

func (*JobPlacement) ProtoMessage()

func (*JobPlacement) Reset

func (m *JobPlacement) Reset()

func (*JobPlacement) String

func (m *JobPlacement) String() string

func (*JobPlacement) XXX_DiscardUnknown

func (m *JobPlacement) XXX_DiscardUnknown()

func (*JobPlacement) XXX_Marshal

func (m *JobPlacement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JobPlacement) XXX_Merge

func (m *JobPlacement) XXX_Merge(src proto.Message)

func (*JobPlacement) XXX_Size

func (m *JobPlacement) XXX_Size() int

func (*JobPlacement) XXX_Unmarshal

func (m *JobPlacement) XXX_Unmarshal(b []byte) error

type JobReference

// JobReference encapsulates the full scoping used to reference a job:
// the owning project plus the (optionally server-assigned) job ID.
type JobReference struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Optional. The job ID, which must be unique within the project.
	// The ID must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), or hyphens (-). The maximum length is 100 characters.
	//
	// If not specified by the caller, the job ID will be provided by the server.
	JobId                string   `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// XXX_* fields are internal bookkeeping for the protobuf runtime.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Encapsulates the full scoping used to reference a job.

func (*JobReference) Descriptor

func (*JobReference) Descriptor() ([]byte, []int)

func (*JobReference) GetJobId

func (m *JobReference) GetJobId() string

func (*JobReference) GetProjectId

func (m *JobReference) GetProjectId() string

func (*JobReference) ProtoMessage

func (*JobReference) ProtoMessage()

func (*JobReference) Reset

func (m *JobReference) Reset()

func (*JobReference) String

func (m *JobReference) String() string

func (*JobReference) XXX_DiscardUnknown

func (m *JobReference) XXX_DiscardUnknown()

func (*JobReference) XXX_Marshal

func (m *JobReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JobReference) XXX_Merge

func (m *JobReference) XXX_Merge(src proto.Message)

func (*JobReference) XXX_Size

func (m *JobReference) XXX_Size() int

func (*JobReference) XXX_Unmarshal

func (m *JobReference) XXX_Unmarshal(b []byte) error

type JobScheduling

// JobScheduling holds job scheduling options, currently the driver
// restart-on-failure budget.
type JobScheduling struct {
	// Optional. Maximum number of times per hour a driver may be restarted as
	// a result of driver terminating with non-zero code before job is
	// reported failed.
	//
	// A job may be reported as thrashing if driver exits with non-zero code
	// 4 times within 10 minute window.
	//
	// Maximum value is 10.
	MaxFailuresPerHour   int32    `protobuf:"varint,1,opt,name=max_failures_per_hour,json=maxFailuresPerHour,proto3" json:"max_failures_per_hour,omitempty"`
	// XXX_* fields are internal bookkeeping for the protobuf runtime.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Job scheduling options.

func (*JobScheduling) Descriptor

func (*JobScheduling) Descriptor() ([]byte, []int)

func (*JobScheduling) GetMaxFailuresPerHour

func (m *JobScheduling) GetMaxFailuresPerHour() int32

func (*JobScheduling) ProtoMessage

func (*JobScheduling) ProtoMessage()

func (*JobScheduling) Reset

func (m *JobScheduling) Reset()

func (*JobScheduling) String

func (m *JobScheduling) String() string

func (*JobScheduling) XXX_DiscardUnknown

func (m *JobScheduling) XXX_DiscardUnknown()

func (*JobScheduling) XXX_Marshal

func (m *JobScheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JobScheduling) XXX_Merge

func (m *JobScheduling) XXX_Merge(src proto.Message)

func (*JobScheduling) XXX_Size

func (m *JobScheduling) XXX_Size() int

func (*JobScheduling) XXX_Unmarshal

func (m *JobScheduling) XXX_Unmarshal(b []byte) error

type JobStatus

// JobStatus is the Dataproc job status: the overall state, optional
// detail text, when the state was entered, and an agent-reported substate.
type JobStatus struct {
	// Output only. A state message specifying the overall job state.
	State JobStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.JobStatus_State" json:"state,omitempty"`
	// Output only. Optional Job state details, such as an error
	// description if the state is <code>ERROR</code>.
	Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"`
	// Output only. The time when this state was entered.
	StateStartTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// Output only. Additional state information, which includes
	// status reported by the agent.
	Substate             JobStatus_Substate `protobuf:"varint,7,opt,name=substate,proto3,enum=google.cloud.dataproc.v1beta2.JobStatus_Substate" json:"substate,omitempty"`
	// XXX_* fields are internal bookkeeping for the protobuf runtime.
	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
	XXX_unrecognized     []byte             `json:"-"`
	XXX_sizecache        int32              `json:"-"`
}

Dataproc job status.

func (*JobStatus) Descriptor

func (*JobStatus) Descriptor() ([]byte, []int)

func (*JobStatus) GetDetails

func (m *JobStatus) GetDetails() string

func (*JobStatus) GetState

func (m *JobStatus) GetState() JobStatus_State

func (*JobStatus) GetStateStartTime

func (m *JobStatus) GetStateStartTime() *timestamp.Timestamp

func (*JobStatus) GetSubstate

func (m *JobStatus) GetSubstate() JobStatus_Substate

func (*JobStatus) ProtoMessage

func (*JobStatus) ProtoMessage()

func (*JobStatus) Reset

func (m *JobStatus) Reset()

func (*JobStatus) String

func (m *JobStatus) String() string

func (*JobStatus) XXX_DiscardUnknown

func (m *JobStatus) XXX_DiscardUnknown()

func (*JobStatus) XXX_Marshal

func (m *JobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*JobStatus) XXX_Merge

func (m *JobStatus) XXX_Merge(src proto.Message)

func (*JobStatus) XXX_Size

func (m *JobStatus) XXX_Size() int

func (*JobStatus) XXX_Unmarshal

func (m *JobStatus) XXX_Unmarshal(b []byte) error

type JobStatus_State

type JobStatus_State int32

The job state.

// The possible values of JobStatus_State.
const (
	// The job state is unknown.
	JobStatus_STATE_UNSPECIFIED JobStatus_State = 0
	// The job is pending; it has been submitted, but is not yet running.
	JobStatus_PENDING JobStatus_State = 1
	// Job has been received by the service and completed initial setup;
	// it will soon be submitted to the cluster.
	JobStatus_SETUP_DONE JobStatus_State = 8
	// The job is running on the cluster.
	JobStatus_RUNNING JobStatus_State = 2
	// A CancelJob request has been received, but is pending.
	JobStatus_CANCEL_PENDING JobStatus_State = 3
	// Transient in-flight resources have been canceled, and the request to
	// cancel the running job has been issued to the cluster.
	JobStatus_CANCEL_STARTED JobStatus_State = 7
	// The job cancellation was successful.
	JobStatus_CANCELLED JobStatus_State = 4
	// The job has completed successfully.
	JobStatus_DONE JobStatus_State = 5
	// The job has completed, but encountered an error.
	JobStatus_ERROR JobStatus_State = 6
	// Job attempt has failed. The detail field contains failure details for
	// this attempt.
	//
	// Applies to restartable jobs only.
	JobStatus_ATTEMPT_FAILURE JobStatus_State = 9
)

func (JobStatus_State) EnumDescriptor

func (JobStatus_State) EnumDescriptor() ([]byte, []int)

func (JobStatus_State) String

func (x JobStatus_State) String() string

type JobStatus_Substate

type JobStatus_Substate int32

The job substate.

// The possible values of JobStatus_Substate.
const (
	// The job substate is unknown.
	JobStatus_UNSPECIFIED JobStatus_Substate = 0
	// The Job is submitted to the agent.
	//
	// Applies to RUNNING state.
	JobStatus_SUBMITTED JobStatus_Substate = 1
	// The Job has been received and is awaiting execution (it may be waiting
	// for a condition to be met). See the "details" field for the reason for
	// the delay.
	//
	// Applies to RUNNING state.
	JobStatus_QUEUED JobStatus_Substate = 2
	// The agent-reported status is out of date, which may be caused by a
	// loss of communication between the agent and Dataproc. If the
	// agent does not send a timely update, the job will fail.
	//
	// Applies to RUNNING state.
	JobStatus_STALE_STATUS JobStatus_Substate = 3
)

func (JobStatus_Substate) EnumDescriptor

func (JobStatus_Substate) EnumDescriptor() ([]byte, []int)

func (JobStatus_Substate) String

func (x JobStatus_Substate) String() string

type Job_HadoopJob

// Job_HadoopJob wraps a HadoopJob config as one member of Job's TypeJob oneof.
type Job_HadoopJob struct {
	HadoopJob *HadoopJob `protobuf:"bytes,3,opt,name=hadoop_job,json=hadoopJob,proto3,oneof"`
}

type Job_HiveJob

// Job_HiveJob wraps a HiveJob config as one member of Job's TypeJob oneof.
type Job_HiveJob struct {
	HiveJob *HiveJob `protobuf:"bytes,6,opt,name=hive_job,json=hiveJob,proto3,oneof"`
}

type Job_PigJob

// Job_PigJob wraps a PigJob config as one member of Job's TypeJob oneof.
type Job_PigJob struct {
	PigJob *PigJob `protobuf:"bytes,7,opt,name=pig_job,json=pigJob,proto3,oneof"`
}

type Job_PrestoJob

// Job_PrestoJob wraps a PrestoJob config as one member of Job's TypeJob oneof.
type Job_PrestoJob struct {
	PrestoJob *PrestoJob `protobuf:"bytes,23,opt,name=presto_job,json=prestoJob,proto3,oneof"`
}

type Job_PysparkJob

// Job_PysparkJob wraps a PySparkJob config as one member of Job's TypeJob oneof.
type Job_PysparkJob struct {
	PysparkJob *PySparkJob `protobuf:"bytes,5,opt,name=pyspark_job,json=pysparkJob,proto3,oneof"`
}

type Job_SparkJob

// Job_SparkJob wraps a SparkJob config as one member of Job's TypeJob oneof.
type Job_SparkJob struct {
	SparkJob *SparkJob `protobuf:"bytes,4,opt,name=spark_job,json=sparkJob,proto3,oneof"`
}

type Job_SparkRJob

// Job_SparkRJob wraps a SparkRJob config as one member of Job's TypeJob oneof.
type Job_SparkRJob struct {
	SparkRJob *SparkRJob `protobuf:"bytes,21,opt,name=spark_r_job,json=sparkRJob,proto3,oneof"`
}

type Job_SparkSqlJob

// Job_SparkSqlJob wraps a SparkSqlJob config as one member of Job's TypeJob oneof.
type Job_SparkSqlJob struct {
	SparkSqlJob *SparkSqlJob `protobuf:"bytes,12,opt,name=spark_sql_job,json=sparkSqlJob,proto3,oneof"`
}

type KerberosConfig

// KerberosConfig specifies Kerberos related configuration for a cluster:
// whether Kerberos is enabled, the KMS-encrypted secrets (principal,
// keystore/truststore passwords), cross-realm trust settings, and realm
// parameters. Some struct tags below were elided by the documentation
// generator ("string literal not displayed").
type KerberosConfig struct {
	// Optional. Flag to indicate whether to Kerberize the cluster (default: false). Set
	// this field to true to enable Kerberos on a cluster.
	EnableKerberos bool `protobuf:"varint,1,opt,name=enable_kerberos,json=enableKerberos,proto3" json:"enable_kerberos,omitempty"`
	// Required. The Cloud Storage URI of a KMS encrypted file containing the root
	// principal password.
	RootPrincipalPasswordUri string `` /* 137-byte string literal not displayed */
	// Required. The uri of the KMS key used to encrypt various sensitive
	// files.
	KmsKeyUri string `protobuf:"bytes,3,opt,name=kms_key_uri,json=kmsKeyUri,proto3" json:"kms_key_uri,omitempty"`
	// Optional. The Cloud Storage URI of the keystore file used for SSL
	// encryption. If not provided, Dataproc will provide a self-signed
	// certificate.
	KeystoreUri string `protobuf:"bytes,4,opt,name=keystore_uri,json=keystoreUri,proto3" json:"keystore_uri,omitempty"`
	// Optional. The Cloud Storage URI of the truststore file used for SSL
	// encryption. If not provided, Dataproc will provide a self-signed
	// certificate.
	TruststoreUri string `protobuf:"bytes,5,opt,name=truststore_uri,json=truststoreUri,proto3" json:"truststore_uri,omitempty"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the
	// password to the user provided keystore. For the self-signed certificate,
	// this password is generated by Dataproc.
	KeystorePasswordUri string `protobuf:"bytes,6,opt,name=keystore_password_uri,json=keystorePasswordUri,proto3" json:"keystore_password_uri,omitempty"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the
	// password to the user provided key. For the self-signed certificate, this
	// password is generated by Dataproc.
	KeyPasswordUri string `protobuf:"bytes,7,opt,name=key_password_uri,json=keyPasswordUri,proto3" json:"key_password_uri,omitempty"`
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the
	// password to the user provided truststore. For the self-signed certificate,
	// this password is generated by Dataproc.
	TruststorePasswordUri string `` /* 126-byte string literal not displayed */
	// Optional. The remote realm the Dataproc on-cluster KDC will trust, should
	// the user enable cross realm trust.
	CrossRealmTrustRealm string `protobuf:"bytes,9,opt,name=cross_realm_trust_realm,json=crossRealmTrustRealm,proto3" json:"cross_realm_trust_realm,omitempty"`
	// Optional. The KDC (IP or hostname) for the remote trusted realm in a cross
	// realm trust relationship.
	CrossRealmTrustKdc string `protobuf:"bytes,10,opt,name=cross_realm_trust_kdc,json=crossRealmTrustKdc,proto3" json:"cross_realm_trust_kdc,omitempty"`
	// Optional. The admin server (IP or hostname) for the remote trusted realm in
	// a cross realm trust relationship.
	CrossRealmTrustAdminServer string `` /* 146-byte string literal not displayed */
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the
	// shared password between the on-cluster Kerberos realm and the remote
	// trusted realm, in a cross realm trust relationship.
	CrossRealmTrustSharedPasswordUri string `` /* 166-byte string literal not displayed */
	// Optional. The Cloud Storage URI of a KMS encrypted file containing the
	// master key of the KDC database.
	KdcDbKeyUri string `protobuf:"bytes,13,opt,name=kdc_db_key_uri,json=kdcDbKeyUri,proto3" json:"kdc_db_key_uri,omitempty"`
	// Optional. The lifetime of the ticket granting ticket, in hours.
	// If not specified, or user specifies 0, then default value 10
	// will be used.
	TgtLifetimeHours int32 `protobuf:"varint,14,opt,name=tgt_lifetime_hours,json=tgtLifetimeHours,proto3" json:"tgt_lifetime_hours,omitempty"`
	// Optional. The name of the on-cluster Kerberos realm.
	// If not specified, the uppercased domain of hostnames will be the realm.
	Realm                string   `protobuf:"bytes,15,opt,name=realm,proto3" json:"realm,omitempty"`
	// XXX_* fields are internal bookkeeping for the protobuf runtime.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Specifies Kerberos related configuration.

func (*KerberosConfig) Descriptor

func (*KerberosConfig) Descriptor() ([]byte, []int)

func (*KerberosConfig) GetCrossRealmTrustAdminServer

func (m *KerberosConfig) GetCrossRealmTrustAdminServer() string

func (*KerberosConfig) GetCrossRealmTrustKdc

func (m *KerberosConfig) GetCrossRealmTrustKdc() string

func (*KerberosConfig) GetCrossRealmTrustRealm

func (m *KerberosConfig) GetCrossRealmTrustRealm() string

func (*KerberosConfig) GetCrossRealmTrustSharedPasswordUri

func (m *KerberosConfig) GetCrossRealmTrustSharedPasswordUri() string

func (*KerberosConfig) GetEnableKerberos

func (m *KerberosConfig) GetEnableKerberos() bool

func (*KerberosConfig) GetKdcDbKeyUri

func (m *KerberosConfig) GetKdcDbKeyUri() string

func (*KerberosConfig) GetKeyPasswordUri

func (m *KerberosConfig) GetKeyPasswordUri() string

func (*KerberosConfig) GetKeystorePasswordUri

func (m *KerberosConfig) GetKeystorePasswordUri() string

func (*KerberosConfig) GetKeystoreUri

func (m *KerberosConfig) GetKeystoreUri() string

func (*KerberosConfig) GetKmsKeyUri

func (m *KerberosConfig) GetKmsKeyUri() string

func (*KerberosConfig) GetRealm

func (m *KerberosConfig) GetRealm() string

func (*KerberosConfig) GetRootPrincipalPasswordUri

func (m *KerberosConfig) GetRootPrincipalPasswordUri() string

func (*KerberosConfig) GetTgtLifetimeHours

func (m *KerberosConfig) GetTgtLifetimeHours() int32

func (*KerberosConfig) GetTruststorePasswordUri

func (m *KerberosConfig) GetTruststorePasswordUri() string

func (*KerberosConfig) GetTruststoreUri

func (m *KerberosConfig) GetTruststoreUri() string

func (*KerberosConfig) ProtoMessage

func (*KerberosConfig) ProtoMessage()

func (*KerberosConfig) Reset

func (m *KerberosConfig) Reset()

func (*KerberosConfig) String

func (m *KerberosConfig) String() string

func (*KerberosConfig) XXX_DiscardUnknown

func (m *KerberosConfig) XXX_DiscardUnknown()

func (*KerberosConfig) XXX_Marshal

func (m *KerberosConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*KerberosConfig) XXX_Merge

func (m *KerberosConfig) XXX_Merge(src proto.Message)

func (*KerberosConfig) XXX_Size

func (m *KerberosConfig) XXX_Size() int

func (*KerberosConfig) XXX_Unmarshal

func (m *KerberosConfig) XXX_Unmarshal(b []byte) error

type LifecycleConfig

// LifecycleConfig specifies the cluster auto-delete schedule configuration:
// an idle-delete TTL, an exact delete time or max-age TTL (oneof), and the
// time the cluster last became idle.
type LifecycleConfig struct {
	// Optional. The duration to keep the cluster alive while idling (when no jobs
	// are running). Passing this threshold will cause the cluster to be
	// deleted. Minimum value is 10 minutes; maximum value is 14 days (see JSON
	// representation of
	// [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
	IdleDeleteTtl *duration.Duration `protobuf:"bytes,1,opt,name=idle_delete_ttl,json=idleDeleteTtl,proto3" json:"idle_delete_ttl,omitempty"`
	// Either the exact time the cluster should be deleted at or
	// the cluster maximum age.
	//
	// Types that are valid to be assigned to Ttl:
	//	*LifecycleConfig_AutoDeleteTime
	//	*LifecycleConfig_AutoDeleteTtl
	Ttl isLifecycleConfig_Ttl `protobuf_oneof:"ttl"`
	// Output only. The time when cluster became idle (most recent job finished)
	// and became eligible for deletion due to idleness (see JSON representation
	// of
	// [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
	IdleStartTime        *timestamp.Timestamp `protobuf:"bytes,4,opt,name=idle_start_time,json=idleStartTime,proto3" json:"idle_start_time,omitempty"`
	// XXX_* fields are internal bookkeeping for the protobuf runtime.
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

Specifies the cluster auto-delete schedule configuration.

func (*LifecycleConfig) Descriptor

func (*LifecycleConfig) Descriptor() ([]byte, []int)

func (*LifecycleConfig) GetAutoDeleteTime

func (m *LifecycleConfig) GetAutoDeleteTime() *timestamp.Timestamp

func (*LifecycleConfig) GetAutoDeleteTtl

func (m *LifecycleConfig) GetAutoDeleteTtl() *duration.Duration

func (*LifecycleConfig) GetIdleDeleteTtl

func (m *LifecycleConfig) GetIdleDeleteTtl() *duration.Duration

func (*LifecycleConfig) GetIdleStartTime

func (m *LifecycleConfig) GetIdleStartTime() *timestamp.Timestamp

func (*LifecycleConfig) GetTtl

func (m *LifecycleConfig) GetTtl() isLifecycleConfig_Ttl

func (*LifecycleConfig) ProtoMessage

func (*LifecycleConfig) ProtoMessage()

func (*LifecycleConfig) Reset

func (m *LifecycleConfig) Reset()

func (*LifecycleConfig) String

func (m *LifecycleConfig) String() string

func (*LifecycleConfig) XXX_DiscardUnknown

func (m *LifecycleConfig) XXX_DiscardUnknown()

func (*LifecycleConfig) XXX_Marshal

func (m *LifecycleConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*LifecycleConfig) XXX_Merge

func (m *LifecycleConfig) XXX_Merge(src proto.Message)

func (*LifecycleConfig) XXX_OneofWrappers

func (*LifecycleConfig) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*LifecycleConfig) XXX_Size

func (m *LifecycleConfig) XXX_Size() int

func (*LifecycleConfig) XXX_Unmarshal

func (m *LifecycleConfig) XXX_Unmarshal(b []byte) error

type LifecycleConfig_AutoDeleteTime

// LifecycleConfig_AutoDeleteTime wraps an exact deletion timestamp as one
// member of LifecycleConfig's Ttl oneof.
type LifecycleConfig_AutoDeleteTime struct {
	AutoDeleteTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=auto_delete_time,json=autoDeleteTime,proto3,oneof"`
}

type LifecycleConfig_AutoDeleteTtl

// LifecycleConfig_AutoDeleteTtl wraps a maximum-age duration as one member
// of LifecycleConfig's Ttl oneof.
type LifecycleConfig_AutoDeleteTtl struct {
	AutoDeleteTtl *duration.Duration `protobuf:"bytes,3,opt,name=auto_delete_ttl,json=autoDeleteTtl,proto3,oneof"`
}

type ListAutoscalingPoliciesRequest

// ListAutoscalingPoliciesRequest is a request to list autoscaling policies
// in a project, with standard List pagination fields.
type ListAutoscalingPoliciesRequest struct {
	// Required. The "resource name" of the region or location, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.autoscalingPolicies.list`, the resource name
	//   of the region has the following format:
	//   `projects/{project_id}/regions/{region}`
	//
	// * For `projects.locations.autoscalingPolicies.list`, the resource name
	//   of the location has the following format:
	//   `projects/{project_id}/locations/{location}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Optional. The maximum number of results to return in each response.
	// Must be less than or equal to 1000. Defaults to 100.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// Optional. The page token, returned by a previous call, to request the
	// next page of results.
	PageToken            string   `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// XXX_* fields are internal bookkeeping for the protobuf runtime.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to list autoscaling policies in a project.

func (*ListAutoscalingPoliciesRequest) Descriptor

func (*ListAutoscalingPoliciesRequest) Descriptor() ([]byte, []int)

func (*ListAutoscalingPoliciesRequest) GetPageSize

func (m *ListAutoscalingPoliciesRequest) GetPageSize() int32

func (*ListAutoscalingPoliciesRequest) GetPageToken

func (m *ListAutoscalingPoliciesRequest) GetPageToken() string

func (*ListAutoscalingPoliciesRequest) GetParent

func (m *ListAutoscalingPoliciesRequest) GetParent() string

func (*ListAutoscalingPoliciesRequest) ProtoMessage

func (*ListAutoscalingPoliciesRequest) ProtoMessage()

func (*ListAutoscalingPoliciesRequest) Reset

func (m *ListAutoscalingPoliciesRequest) Reset()

func (*ListAutoscalingPoliciesRequest) String

func (*ListAutoscalingPoliciesRequest) XXX_DiscardUnknown

func (m *ListAutoscalingPoliciesRequest) XXX_DiscardUnknown()

func (*ListAutoscalingPoliciesRequest) XXX_Marshal

func (m *ListAutoscalingPoliciesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListAutoscalingPoliciesRequest) XXX_Merge

func (m *ListAutoscalingPoliciesRequest) XXX_Merge(src proto.Message)

func (*ListAutoscalingPoliciesRequest) XXX_Size

func (m *ListAutoscalingPoliciesRequest) XXX_Size() int

func (*ListAutoscalingPoliciesRequest) XXX_Unmarshal

func (m *ListAutoscalingPoliciesRequest) XXX_Unmarshal(b []byte) error

type ListAutoscalingPoliciesResponse

// ListAutoscalingPoliciesResponse is a response to a request to list
// autoscaling policies in a project; NextPageToken supports pagination.
type ListAutoscalingPoliciesResponse struct {
	// Output only. Autoscaling policies list.
	Policies []*AutoscalingPolicy `protobuf:"bytes,1,rep,name=policies,proto3" json:"policies,omitempty"`
	// Output only. This token is included in the response if there are more
	// results to fetch.
	NextPageToken        string   `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	// XXX_* fields are internal bookkeeping for the protobuf runtime.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A response to a request to list autoscaling policies in a project.

func (*ListAutoscalingPoliciesResponse) Descriptor

func (*ListAutoscalingPoliciesResponse) Descriptor() ([]byte, []int)

func (*ListAutoscalingPoliciesResponse) GetNextPageToken

func (m *ListAutoscalingPoliciesResponse) GetNextPageToken() string

func (*ListAutoscalingPoliciesResponse) GetPolicies

func (*ListAutoscalingPoliciesResponse) ProtoMessage

func (*ListAutoscalingPoliciesResponse) ProtoMessage()

func (*ListAutoscalingPoliciesResponse) Reset

func (*ListAutoscalingPoliciesResponse) String

func (*ListAutoscalingPoliciesResponse) XXX_DiscardUnknown

func (m *ListAutoscalingPoliciesResponse) XXX_DiscardUnknown()

func (*ListAutoscalingPoliciesResponse) XXX_Marshal

func (m *ListAutoscalingPoliciesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListAutoscalingPoliciesResponse) XXX_Merge

func (m *ListAutoscalingPoliciesResponse) XXX_Merge(src proto.Message)

func (*ListAutoscalingPoliciesResponse) XXX_Size

func (m *ListAutoscalingPoliciesResponse) XXX_Size() int

func (*ListAutoscalingPoliciesResponse) XXX_Unmarshal

func (m *ListAutoscalingPoliciesResponse) XXX_Unmarshal(b []byte) error

type ListClustersRequest

// ListClustersRequest is a request to list the clusters in a project,
// optionally filtered, with standard List pagination fields.
type ListClustersRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,4,opt,name=region,proto3" json:"region,omitempty"`
	// Optional. A filter constraining the clusters to list. Filters are
	// case-sensitive and have the following syntax:
	//
	// field = value [AND [field = value]] ...
	//
	// where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
	// and `[KEY]` is a label key. **value** can be `*` to match all values.
	// `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
	// `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
	// contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
	// contains the `DELETING` and `ERROR` states.
	// `clusterName` is the name of the cluster provided at creation time.
	// Only the logical `AND` operator is supported; space-separated items are
	// treated as having an implicit `AND` operator.
	//
	// Example filter:
	//
	// status.state = ACTIVE AND clusterName = mycluster
	// AND labels.env = staging AND labels.starred = *
	Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
	// Optional. The standard List page size.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// Optional. The standard List page token.
	PageToken            string   `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// XXX_* fields are internal bookkeeping for the protobuf runtime.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to list the clusters in a project.

func (*ListClustersRequest) Descriptor

func (*ListClustersRequest) Descriptor() ([]byte, []int)

func (*ListClustersRequest) GetFilter

func (m *ListClustersRequest) GetFilter() string

func (*ListClustersRequest) GetPageSize

func (m *ListClustersRequest) GetPageSize() int32

func (*ListClustersRequest) GetPageToken

func (m *ListClustersRequest) GetPageToken() string

func (*ListClustersRequest) GetProjectId

func (m *ListClustersRequest) GetProjectId() string

func (*ListClustersRequest) GetRegion

func (m *ListClustersRequest) GetRegion() string

func (*ListClustersRequest) ProtoMessage

func (*ListClustersRequest) ProtoMessage()

func (*ListClustersRequest) Reset

func (m *ListClustersRequest) Reset()

func (*ListClustersRequest) String

func (m *ListClustersRequest) String() string

func (*ListClustersRequest) XXX_DiscardUnknown

func (m *ListClustersRequest) XXX_DiscardUnknown()

func (*ListClustersRequest) XXX_Marshal

func (m *ListClustersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListClustersRequest) XXX_Merge

func (m *ListClustersRequest) XXX_Merge(src proto.Message)

func (*ListClustersRequest) XXX_Size

func (m *ListClustersRequest) XXX_Size() int

func (*ListClustersRequest) XXX_Unmarshal

func (m *ListClustersRequest) XXX_Unmarshal(b []byte) error

type ListClustersResponse

type ListClustersResponse struct {
	// Output only. The clusters in the project.
	Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"`
	// Output only. This token is included in the response if there are more
	// results to fetch. To fetch additional results, provide this value as the
	// `page_token` in a subsequent <code>ListClustersRequest</code>.
	NextPageToken        string   `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The list of all clusters in a project.

func (*ListClustersResponse) Descriptor

func (*ListClustersResponse) Descriptor() ([]byte, []int)

func (*ListClustersResponse) GetClusters

func (m *ListClustersResponse) GetClusters() []*Cluster

func (*ListClustersResponse) GetNextPageToken

func (m *ListClustersResponse) GetNextPageToken() string

func (*ListClustersResponse) ProtoMessage

func (*ListClustersResponse) ProtoMessage()

func (*ListClustersResponse) Reset

func (m *ListClustersResponse) Reset()

func (*ListClustersResponse) String

func (m *ListClustersResponse) String() string

func (*ListClustersResponse) XXX_DiscardUnknown

func (m *ListClustersResponse) XXX_DiscardUnknown()

func (*ListClustersResponse) XXX_Marshal

func (m *ListClustersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListClustersResponse) XXX_Merge

func (m *ListClustersResponse) XXX_Merge(src proto.Message)

func (*ListClustersResponse) XXX_Size

func (m *ListClustersResponse) XXX_Size() int

func (*ListClustersResponse) XXX_Unmarshal

func (m *ListClustersResponse) XXX_Unmarshal(b []byte) error

type ListJobsRequest

type ListJobsRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,6,opt,name=region,proto3" json:"region,omitempty"`
	// Optional. The number of results to return in each response.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// Optional. The page token, returned by a previous call, to request the
	// next page of results.
	PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	// Optional. If set, the returned jobs list includes only jobs that were
	// submitted to the named cluster.
	ClusterName string `protobuf:"bytes,4,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Optional. Specifies enumerated categories of jobs to list.
	// (default = match ALL jobs).
	//
	// If `filter` is provided, `jobStateMatcher` will be ignored.
	JobStateMatcher ListJobsRequest_JobStateMatcher `` /* 176-byte string literal not displayed */
	// Optional. A filter constraining the jobs to list. Filters are
	// case-sensitive and have the following syntax:
	//
	// [field = value] AND [field [= value]] ...
	//
	// where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
	// key. **value** can be `*` to match all values.
	// `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
	// Only the logical `AND` operator is supported; space-separated items are
	// treated as having an implicit `AND` operator.
	//
	// Example filter:
	//
	// status.state = ACTIVE AND labels.env = staging AND labels.starred = *
	Filter               string   `protobuf:"bytes,7,opt,name=filter,proto3" json:"filter,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to list jobs in a project.

func (*ListJobsRequest) Descriptor

func (*ListJobsRequest) Descriptor() ([]byte, []int)

func (*ListJobsRequest) GetClusterName

func (m *ListJobsRequest) GetClusterName() string

func (*ListJobsRequest) GetFilter

func (m *ListJobsRequest) GetFilter() string

func (*ListJobsRequest) GetJobStateMatcher

func (m *ListJobsRequest) GetJobStateMatcher() ListJobsRequest_JobStateMatcher

func (*ListJobsRequest) GetPageSize

func (m *ListJobsRequest) GetPageSize() int32

func (*ListJobsRequest) GetPageToken

func (m *ListJobsRequest) GetPageToken() string

func (*ListJobsRequest) GetProjectId

func (m *ListJobsRequest) GetProjectId() string

func (*ListJobsRequest) GetRegion

func (m *ListJobsRequest) GetRegion() string

func (*ListJobsRequest) ProtoMessage

func (*ListJobsRequest) ProtoMessage()

func (*ListJobsRequest) Reset

func (m *ListJobsRequest) Reset()

func (*ListJobsRequest) String

func (m *ListJobsRequest) String() string

func (*ListJobsRequest) XXX_DiscardUnknown

func (m *ListJobsRequest) XXX_DiscardUnknown()

func (*ListJobsRequest) XXX_Marshal

func (m *ListJobsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListJobsRequest) XXX_Merge

func (m *ListJobsRequest) XXX_Merge(src proto.Message)

func (*ListJobsRequest) XXX_Size

func (m *ListJobsRequest) XXX_Size() int

func (*ListJobsRequest) XXX_Unmarshal

func (m *ListJobsRequest) XXX_Unmarshal(b []byte) error

type ListJobsRequest_JobStateMatcher

type ListJobsRequest_JobStateMatcher int32

A matcher that specifies categories of job states.

const (
	// Match all jobs, regardless of state.
	ListJobsRequest_ALL ListJobsRequest_JobStateMatcher = 0
	// Only match jobs in non-terminal states: PENDING, RUNNING, or
	// CANCEL_PENDING.
	ListJobsRequest_ACTIVE ListJobsRequest_JobStateMatcher = 1
	// Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
	ListJobsRequest_NON_ACTIVE ListJobsRequest_JobStateMatcher = 2
)

func (ListJobsRequest_JobStateMatcher) EnumDescriptor

func (ListJobsRequest_JobStateMatcher) EnumDescriptor() ([]byte, []int)

func (ListJobsRequest_JobStateMatcher) String

type ListJobsResponse

type ListJobsResponse struct {
	// Output only. Jobs list.
	Jobs []*Job `protobuf:"bytes,1,rep,name=jobs,proto3" json:"jobs,omitempty"`
	// Optional. This token is included in the response if there are more results
	// to fetch. To fetch additional results, provide this value as the
	// `page_token` in a subsequent <code>ListJobsRequest</code>.
	NextPageToken        string   `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A list of jobs in a project.

func (*ListJobsResponse) Descriptor

func (*ListJobsResponse) Descriptor() ([]byte, []int)

func (*ListJobsResponse) GetJobs

func (m *ListJobsResponse) GetJobs() []*Job

func (*ListJobsResponse) GetNextPageToken

func (m *ListJobsResponse) GetNextPageToken() string

func (*ListJobsResponse) ProtoMessage

func (*ListJobsResponse) ProtoMessage()

func (*ListJobsResponse) Reset

func (m *ListJobsResponse) Reset()

func (*ListJobsResponse) String

func (m *ListJobsResponse) String() string

func (*ListJobsResponse) XXX_DiscardUnknown

func (m *ListJobsResponse) XXX_DiscardUnknown()

func (*ListJobsResponse) XXX_Marshal

func (m *ListJobsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListJobsResponse) XXX_Merge

func (m *ListJobsResponse) XXX_Merge(src proto.Message)

func (*ListJobsResponse) XXX_Size

func (m *ListJobsResponse) XXX_Size() int

func (*ListJobsResponse) XXX_Unmarshal

func (m *ListJobsResponse) XXX_Unmarshal(b []byte) error

type ListWorkflowTemplatesRequest

type ListWorkflowTemplatesRequest struct {
	// Required. The resource name of the region or location, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.workflowTemplates.list`, the resource
	//   name of the region has the following format:
	//   `projects/{project_id}/regions/{region}`
	//
	// * For `projects.locations.workflowTemplates.list`, the
	//   resource name of the location has the following format:
	//   `projects/{project_id}/locations/{location}`
	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
	// Optional. The maximum number of results to return in each response.
	PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
	// Optional. The page token, returned by a previous call, to request the
	// next page of results.
	PageToken            string   `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to list workflow templates in a project.

func (*ListWorkflowTemplatesRequest) Descriptor

func (*ListWorkflowTemplatesRequest) Descriptor() ([]byte, []int)

func (*ListWorkflowTemplatesRequest) GetPageSize

func (m *ListWorkflowTemplatesRequest) GetPageSize() int32

func (*ListWorkflowTemplatesRequest) GetPageToken

func (m *ListWorkflowTemplatesRequest) GetPageToken() string

func (*ListWorkflowTemplatesRequest) GetParent

func (m *ListWorkflowTemplatesRequest) GetParent() string

func (*ListWorkflowTemplatesRequest) ProtoMessage

func (*ListWorkflowTemplatesRequest) ProtoMessage()

func (*ListWorkflowTemplatesRequest) Reset

func (m *ListWorkflowTemplatesRequest) Reset()

func (*ListWorkflowTemplatesRequest) String

func (*ListWorkflowTemplatesRequest) XXX_DiscardUnknown

func (m *ListWorkflowTemplatesRequest) XXX_DiscardUnknown()

func (*ListWorkflowTemplatesRequest) XXX_Marshal

func (m *ListWorkflowTemplatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListWorkflowTemplatesRequest) XXX_Merge

func (m *ListWorkflowTemplatesRequest) XXX_Merge(src proto.Message)

func (*ListWorkflowTemplatesRequest) XXX_Size

func (m *ListWorkflowTemplatesRequest) XXX_Size() int

func (*ListWorkflowTemplatesRequest) XXX_Unmarshal

func (m *ListWorkflowTemplatesRequest) XXX_Unmarshal(b []byte) error

type ListWorkflowTemplatesResponse

type ListWorkflowTemplatesResponse struct {
	// Output only. WorkflowTemplates list.
	Templates []*WorkflowTemplate `protobuf:"bytes,1,rep,name=templates,proto3" json:"templates,omitempty"`
	// Output only. This token is included in the response if there are more
	// results to fetch. To fetch additional results, provide this value as the
	// page_token in a subsequent <code>ListWorkflowTemplatesRequest</code>.
	NextPageToken        string   `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A response to a request to list workflow templates in a project.

func (*ListWorkflowTemplatesResponse) Descriptor

func (*ListWorkflowTemplatesResponse) Descriptor() ([]byte, []int)

func (*ListWorkflowTemplatesResponse) GetNextPageToken

func (m *ListWorkflowTemplatesResponse) GetNextPageToken() string

func (*ListWorkflowTemplatesResponse) GetTemplates

func (m *ListWorkflowTemplatesResponse) GetTemplates() []*WorkflowTemplate

func (*ListWorkflowTemplatesResponse) ProtoMessage

func (*ListWorkflowTemplatesResponse) ProtoMessage()

func (*ListWorkflowTemplatesResponse) Reset

func (m *ListWorkflowTemplatesResponse) Reset()

func (*ListWorkflowTemplatesResponse) String

func (*ListWorkflowTemplatesResponse) XXX_DiscardUnknown

func (m *ListWorkflowTemplatesResponse) XXX_DiscardUnknown()

func (*ListWorkflowTemplatesResponse) XXX_Marshal

func (m *ListWorkflowTemplatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ListWorkflowTemplatesResponse) XXX_Merge

func (m *ListWorkflowTemplatesResponse) XXX_Merge(src proto.Message)

func (*ListWorkflowTemplatesResponse) XXX_Size

func (m *ListWorkflowTemplatesResponse) XXX_Size() int

func (*ListWorkflowTemplatesResponse) XXX_Unmarshal

func (m *ListWorkflowTemplatesResponse) XXX_Unmarshal(b []byte) error

type LoggingConfig

type LoggingConfig struct {
	// The per-package log levels for the driver. This may include the
	// "root" package name to configure the root logger.
	// Examples:
	//   'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG'
	DriverLogLevels      map[string]LoggingConfig_Level `` /* 252-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}                       `json:"-"`
	XXX_unrecognized     []byte                         `json:"-"`
	XXX_sizecache        int32                          `json:"-"`
}

The runtime logging config of the job.

func (*LoggingConfig) Descriptor

func (*LoggingConfig) Descriptor() ([]byte, []int)

func (*LoggingConfig) GetDriverLogLevels

func (m *LoggingConfig) GetDriverLogLevels() map[string]LoggingConfig_Level

func (*LoggingConfig) ProtoMessage

func (*LoggingConfig) ProtoMessage()

func (*LoggingConfig) Reset

func (m *LoggingConfig) Reset()

func (*LoggingConfig) String

func (m *LoggingConfig) String() string

func (*LoggingConfig) XXX_DiscardUnknown

func (m *LoggingConfig) XXX_DiscardUnknown()

func (*LoggingConfig) XXX_Marshal

func (m *LoggingConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*LoggingConfig) XXX_Merge

func (m *LoggingConfig) XXX_Merge(src proto.Message)

func (*LoggingConfig) XXX_Size

func (m *LoggingConfig) XXX_Size() int

func (*LoggingConfig) XXX_Unmarshal

func (m *LoggingConfig) XXX_Unmarshal(b []byte) error

type LoggingConfig_Level

type LoggingConfig_Level int32

The Log4j level for job execution. When running an [Apache Hive](http://hive.apache.org/) job, Cloud Dataproc configures the Hive client to an equivalent verbosity level.

const (
	// Level is unspecified. Use default level for log4j.
	LoggingConfig_LEVEL_UNSPECIFIED LoggingConfig_Level = 0
	// Use ALL level for log4j.
	LoggingConfig_ALL LoggingConfig_Level = 1
	// Use TRACE level for log4j.
	LoggingConfig_TRACE LoggingConfig_Level = 2
	// Use DEBUG level for log4j.
	LoggingConfig_DEBUG LoggingConfig_Level = 3
	// Use INFO level for log4j.
	LoggingConfig_INFO LoggingConfig_Level = 4
	// Use WARN level for log4j.
	LoggingConfig_WARN LoggingConfig_Level = 5
	// Use ERROR level for log4j.
	LoggingConfig_ERROR LoggingConfig_Level = 6
	// Use FATAL level for log4j.
	LoggingConfig_FATAL LoggingConfig_Level = 7
	// Turn off log4j.
	LoggingConfig_OFF LoggingConfig_Level = 8
)

func (LoggingConfig_Level) EnumDescriptor

func (LoggingConfig_Level) EnumDescriptor() ([]byte, []int)

func (LoggingConfig_Level) String

func (x LoggingConfig_Level) String() string

type ManagedCluster

type ManagedCluster struct {
	// Required. The cluster name prefix. A unique cluster name will be formed by
	// appending a random suffix.
	//
	// The name must contain only lower-case letters (a-z), numbers (0-9),
	// and hyphens (-). Must begin with a letter. Cannot begin or end with
	// hyphen. Must consist of between 2 and 35 characters.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Required. The cluster configuration.
	Config *ClusterConfig `protobuf:"bytes,3,opt,name=config,proto3" json:"config,omitempty"`
	// Optional. The labels to associate with this cluster.
	//
	// Label keys must be between 1 and 63 characters long, and must conform to
	// the following PCRE regular expression:
	// [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
	//
	// Label values must be between 1 and 63 characters long, and must conform to
	// the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
	//
	// No more than 32 labels can be associated with a given cluster.
	Labels               map[string]string `` /* 153-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

Cluster that is managed by the workflow.

func (*ManagedCluster) Descriptor

func (*ManagedCluster) Descriptor() ([]byte, []int)

func (*ManagedCluster) GetClusterName

func (m *ManagedCluster) GetClusterName() string

func (*ManagedCluster) GetConfig

func (m *ManagedCluster) GetConfig() *ClusterConfig

func (*ManagedCluster) GetLabels

func (m *ManagedCluster) GetLabels() map[string]string

func (*ManagedCluster) ProtoMessage

func (*ManagedCluster) ProtoMessage()

func (*ManagedCluster) Reset

func (m *ManagedCluster) Reset()

func (*ManagedCluster) String

func (m *ManagedCluster) String() string

func (*ManagedCluster) XXX_DiscardUnknown

func (m *ManagedCluster) XXX_DiscardUnknown()

func (*ManagedCluster) XXX_Marshal

func (m *ManagedCluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ManagedCluster) XXX_Merge

func (m *ManagedCluster) XXX_Merge(src proto.Message)

func (*ManagedCluster) XXX_Size

func (m *ManagedCluster) XXX_Size() int

func (*ManagedCluster) XXX_Unmarshal

func (m *ManagedCluster) XXX_Unmarshal(b []byte) error

type ManagedGroupConfig

type ManagedGroupConfig struct {
	// Output only. The name of the Instance Template used for the Managed
	// Instance Group.
	InstanceTemplateName string `protobuf:"bytes,1,opt,name=instance_template_name,json=instanceTemplateName,proto3" json:"instance_template_name,omitempty"`
	// Output only. The name of the Instance Group Manager for this group.
	InstanceGroupManagerName string   `` /* 137-byte string literal not displayed */
	XXX_NoUnkeyedLiteral     struct{} `json:"-"`
	XXX_unrecognized         []byte   `json:"-"`
	XXX_sizecache            int32    `json:"-"`
}

Specifies the resources used to actively manage an instance group.

func (*ManagedGroupConfig) Descriptor

func (*ManagedGroupConfig) Descriptor() ([]byte, []int)

func (*ManagedGroupConfig) GetInstanceGroupManagerName

func (m *ManagedGroupConfig) GetInstanceGroupManagerName() string

func (*ManagedGroupConfig) GetInstanceTemplateName

func (m *ManagedGroupConfig) GetInstanceTemplateName() string

func (*ManagedGroupConfig) ProtoMessage

func (*ManagedGroupConfig) ProtoMessage()

func (*ManagedGroupConfig) Reset

func (m *ManagedGroupConfig) Reset()

func (*ManagedGroupConfig) String

func (m *ManagedGroupConfig) String() string

func (*ManagedGroupConfig) XXX_DiscardUnknown

func (m *ManagedGroupConfig) XXX_DiscardUnknown()

func (*ManagedGroupConfig) XXX_Marshal

func (m *ManagedGroupConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ManagedGroupConfig) XXX_Merge

func (m *ManagedGroupConfig) XXX_Merge(src proto.Message)

func (*ManagedGroupConfig) XXX_Size

func (m *ManagedGroupConfig) XXX_Size() int

func (*ManagedGroupConfig) XXX_Unmarshal

func (m *ManagedGroupConfig) XXX_Unmarshal(b []byte) error

type NodeInitializationAction

type NodeInitializationAction struct {
	// Required. Cloud Storage URI of executable file.
	ExecutableFile string `protobuf:"bytes,1,opt,name=executable_file,json=executableFile,proto3" json:"executable_file,omitempty"`
	// Optional. Amount of time executable has to complete. Default is
	// 10 minutes (see JSON representation of
	// [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
	//
	// Cluster creation fails with an explanatory error message (the
	// name of the executable that caused the error and the exceeded timeout
	// period) if the executable is not completed at end of the timeout period.
	ExecutionTimeout     *duration.Duration `protobuf:"bytes,2,opt,name=execution_timeout,json=executionTimeout,proto3" json:"execution_timeout,omitempty"`
	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
	XXX_unrecognized     []byte             `json:"-"`
	XXX_sizecache        int32              `json:"-"`
}

Specifies an executable to run on a fully configured node and a timeout period for executable completion.

func (*NodeInitializationAction) Descriptor

func (*NodeInitializationAction) Descriptor() ([]byte, []int)

func (*NodeInitializationAction) GetExecutableFile

func (m *NodeInitializationAction) GetExecutableFile() string

func (*NodeInitializationAction) GetExecutionTimeout

func (m *NodeInitializationAction) GetExecutionTimeout() *duration.Duration

func (*NodeInitializationAction) ProtoMessage

func (*NodeInitializationAction) ProtoMessage()

func (*NodeInitializationAction) Reset

func (m *NodeInitializationAction) Reset()

func (*NodeInitializationAction) String

func (m *NodeInitializationAction) String() string

func (*NodeInitializationAction) XXX_DiscardUnknown

func (m *NodeInitializationAction) XXX_DiscardUnknown()

func (*NodeInitializationAction) XXX_Marshal

func (m *NodeInitializationAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*NodeInitializationAction) XXX_Merge

func (m *NodeInitializationAction) XXX_Merge(src proto.Message)

func (*NodeInitializationAction) XXX_Size

func (m *NodeInitializationAction) XXX_Size() int

func (*NodeInitializationAction) XXX_Unmarshal

func (m *NodeInitializationAction) XXX_Unmarshal(b []byte) error

type OrderedJob

type OrderedJob struct {
	// Required. The step id. The id must be unique among all jobs
	// within the template.
	//
	// The step id is used as prefix for job id, as job
	// `goog-dataproc-workflow-step-id` label, and in
	// [prerequisiteStepIds][google.cloud.dataproc.v1beta2.OrderedJob.prerequisite_step_ids] field from other
	// steps.
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). Cannot begin or end with underscore
	// or hyphen. Must consist of between 3 and 50 characters.
	StepId string `protobuf:"bytes,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"`
	// Required. The job definition.
	//
	// Types that are valid to be assigned to JobType:
	//	*OrderedJob_HadoopJob
	//	*OrderedJob_SparkJob
	//	*OrderedJob_PysparkJob
	//	*OrderedJob_HiveJob
	//	*OrderedJob_PigJob
	//	*OrderedJob_SparkRJob
	//	*OrderedJob_SparkSqlJob
	//	*OrderedJob_PrestoJob
	JobType isOrderedJob_JobType `protobuf_oneof:"job_type"`
	// Optional. The labels to associate with this job.
	//
	// Label keys must be between 1 and 63 characters long, and must conform to
	// the following regular expression:
	// [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
	//
	// Label values must be between 1 and 63 characters long, and must conform to
	// the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
	//
	// No more than 32 labels can be associated with a given job.
	Labels map[string]string `` /* 153-byte string literal not displayed */
	// Optional. Job scheduling configuration.
	Scheduling *JobScheduling `protobuf:"bytes,9,opt,name=scheduling,proto3" json:"scheduling,omitempty"`
	// Optional. The optional list of prerequisite job step_ids.
	// If not specified, the job will start at the beginning of workflow.
	PrerequisiteStepIds  []string `protobuf:"bytes,10,rep,name=prerequisite_step_ids,json=prerequisiteStepIds,proto3" json:"prerequisite_step_ids,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A job executed by the workflow.

func (*OrderedJob) Descriptor

func (*OrderedJob) Descriptor() ([]byte, []int)

func (*OrderedJob) GetHadoopJob

func (m *OrderedJob) GetHadoopJob() *HadoopJob

func (*OrderedJob) GetHiveJob

func (m *OrderedJob) GetHiveJob() *HiveJob

func (*OrderedJob) GetJobType

func (m *OrderedJob) GetJobType() isOrderedJob_JobType

func (*OrderedJob) GetLabels

func (m *OrderedJob) GetLabels() map[string]string

func (*OrderedJob) GetPigJob

func (m *OrderedJob) GetPigJob() *PigJob

func (*OrderedJob) GetPrerequisiteStepIds

func (m *OrderedJob) GetPrerequisiteStepIds() []string

func (*OrderedJob) GetPrestoJob

func (m *OrderedJob) GetPrestoJob() *PrestoJob

func (*OrderedJob) GetPysparkJob

func (m *OrderedJob) GetPysparkJob() *PySparkJob

func (*OrderedJob) GetScheduling

func (m *OrderedJob) GetScheduling() *JobScheduling

func (*OrderedJob) GetSparkJob

func (m *OrderedJob) GetSparkJob() *SparkJob

func (*OrderedJob) GetSparkRJob

func (m *OrderedJob) GetSparkRJob() *SparkRJob

func (*OrderedJob) GetSparkSqlJob

func (m *OrderedJob) GetSparkSqlJob() *SparkSqlJob

func (*OrderedJob) GetStepId

func (m *OrderedJob) GetStepId() string

func (*OrderedJob) ProtoMessage

func (*OrderedJob) ProtoMessage()

func (*OrderedJob) Reset

func (m *OrderedJob) Reset()

func (*OrderedJob) String

func (m *OrderedJob) String() string

func (*OrderedJob) XXX_DiscardUnknown

func (m *OrderedJob) XXX_DiscardUnknown()

func (*OrderedJob) XXX_Marshal

func (m *OrderedJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*OrderedJob) XXX_Merge

func (m *OrderedJob) XXX_Merge(src proto.Message)

func (*OrderedJob) XXX_OneofWrappers

func (*OrderedJob) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*OrderedJob) XXX_Size

func (m *OrderedJob) XXX_Size() int

func (*OrderedJob) XXX_Unmarshal

func (m *OrderedJob) XXX_Unmarshal(b []byte) error

type OrderedJob_HadoopJob

type OrderedJob_HadoopJob struct {
	HadoopJob *HadoopJob `protobuf:"bytes,2,opt,name=hadoop_job,json=hadoopJob,proto3,oneof"`
}

type OrderedJob_HiveJob

type OrderedJob_HiveJob struct {
	HiveJob *HiveJob `protobuf:"bytes,5,opt,name=hive_job,json=hiveJob,proto3,oneof"`
}

type OrderedJob_PigJob

type OrderedJob_PigJob struct {
	PigJob *PigJob `protobuf:"bytes,6,opt,name=pig_job,json=pigJob,proto3,oneof"`
}

type OrderedJob_PrestoJob

type OrderedJob_PrestoJob struct {
	PrestoJob *PrestoJob `protobuf:"bytes,12,opt,name=presto_job,json=prestoJob,proto3,oneof"`
}

type OrderedJob_PysparkJob

type OrderedJob_PysparkJob struct {
	PysparkJob *PySparkJob `protobuf:"bytes,4,opt,name=pyspark_job,json=pysparkJob,proto3,oneof"`
}

type OrderedJob_SparkJob

type OrderedJob_SparkJob struct {
	SparkJob *SparkJob `protobuf:"bytes,3,opt,name=spark_job,json=sparkJob,proto3,oneof"`
}

type OrderedJob_SparkRJob

type OrderedJob_SparkRJob struct {
	SparkRJob *SparkRJob `protobuf:"bytes,11,opt,name=spark_r_job,json=sparkRJob,proto3,oneof"`
}

type OrderedJob_SparkSqlJob

type OrderedJob_SparkSqlJob struct {
	SparkSqlJob *SparkSqlJob `protobuf:"bytes,7,opt,name=spark_sql_job,json=sparkSqlJob,proto3,oneof"`
}

type ParameterValidation

type ParameterValidation struct {
	// Required. The type of validation to be performed.
	//
	// Types that are valid to be assigned to ValidationType:
	//	*ParameterValidation_Regex
	//	*ParameterValidation_Values
	ValidationType       isParameterValidation_ValidationType `protobuf_oneof:"validation_type"`
	XXX_NoUnkeyedLiteral struct{}                             `json:"-"`
	XXX_unrecognized     []byte                               `json:"-"`
	XXX_sizecache        int32                                `json:"-"`
}

Configuration for parameter validation.

func (*ParameterValidation) Descriptor

func (*ParameterValidation) Descriptor() ([]byte, []int)

func (*ParameterValidation) GetRegex

func (m *ParameterValidation) GetRegex() *RegexValidation

func (*ParameterValidation) GetValidationType

func (m *ParameterValidation) GetValidationType() isParameterValidation_ValidationType

func (*ParameterValidation) GetValues

func (m *ParameterValidation) GetValues() *ValueValidation

func (*ParameterValidation) ProtoMessage

func (*ParameterValidation) ProtoMessage()

func (*ParameterValidation) Reset

func (m *ParameterValidation) Reset()

func (*ParameterValidation) String

func (m *ParameterValidation) String() string

func (*ParameterValidation) XXX_DiscardUnknown

func (m *ParameterValidation) XXX_DiscardUnknown()

func (*ParameterValidation) XXX_Marshal

func (m *ParameterValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ParameterValidation) XXX_Merge

func (m *ParameterValidation) XXX_Merge(src proto.Message)

func (*ParameterValidation) XXX_OneofWrappers

func (*ParameterValidation) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*ParameterValidation) XXX_Size

func (m *ParameterValidation) XXX_Size() int

func (*ParameterValidation) XXX_Unmarshal

func (m *ParameterValidation) XXX_Unmarshal(b []byte) error

type ParameterValidation_Regex

type ParameterValidation_Regex struct {
	Regex *RegexValidation `protobuf:"bytes,1,opt,name=regex,proto3,oneof"`
}

type ParameterValidation_Values

type ParameterValidation_Values struct {
	Values *ValueValidation `protobuf:"bytes,2,opt,name=values,proto3,oneof"`
}

type PigJob

type PigJob struct {
	// Required. The sequence of Pig queries to execute, specified as an HCFS
	// file URI or a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*PigJob_QueryFileUri
	//	*PigJob_QueryList
	Queries isPigJob_Queries `protobuf_oneof:"queries"`
	// Optional. Whether to continue executing queries if a query fails.
	// The default value is `false`. Setting to `true` can be useful when
	// executing independent parallel queries.
	ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`
	// Optional. Mapping of query variable names to values (equivalent to the Pig
	// command: `name=[value]`).
	ScriptVariables map[string]string `` /* 194-byte string literal not displayed */
	// Optional. A mapping of property names to values, used to configure Pig.
	// Properties that conflict with values set by the Dataproc API may be
	// overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml,
	// /etc/pig/conf/pig.properties, and classes in user code.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. HCFS URIs of jar files to add to the CLASSPATH of
	// the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
	JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

A Dataproc job for running [Apache Pig](https://pig.apache.org/) queries on YARN.

func (*PigJob) Descriptor

func (*PigJob) Descriptor() ([]byte, []int)

func (*PigJob) GetContinueOnFailure

func (m *PigJob) GetContinueOnFailure() bool

func (*PigJob) GetJarFileUris

func (m *PigJob) GetJarFileUris() []string

func (*PigJob) GetLoggingConfig

func (m *PigJob) GetLoggingConfig() *LoggingConfig

func (*PigJob) GetProperties

func (m *PigJob) GetProperties() map[string]string

func (*PigJob) GetQueries

func (m *PigJob) GetQueries() isPigJob_Queries

func (*PigJob) GetQueryFileUri

func (m *PigJob) GetQueryFileUri() string

func (*PigJob) GetQueryList

func (m *PigJob) GetQueryList() *QueryList

func (*PigJob) GetScriptVariables

func (m *PigJob) GetScriptVariables() map[string]string

func (*PigJob) ProtoMessage

func (*PigJob) ProtoMessage()

func (*PigJob) Reset

func (m *PigJob) Reset()

func (*PigJob) String

func (m *PigJob) String() string

func (*PigJob) XXX_DiscardUnknown

func (m *PigJob) XXX_DiscardUnknown()

func (*PigJob) XXX_Marshal

func (m *PigJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PigJob) XXX_Merge

func (m *PigJob) XXX_Merge(src proto.Message)

func (*PigJob) XXX_OneofWrappers

func (*PigJob) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*PigJob) XXX_Size

func (m *PigJob) XXX_Size() int

func (*PigJob) XXX_Unmarshal

func (m *PigJob) XXX_Unmarshal(b []byte) error

type PigJob_QueryFileUri

type PigJob_QueryFileUri struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

type PigJob_QueryList

type PigJob_QueryList struct {
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

type PrestoJob

type PrestoJob struct {
	// Required. The sequence of Presto queries to execute, specified as
	// either an HCFS file URI or as a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*PrestoJob_QueryFileUri
	//	*PrestoJob_QueryList
	Queries isPrestoJob_Queries `protobuf_oneof:"queries"`
	// Optional. Whether to continue executing queries if a query fails.
	// The default value is `false`. Setting to `true` can be useful when
	// executing independent parallel queries.
	ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`
	// Optional. The format in which query output will be displayed. See the
	// Presto documentation for supported output formats.
	OutputFormat string `protobuf:"bytes,4,opt,name=output_format,json=outputFormat,proto3" json:"output_format,omitempty"`
	// Optional. Presto client tags to attach to this query
	ClientTags []string `protobuf:"bytes,5,rep,name=client_tags,json=clientTags,proto3" json:"client_tags,omitempty"`
	// Optional. A mapping of property names to values. Used to set Presto
	// [session properties](https://prestodb.io/docs/current/sql/set-session.html).
	// Equivalent to using the --session flag in the Presto CLI.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

A Dataproc job for running [Presto](https://prestosql.io/) queries. **IMPORTANT**: The [Dataproc Presto Optional Component](/dataproc/docs/concepts/components/presto) must be enabled when the cluster is created to submit a Presto job to the cluster.

func (*PrestoJob) Descriptor

func (*PrestoJob) Descriptor() ([]byte, []int)

func (*PrestoJob) GetClientTags

func (m *PrestoJob) GetClientTags() []string

func (*PrestoJob) GetContinueOnFailure

func (m *PrestoJob) GetContinueOnFailure() bool

func (*PrestoJob) GetLoggingConfig

func (m *PrestoJob) GetLoggingConfig() *LoggingConfig

func (*PrestoJob) GetOutputFormat

func (m *PrestoJob) GetOutputFormat() string

func (*PrestoJob) GetProperties

func (m *PrestoJob) GetProperties() map[string]string

func (*PrestoJob) GetQueries

func (m *PrestoJob) GetQueries() isPrestoJob_Queries

func (*PrestoJob) GetQueryFileUri

func (m *PrestoJob) GetQueryFileUri() string

func (*PrestoJob) GetQueryList

func (m *PrestoJob) GetQueryList() *QueryList

func (*PrestoJob) ProtoMessage

func (*PrestoJob) ProtoMessage()

func (*PrestoJob) Reset

func (m *PrestoJob) Reset()

func (*PrestoJob) String

func (m *PrestoJob) String() string

func (*PrestoJob) XXX_DiscardUnknown

func (m *PrestoJob) XXX_DiscardUnknown()

func (*PrestoJob) XXX_Marshal

func (m *PrestoJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PrestoJob) XXX_Merge

func (m *PrestoJob) XXX_Merge(src proto.Message)

func (*PrestoJob) XXX_OneofWrappers

func (*PrestoJob) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*PrestoJob) XXX_Size

func (m *PrestoJob) XXX_Size() int

func (*PrestoJob) XXX_Unmarshal

func (m *PrestoJob) XXX_Unmarshal(b []byte) error

type PrestoJob_QueryFileUri

type PrestoJob_QueryFileUri struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

type PrestoJob_QueryList

type PrestoJob_QueryList struct {
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

type PySparkJob

type PySparkJob struct {
	// Required. The HCFS URI of the main Python file to use as the driver. Must
	// be a .py file.
	MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri,proto3" json:"main_python_file_uri,omitempty"`
	// Optional. The arguments to pass to the driver.  Do not include arguments,
	// such as `--conf`, that can be set as job properties, since a collision may
	// occur that causes an incorrect job submission.
	Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`
	// Optional. HCFS file URIs of Python files to pass to the PySpark
	// framework. Supported file types: .py, .egg, and .zip.
	PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris,proto3" json:"python_file_uris,omitempty"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
	// Python driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. HCFS URIs of files to be copied to the working directory of
	// Python drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory of
	// Python drivers and tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure PySpark.
	// Properties that conflict with values set by the Dataproc API may be
	// overwritten. Can include properties set in
	// /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

A Dataproc job for running [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) applications on YARN.

func (*PySparkJob) Descriptor

func (*PySparkJob) Descriptor() ([]byte, []int)

func (*PySparkJob) GetArchiveUris

func (m *PySparkJob) GetArchiveUris() []string

func (*PySparkJob) GetArgs

func (m *PySparkJob) GetArgs() []string

func (*PySparkJob) GetFileUris

func (m *PySparkJob) GetFileUris() []string

func (*PySparkJob) GetJarFileUris

func (m *PySparkJob) GetJarFileUris() []string

func (*PySparkJob) GetLoggingConfig

func (m *PySparkJob) GetLoggingConfig() *LoggingConfig

func (*PySparkJob) GetMainPythonFileUri

func (m *PySparkJob) GetMainPythonFileUri() string

func (*PySparkJob) GetProperties

func (m *PySparkJob) GetProperties() map[string]string

func (*PySparkJob) GetPythonFileUris

func (m *PySparkJob) GetPythonFileUris() []string

func (*PySparkJob) ProtoMessage

func (*PySparkJob) ProtoMessage()

func (*PySparkJob) Reset

func (m *PySparkJob) Reset()

func (*PySparkJob) String

func (m *PySparkJob) String() string

func (*PySparkJob) XXX_DiscardUnknown

func (m *PySparkJob) XXX_DiscardUnknown()

func (*PySparkJob) XXX_Marshal

func (m *PySparkJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*PySparkJob) XXX_Merge

func (m *PySparkJob) XXX_Merge(src proto.Message)

func (*PySparkJob) XXX_Size

func (m *PySparkJob) XXX_Size() int

func (*PySparkJob) XXX_Unmarshal

func (m *PySparkJob) XXX_Unmarshal(b []byte) error

type QueryList

type QueryList struct {
	// Required. The queries to execute. You do not need to terminate a query
	// with a semicolon. Multiple queries can be specified in one string
	// by separating each with a semicolon. Here is an example of a Cloud
	// Dataproc API snippet that uses a QueryList to specify a HiveJob:
	//
	//     "hiveJob": {
	//       "queryList": {
	//         "queries": [
	//           "query1",
	//           "query2",
	//           "query3;query4",
	//         ]
	//       }
	//     }
	Queries              []string `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A list of queries to run on a cluster.

func (*QueryList) Descriptor

func (*QueryList) Descriptor() ([]byte, []int)

func (*QueryList) GetQueries

func (m *QueryList) GetQueries() []string

func (*QueryList) ProtoMessage

func (*QueryList) ProtoMessage()

func (*QueryList) Reset

func (m *QueryList) Reset()

func (*QueryList) String

func (m *QueryList) String() string

func (*QueryList) XXX_DiscardUnknown

func (m *QueryList) XXX_DiscardUnknown()

func (*QueryList) XXX_Marshal

func (m *QueryList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*QueryList) XXX_Merge

func (m *QueryList) XXX_Merge(src proto.Message)

func (*QueryList) XXX_Size

func (m *QueryList) XXX_Size() int

func (*QueryList) XXX_Unmarshal

func (m *QueryList) XXX_Unmarshal(b []byte) error

type RegexValidation

type RegexValidation struct {
	// Required. RE2 regular expressions used to validate the parameter's value.
	// The value must match the regex in its entirety (substring
	// matches are not sufficient).
	Regexes              []string `protobuf:"bytes,1,rep,name=regexes,proto3" json:"regexes,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Validation based on regular expressions.

func (*RegexValidation) Descriptor

func (*RegexValidation) Descriptor() ([]byte, []int)

func (*RegexValidation) GetRegexes

func (m *RegexValidation) GetRegexes() []string

func (*RegexValidation) ProtoMessage

func (*RegexValidation) ProtoMessage()

func (*RegexValidation) Reset

func (m *RegexValidation) Reset()

func (*RegexValidation) String

func (m *RegexValidation) String() string

func (*RegexValidation) XXX_DiscardUnknown

func (m *RegexValidation) XXX_DiscardUnknown()

func (*RegexValidation) XXX_Marshal

func (m *RegexValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*RegexValidation) XXX_Merge

func (m *RegexValidation) XXX_Merge(src proto.Message)

func (*RegexValidation) XXX_Size

func (m *RegexValidation) XXX_Size() int

func (*RegexValidation) XXX_Unmarshal

func (m *RegexValidation) XXX_Unmarshal(b []byte) error

type ReservationAffinity

type ReservationAffinity struct {
	// Optional. Type of reservation to consume
	ConsumeReservationType ReservationAffinity_Type `` /* 190-byte string literal not displayed */
	// Optional. Corresponds to the label key of reservation resource.
	Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
	// Optional. Corresponds to the label values of reservation resource.
	Values               []string `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Reservation Affinity for consuming Zonal reservation.

func (*ReservationAffinity) Descriptor

func (*ReservationAffinity) Descriptor() ([]byte, []int)

func (*ReservationAffinity) GetConsumeReservationType

func (m *ReservationAffinity) GetConsumeReservationType() ReservationAffinity_Type

func (*ReservationAffinity) GetKey

func (m *ReservationAffinity) GetKey() string

func (*ReservationAffinity) GetValues

func (m *ReservationAffinity) GetValues() []string

func (*ReservationAffinity) ProtoMessage

func (*ReservationAffinity) ProtoMessage()

func (*ReservationAffinity) Reset

func (m *ReservationAffinity) Reset()

func (*ReservationAffinity) String

func (m *ReservationAffinity) String() string

func (*ReservationAffinity) XXX_DiscardUnknown

func (m *ReservationAffinity) XXX_DiscardUnknown()

func (*ReservationAffinity) XXX_Marshal

func (m *ReservationAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ReservationAffinity) XXX_Merge

func (m *ReservationAffinity) XXX_Merge(src proto.Message)

func (*ReservationAffinity) XXX_Size

func (m *ReservationAffinity) XXX_Size() int

func (*ReservationAffinity) XXX_Unmarshal

func (m *ReservationAffinity) XXX_Unmarshal(b []byte) error

type ReservationAffinity_Type

type ReservationAffinity_Type int32

Indicates whether to consume capacity from a reservation or not.

const (
	ReservationAffinity_TYPE_UNSPECIFIED ReservationAffinity_Type = 0
	// Do not consume from any allocated capacity.
	ReservationAffinity_NO_RESERVATION ReservationAffinity_Type = 1
	// Consume any reservation available.
	ReservationAffinity_ANY_RESERVATION ReservationAffinity_Type = 2
	// Must consume from a specific reservation. Must specify key value fields
	// for specifying the reservations.
	ReservationAffinity_SPECIFIC_RESERVATION ReservationAffinity_Type = 3
)

func (ReservationAffinity_Type) EnumDescriptor

func (ReservationAffinity_Type) EnumDescriptor() ([]byte, []int)

func (ReservationAffinity_Type) String

func (x ReservationAffinity_Type) String() string

type SecurityConfig

type SecurityConfig struct {
	// Kerberos related configuration.
	KerberosConfig       *KerberosConfig `protobuf:"bytes,1,opt,name=kerberos_config,json=kerberosConfig,proto3" json:"kerberos_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
	XXX_unrecognized     []byte          `json:"-"`
	XXX_sizecache        int32           `json:"-"`
}

Security related configuration, including encryption, Kerberos, etc.

func (*SecurityConfig) Descriptor

func (*SecurityConfig) Descriptor() ([]byte, []int)

func (*SecurityConfig) GetKerberosConfig

func (m *SecurityConfig) GetKerberosConfig() *KerberosConfig

func (*SecurityConfig) ProtoMessage

func (*SecurityConfig) ProtoMessage()

func (*SecurityConfig) Reset

func (m *SecurityConfig) Reset()

func (*SecurityConfig) String

func (m *SecurityConfig) String() string

func (*SecurityConfig) XXX_DiscardUnknown

func (m *SecurityConfig) XXX_DiscardUnknown()

func (*SecurityConfig) XXX_Marshal

func (m *SecurityConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SecurityConfig) XXX_Merge

func (m *SecurityConfig) XXX_Merge(src proto.Message)

func (*SecurityConfig) XXX_Size

func (m *SecurityConfig) XXX_Size() int

func (*SecurityConfig) XXX_Unmarshal

func (m *SecurityConfig) XXX_Unmarshal(b []byte) error

type SoftwareConfig

type SoftwareConfig struct {
	// Optional. The version of software inside the cluster. It must be one of the
	// supported [Dataproc
	// Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
	// such as "1.2" (including a subminor version, such as "1.2.29"), or the
	// ["preview"
	// version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
	// If unspecified, it defaults to the latest Debian version.
	ImageVersion string `protobuf:"bytes,1,opt,name=image_version,json=imageVersion,proto3" json:"image_version,omitempty"`
	// Optional. The properties to set on daemon config files.
	//
	// Property keys are specified in `prefix:property` format, for example
	// `core:hadoop.tmp.dir`. The following are supported prefixes
	// and their mappings:
	//
	// * capacity-scheduler: `capacity-scheduler.xml`
	// * core:   `core-site.xml`
	// * distcp: `distcp-default.xml`
	// * hdfs:   `hdfs-site.xml`
	// * hive:   `hive-site.xml`
	// * mapred: `mapred-site.xml`
	// * pig:    `pig.properties`
	// * spark:  `spark-defaults.conf`
	// * yarn:   `yarn-site.xml`
	//
	// For more information, see [Cluster
	// properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// The set of optional components to activate on the cluster.
	OptionalComponents   []Component `` /* 168-byte string literal not displayed */
	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
	XXX_unrecognized     []byte      `json:"-"`
	XXX_sizecache        int32       `json:"-"`
}

Specifies the selection and config of software inside the cluster.

func (*SoftwareConfig) Descriptor

func (*SoftwareConfig) Descriptor() ([]byte, []int)

func (*SoftwareConfig) GetImageVersion

func (m *SoftwareConfig) GetImageVersion() string

func (*SoftwareConfig) GetOptionalComponents

func (m *SoftwareConfig) GetOptionalComponents() []Component

func (*SoftwareConfig) GetProperties

func (m *SoftwareConfig) GetProperties() map[string]string

func (*SoftwareConfig) ProtoMessage

func (*SoftwareConfig) ProtoMessage()

func (*SoftwareConfig) Reset

func (m *SoftwareConfig) Reset()

func (*SoftwareConfig) String

func (m *SoftwareConfig) String() string

func (*SoftwareConfig) XXX_DiscardUnknown

func (m *SoftwareConfig) XXX_DiscardUnknown()

func (*SoftwareConfig) XXX_Marshal

func (m *SoftwareConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SoftwareConfig) XXX_Merge

func (m *SoftwareConfig) XXX_Merge(src proto.Message)

func (*SoftwareConfig) XXX_Size

func (m *SoftwareConfig) XXX_Size() int

func (*SoftwareConfig) XXX_Unmarshal

func (m *SoftwareConfig) XXX_Unmarshal(b []byte) error

type SparkJob

type SparkJob struct {
	// Types that are valid to be assigned to Driver:
	//	*SparkJob_MainJarFileUri
	//	*SparkJob_MainClass
	Driver isSparkJob_Driver `protobuf_oneof:"driver"`
	// Optional. The arguments to pass to the driver. Do not include arguments,
	// such as `--conf`, that can be set as job properties, since a collision may
	// occur that causes an incorrect job submission.
	Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
	// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
	// Spark driver and tasks.
	JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. HCFS URIs of files to be copied to the working directory of
	// Spark drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory
	// of Spark drivers and tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure Spark.
	// Properties that conflict with values set by the Dataproc API may be
	// overwritten. Can include properties set in
	// /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

A Dataproc job for running [Apache Spark](http://spark.apache.org/) applications on YARN. The specification of the main method to call to drive the job. Specify either the jar file that contains the main class or the main class name. To pass both a main jar and a main class in that jar, add the jar to `CommonJob.jar_file_uris`, and then specify the main class name in `main_class`.

func (*SparkJob) Descriptor

func (*SparkJob) Descriptor() ([]byte, []int)

func (*SparkJob) GetArchiveUris

func (m *SparkJob) GetArchiveUris() []string

func (*SparkJob) GetArgs

func (m *SparkJob) GetArgs() []string

func (*SparkJob) GetDriver

func (m *SparkJob) GetDriver() isSparkJob_Driver

func (*SparkJob) GetFileUris

func (m *SparkJob) GetFileUris() []string

func (*SparkJob) GetJarFileUris

func (m *SparkJob) GetJarFileUris() []string

func (*SparkJob) GetLoggingConfig

func (m *SparkJob) GetLoggingConfig() *LoggingConfig

func (*SparkJob) GetMainClass

func (m *SparkJob) GetMainClass() string

func (*SparkJob) GetMainJarFileUri

func (m *SparkJob) GetMainJarFileUri() string

func (*SparkJob) GetProperties

func (m *SparkJob) GetProperties() map[string]string

func (*SparkJob) ProtoMessage

func (*SparkJob) ProtoMessage()

func (*SparkJob) Reset

func (m *SparkJob) Reset()

func (*SparkJob) String

func (m *SparkJob) String() string

func (*SparkJob) XXX_DiscardUnknown

func (m *SparkJob) XXX_DiscardUnknown()

func (*SparkJob) XXX_Marshal

func (m *SparkJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SparkJob) XXX_Merge

func (m *SparkJob) XXX_Merge(src proto.Message)

func (*SparkJob) XXX_OneofWrappers

func (*SparkJob) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*SparkJob) XXX_Size

func (m *SparkJob) XXX_Size() int

func (*SparkJob) XXX_Unmarshal

func (m *SparkJob) XXX_Unmarshal(b []byte) error

type SparkJob_MainClass

type SparkJob_MainClass struct {
	MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"`
}

type SparkJob_MainJarFileUri

type SparkJob_MainJarFileUri struct {
	MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"`
}

type SparkRJob

type SparkRJob struct {
	// Required. The HCFS URI of the main R file to use as the driver.
	// Must be a .R file.
	MainRFileUri string `protobuf:"bytes,1,opt,name=main_r_file_uri,json=mainRFileUri,proto3" json:"main_r_file_uri,omitempty"`
	// Optional. The arguments to pass to the driver.  Do not include arguments,
	// such as `--conf`, that can be set as job properties, since a collision may
	// occur that causes an incorrect job submission.
	Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`
	// Optional. HCFS URIs of files to be copied to the working directory of
	// R drivers and distributed tasks. Useful for naively parallel tasks.
	FileUris []string `protobuf:"bytes,3,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
	// Optional. HCFS URIs of archives to be extracted in the working directory of
	// Spark drivers and tasks. Supported file types:
	// .jar, .tar, .tar.gz, .tgz, and .zip.
	ArchiveUris []string `protobuf:"bytes,4,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
	// Optional. A mapping of property names to values, used to configure SparkR.
	// Properties that conflict with values set by the Dataproc API may be
	// overwritten. Can include properties set in
	// /etc/spark/conf/spark-defaults.conf and classes in user code.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

A Dataproc job for running [Apache SparkR](https://spark.apache.org/docs/latest/sparkr.html) applications on YARN.

func (*SparkRJob) Descriptor

func (*SparkRJob) Descriptor() ([]byte, []int)

func (*SparkRJob) GetArchiveUris

func (m *SparkRJob) GetArchiveUris() []string

func (*SparkRJob) GetArgs

func (m *SparkRJob) GetArgs() []string

func (*SparkRJob) GetFileUris

func (m *SparkRJob) GetFileUris() []string

func (*SparkRJob) GetLoggingConfig

func (m *SparkRJob) GetLoggingConfig() *LoggingConfig

func (*SparkRJob) GetMainRFileUri

func (m *SparkRJob) GetMainRFileUri() string

func (*SparkRJob) GetProperties

func (m *SparkRJob) GetProperties() map[string]string

func (*SparkRJob) ProtoMessage

func (*SparkRJob) ProtoMessage()

func (*SparkRJob) Reset

func (m *SparkRJob) Reset()

func (*SparkRJob) String

func (m *SparkRJob) String() string

func (*SparkRJob) XXX_DiscardUnknown

func (m *SparkRJob) XXX_DiscardUnknown()

func (*SparkRJob) XXX_Marshal

func (m *SparkRJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SparkRJob) XXX_Merge

func (m *SparkRJob) XXX_Merge(src proto.Message)

func (*SparkRJob) XXX_Size

func (m *SparkRJob) XXX_Size() int

func (*SparkRJob) XXX_Unmarshal

func (m *SparkRJob) XXX_Unmarshal(b []byte) error

type SparkSqlJob

type SparkSqlJob struct {
	// Required. The sequence of Spark SQL queries to execute, specified as
	// either an HCFS file URI or as a list of queries.
	//
	// Types that are valid to be assigned to Queries:
	//	*SparkSqlJob_QueryFileUri
	//	*SparkSqlJob_QueryList
	Queries isSparkSqlJob_Queries `protobuf_oneof:"queries"`
	// Optional. Mapping of query variable names to values (equivalent to the
	// Spark SQL command: SET `name="value";`).
	ScriptVariables map[string]string `` /* 194-byte string literal not displayed */
	// Optional. A mapping of property names to values, used to configure
	// Spark SQL's SparkConf. Properties that conflict with values set by the
	// Dataproc API may be overwritten.
	Properties map[string]string `` /* 161-byte string literal not displayed */
	// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
	JarFileUris []string `protobuf:"bytes,56,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
	// Optional. The runtime log config for job execution.
	LoggingConfig        *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

A Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) queries.

func (*SparkSqlJob) Descriptor

func (*SparkSqlJob) Descriptor() ([]byte, []int)

func (*SparkSqlJob) GetJarFileUris

func (m *SparkSqlJob) GetJarFileUris() []string

func (*SparkSqlJob) GetLoggingConfig

func (m *SparkSqlJob) GetLoggingConfig() *LoggingConfig

func (*SparkSqlJob) GetProperties

func (m *SparkSqlJob) GetProperties() map[string]string

func (*SparkSqlJob) GetQueries

func (m *SparkSqlJob) GetQueries() isSparkSqlJob_Queries

func (*SparkSqlJob) GetQueryFileUri

func (m *SparkSqlJob) GetQueryFileUri() string

func (*SparkSqlJob) GetQueryList

func (m *SparkSqlJob) GetQueryList() *QueryList

func (*SparkSqlJob) GetScriptVariables

func (m *SparkSqlJob) GetScriptVariables() map[string]string

func (*SparkSqlJob) ProtoMessage

func (*SparkSqlJob) ProtoMessage()

func (*SparkSqlJob) Reset

func (m *SparkSqlJob) Reset()

func (*SparkSqlJob) String

func (m *SparkSqlJob) String() string

func (*SparkSqlJob) XXX_DiscardUnknown

func (m *SparkSqlJob) XXX_DiscardUnknown()

func (*SparkSqlJob) XXX_Marshal

func (m *SparkSqlJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SparkSqlJob) XXX_Merge

func (m *SparkSqlJob) XXX_Merge(src proto.Message)

func (*SparkSqlJob) XXX_OneofWrappers

func (*SparkSqlJob) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*SparkSqlJob) XXX_Size

func (m *SparkSqlJob) XXX_Size() int

func (*SparkSqlJob) XXX_Unmarshal

func (m *SparkSqlJob) XXX_Unmarshal(b []byte) error

type SparkSqlJob_QueryFileUri

type SparkSqlJob_QueryFileUri struct {
	QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}

type SparkSqlJob_QueryList

type SparkSqlJob_QueryList struct {
	QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}

type SubmitJobRequest

type SubmitJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job resource.
	Job *Job `protobuf:"bytes,2,opt,name=job,proto3" json:"job,omitempty"`
	// Optional. A unique id used to identify the request. If the server
	// receives two [SubmitJobRequest][google.cloud.dataproc.v1beta2.SubmitJobRequest] requests with the same
	// id, then the second request will be ignored and the
	// first [Job][google.cloud.dataproc.v1beta2.Job] created and stored in the backend
	// is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId            string   `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to submit a job.

func (*SubmitJobRequest) Descriptor

func (*SubmitJobRequest) Descriptor() ([]byte, []int)

func (*SubmitJobRequest) GetJob

func (m *SubmitJobRequest) GetJob() *Job

func (*SubmitJobRequest) GetProjectId

func (m *SubmitJobRequest) GetProjectId() string

func (*SubmitJobRequest) GetRegion

func (m *SubmitJobRequest) GetRegion() string

func (*SubmitJobRequest) GetRequestId

func (m *SubmitJobRequest) GetRequestId() string

func (*SubmitJobRequest) ProtoMessage

func (*SubmitJobRequest) ProtoMessage()

func (*SubmitJobRequest) Reset

func (m *SubmitJobRequest) Reset()

func (*SubmitJobRequest) String

func (m *SubmitJobRequest) String() string

func (*SubmitJobRequest) XXX_DiscardUnknown

func (m *SubmitJobRequest) XXX_DiscardUnknown()

func (*SubmitJobRequest) XXX_Marshal

func (m *SubmitJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*SubmitJobRequest) XXX_Merge

func (m *SubmitJobRequest) XXX_Merge(src proto.Message)

func (*SubmitJobRequest) XXX_Size

func (m *SubmitJobRequest) XXX_Size() int

func (*SubmitJobRequest) XXX_Unmarshal

func (m *SubmitJobRequest) XXX_Unmarshal(b []byte) error

type TemplateParameter

// TemplateParameter is a configurable parameter that replaces one or more
// fields in a workflow template. Parameterizable fields include labels, file
// URIs, job properties, job arguments, script variables, main class (in
// HadoopJob and SparkJob), and zone (in ClusterSelector).
type TemplateParameter struct {
	// Required. Parameter name.
	// The parameter name is used as the key, and paired with the
	// parameter value, which are passed to the template when the template
	// is instantiated.
	// The name must contain only capital letters (A-Z), numbers (0-9), and
	// underscores (_), and must not start with a number. The maximum length is
	// 40 characters.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Required. Paths to all fields that the parameter replaces.
	// A field is allowed to appear in at most one parameter's list of field
	// paths.
	//
	// A field path is similar in syntax to a [google.protobuf.FieldMask][google.protobuf.FieldMask].
	// For example, a field path that references the zone field of a workflow
	// template's cluster selector would be specified as
	// `placement.clusterSelector.zone`.
	//
	// Also, field paths can reference fields using the following syntax:
	//
	// * Values in maps can be referenced by key:
	//     * labels['key']
	//     * placement.clusterSelector.clusterLabels['key']
	//     * placement.managedCluster.labels['key']
	//     * jobs['step-id'].labels['key']
	//
	// * Jobs in the jobs list can be referenced by step-id:
	//     * jobs['step-id'].hadoopJob.mainJarFileUri
	//     * jobs['step-id'].hiveJob.queryFileUri
	//     * jobs['step-id'].pySparkJob.mainPythonFileUri
	//     * jobs['step-id'].hadoopJob.jarFileUris[0]
	//     * jobs['step-id'].hadoopJob.archiveUris[0]
	//     * jobs['step-id'].hadoopJob.fileUris[0]
	//     * jobs['step-id'].pySparkJob.pythonFileUris[0]
	//
	// * Items in repeated fields can be referenced by a zero-based index:
	//     * jobs['step-id'].sparkJob.args[0]
	//
	// * Other examples:
	//     * jobs['step-id'].hadoopJob.properties['key']
	//     * jobs['step-id'].hadoopJob.args[0]
	//     * jobs['step-id'].hiveJob.scriptVariables['key']
	//     * jobs['step-id'].hadoopJob.mainJarFileUri
	//     * placement.clusterSelector.zone
	//
	// It may not be possible to parameterize maps and repeated fields in their
	// entirety since only individual map values and individual items in repeated
	// fields can be referenced. For example, the following field paths are
	// invalid:
	//
	// - placement.clusterSelector.clusterLabels
	// - jobs['step-id'].sparkJob.args
	Fields []string `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"`
	// Optional. Brief description of the parameter.
	// Must not exceed 1024 characters.
	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
	// Optional. Validation rules to be applied to this parameter's value.
	Validation           *ParameterValidation `protobuf:"bytes,4,opt,name=validation,proto3" json:"validation,omitempty"`
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

A configurable parameter that replaces one or more fields in the template. Parameterizable fields: - Labels - File uris - Job properties - Job arguments - Script variables - Main class (in HadoopJob and SparkJob) - Zone (in ClusterSelector)

func (*TemplateParameter) Descriptor

func (*TemplateParameter) Descriptor() ([]byte, []int)

func (*TemplateParameter) GetDescription

func (m *TemplateParameter) GetDescription() string

func (*TemplateParameter) GetFields

func (m *TemplateParameter) GetFields() []string

func (*TemplateParameter) GetName

func (m *TemplateParameter) GetName() string

func (*TemplateParameter) GetValidation

func (m *TemplateParameter) GetValidation() *ParameterValidation

func (*TemplateParameter) ProtoMessage

func (*TemplateParameter) ProtoMessage()

func (*TemplateParameter) Reset

func (m *TemplateParameter) Reset()

func (*TemplateParameter) String

func (m *TemplateParameter) String() string

func (*TemplateParameter) XXX_DiscardUnknown

func (m *TemplateParameter) XXX_DiscardUnknown()

func (*TemplateParameter) XXX_Marshal

func (m *TemplateParameter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*TemplateParameter) XXX_Merge

func (m *TemplateParameter) XXX_Merge(src proto.Message)

func (*TemplateParameter) XXX_Size

func (m *TemplateParameter) XXX_Size() int

func (*TemplateParameter) XXX_Unmarshal

func (m *TemplateParameter) XXX_Unmarshal(b []byte) error

type UnimplementedAutoscalingPolicyServiceServer

// UnimplementedAutoscalingPolicyServiceServer can be embedded to have forward
// compatible implementations.
type UnimplementedAutoscalingPolicyServiceServer struct {
}

UnimplementedAutoscalingPolicyServiceServer can be embedded to have forward compatible implementations.

func (*UnimplementedAutoscalingPolicyServiceServer) CreateAutoscalingPolicy

func (*UnimplementedAutoscalingPolicyServiceServer) DeleteAutoscalingPolicy

func (*UnimplementedAutoscalingPolicyServiceServer) GetAutoscalingPolicy

func (*UnimplementedAutoscalingPolicyServiceServer) ListAutoscalingPolicies

func (*UnimplementedAutoscalingPolicyServiceServer) UpdateAutoscalingPolicy

type UnimplementedClusterControllerServer

// UnimplementedClusterControllerServer can be embedded to have forward
// compatible implementations.
type UnimplementedClusterControllerServer struct {
}

UnimplementedClusterControllerServer can be embedded to have forward compatible implementations.

func (*UnimplementedClusterControllerServer) CreateCluster

func (*UnimplementedClusterControllerServer) DeleteCluster

func (*UnimplementedClusterControllerServer) DiagnoseCluster

func (*UnimplementedClusterControllerServer) GetCluster

func (*UnimplementedClusterControllerServer) ListClusters

func (*UnimplementedClusterControllerServer) UpdateCluster

type UnimplementedJobControllerServer

// UnimplementedJobControllerServer can be embedded to have forward
// compatible implementations.
type UnimplementedJobControllerServer struct {
}

UnimplementedJobControllerServer can be embedded to have forward compatible implementations.

func (*UnimplementedJobControllerServer) CancelJob

func (*UnimplementedJobControllerServer) DeleteJob

func (*UnimplementedJobControllerServer) GetJob

func (*UnimplementedJobControllerServer) ListJobs

func (*UnimplementedJobControllerServer) SubmitJob

func (*UnimplementedJobControllerServer) SubmitJobAsOperation

func (*UnimplementedJobControllerServer) UpdateJob

type UnimplementedWorkflowTemplateServiceServer

// UnimplementedWorkflowTemplateServiceServer can be embedded to have forward
// compatible implementations.
type UnimplementedWorkflowTemplateServiceServer struct {
}

UnimplementedWorkflowTemplateServiceServer can be embedded to have forward compatible implementations.

func (*UnimplementedWorkflowTemplateServiceServer) CreateWorkflowTemplate

func (*UnimplementedWorkflowTemplateServiceServer) DeleteWorkflowTemplate

func (*UnimplementedWorkflowTemplateServiceServer) GetWorkflowTemplate

func (*UnimplementedWorkflowTemplateServiceServer) InstantiateInlineWorkflowTemplate

func (*UnimplementedWorkflowTemplateServiceServer) InstantiateWorkflowTemplate

func (*UnimplementedWorkflowTemplateServiceServer) ListWorkflowTemplates

func (*UnimplementedWorkflowTemplateServiceServer) UpdateWorkflowTemplate

type UpdateAutoscalingPolicyRequest

// UpdateAutoscalingPolicyRequest is a request to update an autoscaling policy.
type UpdateAutoscalingPolicyRequest struct {
	// Required. The updated autoscaling policy.
	Policy               *AutoscalingPolicy `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
	XXX_unrecognized     []byte             `json:"-"`
	XXX_sizecache        int32              `json:"-"`
}

A request to update an autoscaling policy.

func (*UpdateAutoscalingPolicyRequest) Descriptor

func (*UpdateAutoscalingPolicyRequest) Descriptor() ([]byte, []int)

func (*UpdateAutoscalingPolicyRequest) GetPolicy

func (*UpdateAutoscalingPolicyRequest) ProtoMessage

func (*UpdateAutoscalingPolicyRequest) ProtoMessage()

func (*UpdateAutoscalingPolicyRequest) Reset

func (m *UpdateAutoscalingPolicyRequest) Reset()

func (*UpdateAutoscalingPolicyRequest) String

func (*UpdateAutoscalingPolicyRequest) XXX_DiscardUnknown

func (m *UpdateAutoscalingPolicyRequest) XXX_DiscardUnknown()

func (*UpdateAutoscalingPolicyRequest) XXX_Marshal

func (m *UpdateAutoscalingPolicyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*UpdateAutoscalingPolicyRequest) XXX_Merge

func (m *UpdateAutoscalingPolicyRequest) XXX_Merge(src proto.Message)

func (*UpdateAutoscalingPolicyRequest) XXX_Size

func (m *UpdateAutoscalingPolicyRequest) XXX_Size() int

func (*UpdateAutoscalingPolicyRequest) XXX_Unmarshal

func (m *UpdateAutoscalingPolicyRequest) XXX_Unmarshal(b []byte) error

type UpdateClusterRequest

// UpdateClusterRequest is a request to update a cluster.
type UpdateClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project the
	// cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,5,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Required. The changes to the cluster.
	Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"`
	// Optional. Timeout for graceful YARN decommissioning. Graceful
	// decommissioning allows removing nodes from the cluster without
	// interrupting jobs in progress. Timeout specifies how long to wait for jobs
	// in progress to finish before forcefully removing nodes (and potentially
	// interrupting jobs). Default timeout is 0 (for forceful decommission), and
	// the maximum allowed timeout is 1 day (see JSON representation of
	// [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
	//
	// Only supported on Dataproc image versions 1.2 and higher.
	GracefulDecommissionTimeout *duration.Duration `` /* 144-byte string literal not displayed */
	// Required. Specifies the path, relative to `Cluster`, of
	// the field to update. For example, to change the number of workers
	// in a cluster to 5, the `update_mask` parameter would be
	// specified as `config.worker_config.num_instances`,
	// and the `PATCH` request body would specify the new value, as follows:
	//
	//     {
	//       "config":{
	//         "workerConfig":{
	//           "numInstances":"5"
	//         }
	//       }
	//     }
	//
	// Similarly, to change the number of preemptible workers in a cluster to 5,
	// the `update_mask` parameter would be
	// `config.secondary_worker_config.num_instances`, and the `PATCH` request
	// body would be set as follows:
	//
	//     {
	//       "config":{
	//         "secondaryWorkerConfig":{
	//           "numInstances":"5"
	//         }
	//       }
	//     }
	// <strong>Note:</strong> currently only the following fields can be updated:
	//
	// <table>
	// <tr>
	// <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
	// </tr>
	// <tr>
	// <td>labels</td><td>Updates labels</td>
	// </tr>
	// <tr>
	// <td>config.worker_config.num_instances</td><td>Resize primary worker
	// group</td>
	// </tr>
	// <tr>
	// <td>config.secondary_worker_config.num_instances</td><td>Resize secondary
	// worker group</td>
	// </tr>
	// <tr>
	// <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL
	// duration</td>
	// </tr>
	// <tr>
	// <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL
	// deletion timestamp</td>
	// </tr>
	// <tr>
	// <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL
	// duration</td>
	// </tr>
	// <tr>
	// <td>config.autoscaling_config.policy_uri</td><td>Use, stop using, or change
	// autoscaling policies</td>
	// </tr>
	// </table>
	UpdateMask *field_mask.FieldMask `protobuf:"bytes,4,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
	// Optional. A unique id used to identify the request. If the server
	// receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same
	// id, then the second request will be ignored and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
	// backend is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId            string   `protobuf:"bytes,7,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A request to update a cluster.

func (*UpdateClusterRequest) Descriptor

func (*UpdateClusterRequest) Descriptor() ([]byte, []int)

func (*UpdateClusterRequest) GetCluster

func (m *UpdateClusterRequest) GetCluster() *Cluster

func (*UpdateClusterRequest) GetClusterName

func (m *UpdateClusterRequest) GetClusterName() string

func (*UpdateClusterRequest) GetGracefulDecommissionTimeout

func (m *UpdateClusterRequest) GetGracefulDecommissionTimeout() *duration.Duration

func (*UpdateClusterRequest) GetProjectId

func (m *UpdateClusterRequest) GetProjectId() string

func (*UpdateClusterRequest) GetRegion

func (m *UpdateClusterRequest) GetRegion() string

func (*UpdateClusterRequest) GetRequestId

func (m *UpdateClusterRequest) GetRequestId() string

func (*UpdateClusterRequest) GetUpdateMask

func (m *UpdateClusterRequest) GetUpdateMask() *field_mask.FieldMask

func (*UpdateClusterRequest) ProtoMessage

func (*UpdateClusterRequest) ProtoMessage()

func (*UpdateClusterRequest) Reset

func (m *UpdateClusterRequest) Reset()

func (*UpdateClusterRequest) String

func (m *UpdateClusterRequest) String() string

func (*UpdateClusterRequest) XXX_DiscardUnknown

func (m *UpdateClusterRequest) XXX_DiscardUnknown()

func (*UpdateClusterRequest) XXX_Marshal

func (m *UpdateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*UpdateClusterRequest) XXX_Merge

func (m *UpdateClusterRequest) XXX_Merge(src proto.Message)

func (*UpdateClusterRequest) XXX_Size

func (m *UpdateClusterRequest) XXX_Size() int

func (*UpdateClusterRequest) XXX_Unmarshal

func (m *UpdateClusterRequest) XXX_Unmarshal(b []byte) error

type UpdateJobRequest

// UpdateJobRequest is a request to update a job.
type UpdateJobRequest struct {
	// Required. The ID of the Google Cloud Platform project that the job
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,2,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The job ID.
	JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// Required. The changes to the job.
	Job *Job `protobuf:"bytes,4,opt,name=job,proto3" json:"job,omitempty"`
	// Required. Specifies the path, relative to <code>Job</code>, of
	// the field to update. For example, to update the labels of a Job the
	// <code>update_mask</code> parameter would be specified as
	// <code>labels</code>, and the `PATCH` request body would specify the new
	// value. <strong>Note:</strong> Currently, <code>labels</code> is the only
	// field that can be updated.
	UpdateMask           *field_mask.FieldMask `protobuf:"bytes,5,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
	XXX_unrecognized     []byte                `json:"-"`
	XXX_sizecache        int32                 `json:"-"`
}

A request to update a job.

func (*UpdateJobRequest) Descriptor

func (*UpdateJobRequest) Descriptor() ([]byte, []int)

func (*UpdateJobRequest) GetJob

func (m *UpdateJobRequest) GetJob() *Job

func (*UpdateJobRequest) GetJobId

func (m *UpdateJobRequest) GetJobId() string

func (*UpdateJobRequest) GetProjectId

func (m *UpdateJobRequest) GetProjectId() string

func (*UpdateJobRequest) GetRegion

func (m *UpdateJobRequest) GetRegion() string

func (*UpdateJobRequest) GetUpdateMask

func (m *UpdateJobRequest) GetUpdateMask() *field_mask.FieldMask

func (*UpdateJobRequest) ProtoMessage

func (*UpdateJobRequest) ProtoMessage()

func (*UpdateJobRequest) Reset

func (m *UpdateJobRequest) Reset()

func (*UpdateJobRequest) String

func (m *UpdateJobRequest) String() string

func (*UpdateJobRequest) XXX_DiscardUnknown

func (m *UpdateJobRequest) XXX_DiscardUnknown()

func (*UpdateJobRequest) XXX_Marshal

func (m *UpdateJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*UpdateJobRequest) XXX_Merge

func (m *UpdateJobRequest) XXX_Merge(src proto.Message)

func (*UpdateJobRequest) XXX_Size

func (m *UpdateJobRequest) XXX_Size() int

func (*UpdateJobRequest) XXX_Unmarshal

func (m *UpdateJobRequest) XXX_Unmarshal(b []byte) error

type UpdateWorkflowTemplateRequest

// UpdateWorkflowTemplateRequest is a request to update a workflow template.
type UpdateWorkflowTemplateRequest struct {
	// Required. The updated workflow template.
	//
	// The `template.version` field must match the current version.
	Template             *WorkflowTemplate `protobuf:"bytes,1,opt,name=template,proto3" json:"template,omitempty"`
	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
	XXX_unrecognized     []byte            `json:"-"`
	XXX_sizecache        int32             `json:"-"`
}

A request to update a workflow template.

func (*UpdateWorkflowTemplateRequest) Descriptor

func (*UpdateWorkflowTemplateRequest) Descriptor() ([]byte, []int)

func (*UpdateWorkflowTemplateRequest) GetTemplate

func (*UpdateWorkflowTemplateRequest) ProtoMessage

func (*UpdateWorkflowTemplateRequest) ProtoMessage()

func (*UpdateWorkflowTemplateRequest) Reset

func (m *UpdateWorkflowTemplateRequest) Reset()

func (*UpdateWorkflowTemplateRequest) String

func (*UpdateWorkflowTemplateRequest) XXX_DiscardUnknown

func (m *UpdateWorkflowTemplateRequest) XXX_DiscardUnknown()

func (*UpdateWorkflowTemplateRequest) XXX_Marshal

func (m *UpdateWorkflowTemplateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*UpdateWorkflowTemplateRequest) XXX_Merge

func (m *UpdateWorkflowTemplateRequest) XXX_Merge(src proto.Message)

func (*UpdateWorkflowTemplateRequest) XXX_Size

func (m *UpdateWorkflowTemplateRequest) XXX_Size() int

func (*UpdateWorkflowTemplateRequest) XXX_Unmarshal

func (m *UpdateWorkflowTemplateRequest) XXX_Unmarshal(b []byte) error

type ValueValidation

// ValueValidation validates a parameter value against a list of allowed
// values.
type ValueValidation struct {
	// Required. List of allowed values for the parameter.
	Values               []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Validation based on a list of allowed values.

func (*ValueValidation) Descriptor

func (*ValueValidation) Descriptor() ([]byte, []int)

func (*ValueValidation) GetValues

func (m *ValueValidation) GetValues() []string

func (*ValueValidation) ProtoMessage

func (*ValueValidation) ProtoMessage()

func (*ValueValidation) Reset

func (m *ValueValidation) Reset()

func (*ValueValidation) String

func (m *ValueValidation) String() string

func (*ValueValidation) XXX_DiscardUnknown

func (m *ValueValidation) XXX_DiscardUnknown()

func (*ValueValidation) XXX_Marshal

func (m *ValueValidation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*ValueValidation) XXX_Merge

func (m *ValueValidation) XXX_Merge(src proto.Message)

func (*ValueValidation) XXX_Size

func (m *ValueValidation) XXX_Size() int

func (*ValueValidation) XXX_Unmarshal

func (m *ValueValidation) XXX_Unmarshal(b []byte) error

type WorkflowGraph

// WorkflowGraph is the workflow graph: the set of nodes produced by a
// workflow instantiation.
type WorkflowGraph struct {
	// Output only. The workflow nodes.
	Nodes                []*WorkflowNode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
	XXX_unrecognized     []byte          `json:"-"`
	XXX_sizecache        int32           `json:"-"`
}

The workflow graph.

func (*WorkflowGraph) Descriptor

func (*WorkflowGraph) Descriptor() ([]byte, []int)

func (*WorkflowGraph) GetNodes

func (m *WorkflowGraph) GetNodes() []*WorkflowNode

func (*WorkflowGraph) ProtoMessage

func (*WorkflowGraph) ProtoMessage()

func (*WorkflowGraph) Reset

func (m *WorkflowGraph) Reset()

func (*WorkflowGraph) String

func (m *WorkflowGraph) String() string

func (*WorkflowGraph) XXX_DiscardUnknown

func (m *WorkflowGraph) XXX_DiscardUnknown()

func (*WorkflowGraph) XXX_Marshal

func (m *WorkflowGraph) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WorkflowGraph) XXX_Merge

func (m *WorkflowGraph) XXX_Merge(src proto.Message)

func (*WorkflowGraph) XXX_Size

func (m *WorkflowGraph) XXX_Size() int

func (*WorkflowGraph) XXX_Unmarshal

func (m *WorkflowGraph) XXX_Unmarshal(b []byte) error

type WorkflowMetadata

// WorkflowMetadata is output-only metadata describing a workflow
// instantiation: the template and version used, cluster create/delete
// operations, the workflow graph, state, target cluster, and timing.
type WorkflowMetadata struct {
	// Output only. The resource name of the workflow template as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.workflowTemplates`, the resource name of the
	//   template has the following format:
	//   `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
	//
	// * For `projects.locations.workflowTemplates`, the resource name of the
	//   template has the following format:
	//   `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`
	Template string `protobuf:"bytes,1,opt,name=template,proto3" json:"template,omitempty"`
	// Output only. The version of template at the time of
	// workflow instantiation.
	Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
	// Output only. The create cluster operation metadata.
	CreateCluster *ClusterOperation `protobuf:"bytes,3,opt,name=create_cluster,json=createCluster,proto3" json:"create_cluster,omitempty"`
	// Output only. The workflow graph.
	Graph *WorkflowGraph `protobuf:"bytes,4,opt,name=graph,proto3" json:"graph,omitempty"`
	// Output only. The delete cluster operation metadata.
	DeleteCluster *ClusterOperation `protobuf:"bytes,5,opt,name=delete_cluster,json=deleteCluster,proto3" json:"delete_cluster,omitempty"`
	// Output only. The workflow state.
	State WorkflowMetadata_State `protobuf:"varint,6,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.WorkflowMetadata_State" json:"state,omitempty"`
	// Output only. The name of the target cluster.
	ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Map from parameter names to values that were used for those parameters.
	Parameters map[string]string `` /* 161-byte string literal not displayed */
	// Output only. Workflow start time.
	StartTime *timestamp.Timestamp `protobuf:"bytes,9,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
	// Output only. Workflow end time.
	EndTime *timestamp.Timestamp `protobuf:"bytes,10,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
	// Output only. The UUID of target cluster.
	ClusterUuid          string   `protobuf:"bytes,11,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

Output-only metadata describing a Dataproc workflow instantiation: the template used, cluster operations, graph, state, and timing.

func (*WorkflowMetadata) Descriptor

func (*WorkflowMetadata) Descriptor() ([]byte, []int)

func (*WorkflowMetadata) GetClusterName

func (m *WorkflowMetadata) GetClusterName() string

func (*WorkflowMetadata) GetClusterUuid

func (m *WorkflowMetadata) GetClusterUuid() string

func (*WorkflowMetadata) GetCreateCluster

func (m *WorkflowMetadata) GetCreateCluster() *ClusterOperation

func (*WorkflowMetadata) GetDeleteCluster

func (m *WorkflowMetadata) GetDeleteCluster() *ClusterOperation

func (*WorkflowMetadata) GetEndTime

func (m *WorkflowMetadata) GetEndTime() *timestamp.Timestamp

func (*WorkflowMetadata) GetGraph

func (m *WorkflowMetadata) GetGraph() *WorkflowGraph

func (*WorkflowMetadata) GetParameters

func (m *WorkflowMetadata) GetParameters() map[string]string

func (*WorkflowMetadata) GetStartTime

func (m *WorkflowMetadata) GetStartTime() *timestamp.Timestamp

func (*WorkflowMetadata) GetState

func (*WorkflowMetadata) GetTemplate

func (m *WorkflowMetadata) GetTemplate() string

func (*WorkflowMetadata) GetVersion

func (m *WorkflowMetadata) GetVersion() int32

func (*WorkflowMetadata) ProtoMessage

func (*WorkflowMetadata) ProtoMessage()

func (*WorkflowMetadata) Reset

func (m *WorkflowMetadata) Reset()

func (*WorkflowMetadata) String

func (m *WorkflowMetadata) String() string

func (*WorkflowMetadata) XXX_DiscardUnknown

func (m *WorkflowMetadata) XXX_DiscardUnknown()

func (*WorkflowMetadata) XXX_Marshal

func (m *WorkflowMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WorkflowMetadata) XXX_Merge

func (m *WorkflowMetadata) XXX_Merge(src proto.Message)

func (*WorkflowMetadata) XXX_Size

func (m *WorkflowMetadata) XXX_Size() int

func (*WorkflowMetadata) XXX_Unmarshal

func (m *WorkflowMetadata) XXX_Unmarshal(b []byte) error

type WorkflowMetadata_State

// WorkflowMetadata_State is the operation state of a workflow instantiation.
type WorkflowMetadata_State int32

The operation state.

// States a workflow operation can be in.
const (
	// Unused.
	WorkflowMetadata_UNKNOWN WorkflowMetadata_State = 0
	// The operation has been created.
	WorkflowMetadata_PENDING WorkflowMetadata_State = 1
	// The operation is running.
	WorkflowMetadata_RUNNING WorkflowMetadata_State = 2
	// The operation is done; either cancelled or completed.
	WorkflowMetadata_DONE WorkflowMetadata_State = 3
)

func (WorkflowMetadata_State) EnumDescriptor

func (WorkflowMetadata_State) EnumDescriptor() ([]byte, []int)

func (WorkflowMetadata_State) String

func (x WorkflowMetadata_State) String() string

type WorkflowNode

// WorkflowNode is a single node (one job) in a workflow graph.
type WorkflowNode struct {
	// Output only. The name of the node.
	StepId string `protobuf:"bytes,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"`
	// Output only. Node's prerequisite nodes.
	PrerequisiteStepIds []string `protobuf:"bytes,2,rep,name=prerequisite_step_ids,json=prerequisiteStepIds,proto3" json:"prerequisite_step_ids,omitempty"`
	// Output only. The job id; populated after the node enters RUNNING state.
	JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
	// Output only. The node state.
	State WorkflowNode_NodeState `protobuf:"varint,5,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.WorkflowNode_NodeState" json:"state,omitempty"`
	// Output only. The error detail.
	Error                string   `protobuf:"bytes,6,opt,name=error,proto3" json:"error,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

The workflow node.

func (*WorkflowNode) Descriptor

func (*WorkflowNode) Descriptor() ([]byte, []int)

func (*WorkflowNode) GetError

func (m *WorkflowNode) GetError() string

func (*WorkflowNode) GetJobId

func (m *WorkflowNode) GetJobId() string

func (*WorkflowNode) GetPrerequisiteStepIds

func (m *WorkflowNode) GetPrerequisiteStepIds() []string

func (*WorkflowNode) GetState

func (m *WorkflowNode) GetState() WorkflowNode_NodeState

func (*WorkflowNode) GetStepId

func (m *WorkflowNode) GetStepId() string

func (*WorkflowNode) ProtoMessage

func (*WorkflowNode) ProtoMessage()

func (*WorkflowNode) Reset

func (m *WorkflowNode) Reset()

func (*WorkflowNode) String

func (m *WorkflowNode) String() string

func (*WorkflowNode) XXX_DiscardUnknown

func (m *WorkflowNode) XXX_DiscardUnknown()

func (*WorkflowNode) XXX_Marshal

func (m *WorkflowNode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WorkflowNode) XXX_Merge

func (m *WorkflowNode) XXX_Merge(src proto.Message)

func (*WorkflowNode) XXX_Size

func (m *WorkflowNode) XXX_Size() int

func (*WorkflowNode) XXX_Unmarshal

func (m *WorkflowNode) XXX_Unmarshal(b []byte) error

type WorkflowNode_NodeState

// WorkflowNode_NodeState is the state of a workflow node.
type WorkflowNode_NodeState int32

The workflow node state.

// States a workflow node can be in.
const (
	// State is unspecified.
	WorkflowNode_NODE_STATUS_UNSPECIFIED WorkflowNode_NodeState = 0
	// The node is awaiting prerequisite node to finish.
	WorkflowNode_BLOCKED WorkflowNode_NodeState = 1
	// The node is runnable but not running.
	WorkflowNode_RUNNABLE WorkflowNode_NodeState = 2
	// The node is running.
	WorkflowNode_RUNNING WorkflowNode_NodeState = 3
	// The node completed successfully.
	WorkflowNode_COMPLETED WorkflowNode_NodeState = 4
	// The node failed. A node can be marked FAILED because
	// its ancestor or peer failed.
	WorkflowNode_FAILED WorkflowNode_NodeState = 5
)

func (WorkflowNode_NodeState) EnumDescriptor

func (WorkflowNode_NodeState) EnumDescriptor() ([]byte, []int)

func (WorkflowNode_NodeState) String

func (x WorkflowNode_NodeState) String() string

type WorkflowTemplate

// WorkflowTemplate is a Dataproc workflow template resource.
type WorkflowTemplate struct {
	// Required. The template id.
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). Cannot begin or end with underscore
	// or hyphen. Must consist of between 3 and 50 characters.
	Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
	// Output only. The resource name of the workflow template, as described
	// in https://cloud.google.com/apis/design/resource_names.
	//
	// * For `projects.regions.workflowTemplates`, the resource name of the
	//   template has the following format:
	//   `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
	//
	// * For `projects.locations.workflowTemplates`, the resource name of the
	//   template has the following format:
	//   `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Optional. Used to perform a consistent read-modify-write.
	//
	// This field should be left blank for a `CreateWorkflowTemplate` request. It
	// is required for an `UpdateWorkflowTemplate` request, and must match the
	// current server version. A typical update template flow would fetch the
	// current template with a `GetWorkflowTemplate` request, which will return
	// the current template with the `version` field filled in with the
	// current server version. The user updates other fields in the template,
	// then returns it as part of the `UpdateWorkflowTemplate` request.
	Version int32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"`
	// Output only. The time template was created.
	CreateTime *timestamp.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
	// Output only. The time template was last updated.
	UpdateTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
	// Optional. The labels to associate with this template. These labels
	// will be propagated to all jobs and clusters created by the workflow
	// instance.
	//
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	//
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	//
	// No more than 32 labels can be associated with a template.
	Labels map[string]string `` /* 153-byte string literal not displayed */
	// Required. WorkflowTemplate scheduling information.
	Placement *WorkflowTemplatePlacement `protobuf:"bytes,7,opt,name=placement,proto3" json:"placement,omitempty"`
	// Required. The Directed Acyclic Graph of Jobs to submit.
	Jobs []*OrderedJob `protobuf:"bytes,8,rep,name=jobs,proto3" json:"jobs,omitempty"`
	// Optional. Template parameters whose values are substituted into the
	// template. Values for parameters must be provided when the template is
	// instantiated.
	Parameters           []*TemplateParameter `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"`
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

A Dataproc workflow template resource.

func (*WorkflowTemplate) Descriptor

func (*WorkflowTemplate) Descriptor() ([]byte, []int)

func (*WorkflowTemplate) GetCreateTime

func (m *WorkflowTemplate) GetCreateTime() *timestamp.Timestamp

func (*WorkflowTemplate) GetId

func (m *WorkflowTemplate) GetId() string

func (*WorkflowTemplate) GetJobs

func (m *WorkflowTemplate) GetJobs() []*OrderedJob

func (*WorkflowTemplate) GetLabels

func (m *WorkflowTemplate) GetLabels() map[string]string

func (*WorkflowTemplate) GetName

func (m *WorkflowTemplate) GetName() string

func (*WorkflowTemplate) GetParameters

func (m *WorkflowTemplate) GetParameters() []*TemplateParameter

func (*WorkflowTemplate) GetPlacement

func (m *WorkflowTemplate) GetPlacement() *WorkflowTemplatePlacement

func (*WorkflowTemplate) GetUpdateTime

func (m *WorkflowTemplate) GetUpdateTime() *timestamp.Timestamp

func (*WorkflowTemplate) GetVersion

func (m *WorkflowTemplate) GetVersion() int32

func (*WorkflowTemplate) ProtoMessage

func (*WorkflowTemplate) ProtoMessage()

func (*WorkflowTemplate) Reset

func (m *WorkflowTemplate) Reset()

func (*WorkflowTemplate) String

func (m *WorkflowTemplate) String() string

func (*WorkflowTemplate) XXX_DiscardUnknown

func (m *WorkflowTemplate) XXX_DiscardUnknown()

func (*WorkflowTemplate) XXX_Marshal

func (m *WorkflowTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WorkflowTemplate) XXX_Merge

func (m *WorkflowTemplate) XXX_Merge(src proto.Message)

func (*WorkflowTemplate) XXX_Size

func (m *WorkflowTemplate) XXX_Size() int

func (*WorkflowTemplate) XXX_Unmarshal

func (m *WorkflowTemplate) XXX_Unmarshal(b []byte) error

type WorkflowTemplatePlacement

type WorkflowTemplatePlacement struct {
	// Required. Specifies where workflow executes; either on a managed
	// cluster or an existing cluster chosen by labels.
	//
	// Types that are valid to be assigned to Placement:
	//	*WorkflowTemplatePlacement_ManagedCluster
	//	*WorkflowTemplatePlacement_ClusterSelector
	Placement            isWorkflowTemplatePlacement_Placement `protobuf_oneof:"placement"`
	XXX_NoUnkeyedLiteral struct{}                              `json:"-"`
	XXX_unrecognized     []byte                                `json:"-"`
	XXX_sizecache        int32                                 `json:"-"`
}

Specifies workflow execution target.

Either `managed_cluster` or `cluster_selector` is required.

func (*WorkflowTemplatePlacement) Descriptor

func (*WorkflowTemplatePlacement) Descriptor() ([]byte, []int)

func (*WorkflowTemplatePlacement) GetClusterSelector

func (m *WorkflowTemplatePlacement) GetClusterSelector() *ClusterSelector

func (*WorkflowTemplatePlacement) GetManagedCluster

func (m *WorkflowTemplatePlacement) GetManagedCluster() *ManagedCluster

func (*WorkflowTemplatePlacement) GetPlacement

func (m *WorkflowTemplatePlacement) GetPlacement() isWorkflowTemplatePlacement_Placement

func (*WorkflowTemplatePlacement) ProtoMessage

func (*WorkflowTemplatePlacement) ProtoMessage()

func (*WorkflowTemplatePlacement) Reset

func (m *WorkflowTemplatePlacement) Reset()

func (*WorkflowTemplatePlacement) String

func (m *WorkflowTemplatePlacement) String() string

func (*WorkflowTemplatePlacement) XXX_DiscardUnknown

func (m *WorkflowTemplatePlacement) XXX_DiscardUnknown()

func (*WorkflowTemplatePlacement) XXX_Marshal

func (m *WorkflowTemplatePlacement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*WorkflowTemplatePlacement) XXX_Merge

func (m *WorkflowTemplatePlacement) XXX_Merge(src proto.Message)

func (*WorkflowTemplatePlacement) XXX_OneofWrappers

func (*WorkflowTemplatePlacement) XXX_OneofWrappers() []interface{}

XXX_OneofWrappers is for the internal use of the proto package.

func (*WorkflowTemplatePlacement) XXX_Size

func (m *WorkflowTemplatePlacement) XXX_Size() int

func (*WorkflowTemplatePlacement) XXX_Unmarshal

func (m *WorkflowTemplatePlacement) XXX_Unmarshal(b []byte) error

type WorkflowTemplatePlacement_ClusterSelector

type WorkflowTemplatePlacement_ClusterSelector struct {
	ClusterSelector *ClusterSelector `protobuf:"bytes,2,opt,name=cluster_selector,json=clusterSelector,proto3,oneof"`
}

type WorkflowTemplatePlacement_ManagedCluster

type WorkflowTemplatePlacement_ManagedCluster struct {
	ManagedCluster *ManagedCluster `protobuf:"bytes,1,opt,name=managed_cluster,json=managedCluster,proto3,oneof"`
}

type WorkflowTemplateServiceClient

type WorkflowTemplateServiceClient interface {
	// Creates new workflow template.
	CreateWorkflowTemplate(ctx context.Context, in *CreateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
	// Retrieves the latest workflow template.
	//
	// Can retrieve previously instantiated template by specifying optional
	// version parameter.
	GetWorkflowTemplate(ctx context.Context, in *GetWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
	// Instantiates a template and begins execution.
	//
	// The returned Operation can be used to track execution of
	// workflow by polling
	// [operations.get][google.longrunning.Operations.GetOperation].
	// The Operation will complete when entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [operations.cancel][google.longrunning.Operations.CancelOperation].
	// This will cause any inflight jobs to be cancelled and workflow-owned
	// clusters to be deleted.
	//
	// The [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#workflowmetadata).
	// Also see [Using
	// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
	//
	// On successful completion,
	// [Operation.response][google.longrunning.Operation.response] will be
	// [Empty][google.protobuf.Empty].
	InstantiateWorkflowTemplate(ctx context.Context, in *InstantiateWorkflowTemplateRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Instantiates a template and begins execution.
	//
	// This method is equivalent to executing the sequence
	// [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
	// [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
	//
	// The returned Operation can be used to track execution of
	// workflow by polling
	// [operations.get][google.longrunning.Operations.GetOperation].
	// The Operation will complete when entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [operations.cancel][google.longrunning.Operations.CancelOperation].
	// This will cause any inflight jobs to be cancelled and workflow-owned
	// clusters to be deleted.
	//
	// The [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#workflowmetadata).
	// Also see [Using
	// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
	//
	// On successful completion,
	// [Operation.response][google.longrunning.Operation.response] will be
	// [Empty][google.protobuf.Empty].
	InstantiateInlineWorkflowTemplate(ctx context.Context, in *InstantiateInlineWorkflowTemplateRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
	// Updates (replaces) a workflow template. The updated template
	// must contain a version that matches the current server version.
	UpdateWorkflowTemplate(ctx context.Context, in *UpdateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
	// Lists workflows that match the specified filter in the request.
	ListWorkflowTemplates(ctx context.Context, in *ListWorkflowTemplatesRequest, opts ...grpc.CallOption) (*ListWorkflowTemplatesResponse, error)
	// Deletes a workflow template. It does not cancel in-progress workflows.
	DeleteWorkflowTemplate(ctx context.Context, in *DeleteWorkflowTemplateRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}

WorkflowTemplateServiceClient is the client API for WorkflowTemplateService service.

For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.

type WorkflowTemplateServiceServer

type WorkflowTemplateServiceServer interface {
	// Creates new workflow template.
	CreateWorkflowTemplate(context.Context, *CreateWorkflowTemplateRequest) (*WorkflowTemplate, error)
	// Retrieves the latest workflow template.
	//
	// Can retrieve previously instantiated template by specifying optional
	// version parameter.
	GetWorkflowTemplate(context.Context, *GetWorkflowTemplateRequest) (*WorkflowTemplate, error)
	// Instantiates a template and begins execution.
	//
	// The returned Operation can be used to track execution of
	// workflow by polling
	// [operations.get][google.longrunning.Operations.GetOperation].
	// The Operation will complete when entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [operations.cancel][google.longrunning.Operations.CancelOperation].
	// This will cause any inflight jobs to be cancelled and workflow-owned
	// clusters to be deleted.
	//
	// The [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#workflowmetadata).
	// Also see [Using
	// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
	//
	// On successful completion,
	// [Operation.response][google.longrunning.Operation.response] will be
	// [Empty][google.protobuf.Empty].
	InstantiateWorkflowTemplate(context.Context, *InstantiateWorkflowTemplateRequest) (*longrunning.Operation, error)
	// Instantiates a template and begins execution.
	//
	// This method is equivalent to executing the sequence
	// [CreateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.CreateWorkflowTemplate], [InstantiateWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.InstantiateWorkflowTemplate],
	// [DeleteWorkflowTemplate][google.cloud.dataproc.v1beta2.WorkflowTemplateService.DeleteWorkflowTemplate].
	//
	// The returned Operation can be used to track execution of
	// workflow by polling
	// [operations.get][google.longrunning.Operations.GetOperation].
	// The Operation will complete when entire workflow is finished.
	//
	// The running workflow can be aborted via
	// [operations.cancel][google.longrunning.Operations.CancelOperation].
	// This will cause any inflight jobs to be cancelled and workflow-owned
	// clusters to be deleted.
	//
	// The [Operation.metadata][google.longrunning.Operation.metadata] will be
	// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1beta2#workflowmetadata).
	// Also see [Using
	// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
	//
	// On successful completion,
	// [Operation.response][google.longrunning.Operation.response] will be
	// [Empty][google.protobuf.Empty].
	InstantiateInlineWorkflowTemplate(context.Context, *InstantiateInlineWorkflowTemplateRequest) (*longrunning.Operation, error)
	// Updates (replaces) a workflow template. The updated template
	// must contain a version that matches the current server version.
	UpdateWorkflowTemplate(context.Context, *UpdateWorkflowTemplateRequest) (*WorkflowTemplate, error)
	// Lists workflows that match the specified filter in the request.
	ListWorkflowTemplates(context.Context, *ListWorkflowTemplatesRequest) (*ListWorkflowTemplatesResponse, error)
	// Deletes a workflow template. It does not cancel in-progress workflows.
	DeleteWorkflowTemplate(context.Context, *DeleteWorkflowTemplateRequest) (*empty.Empty, error)
}

WorkflowTemplateServiceServer is the server API for WorkflowTemplateService service.

type YarnApplication

type YarnApplication struct {
	// Output only. The application name.
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Output only. The application state.
	State YarnApplication_State `protobuf:"varint,2,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.YarnApplication_State" json:"state,omitempty"`
	// Output only. The numerical progress of the application, from 1 to 100.
	Progress float32 `protobuf:"fixed32,3,opt,name=progress,proto3" json:"progress,omitempty"`
	// Output only. The HTTP URL of the ApplicationMaster, HistoryServer, or
	// TimelineServer that provides application-specific information. The URL uses
	// the internal hostname, and requires a proxy server for resolution and,
	// possibly, access.
	TrackingUrl          string   `protobuf:"bytes,4,opt,name=tracking_url,json=trackingUrl,proto3" json:"tracking_url,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

A YARN application created by a job. Application information is a subset of <code>org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto</code>.

**Beta Feature**: This report is available for testing purposes only. It may be changed before final release.

func (*YarnApplication) Descriptor

func (*YarnApplication) Descriptor() ([]byte, []int)

func (*YarnApplication) GetName

func (m *YarnApplication) GetName() string

func (*YarnApplication) GetProgress

func (m *YarnApplication) GetProgress() float32

func (*YarnApplication) GetState

func (m *YarnApplication) GetState() YarnApplication_State

func (*YarnApplication) GetTrackingUrl

func (m *YarnApplication) GetTrackingUrl() string

func (*YarnApplication) ProtoMessage

func (*YarnApplication) ProtoMessage()

func (*YarnApplication) Reset

func (m *YarnApplication) Reset()

func (*YarnApplication) String

func (m *YarnApplication) String() string

func (*YarnApplication) XXX_DiscardUnknown

func (m *YarnApplication) XXX_DiscardUnknown()

func (*YarnApplication) XXX_Marshal

func (m *YarnApplication) XXX_Marshal(b []byte, deterministic bool) ([]byte, error)

func (*YarnApplication) XXX_Merge

func (m *YarnApplication) XXX_Merge(src proto.Message)

func (*YarnApplication) XXX_Size

func (m *YarnApplication) XXX_Size() int

func (*YarnApplication) XXX_Unmarshal

func (m *YarnApplication) XXX_Unmarshal(b []byte) error

type YarnApplication_State

type YarnApplication_State int32

The application state, corresponding to <code>YarnProtos.YarnApplicationStateProto</code>.

const (
	// Status is unspecified.
	YarnApplication_STATE_UNSPECIFIED YarnApplication_State = 0
	// Status is NEW.
	YarnApplication_NEW YarnApplication_State = 1
	// Status is NEW_SAVING.
	YarnApplication_NEW_SAVING YarnApplication_State = 2
	// Status is SUBMITTED.
	YarnApplication_SUBMITTED YarnApplication_State = 3
	// Status is ACCEPTED.
	YarnApplication_ACCEPTED YarnApplication_State = 4
	// Status is RUNNING.
	YarnApplication_RUNNING YarnApplication_State = 5
	// Status is FINISHED.
	YarnApplication_FINISHED YarnApplication_State = 6
	// Status is FAILED.
	YarnApplication_FAILED YarnApplication_State = 7
	// Status is KILLED.
	YarnApplication_KILLED YarnApplication_State = 8
)

func (YarnApplication_State) EnumDescriptor

func (YarnApplication_State) EnumDescriptor() ([]byte, []int)

func (YarnApplication_State) String

func (x YarnApplication_State) String() string

Jump to

Keyboard shortcuts

? : This menu
/ : Search site
f or F : Jump to
y or Y : Canonical URL