Variables
Batch_State_name, Batch_State_value
var (
Batch_State_name = map[int32]string{
0: "STATE_UNSPECIFIED",
1: "PENDING",
2: "RUNNING",
3: "CANCELLING",
4: "CANCELLED",
5: "SUCCEEDED",
6: "FAILED",
}
Batch_State_value = map[string]int32{
"STATE_UNSPECIFIED": 0,
"PENDING": 1,
"RUNNING": 2,
"CANCELLING": 3,
"CANCELLED": 4,
"SUCCEEDED": 5,
"FAILED": 6,
}
)
Enum value maps for Batch_State.
GceClusterConfig_PrivateIpv6GoogleAccess_name, GceClusterConfig_PrivateIpv6GoogleAccess_value
var (
GceClusterConfig_PrivateIpv6GoogleAccess_name = map[int32]string{
0: "PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED",
1: "INHERIT_FROM_SUBNETWORK",
2: "OUTBOUND",
3: "BIDIRECTIONAL",
}
GceClusterConfig_PrivateIpv6GoogleAccess_value = map[string]int32{
"PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED": 0,
"INHERIT_FROM_SUBNETWORK": 1,
"OUTBOUND": 2,
"BIDIRECTIONAL": 3,
}
)
Enum value maps for GceClusterConfig_PrivateIpv6GoogleAccess.
InstanceGroupConfig_Preemptibility_name, InstanceGroupConfig_Preemptibility_value
var (
InstanceGroupConfig_Preemptibility_name = map[int32]string{
0: "PREEMPTIBILITY_UNSPECIFIED",
1: "NON_PREEMPTIBLE",
2: "PREEMPTIBLE",
3: "SPOT",
}
InstanceGroupConfig_Preemptibility_value = map[string]int32{
"PREEMPTIBILITY_UNSPECIFIED": 0,
"NON_PREEMPTIBLE": 1,
"PREEMPTIBLE": 2,
"SPOT": 3,
}
)
Enum value maps for InstanceGroupConfig_Preemptibility.
NodeGroup_Role_name, NodeGroup_Role_value
var (
NodeGroup_Role_name = map[int32]string{
0: "ROLE_UNSPECIFIED",
1: "DRIVER",
}
NodeGroup_Role_value = map[string]int32{
"ROLE_UNSPECIFIED": 0,
"DRIVER": 1,
}
)
Enum value maps for NodeGroup_Role.
ClusterStatus_State_name, ClusterStatus_State_value
var (
ClusterStatus_State_name = map[int32]string{
0: "UNKNOWN",
1: "CREATING",
2: "RUNNING",
3: "ERROR",
9: "ERROR_DUE_TO_UPDATE",
4: "DELETING",
5: "UPDATING",
6: "STOPPING",
7: "STOPPED",
8: "STARTING",
10: "REPAIRING",
}
ClusterStatus_State_value = map[string]int32{
"UNKNOWN": 0,
"CREATING": 1,
"RUNNING": 2,
"ERROR": 3,
"ERROR_DUE_TO_UPDATE": 9,
"DELETING": 4,
"UPDATING": 5,
"STOPPING": 6,
"STOPPED": 7,
"STARTING": 8,
"REPAIRING": 10,
}
)
Enum value maps for ClusterStatus_State.
ClusterStatus_Substate_name, ClusterStatus_Substate_value
var (
ClusterStatus_Substate_name = map[int32]string{
0: "UNSPECIFIED",
1: "UNHEALTHY",
2: "STALE_STATUS",
}
ClusterStatus_Substate_value = map[string]int32{
"UNSPECIFIED": 0,
"UNHEALTHY": 1,
"STALE_STATUS": 2,
}
)
Enum value maps for ClusterStatus_Substate.
DataprocMetricConfig_MetricSource_name, DataprocMetricConfig_MetricSource_value
var (
DataprocMetricConfig_MetricSource_name = map[int32]string{
0: "METRIC_SOURCE_UNSPECIFIED",
1: "MONITORING_AGENT_DEFAULTS",
2: "HDFS",
3: "SPARK",
4: "YARN",
5: "SPARK_HISTORY_SERVER",
6: "HIVESERVER2",
7: "HIVEMETASTORE",
8: "FLINK",
}
DataprocMetricConfig_MetricSource_value = map[string]int32{
"METRIC_SOURCE_UNSPECIFIED": 0,
"MONITORING_AGENT_DEFAULTS": 1,
"HDFS": 2,
"SPARK": 3,
"YARN": 4,
"SPARK_HISTORY_SERVER": 5,
"HIVESERVER2": 6,
"HIVEMETASTORE": 7,
"FLINK": 8,
}
)
Enum value maps for DataprocMetricConfig_MetricSource.
DiagnoseClusterRequest_TarballAccess_name, DiagnoseClusterRequest_TarballAccess_value
var (
DiagnoseClusterRequest_TarballAccess_name = map[int32]string{
0: "TARBALL_ACCESS_UNSPECIFIED",
1: "GOOGLE_CLOUD_SUPPORT",
2: "GOOGLE_DATAPROC_DIAGNOSE",
}
DiagnoseClusterRequest_TarballAccess_value = map[string]int32{
"TARBALL_ACCESS_UNSPECIFIED": 0,
"GOOGLE_CLOUD_SUPPORT": 1,
"GOOGLE_DATAPROC_DIAGNOSE": 2,
}
)
Enum value maps for DiagnoseClusterRequest_TarballAccess.
ReservationAffinity_Type_name, ReservationAffinity_Type_value
var (
ReservationAffinity_Type_name = map[int32]string{
0: "TYPE_UNSPECIFIED",
1: "NO_RESERVATION",
2: "ANY_RESERVATION",
3: "SPECIFIC_RESERVATION",
}
ReservationAffinity_Type_value = map[string]int32{
"TYPE_UNSPECIFIED": 0,
"NO_RESERVATION": 1,
"ANY_RESERVATION": 2,
"SPECIFIC_RESERVATION": 3,
}
)
Enum value maps for ReservationAffinity_Type.
LoggingConfig_Level_name, LoggingConfig_Level_value
var (
LoggingConfig_Level_name = map[int32]string{
0: "LEVEL_UNSPECIFIED",
1: "ALL",
2: "TRACE",
3: "DEBUG",
4: "INFO",
5: "WARN",
6: "ERROR",
7: "FATAL",
8: "OFF",
}
LoggingConfig_Level_value = map[string]int32{
"LEVEL_UNSPECIFIED": 0,
"ALL": 1,
"TRACE": 2,
"DEBUG": 3,
"INFO": 4,
"WARN": 5,
"ERROR": 6,
"FATAL": 7,
"OFF": 8,
}
)
Enum value maps for LoggingConfig_Level.
JobStatus_State_name, JobStatus_State_value
var (
JobStatus_State_name = map[int32]string{
0: "STATE_UNSPECIFIED",
1: "PENDING",
8: "SETUP_DONE",
2: "RUNNING",
3: "CANCEL_PENDING",
7: "CANCEL_STARTED",
4: "CANCELLED",
5: "DONE",
6: "ERROR",
9: "ATTEMPT_FAILURE",
}
JobStatus_State_value = map[string]int32{
"STATE_UNSPECIFIED": 0,
"PENDING": 1,
"SETUP_DONE": 8,
"RUNNING": 2,
"CANCEL_PENDING": 3,
"CANCEL_STARTED": 7,
"CANCELLED": 4,
"DONE": 5,
"ERROR": 6,
"ATTEMPT_FAILURE": 9,
}
)
Enum value maps for JobStatus_State.
JobStatus_Substate_name, JobStatus_Substate_value
var (
JobStatus_Substate_name = map[int32]string{
0: "UNSPECIFIED",
1: "SUBMITTED",
2: "QUEUED",
3: "STALE_STATUS",
}
JobStatus_Substate_value = map[string]int32{
"UNSPECIFIED": 0,
"SUBMITTED": 1,
"QUEUED": 2,
"STALE_STATUS": 3,
}
)
Enum value maps for JobStatus_Substate.
YarnApplication_State_name, YarnApplication_State_value
var (
YarnApplication_State_name = map[int32]string{
0: "STATE_UNSPECIFIED",
1: "NEW",
2: "NEW_SAVING",
3: "SUBMITTED",
4: "ACCEPTED",
5: "RUNNING",
6: "FINISHED",
7: "FAILED",
8: "KILLED",
}
YarnApplication_State_value = map[string]int32{
"STATE_UNSPECIFIED": 0,
"NEW": 1,
"NEW_SAVING": 2,
"SUBMITTED": 3,
"ACCEPTED": 4,
"RUNNING": 5,
"FINISHED": 6,
"FAILED": 7,
"KILLED": 8,
}
)
Enum value maps for YarnApplication_State.
ListJobsRequest_JobStateMatcher_name, ListJobsRequest_JobStateMatcher_value
var (
ListJobsRequest_JobStateMatcher_name = map[int32]string{
0: "ALL",
1: "ACTIVE",
2: "NON_ACTIVE",
}
ListJobsRequest_JobStateMatcher_value = map[string]int32{
"ALL": 0,
"ACTIVE": 1,
"NON_ACTIVE": 2,
}
)
Enum value maps for ListJobsRequest_JobStateMatcher.
BatchOperationMetadata_BatchOperationType_name, BatchOperationMetadata_BatchOperationType_value
var (
BatchOperationMetadata_BatchOperationType_name = map[int32]string{
0: "BATCH_OPERATION_TYPE_UNSPECIFIED",
1: "BATCH",
}
BatchOperationMetadata_BatchOperationType_value = map[string]int32{
"BATCH_OPERATION_TYPE_UNSPECIFIED": 0,
"BATCH": 1,
}
)
Enum value maps for BatchOperationMetadata_BatchOperationType.
SessionOperationMetadata_SessionOperationType_name, SessionOperationMetadata_SessionOperationType_value
var (
SessionOperationMetadata_SessionOperationType_name = map[int32]string{
0: "SESSION_OPERATION_TYPE_UNSPECIFIED",
1: "CREATE",
2: "TERMINATE",
3: "DELETE",
}
SessionOperationMetadata_SessionOperationType_value = map[string]int32{
"SESSION_OPERATION_TYPE_UNSPECIFIED": 0,
"CREATE": 1,
"TERMINATE": 2,
"DELETE": 3,
}
)
Enum value maps for SessionOperationMetadata_SessionOperationType.
ClusterOperationStatus_State_name, ClusterOperationStatus_State_value
var (
ClusterOperationStatus_State_name = map[int32]string{
0: "UNKNOWN",
1: "PENDING",
2: "RUNNING",
3: "DONE",
}
ClusterOperationStatus_State_value = map[string]int32{
"UNKNOWN": 0,
"PENDING": 1,
"RUNNING": 2,
"DONE": 3,
}
)
Enum value maps for ClusterOperationStatus_State.
NodeGroupOperationMetadata_NodeGroupOperationType_name, NodeGroupOperationMetadata_NodeGroupOperationType_value
var (
NodeGroupOperationMetadata_NodeGroupOperationType_name = map[int32]string{
0: "NODE_GROUP_OPERATION_TYPE_UNSPECIFIED",
1: "CREATE",
2: "UPDATE",
3: "DELETE",
4: "RESIZE",
}
NodeGroupOperationMetadata_NodeGroupOperationType_value = map[string]int32{
"NODE_GROUP_OPERATION_TYPE_UNSPECIFIED": 0,
"CREATE": 1,
"UPDATE": 2,
"DELETE": 3,
"RESIZE": 4,
}
)
Enum value maps for NodeGroupOperationMetadata_NodeGroupOperationType.
Session_State_name, Session_State_value
var (
Session_State_name = map[int32]string{
0: "STATE_UNSPECIFIED",
1: "CREATING",
2: "ACTIVE",
3: "TERMINATING",
4: "TERMINATED",
5: "FAILED",
}
Session_State_value = map[string]int32{
"STATE_UNSPECIFIED": 0,
"CREATING": 1,
"ACTIVE": 2,
"TERMINATING": 3,
"TERMINATED": 4,
"FAILED": 5,
}
)
Enum value maps for Session_State.
JupyterConfig_Kernel_name, JupyterConfig_Kernel_value
var (
JupyterConfig_Kernel_name = map[int32]string{
0: "KERNEL_UNSPECIFIED",
1: "PYTHON",
2: "SCALA",
}
JupyterConfig_Kernel_value = map[string]int32{
"KERNEL_UNSPECIFIED": 0,
"PYTHON": 1,
"SCALA": 2,
}
)
Enum value maps for JupyterConfig_Kernel.
Component_name, Component_value
var (
Component_name = map[int32]string{
0: "COMPONENT_UNSPECIFIED",
5: "ANACONDA",
13: "DOCKER",
9: "DRUID",
14: "FLINK",
11: "HBASE",
3: "HIVE_WEBHCAT",
18: "HUDI",
1: "JUPYTER",
6: "PRESTO",
17: "TRINO",
12: "RANGER",
10: "SOLR",
4: "ZEPPELIN",
8: "ZOOKEEPER",
}
Component_value = map[string]int32{
"COMPONENT_UNSPECIFIED": 0,
"ANACONDA": 5,
"DOCKER": 13,
"DRUID": 9,
"FLINK": 14,
"HBASE": 11,
"HIVE_WEBHCAT": 3,
"HUDI": 18,
"JUPYTER": 1,
"PRESTO": 6,
"TRINO": 17,
"RANGER": 12,
"SOLR": 10,
"ZEPPELIN": 4,
"ZOOKEEPER": 8,
}
)
Enum value maps for Component.
FailureAction_name, FailureAction_value
var (
FailureAction_name = map[int32]string{
0: "FAILURE_ACTION_UNSPECIFIED",
1: "NO_ACTION",
2: "DELETE",
}
FailureAction_value = map[string]int32{
"FAILURE_ACTION_UNSPECIFIED": 0,
"NO_ACTION": 1,
"DELETE": 2,
}
)
Enum value maps for FailureAction.
GkeNodePoolTarget_Role_name, GkeNodePoolTarget_Role_value
var (
GkeNodePoolTarget_Role_name = map[int32]string{
0: "ROLE_UNSPECIFIED",
1: "DEFAULT",
2: "CONTROLLER",
3: "SPARK_DRIVER",
4: "SPARK_EXECUTOR",
}
GkeNodePoolTarget_Role_value = map[string]int32{
"ROLE_UNSPECIFIED": 0,
"DEFAULT": 1,
"CONTROLLER": 2,
"SPARK_DRIVER": 3,
"SPARK_EXECUTOR": 4,
}
)
Enum value maps for GkeNodePoolTarget_Role.
AuthenticationConfig_AuthenticationType_name, AuthenticationConfig_AuthenticationType_value
var (
AuthenticationConfig_AuthenticationType_name = map[int32]string{
0: "AUTHENTICATION_TYPE_UNSPECIFIED",
1: "SERVICE_ACCOUNT",
2: "END_USER_CREDENTIALS",
}
AuthenticationConfig_AuthenticationType_value = map[string]int32{
"AUTHENTICATION_TYPE_UNSPECIFIED": 0,
"SERVICE_ACCOUNT": 1,
"END_USER_CREDENTIALS": 2,
}
)
Enum value maps for AuthenticationConfig_AuthenticationType.
AutotuningConfig_Scenario_name, AutotuningConfig_Scenario_value
var (
AutotuningConfig_Scenario_name = map[int32]string{
0: "SCENARIO_UNSPECIFIED",
2: "SCALING",
3: "BROADCAST_HASH_JOIN",
4: "MEMORY",
}
AutotuningConfig_Scenario_value = map[string]int32{
"SCENARIO_UNSPECIFIED": 0,
"SCALING": 2,
"BROADCAST_HASH_JOIN": 3,
"MEMORY": 4,
}
)
Enum value maps for AutotuningConfig_Scenario.
WorkflowMetadata_State_name, WorkflowMetadata_State_value
var (
WorkflowMetadata_State_name = map[int32]string{
0: "UNKNOWN",
1: "PENDING",
2: "RUNNING",
3: "DONE",
}
WorkflowMetadata_State_value = map[string]int32{
"UNKNOWN": 0,
"PENDING": 1,
"RUNNING": 2,
"DONE": 3,
}
)
Enum value maps for WorkflowMetadata_State.
WorkflowNode_NodeState_name, WorkflowNode_NodeState_value
var (
WorkflowNode_NodeState_name = map[int32]string{
0: "NODE_STATE_UNSPECIFIED",
1: "BLOCKED",
2: "RUNNABLE",
3: "RUNNING",
4: "COMPLETED",
5: "FAILED",
}
WorkflowNode_NodeState_value = map[string]int32{
"NODE_STATE_UNSPECIFIED": 0,
"BLOCKED": 1,
"RUNNABLE": 2,
"RUNNING": 3,
"COMPLETED": 4,
"FAILED": 5,
}
)
Enum value maps for WorkflowNode_NodeState.
File_google_cloud_dataproc_v1_autoscaling_policies_proto
var File_google_cloud_dataproc_v1_autoscaling_policies_proto protoreflect.FileDescriptor
File_google_cloud_dataproc_v1_batches_proto
var File_google_cloud_dataproc_v1_batches_proto protoreflect.FileDescriptor
File_google_cloud_dataproc_v1_clusters_proto
var File_google_cloud_dataproc_v1_clusters_proto protoreflect.FileDescriptor
File_google_cloud_dataproc_v1_jobs_proto
var File_google_cloud_dataproc_v1_jobs_proto protoreflect.FileDescriptor
File_google_cloud_dataproc_v1_node_groups_proto
var File_google_cloud_dataproc_v1_node_groups_proto protoreflect.FileDescriptor
File_google_cloud_dataproc_v1_operations_proto
var File_google_cloud_dataproc_v1_operations_proto protoreflect.FileDescriptor
File_google_cloud_dataproc_v1_session_templates_proto
var File_google_cloud_dataproc_v1_session_templates_proto protoreflect.FileDescriptor
File_google_cloud_dataproc_v1_sessions_proto
var File_google_cloud_dataproc_v1_sessions_proto protoreflect.FileDescriptor
File_google_cloud_dataproc_v1_shared_proto
var File_google_cloud_dataproc_v1_shared_proto protoreflect.FileDescriptor
File_google_cloud_dataproc_v1_workflow_templates_proto
var File_google_cloud_dataproc_v1_workflow_templates_proto protoreflect.FileDescriptor
Functions
func RegisterAutoscalingPolicyServiceServer
func RegisterAutoscalingPolicyServiceServer(s *grpc.Server, srv AutoscalingPolicyServiceServer)
func RegisterBatchControllerServer
func RegisterBatchControllerServer(s *grpc.Server, srv BatchControllerServer)
func RegisterClusterControllerServer
func RegisterClusterControllerServer(s *grpc.Server, srv ClusterControllerServer)
func RegisterJobControllerServer
func RegisterJobControllerServer(s *grpc.Server, srv JobControllerServer)
func RegisterNodeGroupControllerServer
func RegisterNodeGroupControllerServer(s *grpc.Server, srv NodeGroupControllerServer)
func RegisterSessionControllerServer
func RegisterSessionControllerServer(s *grpc.Server, srv SessionControllerServer)
func RegisterSessionTemplateControllerServer
func RegisterSessionTemplateControllerServer(s *grpc.Server, srv SessionTemplateControllerServer)
func RegisterWorkflowTemplateServiceServer
func RegisterWorkflowTemplateServiceServer(s *grpc.Server, srv WorkflowTemplateServiceServer)
AcceleratorConfig
type AcceleratorConfig struct {
// Full URL, partial URI, or short name of the accelerator type resource to
// expose to this instance. See
// [Compute Engine
// AcceleratorTypes](https://cloud.google.com/compute/docs/reference/v1/acceleratorTypes).
//
// Examples:
//
// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4`
// * `projects/[project_id]/zones/[zone]/acceleratorTypes/nvidia-tesla-t4`
// * `nvidia-tesla-t4`
//
// **Auto Zone Exception**: If you are using the Dataproc
// [Auto Zone
// Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
// feature, you must use the short name of the accelerator type
// resource, for example, `nvidia-tesla-t4`.
AcceleratorTypeUri string `protobuf:"bytes,1,opt,name=accelerator_type_uri,json=acceleratorTypeUri,proto3" json:"accelerator_type_uri,omitempty"`
// The number of the accelerator cards of this type exposed to this instance.
AcceleratorCount int32 `protobuf:"varint,2,opt,name=accelerator_count,json=acceleratorCount,proto3" json:"accelerator_count,omitempty"`
// contains filtered or unexported fields
}
Specifies the type and number of accelerator cards attached to the instances of an instance group (see GPUs on Compute Engine).
func (*AcceleratorConfig) Descriptor
func (*AcceleratorConfig) Descriptor() ([]byte, []int)
Deprecated: Use AcceleratorConfig.ProtoReflect.Descriptor instead.
func (*AcceleratorConfig) GetAcceleratorCount
func (x *AcceleratorConfig) GetAcceleratorCount() int32
func (*AcceleratorConfig) GetAcceleratorTypeUri
func (x *AcceleratorConfig) GetAcceleratorTypeUri() string
func (*AcceleratorConfig) ProtoMessage
func (*AcceleratorConfig) ProtoMessage()
func (*AcceleratorConfig) ProtoReflect
func (x *AcceleratorConfig) ProtoReflect() protoreflect.Message
func (*AcceleratorConfig) Reset
func (x *AcceleratorConfig) Reset()
func (*AcceleratorConfig) String
func (x *AcceleratorConfig) String() string
AuthenticationConfig
type AuthenticationConfig struct {
// Optional. Authentication type for the user workload running in containers.
UserWorkloadAuthenticationType AuthenticationConfig_AuthenticationType `protobuf:"varint,1,opt,name=user_workload_authentication_type,json=userWorkloadAuthenticationType,proto3,enum=google.cloud.dataproc.v1.AuthenticationConfig_AuthenticationType" json:"user_workload_authentication_type,omitempty"`
// contains filtered or unexported fields
}
Authentication configuration for a workload is used to set the default identity for the workload execution. The config specifies the type of identity (service account or user) that will be used by workloads to access resources on the project(s).
func (*AuthenticationConfig) Descriptor
func (*AuthenticationConfig) Descriptor() ([]byte, []int)
Deprecated: Use AuthenticationConfig.ProtoReflect.Descriptor instead.
func (*AuthenticationConfig) GetUserWorkloadAuthenticationType
func (x *AuthenticationConfig) GetUserWorkloadAuthenticationType() AuthenticationConfig_AuthenticationType
func (*AuthenticationConfig) ProtoMessage
func (*AuthenticationConfig) ProtoMessage()
func (*AuthenticationConfig) ProtoReflect
func (x *AuthenticationConfig) ProtoReflect() protoreflect.Message
func (*AuthenticationConfig) Reset
func (x *AuthenticationConfig) Reset()
func (*AuthenticationConfig) String
func (x *AuthenticationConfig) String() string
AuthenticationConfig_AuthenticationType
type AuthenticationConfig_AuthenticationType int32
Authentication types for workload execution.
AuthenticationConfig_AUTHENTICATION_TYPE_UNSPECIFIED, AuthenticationConfig_SERVICE_ACCOUNT, AuthenticationConfig_END_USER_CREDENTIALS
const (
// If AuthenticationType is unspecified then END_USER_CREDENTIALS is used
// for 3.0 and newer runtimes, and SERVICE_ACCOUNT is used for older
// runtimes.
AuthenticationConfig_AUTHENTICATION_TYPE_UNSPECIFIED AuthenticationConfig_AuthenticationType = 0
// Use service account credentials for authenticating to other services.
AuthenticationConfig_SERVICE_ACCOUNT AuthenticationConfig_AuthenticationType = 1
// Use OAuth credentials associated with the workload creator/user for
// authenticating to other services.
AuthenticationConfig_END_USER_CREDENTIALS AuthenticationConfig_AuthenticationType = 2
)
func (AuthenticationConfig_AuthenticationType) Descriptor
func (AuthenticationConfig_AuthenticationType) Descriptor() protoreflect.EnumDescriptor
func (AuthenticationConfig_AuthenticationType) Enum
func (AuthenticationConfig_AuthenticationType) EnumDescriptor
func (AuthenticationConfig_AuthenticationType) EnumDescriptor() ([]byte, []int)
Deprecated: Use AuthenticationConfig_AuthenticationType.Descriptor instead.
func (AuthenticationConfig_AuthenticationType) Number
func (x AuthenticationConfig_AuthenticationType) Number() protoreflect.EnumNumber
func (AuthenticationConfig_AuthenticationType) String
func (x AuthenticationConfig_AuthenticationType) String() string
func (AuthenticationConfig_AuthenticationType) Type
func (AuthenticationConfig_AuthenticationType) Type() protoreflect.EnumType
AutoscalingConfig
type AutoscalingConfig struct {
// Optional. The autoscaling policy used by the cluster.
//
// Only resource names including projectid and location (region) are valid.
// Examples:
//
// * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`
// * `projects/[project_id]/locations/[dataproc_region]/autoscalingPolicies/[policy_id]`
//
// Note that the policy must be in the same project and Dataproc region.
PolicyUri string `protobuf:"bytes,1,opt,name=policy_uri,json=policyUri,proto3" json:"policy_uri,omitempty"`
// contains filtered or unexported fields
}
Autoscaling Policy config associated with the cluster.
func (*AutoscalingConfig) Descriptor
func (*AutoscalingConfig) Descriptor() ([]byte, []int)
Deprecated: Use AutoscalingConfig.ProtoReflect.Descriptor instead.
func (*AutoscalingConfig) GetPolicyUri
func (x *AutoscalingConfig) GetPolicyUri() string
func (*AutoscalingConfig) ProtoMessage
func (*AutoscalingConfig) ProtoMessage()
func (*AutoscalingConfig) ProtoReflect
func (x *AutoscalingConfig) ProtoReflect() protoreflect.Message
func (*AutoscalingConfig) Reset
func (x *AutoscalingConfig) Reset()
func (*AutoscalingConfig) String
func (x *AutoscalingConfig) String() string
AutoscalingPolicy
type AutoscalingPolicy struct {
// Required. The policy id.
//
// The id must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). Cannot begin or end with underscore
// or hyphen. Must consist of between 3 and 50 characters.
Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
// Output only. The "resource name" of the autoscaling policy, as described
// in https://cloud.google.com/apis/design/resource_names.
//
// - For `projects.regions.autoscalingPolicies`, the resource name of the
// policy has the following format:
// `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`
//
// - For `projects.locations.autoscalingPolicies`, the resource name of the
// policy has the following format:
// `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`
Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
// Autoscaling algorithm for policy.
//
// Types that are assignable to Algorithm:
//
// *AutoscalingPolicy_BasicAlgorithm
Algorithm isAutoscalingPolicy_Algorithm `protobuf_oneof:"algorithm"`
// Required. Describes how the autoscaler will operate for primary workers.
WorkerConfig *InstanceGroupAutoscalingPolicyConfig `protobuf:"bytes,4,opt,name=worker_config,json=workerConfig,proto3" json:"worker_config,omitempty"`
// Optional. Describes how the autoscaler will operate for secondary workers.
SecondaryWorkerConfig *InstanceGroupAutoscalingPolicyConfig `protobuf:"bytes,5,opt,name=secondary_worker_config,json=secondaryWorkerConfig,proto3" json:"secondary_worker_config,omitempty"`
// Optional. The labels to associate with this autoscaling policy.
// Label **keys** must contain 1 to 63 characters, and must conform to
// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
// Label **values** may be empty, but, if present, must contain 1 to 63
// characters, and must conform to [RFC
// 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
// associated with an autoscaling policy.
Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// contains filtered or unexported fields
}
Describes an autoscaling policy for Dataproc cluster autoscaler.
func (*AutoscalingPolicy) Descriptor
func (*AutoscalingPolicy) Descriptor() ([]byte, []int)
Deprecated: Use AutoscalingPolicy.ProtoReflect.Descriptor instead.
func (*AutoscalingPolicy) GetAlgorithm
func (m *AutoscalingPolicy) GetAlgorithm() isAutoscalingPolicy_Algorithm
func (*AutoscalingPolicy) GetBasicAlgorithm
func (x *AutoscalingPolicy) GetBasicAlgorithm() *BasicAutoscalingAlgorithm
func (*AutoscalingPolicy) GetId
func (x *AutoscalingPolicy) GetId() string
func (*AutoscalingPolicy) GetLabels
func (x *AutoscalingPolicy) GetLabels() map[string]string
func (*AutoscalingPolicy) GetName
func (x *AutoscalingPolicy) GetName() string
func (*AutoscalingPolicy) GetSecondaryWorkerConfig
func (x *AutoscalingPolicy) GetSecondaryWorkerConfig() *InstanceGroupAutoscalingPolicyConfig
func (*AutoscalingPolicy) GetWorkerConfig
func (x *AutoscalingPolicy) GetWorkerConfig() *InstanceGroupAutoscalingPolicyConfig
func (*AutoscalingPolicy) ProtoMessage
func (*AutoscalingPolicy) ProtoMessage()
func (*AutoscalingPolicy) ProtoReflect
func (x *AutoscalingPolicy) ProtoReflect() protoreflect.Message
func (*AutoscalingPolicy) Reset
func (x *AutoscalingPolicy) Reset()
func (*AutoscalingPolicy) String
func (x *AutoscalingPolicy) String() string
AutoscalingPolicyServiceClient
type AutoscalingPolicyServiceClient interface {
// Creates new autoscaling policy.
CreateAutoscalingPolicy(ctx context.Context, in *CreateAutoscalingPolicyRequest, opts ...grpc.CallOption) (*AutoscalingPolicy, error)
// Updates (replaces) autoscaling policy.
//
// Disabled check for update_mask, because all updates will be full
// replacements.
UpdateAutoscalingPolicy(ctx context.Context, in *UpdateAutoscalingPolicyRequest, opts ...grpc.CallOption) (*AutoscalingPolicy, error)
// Retrieves autoscaling policy.
GetAutoscalingPolicy(ctx context.Context, in *GetAutoscalingPolicyRequest, opts ...grpc.CallOption) (*AutoscalingPolicy, error)
// Lists autoscaling policies in the project.
ListAutoscalingPolicies(ctx context.Context, in *ListAutoscalingPoliciesRequest, opts ...grpc.CallOption) (*ListAutoscalingPoliciesResponse, error)
// Deletes an autoscaling policy. It is an error to delete an autoscaling
// policy that is in use by one or more clusters.
DeleteAutoscalingPolicy(ctx context.Context, in *DeleteAutoscalingPolicyRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
AutoscalingPolicyServiceClient is the client API for AutoscalingPolicyService service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewAutoscalingPolicyServiceClient
func NewAutoscalingPolicyServiceClient(cc grpc.ClientConnInterface) AutoscalingPolicyServiceClient
AutoscalingPolicyServiceServer
type AutoscalingPolicyServiceServer interface {
// Creates new autoscaling policy.
CreateAutoscalingPolicy(context.Context, *CreateAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
// Updates (replaces) autoscaling policy.
//
// Disabled check for update_mask, because all updates will be full
// replacements.
UpdateAutoscalingPolicy(context.Context, *UpdateAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
// Retrieves autoscaling policy.
GetAutoscalingPolicy(context.Context, *GetAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
// Lists autoscaling policies in the project.
ListAutoscalingPolicies(context.Context, *ListAutoscalingPoliciesRequest) (*ListAutoscalingPoliciesResponse, error)
// Deletes an autoscaling policy. It is an error to delete an autoscaling
// policy that is in use by one or more clusters.
DeleteAutoscalingPolicy(context.Context, *DeleteAutoscalingPolicyRequest) (*emptypb.Empty, error)
}
AutoscalingPolicyServiceServer is the server API for AutoscalingPolicyService service.
AutoscalingPolicy_BasicAlgorithm
type AutoscalingPolicy_BasicAlgorithm struct {
BasicAlgorithm *BasicAutoscalingAlgorithm `protobuf:"bytes,3,opt,name=basic_algorithm,json=basicAlgorithm,proto3,oneof"`
}
AutotuningConfig
type AutotuningConfig struct {
// Optional. Scenarios for which tunings are applied.
Scenarios []AutotuningConfig_Scenario `protobuf:"varint,2,rep,packed,name=scenarios,proto3,enum=google.cloud.dataproc.v1.AutotuningConfig_Scenario" json:"scenarios,omitempty"`
// contains filtered or unexported fields
}
Autotuning configuration of the workload.
func (*AutotuningConfig) Descriptor
func (*AutotuningConfig) Descriptor() ([]byte, []int)
Deprecated: Use AutotuningConfig.ProtoReflect.Descriptor instead.
func (*AutotuningConfig) GetScenarios
func (x *AutotuningConfig) GetScenarios() []AutotuningConfig_Scenario
func (*AutotuningConfig) ProtoMessage
func (*AutotuningConfig) ProtoMessage()
func (*AutotuningConfig) ProtoReflect
func (x *AutotuningConfig) ProtoReflect() protoreflect.Message
func (*AutotuningConfig) Reset
func (x *AutotuningConfig) Reset()
func (*AutotuningConfig) String
func (x *AutotuningConfig) String() string
AutotuningConfig_Scenario
type AutotuningConfig_Scenario int32
Scenario represents a specific goal that autotuning will attempt to achieve by modifying workloads.
AutotuningConfig_SCENARIO_UNSPECIFIED, AutotuningConfig_SCALING, AutotuningConfig_BROADCAST_HASH_JOIN, AutotuningConfig_MEMORY
const (
// Default value.
AutotuningConfig_SCENARIO_UNSPECIFIED AutotuningConfig_Scenario = 0
// Scaling recommendations such as initialExecutors.
AutotuningConfig_SCALING AutotuningConfig_Scenario = 2
// Adding hints for potential relation broadcasts.
AutotuningConfig_BROADCAST_HASH_JOIN AutotuningConfig_Scenario = 3
// Memory management for workloads.
AutotuningConfig_MEMORY AutotuningConfig_Scenario = 4
)
func (AutotuningConfig_Scenario) Descriptor
func (AutotuningConfig_Scenario) Descriptor() protoreflect.EnumDescriptor
func (AutotuningConfig_Scenario) Enum
func (x AutotuningConfig_Scenario) Enum() *AutotuningConfig_Scenario
func (AutotuningConfig_Scenario) EnumDescriptor
func (AutotuningConfig_Scenario) EnumDescriptor() ([]byte, []int)
Deprecated: Use AutotuningConfig_Scenario.Descriptor instead.
func (AutotuningConfig_Scenario) Number
func (x AutotuningConfig_Scenario) Number() protoreflect.EnumNumber
func (AutotuningConfig_Scenario) String
func (x AutotuningConfig_Scenario) String() string
func (AutotuningConfig_Scenario) Type
func (AutotuningConfig_Scenario) Type() protoreflect.EnumType
AuxiliaryNodeGroup
type AuxiliaryNodeGroup struct {
// Required. Node group configuration.
NodeGroup *NodeGroup `protobuf:"bytes,1,opt,name=node_group,json=nodeGroup,proto3" json:"node_group,omitempty"`
// Optional. A node group ID. Generated if not specified.
//
// The ID must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). Cannot begin or end with underscore
// or hyphen. Must consist of from 3 to 33 characters.
NodeGroupId string `protobuf:"bytes,2,opt,name=node_group_id,json=nodeGroupId,proto3" json:"node_group_id,omitempty"`
// contains filtered or unexported fields
}
Node group identification and configuration information.
func (*AuxiliaryNodeGroup) Descriptor
func (*AuxiliaryNodeGroup) Descriptor() ([]byte, []int)
Deprecated: Use AuxiliaryNodeGroup.ProtoReflect.Descriptor instead.
func (*AuxiliaryNodeGroup) GetNodeGroup
func (x *AuxiliaryNodeGroup) GetNodeGroup() *NodeGroup
func (*AuxiliaryNodeGroup) GetNodeGroupId
func (x *AuxiliaryNodeGroup) GetNodeGroupId() string
func (*AuxiliaryNodeGroup) ProtoMessage
func (*AuxiliaryNodeGroup) ProtoMessage()
func (*AuxiliaryNodeGroup) ProtoReflect
func (x *AuxiliaryNodeGroup) ProtoReflect() protoreflect.Message
func (*AuxiliaryNodeGroup) Reset
func (x *AuxiliaryNodeGroup) Reset()
func (*AuxiliaryNodeGroup) String
func (x *AuxiliaryNodeGroup) String() string
AuxiliaryServicesConfig
type AuxiliaryServicesConfig struct {
// Optional. The Hive Metastore configuration for this workload.
MetastoreConfig *MetastoreConfig `protobuf:"bytes,1,opt,name=metastore_config,json=metastoreConfig,proto3" json:"metastore_config,omitempty"`
// Optional. The Spark History Server configuration for the workload.
SparkHistoryServerConfig *SparkHistoryServerConfig `protobuf:"bytes,2,opt,name=spark_history_server_config,json=sparkHistoryServerConfig,proto3" json:"spark_history_server_config,omitempty"`
// contains filtered or unexported fields
}
Auxiliary services configuration for a Cluster.
func (*AuxiliaryServicesConfig) Descriptor
func (*AuxiliaryServicesConfig) Descriptor() ([]byte, []int)
Deprecated: Use AuxiliaryServicesConfig.ProtoReflect.Descriptor instead.
func (*AuxiliaryServicesConfig) GetMetastoreConfig
func (x *AuxiliaryServicesConfig) GetMetastoreConfig() *MetastoreConfig
func (*AuxiliaryServicesConfig) GetSparkHistoryServerConfig
func (x *AuxiliaryServicesConfig) GetSparkHistoryServerConfig() *SparkHistoryServerConfig
func (*AuxiliaryServicesConfig) ProtoMessage
func (*AuxiliaryServicesConfig) ProtoMessage()
func (*AuxiliaryServicesConfig) ProtoReflect
func (x *AuxiliaryServicesConfig) ProtoReflect() protoreflect.Message
func (*AuxiliaryServicesConfig) Reset
func (x *AuxiliaryServicesConfig) Reset()
func (*AuxiliaryServicesConfig) String
func (x *AuxiliaryServicesConfig) String() string
BasicAutoscalingAlgorithm
type BasicAutoscalingAlgorithm struct {
// Types that are assignable to Config:
//
// *BasicAutoscalingAlgorithm_YarnConfig
Config isBasicAutoscalingAlgorithm_Config `protobuf_oneof:"config"`
// Optional. Duration between scaling events. A scaling period starts after
// the update operation from the previous event has completed.
//
// Bounds: [2m, 1d]. Default: 2m.
CooldownPeriod *durationpb.Duration `protobuf:"bytes,2,opt,name=cooldown_period,json=cooldownPeriod,proto3" json:"cooldown_period,omitempty"`
// contains filtered or unexported fields
}
Basic algorithm for autoscaling.
func (*BasicAutoscalingAlgorithm) Descriptor
func (*BasicAutoscalingAlgorithm) Descriptor() ([]byte, []int)
Deprecated: Use BasicAutoscalingAlgorithm.ProtoReflect.Descriptor instead.
func (*BasicAutoscalingAlgorithm) GetConfig
func (m *BasicAutoscalingAlgorithm) GetConfig() isBasicAutoscalingAlgorithm_Config
func (*BasicAutoscalingAlgorithm) GetCooldownPeriod
func (x *BasicAutoscalingAlgorithm) GetCooldownPeriod() *durationpb.Duration
func (*BasicAutoscalingAlgorithm) GetYarnConfig
func (x *BasicAutoscalingAlgorithm) GetYarnConfig() *BasicYarnAutoscalingConfig
func (*BasicAutoscalingAlgorithm) ProtoMessage
func (*BasicAutoscalingAlgorithm) ProtoMessage()
func (*BasicAutoscalingAlgorithm) ProtoReflect
func (x *BasicAutoscalingAlgorithm) ProtoReflect() protoreflect.Message
func (*BasicAutoscalingAlgorithm) Reset
func (x *BasicAutoscalingAlgorithm) Reset()
func (*BasicAutoscalingAlgorithm) String
func (x *BasicAutoscalingAlgorithm) String() string
BasicAutoscalingAlgorithm_YarnConfig
type BasicAutoscalingAlgorithm_YarnConfig struct {
// Required. YARN autoscaling configuration.
YarnConfig *BasicYarnAutoscalingConfig `protobuf:"bytes,1,opt,name=yarn_config,json=yarnConfig,proto3,oneof"`
}
BasicYarnAutoscalingConfig
type BasicYarnAutoscalingConfig struct {
// Required. Timeout for YARN graceful decommissioning of Node Managers.
// Specifies the duration to wait for jobs to complete before forcefully
// removing workers (and potentially interrupting jobs). Only applicable to
// downscaling operations.
//
// Bounds: [0s, 1d].
GracefulDecommissionTimeout *durationpb.Duration `protobuf:"bytes,5,opt,name=graceful_decommission_timeout,json=gracefulDecommissionTimeout,proto3" json:"graceful_decommission_timeout,omitempty"`
// Required. Fraction of average YARN pending memory in the last cooldown
// period for which to add workers. A scale-up factor of 1.0 will result in
// scaling up so that there is no pending memory remaining after the update
// (more aggressive scaling). A scale-up factor closer to 0 will result in a
// smaller magnitude of scaling up (less aggressive scaling). See [How
// autoscaling
// works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
// for more information.
//
// Bounds: [0.0, 1.0].
ScaleUpFactor float64 `protobuf:"fixed64,1,opt,name=scale_up_factor,json=scaleUpFactor,proto3" json:"scale_up_factor,omitempty"`
// Required. Fraction of average YARN pending memory in the last cooldown
// period for which to remove workers. A scale-down factor of 1 will result in
// scaling down so that there is no available memory remaining after the
// update (more aggressive scaling). A scale-down factor of 0 disables
// removing workers, which can be beneficial for autoscaling a single job.
// See [How autoscaling
// works](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/autoscaling#how_autoscaling_works)
// for more information.
//
// Bounds: [0.0, 1.0].
ScaleDownFactor float64 `protobuf:"fixed64,2,opt,name=scale_down_factor,json=scaleDownFactor,proto3" json:"scale_down_factor,omitempty"`
// Optional. Minimum scale-up threshold as a fraction of total cluster size
// before scaling occurs. For example, in a 20-worker cluster, a threshold of
// 0.1 means the autoscaler must recommend at least a 2-worker scale-up for
// the cluster to scale. A threshold of 0 means the autoscaler will scale up
// on any recommended change.
//
// Bounds: [0.0, 1.0]. Default: 0.0.
ScaleUpMinWorkerFraction float64 `protobuf:"fixed64,3,opt,name=scale_up_min_worker_fraction,json=scaleUpMinWorkerFraction,proto3" json:"scale_up_min_worker_fraction,omitempty"`
// Optional. Minimum scale-down threshold as a fraction of total cluster size
// before scaling occurs. For example, in a 20-worker cluster, a threshold of
// 0.1 means the autoscaler must recommend at least a 2 worker scale-down for
// the cluster to scale. A threshold of 0 means the autoscaler will scale down
// on any recommended change.
//
// Bounds: [0.0, 1.0]. Default: 0.0.
ScaleDownMinWorkerFraction float64 `protobuf:"fixed64,4,opt,name=scale_down_min_worker_fraction,json=scaleDownMinWorkerFraction,proto3" json:"scale_down_min_worker_fraction,omitempty"`
// contains filtered or unexported fields
}
Basic autoscaling configurations for YARN.
func (*BasicYarnAutoscalingConfig) Descriptor
func (*BasicYarnAutoscalingConfig) Descriptor() ([]byte, []int)
Deprecated: Use BasicYarnAutoscalingConfig.ProtoReflect.Descriptor instead.
func (*BasicYarnAutoscalingConfig) GetGracefulDecommissionTimeout
func (x *BasicYarnAutoscalingConfig) GetGracefulDecommissionTimeout() *durationpb.Duration
func (*BasicYarnAutoscalingConfig) GetScaleDownFactor
func (x *BasicYarnAutoscalingConfig) GetScaleDownFactor() float64
func (*BasicYarnAutoscalingConfig) GetScaleDownMinWorkerFraction
func (x *BasicYarnAutoscalingConfig) GetScaleDownMinWorkerFraction() float64
func (*BasicYarnAutoscalingConfig) GetScaleUpFactor
func (x *BasicYarnAutoscalingConfig) GetScaleUpFactor() float64
func (*BasicYarnAutoscalingConfig) GetScaleUpMinWorkerFraction
func (x *BasicYarnAutoscalingConfig) GetScaleUpMinWorkerFraction() float64
func (*BasicYarnAutoscalingConfig) ProtoMessage
func (*BasicYarnAutoscalingConfig) ProtoMessage()
func (*BasicYarnAutoscalingConfig) ProtoReflect
func (x *BasicYarnAutoscalingConfig) ProtoReflect() protoreflect.Message
func (*BasicYarnAutoscalingConfig) Reset
func (x *BasicYarnAutoscalingConfig) Reset()
func (*BasicYarnAutoscalingConfig) String
func (x *BasicYarnAutoscalingConfig) String() string
Batch
type Batch struct {
// Output only. The resource name of the batch.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Output only. A batch UUID (Unique Universal Identifier). The service
// generates this value when it creates the batch.
Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"`
// Output only. The time when the batch was created.
CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// The application/framework-specific portion of the batch configuration.
//
// Types that are assignable to BatchConfig:
//
// *Batch_PysparkBatch
// *Batch_SparkBatch
// *Batch_SparkRBatch
// *Batch_SparkSqlBatch
BatchConfig isBatch_BatchConfig `protobuf_oneof:"batch_config"`
// Output only. Runtime information about batch execution.
RuntimeInfo *RuntimeInfo `protobuf:"bytes,8,opt,name=runtime_info,json=runtimeInfo,proto3" json:"runtime_info,omitempty"`
// Output only. The state of the batch.
State Batch_State `protobuf:"varint,9,opt,name=state,proto3,enum=google.cloud.dataproc.v1.Batch_State" json:"state,omitempty"`
// Output only. Batch state details, such as a failure
// description if the state is `FAILED`.
StateMessage string `protobuf:"bytes,10,opt,name=state_message,json=stateMessage,proto3" json:"state_message,omitempty"`
// Output only. The time when the batch entered a current state.
StateTime *timestamppb.Timestamp `protobuf:"bytes,11,opt,name=state_time,json=stateTime,proto3" json:"state_time,omitempty"`
// Output only. The email address of the user who created the batch.
Creator string `protobuf:"bytes,12,opt,name=creator,proto3" json:"creator,omitempty"`
// Optional. The labels to associate with this batch.
// Label **keys** must contain 1 to 63 characters, and must conform to
// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
// Label **values** may be empty, but, if present, must contain 1 to 63
// characters, and must conform to [RFC
// 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
// associated with a batch.
Labels map[string]string `protobuf:"bytes,13,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. Runtime configuration for the batch execution.
RuntimeConfig *RuntimeConfig `protobuf:"bytes,14,opt,name=runtime_config,json=runtimeConfig,proto3" json:"runtime_config,omitempty"`
// Optional. Environment configuration for the batch execution.
EnvironmentConfig *EnvironmentConfig `protobuf:"bytes,15,opt,name=environment_config,json=environmentConfig,proto3" json:"environment_config,omitempty"`
// Output only. The resource name of the operation associated with this batch.
Operation string `protobuf:"bytes,16,opt,name=operation,proto3" json:"operation,omitempty"`
// Output only. Historical state information for the batch.
StateHistory []*Batch_StateHistory `protobuf:"bytes,17,rep,name=state_history,json=stateHistory,proto3" json:"state_history,omitempty"`
// contains filtered or unexported fields
}
A representation of a batch workload in the service.
func (*Batch) Descriptor
Deprecated: Use Batch.ProtoReflect.Descriptor instead.
func (*Batch) GetBatchConfig
func (m *Batch) GetBatchConfig() isBatch_BatchConfig
func (*Batch) GetCreateTime
func (x *Batch) GetCreateTime() *timestamppb.Timestamp
func (*Batch) GetCreator
func (*Batch) GetEnvironmentConfig
func (x *Batch) GetEnvironmentConfig() *EnvironmentConfig
func (*Batch) GetLabels
func (*Batch) GetName
func (*Batch) GetOperation
func (*Batch) GetPysparkBatch
func (x *Batch) GetPysparkBatch() *PySparkBatch
func (*Batch) GetRuntimeConfig
func (x *Batch) GetRuntimeConfig() *RuntimeConfig
func (*Batch) GetRuntimeInfo
func (x *Batch) GetRuntimeInfo() *RuntimeInfo
func (*Batch) GetSparkBatch
func (x *Batch) GetSparkBatch() *SparkBatch
func (*Batch) GetSparkRBatch
func (x *Batch) GetSparkRBatch() *SparkRBatch
func (*Batch) GetSparkSqlBatch
func (x *Batch) GetSparkSqlBatch() *SparkSqlBatch
func (*Batch) GetState
func (x *Batch) GetState() Batch_State
func (*Batch) GetStateHistory
func (x *Batch) GetStateHistory() []*Batch_StateHistory
func (*Batch) GetStateMessage
func (*Batch) GetStateTime
func (x *Batch) GetStateTime() *timestamppb.Timestamp
func (*Batch) GetUuid
func (*Batch) ProtoMessage
func (*Batch) ProtoMessage()
func (*Batch) ProtoReflect
func (x *Batch) ProtoReflect() protoreflect.Message
func (*Batch) Reset
func (x *Batch) Reset()
func (*Batch) String
BatchControllerClient
type BatchControllerClient interface {
// Creates a batch workload that executes asynchronously.
CreateBatch(ctx context.Context, in *CreateBatchRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
// Gets the batch workload resource representation.
GetBatch(ctx context.Context, in *GetBatchRequest, opts ...grpc.CallOption) (*Batch, error)
// Lists batch workloads.
ListBatches(ctx context.Context, in *ListBatchesRequest, opts ...grpc.CallOption) (*ListBatchesResponse, error)
// Deletes the batch workload resource. If the batch is not in terminal state,
// the delete fails and the response returns `FAILED_PRECONDITION`.
DeleteBatch(ctx context.Context, in *DeleteBatchRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
BatchControllerClient is the client API for BatchController service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewBatchControllerClient
func NewBatchControllerClient(cc grpc.ClientConnInterface) BatchControllerClient
BatchControllerServer
type BatchControllerServer interface {
// Creates a batch workload that executes asynchronously.
CreateBatch(context.Context, *CreateBatchRequest) (*longrunningpb.Operation, error)
// Gets the batch workload resource representation.
GetBatch(context.Context, *GetBatchRequest) (*Batch, error)
// Lists batch workloads.
ListBatches(context.Context, *ListBatchesRequest) (*ListBatchesResponse, error)
// Deletes the batch workload resource. If the batch is not in terminal state,
// the delete fails and the response returns `FAILED_PRECONDITION`.
DeleteBatch(context.Context, *DeleteBatchRequest) (*emptypb.Empty, error)
}
BatchControllerServer is the server API for BatchController service.
BatchOperationMetadata
type BatchOperationMetadata struct {
// Name of the batch for the operation.
Batch string `protobuf:"bytes,1,opt,name=batch,proto3" json:"batch,omitempty"`
// Batch UUID for the operation.
BatchUuid string `protobuf:"bytes,2,opt,name=batch_uuid,json=batchUuid,proto3" json:"batch_uuid,omitempty"`
// The time when the operation was created.
CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// The time when the operation finished.
DoneTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=done_time,json=doneTime,proto3" json:"done_time,omitempty"`
// The operation type.
OperationType BatchOperationMetadata_BatchOperationType `protobuf:"varint,6,opt,name=operation_type,json=operationType,proto3,enum=google.cloud.dataproc.v1.BatchOperationMetadata_BatchOperationType" json:"operation_type,omitempty"`
// Short description of the operation.
Description string `protobuf:"bytes,7,opt,name=description,proto3" json:"description,omitempty"`
// Labels associated with the operation.
Labels map[string]string `protobuf:"bytes,8,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Warnings encountered during operation execution.
Warnings []string `protobuf:"bytes,9,rep,name=warnings,proto3" json:"warnings,omitempty"`
// contains filtered or unexported fields
}
Metadata describing the Batch operation.
func (*BatchOperationMetadata) Descriptor
func (*BatchOperationMetadata) Descriptor() ([]byte, []int)
Deprecated: Use BatchOperationMetadata.ProtoReflect.Descriptor instead.
func (*BatchOperationMetadata) GetBatch
func (x *BatchOperationMetadata) GetBatch() string
func (*BatchOperationMetadata) GetBatchUuid
func (x *BatchOperationMetadata) GetBatchUuid() string
func (*BatchOperationMetadata) GetCreateTime
func (x *BatchOperationMetadata) GetCreateTime() *timestamppb.Timestamp
func (*BatchOperationMetadata) GetDescription
func (x *BatchOperationMetadata) GetDescription() string
func (*BatchOperationMetadata) GetDoneTime
func (x *BatchOperationMetadata) GetDoneTime() *timestamppb.Timestamp
func (*BatchOperationMetadata) GetLabels
func (x *BatchOperationMetadata) GetLabels() map[string]string
func (*BatchOperationMetadata) GetOperationType
func (x *BatchOperationMetadata) GetOperationType() BatchOperationMetadata_BatchOperationType
func (*BatchOperationMetadata) GetWarnings
func (x *BatchOperationMetadata) GetWarnings() []string
func (*BatchOperationMetadata) ProtoMessage
func (*BatchOperationMetadata) ProtoMessage()
func (*BatchOperationMetadata) ProtoReflect
func (x *BatchOperationMetadata) ProtoReflect() protoreflect.Message
func (*BatchOperationMetadata) Reset
func (x *BatchOperationMetadata) Reset()
func (*BatchOperationMetadata) String
func (x *BatchOperationMetadata) String() string
BatchOperationMetadata_BatchOperationType
type BatchOperationMetadata_BatchOperationType int32
Operation type for Batch resources.
BatchOperationMetadata_BATCH_OPERATION_TYPE_UNSPECIFIED, BatchOperationMetadata_BATCH
const (
// Batch operation type is unknown.
BatchOperationMetadata_BATCH_OPERATION_TYPE_UNSPECIFIED BatchOperationMetadata_BatchOperationType = 0
// Batch operation type.
BatchOperationMetadata_BATCH BatchOperationMetadata_BatchOperationType = 1
)
func (BatchOperationMetadata_BatchOperationType) Descriptor
func (BatchOperationMetadata_BatchOperationType) Descriptor() protoreflect.EnumDescriptor
func (BatchOperationMetadata_BatchOperationType) Enum
func (x BatchOperationMetadata_BatchOperationType) Enum() *BatchOperationMetadata_BatchOperationType
func (BatchOperationMetadata_BatchOperationType) EnumDescriptor
func (BatchOperationMetadata_BatchOperationType) EnumDescriptor() ([]byte, []int)
Deprecated: Use BatchOperationMetadata_BatchOperationType.Descriptor instead.
func (BatchOperationMetadata_BatchOperationType) Number
func (x BatchOperationMetadata_BatchOperationType) Number() protoreflect.EnumNumber
func (BatchOperationMetadata_BatchOperationType) String
func (x BatchOperationMetadata_BatchOperationType) String() string
func (BatchOperationMetadata_BatchOperationType) Type
func (BatchOperationMetadata_BatchOperationType) Type() protoreflect.EnumType
Batch_PysparkBatch
type Batch_PysparkBatch struct {
// Optional. PySpark batch config.
PysparkBatch *PySparkBatch `protobuf:"bytes,4,opt,name=pyspark_batch,json=pysparkBatch,proto3,oneof"`
}
Batch_SparkBatch
type Batch_SparkBatch struct {
// Optional. Spark batch config.
SparkBatch *SparkBatch `protobuf:"bytes,5,opt,name=spark_batch,json=sparkBatch,proto3,oneof"`
}
Batch_SparkRBatch
type Batch_SparkRBatch struct {
// Optional. SparkR batch config.
SparkRBatch *SparkRBatch `protobuf:"bytes,6,opt,name=spark_r_batch,json=sparkRBatch,proto3,oneof"`
}
Batch_SparkSqlBatch
type Batch_SparkSqlBatch struct {
// Optional. SparkSql batch config.
SparkSqlBatch *SparkSqlBatch `protobuf:"bytes,7,opt,name=spark_sql_batch,json=sparkSqlBatch,proto3,oneof"`
}
Batch_State
type Batch_State int32
The batch state.
Batch_STATE_UNSPECIFIED, Batch_PENDING, Batch_RUNNING, Batch_CANCELLING, Batch_CANCELLED, Batch_SUCCEEDED, Batch_FAILED
const (
// The batch state is unknown.
Batch_STATE_UNSPECIFIED Batch_State = 0
// The batch is created before running.
Batch_PENDING Batch_State = 1
// The batch is running.
Batch_RUNNING Batch_State = 2
// The batch is cancelling.
Batch_CANCELLING Batch_State = 3
// The batch cancellation was successful.
Batch_CANCELLED Batch_State = 4
// The batch completed successfully.
Batch_SUCCEEDED Batch_State = 5
// The batch is no longer running due to an error.
Batch_FAILED Batch_State = 6
)
func (Batch_State) Descriptor
func (Batch_State) Descriptor() protoreflect.EnumDescriptor
func (Batch_State) Enum
func (x Batch_State) Enum() *Batch_State
func (Batch_State) EnumDescriptor
func (Batch_State) EnumDescriptor() ([]byte, []int)
Deprecated: Use Batch_State.Descriptor instead.
func (Batch_State) Number
func (x Batch_State) Number() protoreflect.EnumNumber
func (Batch_State) String
func (x Batch_State) String() string
func (Batch_State) Type
func (Batch_State) Type() protoreflect.EnumType
Batch_StateHistory
type Batch_StateHistory struct {
// Output only. The state of the batch at this point in history.
State Batch_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1.Batch_State" json:"state,omitempty"`
// Output only. Details about the state at this point in history.
StateMessage string `protobuf:"bytes,2,opt,name=state_message,json=stateMessage,proto3" json:"state_message,omitempty"`
// Output only. The time when the batch entered the historical state.
StateStartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
// contains filtered or unexported fields
}
Historical state information.
func (*Batch_StateHistory) Descriptor
func (*Batch_StateHistory) Descriptor() ([]byte, []int)
Deprecated: Use Batch_StateHistory.ProtoReflect.Descriptor instead.
func (*Batch_StateHistory) GetState
func (x *Batch_StateHistory) GetState() Batch_State
func (*Batch_StateHistory) GetStateMessage
func (x *Batch_StateHistory) GetStateMessage() string
func (*Batch_StateHistory) GetStateStartTime
func (x *Batch_StateHistory) GetStateStartTime() *timestamppb.Timestamp
func (*Batch_StateHistory) ProtoMessage
func (*Batch_StateHistory) ProtoMessage()
func (*Batch_StateHistory) ProtoReflect
func (x *Batch_StateHistory) ProtoReflect() protoreflect.Message
func (*Batch_StateHistory) Reset
func (x *Batch_StateHistory) Reset()
func (*Batch_StateHistory) String
func (x *Batch_StateHistory) String() string
CancelJobRequest
type CancelJobRequest struct {
// Required. The ID of the Google Cloud Platform project that the job
// belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
// Required. The job ID.
JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
// contains filtered or unexported fields
}
A request to cancel a job.
func (*CancelJobRequest) Descriptor
func (*CancelJobRequest) Descriptor() ([]byte, []int)
Deprecated: Use CancelJobRequest.ProtoReflect.Descriptor instead.
func (*CancelJobRequest) GetJobId
func (x *CancelJobRequest) GetJobId() string
func (*CancelJobRequest) GetProjectId
func (x *CancelJobRequest) GetProjectId() string
func (*CancelJobRequest) GetRegion
func (x *CancelJobRequest) GetRegion() string
func (*CancelJobRequest) ProtoMessage
func (*CancelJobRequest) ProtoMessage()
func (*CancelJobRequest) ProtoReflect
func (x *CancelJobRequest) ProtoReflect() protoreflect.Message
func (*CancelJobRequest) Reset
func (x *CancelJobRequest) Reset()
func (*CancelJobRequest) String
func (x *CancelJobRequest) String() string
Cluster
type Cluster struct {
// Required. The Google Cloud Platform project ID that the cluster belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The cluster name, which must be unique within a project.
// The name must start with a lowercase letter, and can contain
// up to 51 lowercase letters, numbers, and hyphens. It cannot end
// with a hyphen. The name of a deleted cluster can be reused.
ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
// Optional. The cluster config for a cluster of Compute Engine Instances.
// Note that Dataproc may set default values, and values may change
// when clusters are updated.
//
// Exactly one of ClusterConfig or VirtualClusterConfig must be specified.
Config *ClusterConfig `protobuf:"bytes,3,opt,name=config,proto3" json:"config,omitempty"`
// Optional. The virtual cluster config is used when creating a Dataproc
// cluster that does not directly control the underlying compute resources,
// for example, when creating a [Dataproc-on-GKE
// cluster](https://cloud.google.com/dataproc/docs/guides/dpgke/dataproc-gke-overview).
// Dataproc may set default values, and values may change when
// clusters are updated. Exactly one of
// [config][google.cloud.dataproc.v1.Cluster.config] or
// [virtual_cluster_config][google.cloud.dataproc.v1.Cluster.virtual_cluster_config]
// must be specified.
VirtualClusterConfig *VirtualClusterConfig `protobuf:"bytes,10,opt,name=virtual_cluster_config,json=virtualClusterConfig,proto3" json:"virtual_cluster_config,omitempty"`
// Optional. The labels to associate with this cluster.
// Label **keys** must contain 1 to 63 characters, and must conform to
// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
// Label **values** may be empty, but, if present, must contain 1 to 63
// characters, and must conform to [RFC
// 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
// associated with a cluster.
Labels map[string]string `protobuf:"bytes,8,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Output only. Cluster status.
Status *ClusterStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
// Output only. The previous cluster status.
StatusHistory []*ClusterStatus `protobuf:"bytes,7,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
// Output only. A cluster UUID (Unique Universal Identifier). Dataproc
// generates this value when it creates the cluster.
ClusterUuid string `protobuf:"bytes,6,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
// Output only. Contains cluster daemon metrics such as HDFS and YARN stats.
//
// **Beta Feature**: This report is available for testing purposes only. It
// may be changed before final release.
Metrics *ClusterMetrics `protobuf:"bytes,9,opt,name=metrics,proto3" json:"metrics,omitempty"`
// contains filtered or unexported fields
}
Describes the identifying information, config, and status of a Dataproc cluster.
func (*Cluster) Descriptor
Deprecated: Use Cluster.ProtoReflect.Descriptor instead.
func (*Cluster) GetClusterName
func (*Cluster) GetClusterUuid
func (*Cluster) GetConfig
func (x *Cluster) GetConfig() *ClusterConfig
func (*Cluster) GetLabels
func (*Cluster) GetMetrics
func (x *Cluster) GetMetrics() *ClusterMetrics
func (*Cluster) GetProjectId
func (*Cluster) GetStatus
func (x *Cluster) GetStatus() *ClusterStatus
func (*Cluster) GetStatusHistory
func (x *Cluster) GetStatusHistory() []*ClusterStatus
func (*Cluster) GetVirtualClusterConfig
func (x *Cluster) GetVirtualClusterConfig() *VirtualClusterConfig
func (*Cluster) ProtoMessage
func (*Cluster) ProtoMessage()
func (*Cluster) ProtoReflect
func (x *Cluster) ProtoReflect() protoreflect.Message
func (*Cluster) Reset
func (x *Cluster) Reset()
func (*Cluster) String
ClusterConfig
type ClusterConfig struct {
// Optional. A Cloud Storage bucket used to stage job
// dependencies, config files, and job driver console output.
// If you do not specify a staging bucket, Cloud
// Dataproc will determine a Cloud Storage location (US,
// ASIA, or EU) for your cluster's staging bucket according to the
// Compute Engine zone where your cluster is deployed, and then create
// and manage this project-level, per-location bucket (see
// [Dataproc staging and temp
// buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
// **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
// a Cloud Storage bucket.**
ConfigBucket string `protobuf:"bytes,1,opt,name=config_bucket,json=configBucket,proto3" json:"config_bucket,omitempty"`
// Optional. A Cloud Storage bucket used to store ephemeral cluster and jobs
// data, such as Spark and MapReduce history files. If you do not specify a
// temp bucket, Dataproc will determine a Cloud Storage location (US, ASIA, or
// EU) for your cluster's temp bucket according to the Compute Engine zone
// where your cluster is deployed, and then create and manage this
// project-level, per-location bucket. The default bucket has a TTL of 90
// days, but you can use any TTL (or none) if you specify a bucket (see
// [Dataproc staging and temp
// buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
// **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
// a Cloud Storage bucket.**
TempBucket string `protobuf:"bytes,2,opt,name=temp_bucket,json=tempBucket,proto3" json:"temp_bucket,omitempty"`
// Optional. The shared Compute Engine config settings for
// all instances in a cluster.
GceClusterConfig *GceClusterConfig `protobuf:"bytes,8,opt,name=gce_cluster_config,json=gceClusterConfig,proto3" json:"gce_cluster_config,omitempty"`
// Optional. The Compute Engine config settings for
// the cluster's master instance.
MasterConfig *InstanceGroupConfig `protobuf:"bytes,9,opt,name=master_config,json=masterConfig,proto3" json:"master_config,omitempty"`
// Optional. The Compute Engine config settings for
// the cluster's worker instances.
WorkerConfig *InstanceGroupConfig `protobuf:"bytes,10,opt,name=worker_config,json=workerConfig,proto3" json:"worker_config,omitempty"`
// Optional. The Compute Engine config settings for
// a cluster's secondary worker instances
SecondaryWorkerConfig *InstanceGroupConfig `protobuf:"bytes,12,opt,name=secondary_worker_config,json=secondaryWorkerConfig,proto3" json:"secondary_worker_config,omitempty"`
// Optional. The config settings for cluster software.
SoftwareConfig *SoftwareConfig `protobuf:"bytes,13,opt,name=software_config,json=softwareConfig,proto3" json:"software_config,omitempty"`
// Optional. Commands to execute on each node after config is
// completed. By default, executables are run on master and all worker nodes.
// You can test a node's `role` metadata to run an executable on
// a master or worker node, as shown below using `curl` (you can also use
// `wget`):
//
// ROLE=$(curl -H Metadata-Flavor:Google
// http://metadata/computeMetadata/v1/instance/attributes/dataproc-role)
// if [[ "${ROLE}" == 'Master' ]]; then
// ... master specific actions ...
// else
// ... worker specific actions ...
// fi
InitializationActions []*NodeInitializationAction `protobuf:"bytes,11,rep,name=initialization_actions,json=initializationActions,proto3" json:"initialization_actions,omitempty"`
// Optional. Encryption settings for the cluster.
EncryptionConfig *EncryptionConfig `protobuf:"bytes,15,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
// Optional. Autoscaling config for the policy associated with the cluster.
// Cluster does not autoscale if this field is unset.
AutoscalingConfig *AutoscalingConfig `protobuf:"bytes,18,opt,name=autoscaling_config,json=autoscalingConfig,proto3" json:"autoscaling_config,omitempty"`
// Optional. Security settings for the cluster.
SecurityConfig *SecurityConfig `protobuf:"bytes,16,opt,name=security_config,json=securityConfig,proto3" json:"security_config,omitempty"`
// Optional. Lifecycle setting for the cluster.
LifecycleConfig *LifecycleConfig `protobuf:"bytes,17,opt,name=lifecycle_config,json=lifecycleConfig,proto3" json:"lifecycle_config,omitempty"`
// Optional. Port/endpoint configuration for this cluster
EndpointConfig *EndpointConfig `protobuf:"bytes,19,opt,name=endpoint_config,json=endpointConfig,proto3" json:"endpoint_config,omitempty"`
// Optional. Metastore configuration.
MetastoreConfig *MetastoreConfig `protobuf:"bytes,20,opt,name=metastore_config,json=metastoreConfig,proto3" json:"metastore_config,omitempty"`
// Optional. The config for Dataproc metrics.
DataprocMetricConfig *DataprocMetricConfig `protobuf:"bytes,23,opt,name=dataproc_metric_config,json=dataprocMetricConfig,proto3" json:"dataproc_metric_config,omitempty"`
// Optional. The node group settings.
AuxiliaryNodeGroups []*AuxiliaryNodeGroup `protobuf:"bytes,25,rep,name=auxiliary_node_groups,json=auxiliaryNodeGroups,proto3" json:"auxiliary_node_groups,omitempty"`
// contains filtered or unexported fields
}
The cluster config.
func (*ClusterConfig) Descriptor
func (*ClusterConfig) Descriptor() ([]byte, []int)
Deprecated: Use ClusterConfig.ProtoReflect.Descriptor instead.
func (*ClusterConfig) GetAutoscalingConfig
func (x *ClusterConfig) GetAutoscalingConfig() *AutoscalingConfig
func (*ClusterConfig) GetAuxiliaryNodeGroups
func (x *ClusterConfig) GetAuxiliaryNodeGroups() []*AuxiliaryNodeGroup
func (*ClusterConfig) GetConfigBucket
func (x *ClusterConfig) GetConfigBucket() string
func (*ClusterConfig) GetDataprocMetricConfig
func (x *ClusterConfig) GetDataprocMetricConfig() *DataprocMetricConfig
func (*ClusterConfig) GetEncryptionConfig
func (x *ClusterConfig) GetEncryptionConfig() *EncryptionConfig
func (*ClusterConfig) GetEndpointConfig
func (x *ClusterConfig) GetEndpointConfig() *EndpointConfig
func (*ClusterConfig) GetGceClusterConfig
func (x *ClusterConfig) GetGceClusterConfig() *GceClusterConfig
func (*ClusterConfig) GetInitializationActions
func (x *ClusterConfig) GetInitializationActions() []*NodeInitializationAction
func (*ClusterConfig) GetLifecycleConfig
func (x *ClusterConfig) GetLifecycleConfig() *LifecycleConfig
func (*ClusterConfig) GetMasterConfig
func (x *ClusterConfig) GetMasterConfig() *InstanceGroupConfig
func (*ClusterConfig) GetMetastoreConfig
func (x *ClusterConfig) GetMetastoreConfig() *MetastoreConfig
func (*ClusterConfig) GetSecondaryWorkerConfig
func (x *ClusterConfig) GetSecondaryWorkerConfig() *InstanceGroupConfig
func (*ClusterConfig) GetSecurityConfig
func (x *ClusterConfig) GetSecurityConfig() *SecurityConfig
func (*ClusterConfig) GetSoftwareConfig
func (x *ClusterConfig) GetSoftwareConfig() *SoftwareConfig
func (*ClusterConfig) GetTempBucket
func (x *ClusterConfig) GetTempBucket() string
func (*ClusterConfig) GetWorkerConfig
func (x *ClusterConfig) GetWorkerConfig() *InstanceGroupConfig
func (*ClusterConfig) ProtoMessage
func (*ClusterConfig) ProtoMessage()
func (*ClusterConfig) ProtoReflect
func (x *ClusterConfig) ProtoReflect() protoreflect.Message
func (*ClusterConfig) Reset
func (x *ClusterConfig) Reset()
func (*ClusterConfig) String
func (x *ClusterConfig) String() string
ClusterControllerClient
type ClusterControllerClient interface {
// Creates a cluster in a project. The returned
// [Operation.metadata][google.longrunning.Operation.metadata] will be
// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
// Updates a cluster in a project. The returned
// [Operation.metadata][google.longrunning.Operation.metadata] will be
// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
// The cluster must be in a
// [`RUNNING`][google.cloud.dataproc.v1.ClusterStatus.State] state or an error
// is returned.
UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
// Stops a cluster in a project.
StopCluster(ctx context.Context, in *StopClusterRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
// Starts a cluster in a project.
StartCluster(ctx context.Context, in *StartClusterRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
// Deletes a cluster in a project. The returned
// [Operation.metadata][google.longrunning.Operation.metadata] will be
// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
// Gets the resource representation for a cluster in a project.
GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error)
// Lists all regions/{region}/clusters in a project alphabetically.
ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
// Gets cluster diagnostic information. The returned
// [Operation.metadata][google.longrunning.Operation.metadata] will be
// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
// After the operation completes,
// [Operation.response][google.longrunning.Operation.response]
// contains
// [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
}
ClusterControllerClient is the client API for ClusterController service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewClusterControllerClient
func NewClusterControllerClient(cc grpc.ClientConnInterface) ClusterControllerClient
ClusterControllerServer
type ClusterControllerServer interface {
// Creates a cluster in a project. The returned
// [Operation.metadata][google.longrunning.Operation.metadata] will be
// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
CreateCluster(context.Context, *CreateClusterRequest) (*longrunningpb.Operation, error)
// Updates a cluster in a project. The returned
// [Operation.metadata][google.longrunning.Operation.metadata] will be
// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
// The cluster must be in a
// [`RUNNING`][google.cloud.dataproc.v1.ClusterStatus.State] state or an error
// is returned.
UpdateCluster(context.Context, *UpdateClusterRequest) (*longrunningpb.Operation, error)
// Stops a cluster in a project.
StopCluster(context.Context, *StopClusterRequest) (*longrunningpb.Operation, error)
// Starts a cluster in a project.
StartCluster(context.Context, *StartClusterRequest) (*longrunningpb.Operation, error)
// Deletes a cluster in a project. The returned
// [Operation.metadata][google.longrunning.Operation.metadata] will be
// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
DeleteCluster(context.Context, *DeleteClusterRequest) (*longrunningpb.Operation, error)
// Gets the resource representation for a cluster in a project.
GetCluster(context.Context, *GetClusterRequest) (*Cluster, error)
// Lists all regions/{region}/clusters in a project alphabetically.
ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
// Gets cluster diagnostic information. The returned
// [Operation.metadata][google.longrunning.Operation.metadata] will be
// [ClusterOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#clusteroperationmetadata).
// After the operation completes,
// [Operation.response][google.longrunning.Operation.response]
// contains
// [DiagnoseClusterResults](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#diagnoseclusterresults).
DiagnoseCluster(context.Context, *DiagnoseClusterRequest) (*longrunningpb.Operation, error)
}
ClusterControllerServer is the server API for ClusterController service.
ClusterMetrics
type ClusterMetrics struct {
// The HDFS metrics.
HdfsMetrics map[string]int64 `protobuf:"bytes,1,rep,name=hdfs_metrics,json=hdfsMetrics,proto3" json:"hdfs_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
// YARN metrics.
YarnMetrics map[string]int64 `protobuf:"bytes,2,rep,name=yarn_metrics,json=yarnMetrics,proto3" json:"yarn_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
// contains filtered or unexported fields
}
Contains cluster daemon metrics, such as HDFS and YARN stats.
Beta Feature: This report is available for testing purposes only. It may be changed before final release.
func (*ClusterMetrics) Descriptor
func (*ClusterMetrics) Descriptor() ([]byte, []int)
Deprecated: Use ClusterMetrics.ProtoReflect.Descriptor instead.
func (*ClusterMetrics) GetHdfsMetrics
func (x *ClusterMetrics) GetHdfsMetrics() map[string]int64
func (*ClusterMetrics) GetYarnMetrics
func (x *ClusterMetrics) GetYarnMetrics() map[string]int64
func (*ClusterMetrics) ProtoMessage
func (*ClusterMetrics) ProtoMessage()
func (*ClusterMetrics) ProtoReflect
func (x *ClusterMetrics) ProtoReflect() protoreflect.Message
func (*ClusterMetrics) Reset
func (x *ClusterMetrics) Reset()
func (*ClusterMetrics) String
func (x *ClusterMetrics) String() string
ClusterOperation
type ClusterOperation struct {
// Output only. The id of the cluster operation.
OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
// Output only. Error, if operation failed.
Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"`
// Output only. Indicates the operation is done.
Done bool `protobuf:"varint,3,opt,name=done,proto3" json:"done,omitempty"`
// contains filtered or unexported fields
}
The cluster operation triggered by a workflow.
func (*ClusterOperation) Descriptor
func (*ClusterOperation) Descriptor() ([]byte, []int)
Deprecated: Use ClusterOperation.ProtoReflect.Descriptor instead.
func (*ClusterOperation) GetDone
func (x *ClusterOperation) GetDone() bool
func (*ClusterOperation) GetError
func (x *ClusterOperation) GetError() string
func (*ClusterOperation) GetOperationId
func (x *ClusterOperation) GetOperationId() string
func (*ClusterOperation) ProtoMessage
func (*ClusterOperation) ProtoMessage()
func (*ClusterOperation) ProtoReflect
func (x *ClusterOperation) ProtoReflect() protoreflect.Message
func (*ClusterOperation) Reset
func (x *ClusterOperation) Reset()
func (*ClusterOperation) String
func (x *ClusterOperation) String() string
ClusterOperationMetadata
type ClusterOperationMetadata struct {
// Output only. Name of the cluster for the operation.
ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
// Output only. Cluster UUID for the operation.
ClusterUuid string `protobuf:"bytes,8,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
// Output only. Current operation status.
Status *ClusterOperationStatus `protobuf:"bytes,9,opt,name=status,proto3" json:"status,omitempty"`
// Output only. The previous operation status.
StatusHistory []*ClusterOperationStatus `protobuf:"bytes,10,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
// Output only. The operation type.
OperationType string `protobuf:"bytes,11,opt,name=operation_type,json=operationType,proto3" json:"operation_type,omitempty"`
// Output only. Short description of operation.
Description string `protobuf:"bytes,12,opt,name=description,proto3" json:"description,omitempty"`
// Output only. Labels associated with the operation
Labels map[string]string `protobuf:"bytes,13,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Output only. Warnings encountered during operation execution.
Warnings []string `protobuf:"bytes,14,rep,name=warnings,proto3" json:"warnings,omitempty"`
// Output only. Child operation ids
ChildOperationIds []string `protobuf:"bytes,15,rep,name=child_operation_ids,json=childOperationIds,proto3" json:"child_operation_ids,omitempty"`
// contains filtered or unexported fields
}
Metadata describing the operation.
func (*ClusterOperationMetadata) Descriptor
func (*ClusterOperationMetadata) Descriptor() ([]byte, []int)
Deprecated: Use ClusterOperationMetadata.ProtoReflect.Descriptor instead.
func (*ClusterOperationMetadata) GetChildOperationIds
func (x *ClusterOperationMetadata) GetChildOperationIds() []string
func (*ClusterOperationMetadata) GetClusterName
func (x *ClusterOperationMetadata) GetClusterName() string
func (*ClusterOperationMetadata) GetClusterUuid
func (x *ClusterOperationMetadata) GetClusterUuid() string
func (*ClusterOperationMetadata) GetDescription
func (x *ClusterOperationMetadata) GetDescription() string
func (*ClusterOperationMetadata) GetLabels
func (x *ClusterOperationMetadata) GetLabels() map[string]string
func (*ClusterOperationMetadata) GetOperationType
func (x *ClusterOperationMetadata) GetOperationType() string
func (*ClusterOperationMetadata) GetStatus
func (x *ClusterOperationMetadata) GetStatus() *ClusterOperationStatus
func (*ClusterOperationMetadata) GetStatusHistory
func (x *ClusterOperationMetadata) GetStatusHistory() []*ClusterOperationStatus
func (*ClusterOperationMetadata) GetWarnings
func (x *ClusterOperationMetadata) GetWarnings() []string
func (*ClusterOperationMetadata) ProtoMessage
func (*ClusterOperationMetadata) ProtoMessage()
func (*ClusterOperationMetadata) ProtoReflect
func (x *ClusterOperationMetadata) ProtoReflect() protoreflect.Message
func (*ClusterOperationMetadata) Reset
func (x *ClusterOperationMetadata) Reset()
func (*ClusterOperationMetadata) String
func (x *ClusterOperationMetadata) String() string
ClusterOperationStatus
type ClusterOperationStatus struct {
// Output only. A message containing the operation state.
State ClusterOperationStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1.ClusterOperationStatus_State" json:"state,omitempty"`
// Output only. A message containing the detailed operation state.
InnerState string `protobuf:"bytes,2,opt,name=inner_state,json=innerState,proto3" json:"inner_state,omitempty"`
// Output only. A message containing any operation metadata details.
Details string `protobuf:"bytes,3,opt,name=details,proto3" json:"details,omitempty"`
// Output only. The time this state was entered.
StateStartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
// contains filtered or unexported fields
}
The status of the operation.
func (*ClusterOperationStatus) Descriptor
func (*ClusterOperationStatus) Descriptor() ([]byte, []int)
Deprecated: Use ClusterOperationStatus.ProtoReflect.Descriptor instead.
func (*ClusterOperationStatus) GetDetails
func (x *ClusterOperationStatus) GetDetails() string
func (*ClusterOperationStatus) GetInnerState
func (x *ClusterOperationStatus) GetInnerState() string
func (*ClusterOperationStatus) GetState
func (x *ClusterOperationStatus) GetState() ClusterOperationStatus_State
func (*ClusterOperationStatus) GetStateStartTime
func (x *ClusterOperationStatus) GetStateStartTime() *timestamppb.Timestamp
func (*ClusterOperationStatus) ProtoMessage
func (*ClusterOperationStatus) ProtoMessage()
func (*ClusterOperationStatus) ProtoReflect
func (x *ClusterOperationStatus) ProtoReflect() protoreflect.Message
func (*ClusterOperationStatus) Reset
func (x *ClusterOperationStatus) Reset()
func (*ClusterOperationStatus) String
func (x *ClusterOperationStatus) String() string
ClusterOperationStatus_State
type ClusterOperationStatus_State int32
The operation state.
ClusterOperationStatus_UNKNOWN, ClusterOperationStatus_PENDING, ClusterOperationStatus_RUNNING, ClusterOperationStatus_DONE
const (
// Unused.
ClusterOperationStatus_UNKNOWN ClusterOperationStatus_State = 0
// The operation has been created.
ClusterOperationStatus_PENDING ClusterOperationStatus_State = 1
// The operation is running.
ClusterOperationStatus_RUNNING ClusterOperationStatus_State = 2
// The operation is done; either cancelled or completed.
ClusterOperationStatus_DONE ClusterOperationStatus_State = 3
)
func (ClusterOperationStatus_State) Descriptor
func (ClusterOperationStatus_State) Descriptor() protoreflect.EnumDescriptor
func (ClusterOperationStatus_State) Enum
func (x ClusterOperationStatus_State) Enum() *ClusterOperationStatus_State
func (ClusterOperationStatus_State) EnumDescriptor
func (ClusterOperationStatus_State) EnumDescriptor() ([]byte, []int)
Deprecated: Use ClusterOperationStatus_State.Descriptor instead.
func (ClusterOperationStatus_State) Number
func (x ClusterOperationStatus_State) Number() protoreflect.EnumNumber
func (ClusterOperationStatus_State) String
func (x ClusterOperationStatus_State) String() string
func (ClusterOperationStatus_State) Type
func (ClusterOperationStatus_State) Type() protoreflect.EnumType
ClusterSelector
type ClusterSelector struct {
// Optional. The zone where workflow process executes. This parameter does not
// affect the selection of the cluster.
//
// If unspecified, the zone of the first cluster matching the selector
// is used.
Zone string `protobuf:"bytes,1,opt,name=zone,proto3" json:"zone,omitempty"`
// Required. The cluster labels. Cluster must have all labels
// to match.
ClusterLabels map[string]string `protobuf:"bytes,2,rep,name=cluster_labels,json=clusterLabels,proto3" json:"cluster_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// contains filtered or unexported fields
}
A selector that chooses target cluster for jobs based on metadata.
func (*ClusterSelector) Descriptor
func (*ClusterSelector) Descriptor() ([]byte, []int)
Deprecated: Use ClusterSelector.ProtoReflect.Descriptor instead.
func (*ClusterSelector) GetClusterLabels
func (x *ClusterSelector) GetClusterLabels() map[string]string
func (*ClusterSelector) GetZone
func (x *ClusterSelector) GetZone() string
func (*ClusterSelector) ProtoMessage
func (*ClusterSelector) ProtoMessage()
func (*ClusterSelector) ProtoReflect
func (x *ClusterSelector) ProtoReflect() protoreflect.Message
func (*ClusterSelector) Reset
func (x *ClusterSelector) Reset()
func (*ClusterSelector) String
func (x *ClusterSelector) String() string
ClusterStatus
type ClusterStatus struct {
// Output only. The cluster's state.
State ClusterStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1.ClusterStatus_State" json:"state,omitempty"`
// Optional. Output only. Details of cluster's state.
Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
// Output only. Time when this state was entered (see JSON representation of
// [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
StateStartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
// Output only. Additional state information that includes
// status reported by the agent.
Substate ClusterStatus_Substate `protobuf:"varint,4,opt,name=substate,proto3,enum=google.cloud.dataproc.v1.ClusterStatus_Substate" json:"substate,omitempty"`
// contains filtered or unexported fields
}
The status of a cluster and its instances.
func (*ClusterStatus) Descriptor
func (*ClusterStatus) Descriptor() ([]byte, []int)
Deprecated: Use ClusterStatus.ProtoReflect.Descriptor instead.
func (*ClusterStatus) GetDetail
func (x *ClusterStatus) GetDetail() string
func (*ClusterStatus) GetState
func (x *ClusterStatus) GetState() ClusterStatus_State
func (*ClusterStatus) GetStateStartTime
func (x *ClusterStatus) GetStateStartTime() *timestamppb.Timestamp
func (*ClusterStatus) GetSubstate
func (x *ClusterStatus) GetSubstate() ClusterStatus_Substate
func (*ClusterStatus) ProtoMessage
func (*ClusterStatus) ProtoMessage()
func (*ClusterStatus) ProtoReflect
func (x *ClusterStatus) ProtoReflect() protoreflect.Message
func (*ClusterStatus) Reset
func (x *ClusterStatus) Reset()
func (*ClusterStatus) String
func (x *ClusterStatus) String() string
ClusterStatus_State
type ClusterStatus_State int32
The cluster state.
ClusterStatus_UNKNOWN, ClusterStatus_CREATING, ClusterStatus_RUNNING, ClusterStatus_ERROR, ClusterStatus_ERROR_DUE_TO_UPDATE, ClusterStatus_DELETING, ClusterStatus_UPDATING, ClusterStatus_STOPPING, ClusterStatus_STOPPED, ClusterStatus_STARTING, ClusterStatus_REPAIRING
const (
// The cluster state is unknown.
ClusterStatus_UNKNOWN ClusterStatus_State = 0
// The cluster is being created and set up. It is not ready for use.
ClusterStatus_CREATING ClusterStatus_State = 1
// The cluster is currently running and healthy. It is ready for use.
//
// **Note:** The cluster state changes from "creating" to "running" status
// after the master node(s), first two primary worker nodes (and the last
// primary worker node if primary workers > 2) are running.
ClusterStatus_RUNNING ClusterStatus_State = 2
// The cluster encountered an error. It is not ready for use.
ClusterStatus_ERROR ClusterStatus_State = 3
// The cluster has encountered an error while being updated. Jobs can
// be submitted to the cluster, but the cluster cannot be updated.
ClusterStatus_ERROR_DUE_TO_UPDATE ClusterStatus_State = 9
// The cluster is being deleted. It cannot be used.
ClusterStatus_DELETING ClusterStatus_State = 4
// The cluster is being updated. It continues to accept and process jobs.
ClusterStatus_UPDATING ClusterStatus_State = 5
// The cluster is being stopped. It cannot be used.
ClusterStatus_STOPPING ClusterStatus_State = 6
// The cluster is currently stopped. It is not ready for use.
ClusterStatus_STOPPED ClusterStatus_State = 7
// The cluster is being started. It is not ready for use.
ClusterStatus_STARTING ClusterStatus_State = 8
// The cluster is being repaired. It is not ready for use.
ClusterStatus_REPAIRING ClusterStatus_State = 10
)
func (ClusterStatus_State) Descriptor
func (ClusterStatus_State) Descriptor() protoreflect.EnumDescriptor
func (ClusterStatus_State) Enum
func (x ClusterStatus_State) Enum() *ClusterStatus_State
func (ClusterStatus_State) EnumDescriptor
func (ClusterStatus_State) EnumDescriptor() ([]byte, []int)
Deprecated: Use ClusterStatus_State.Descriptor instead.
func (ClusterStatus_State) Number
func (x ClusterStatus_State) Number() protoreflect.EnumNumber
func (ClusterStatus_State) String
func (x ClusterStatus_State) String() string
func (ClusterStatus_State) Type
func (ClusterStatus_State) Type() protoreflect.EnumType
ClusterStatus_Substate
type ClusterStatus_Substate int32
The cluster substate.
ClusterStatus_UNSPECIFIED, ClusterStatus_UNHEALTHY, ClusterStatus_STALE_STATUS
const (
// The cluster substate is unknown.
ClusterStatus_UNSPECIFIED ClusterStatus_Substate = 0
// The cluster is known to be in an unhealthy state
// (for example, critical daemons are not running or HDFS capacity is
// exhausted).
//
// Applies to RUNNING state.
ClusterStatus_UNHEALTHY ClusterStatus_Substate = 1
// The agent-reported status is out of date (may occur if
// Dataproc loses communication with Agent).
//
// Applies to RUNNING state.
ClusterStatus_STALE_STATUS ClusterStatus_Substate = 2
)
func (ClusterStatus_Substate) Descriptor
func (ClusterStatus_Substate) Descriptor() protoreflect.EnumDescriptor
func (ClusterStatus_Substate) Enum
func (x ClusterStatus_Substate) Enum() *ClusterStatus_Substate
func (ClusterStatus_Substate) EnumDescriptor
func (ClusterStatus_Substate) EnumDescriptor() ([]byte, []int)
Deprecated: Use ClusterStatus_Substate.Descriptor instead.
func (ClusterStatus_Substate) Number
func (x ClusterStatus_Substate) Number() protoreflect.EnumNumber
func (ClusterStatus_Substate) String
func (x ClusterStatus_Substate) String() string
func (ClusterStatus_Substate) Type
func (ClusterStatus_Substate) Type() protoreflect.EnumType
Component
type Component int32
Cluster components that can be activated.
Component_COMPONENT_UNSPECIFIED, Component_ANACONDA, Component_DOCKER, Component_DRUID, Component_FLINK, Component_HBASE, Component_HIVE_WEBHCAT, Component_HUDI, Component_JUPYTER, Component_PRESTO, Component_TRINO, Component_RANGER, Component_SOLR, Component_ZEPPELIN, Component_ZOOKEEPER
const (
// Unspecified component. Specifying this will cause Cluster creation to fail.
Component_COMPONENT_UNSPECIFIED Component = 0
// The Anaconda component is no longer supported or applicable to
// [supported Dataproc on Compute Engine image versions]
// (https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-version-clusters#supported-dataproc-image-versions).
// It cannot be activated on clusters created with supported Dataproc on
// Compute Engine image versions.
Component_ANACONDA Component = 5
// Docker
Component_DOCKER Component = 13
// The Druid query engine. (alpha)
Component_DRUID Component = 9
// Flink
Component_FLINK Component = 14
// HBase. (beta)
Component_HBASE Component = 11
// The Hive Web HCatalog (the REST service for accessing HCatalog).
Component_HIVE_WEBHCAT Component = 3
// Hudi.
Component_HUDI Component = 18
// The Jupyter Notebook.
Component_JUPYTER Component = 1
// The Presto query engine.
Component_PRESTO Component = 6
// The Trino query engine.
Component_TRINO Component = 17
// The Ranger service.
Component_RANGER Component = 12
// The Solr service.
Component_SOLR Component = 10
// The Zeppelin notebook.
Component_ZEPPELIN Component = 4
// The Zookeeper service.
Component_ZOOKEEPER Component = 8
)
func (Component) Descriptor
func (Component) Descriptor() protoreflect.EnumDescriptor
func (Component) Enum
func (Component) EnumDescriptor
Deprecated: Use Component.Descriptor instead.
func (Component) Number
func (x Component) Number() protoreflect.EnumNumber
func (Component) String
func (Component) Type
func (Component) Type() protoreflect.EnumType
ConfidentialInstanceConfig
type ConfidentialInstanceConfig struct {
// Optional. Defines whether the instance should have confidential compute
// enabled.
EnableConfidentialCompute bool `protobuf:"varint,1,opt,name=enable_confidential_compute,json=enableConfidentialCompute,proto3" json:"enable_confidential_compute,omitempty"`
// contains filtered or unexported fields
}
Confidential Instance Config for clusters using Confidential VMs
func (*ConfidentialInstanceConfig) Descriptor
func (*ConfidentialInstanceConfig) Descriptor() ([]byte, []int)
Deprecated: Use ConfidentialInstanceConfig.ProtoReflect.Descriptor instead.
func (*ConfidentialInstanceConfig) GetEnableConfidentialCompute
func (x *ConfidentialInstanceConfig) GetEnableConfidentialCompute() bool
func (*ConfidentialInstanceConfig) ProtoMessage
func (*ConfidentialInstanceConfig) ProtoMessage()
func (*ConfidentialInstanceConfig) ProtoReflect
func (x *ConfidentialInstanceConfig) ProtoReflect() protoreflect.Message
func (*ConfidentialInstanceConfig) Reset
func (x *ConfidentialInstanceConfig) Reset()
func (*ConfidentialInstanceConfig) String
func (x *ConfidentialInstanceConfig) String() string
CreateAutoscalingPolicyRequest
type CreateAutoscalingPolicyRequest struct {
// Required. The "resource name" of the region or location, as described
// in https://cloud.google.com/apis/design/resource_names.
//
// - For `projects.regions.autoscalingPolicies.create`, the resource name
// of the region has the following format:
// `projects/{project_id}/regions/{region}`
//
// - For `projects.locations.autoscalingPolicies.create`, the resource name
// of the location has the following format:
// `projects/{project_id}/locations/{location}`
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. The autoscaling policy to create.
Policy *AutoscalingPolicy `protobuf:"bytes,2,opt,name=policy,proto3" json:"policy,omitempty"`
// contains filtered or unexported fields
}
A request to create an autoscaling policy.
func (*CreateAutoscalingPolicyRequest) Descriptor
func (*CreateAutoscalingPolicyRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateAutoscalingPolicyRequest.ProtoReflect.Descriptor instead.
func (*CreateAutoscalingPolicyRequest) GetParent
func (x *CreateAutoscalingPolicyRequest) GetParent() string
func (*CreateAutoscalingPolicyRequest) GetPolicy
func (x *CreateAutoscalingPolicyRequest) GetPolicy() *AutoscalingPolicy
func (*CreateAutoscalingPolicyRequest) ProtoMessage
func (*CreateAutoscalingPolicyRequest) ProtoMessage()
func (*CreateAutoscalingPolicyRequest) ProtoReflect
func (x *CreateAutoscalingPolicyRequest) ProtoReflect() protoreflect.Message
func (*CreateAutoscalingPolicyRequest) Reset
func (x *CreateAutoscalingPolicyRequest) Reset()
func (*CreateAutoscalingPolicyRequest) String
func (x *CreateAutoscalingPolicyRequest) String() string
CreateBatchRequest
type CreateBatchRequest struct {
// Required. The parent resource where this batch will be created.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. The batch to create.
Batch *Batch `protobuf:"bytes,2,opt,name=batch,proto3" json:"batch,omitempty"`
// Optional. The ID to use for the batch, which will become the final
// component of the batch's resource name.
//
// This value must be 4-63 characters. Valid characters are `/[a-z][0-9]-/`.
BatchId string `protobuf:"bytes,3,opt,name=batch_id,json=batchId,proto3" json:"batch_id,omitempty"`
// Optional. A unique ID used to identify the request. If the service
// receives two
// [CreateBatchRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateBatchRequest)s
// with the same request_id, the second request is ignored and the
// Operation that corresponds to the first Batch created and stored
// in the backend is returned.
//
// Recommendation: Set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The value must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// contains filtered or unexported fields
}
A request to create a batch workload.
func (*CreateBatchRequest) Descriptor
func (*CreateBatchRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateBatchRequest.ProtoReflect.Descriptor instead.
func (*CreateBatchRequest) GetBatch
func (x *CreateBatchRequest) GetBatch() *Batch
func (*CreateBatchRequest) GetBatchId
func (x *CreateBatchRequest) GetBatchId() string
func (*CreateBatchRequest) GetParent
func (x *CreateBatchRequest) GetParent() string
func (*CreateBatchRequest) GetRequestId
func (x *CreateBatchRequest) GetRequestId() string
func (*CreateBatchRequest) ProtoMessage
func (*CreateBatchRequest) ProtoMessage()
func (*CreateBatchRequest) ProtoReflect
func (x *CreateBatchRequest) ProtoReflect() protoreflect.Message
func (*CreateBatchRequest) Reset
func (x *CreateBatchRequest) Reset()
func (*CreateBatchRequest) String
func (x *CreateBatchRequest) String() string
CreateClusterRequest
type CreateClusterRequest struct {
// Required. The ID of the Google Cloud Platform project that the cluster
// belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
// Required. The cluster to create.
Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"`
// Optional. A unique ID used to identify the request. If the server receives
// two
// [CreateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateClusterRequest)s
// with the same id, then the second request will be ignored and the
// first [google.longrunning.Operation][google.longrunning.Operation] created
// and stored in the backend is returned.
//
// It is recommended to always set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The ID must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// Optional. Failure action when primary worker creation fails.
ActionOnFailedPrimaryWorkers FailureAction `protobuf:"varint,5,opt,name=action_on_failed_primary_workers,json=actionOnFailedPrimaryWorkers,proto3,enum=google.cloud.dataproc.v1.FailureAction" json:"action_on_failed_primary_workers,omitempty"`
// contains filtered or unexported fields
}
A request to create a cluster.
func (*CreateClusterRequest) Descriptor
func (*CreateClusterRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateClusterRequest.ProtoReflect.Descriptor instead.
func (*CreateClusterRequest) GetActionOnFailedPrimaryWorkers
func (x *CreateClusterRequest) GetActionOnFailedPrimaryWorkers() FailureAction
func (*CreateClusterRequest) GetCluster
func (x *CreateClusterRequest) GetCluster() *Cluster
func (*CreateClusterRequest) GetProjectId
func (x *CreateClusterRequest) GetProjectId() string
func (*CreateClusterRequest) GetRegion
func (x *CreateClusterRequest) GetRegion() string
func (*CreateClusterRequest) GetRequestId
func (x *CreateClusterRequest) GetRequestId() string
func (*CreateClusterRequest) ProtoMessage
func (*CreateClusterRequest) ProtoMessage()
func (*CreateClusterRequest) ProtoReflect
func (x *CreateClusterRequest) ProtoReflect() protoreflect.Message
func (*CreateClusterRequest) Reset
func (x *CreateClusterRequest) Reset()
func (*CreateClusterRequest) String
func (x *CreateClusterRequest) String() string
CreateNodeGroupRequest
type CreateNodeGroupRequest struct {
// Required. The parent resource where this node group will be created.
// Format: `projects/{project}/regions/{region}/clusters/{cluster}`
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. The node group to create.
NodeGroup *NodeGroup `protobuf:"bytes,2,opt,name=node_group,json=nodeGroup,proto3" json:"node_group,omitempty"`
// Optional. An optional node group ID. Generated if not specified.
//
// The ID must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). Cannot begin or end with underscore
// or hyphen. Must consist of from 3 to 33 characters.
NodeGroupId string `protobuf:"bytes,4,opt,name=node_group_id,json=nodeGroupId,proto3" json:"node_group_id,omitempty"`
// Optional. A unique ID used to identify the request. If the server receives
// two
// [CreateNodeGroupRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateNodeGroupRequest)s
// with the same ID, the second request is ignored and the
// first [google.longrunning.Operation][google.longrunning.Operation] created
// and stored in the backend is returned.
//
// Recommendation: Set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The ID must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// contains filtered or unexported fields
}
A request to create a node group.
func (*CreateNodeGroupRequest) Descriptor
func (*CreateNodeGroupRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateNodeGroupRequest.ProtoReflect.Descriptor instead.
func (*CreateNodeGroupRequest) GetNodeGroup
func (x *CreateNodeGroupRequest) GetNodeGroup() *NodeGroup
func (*CreateNodeGroupRequest) GetNodeGroupId
func (x *CreateNodeGroupRequest) GetNodeGroupId() string
func (*CreateNodeGroupRequest) GetParent
func (x *CreateNodeGroupRequest) GetParent() string
func (*CreateNodeGroupRequest) GetRequestId
func (x *CreateNodeGroupRequest) GetRequestId() string
func (*CreateNodeGroupRequest) ProtoMessage
func (*CreateNodeGroupRequest) ProtoMessage()
func (*CreateNodeGroupRequest) ProtoReflect
func (x *CreateNodeGroupRequest) ProtoReflect() protoreflect.Message
func (*CreateNodeGroupRequest) Reset
func (x *CreateNodeGroupRequest) Reset()
func (*CreateNodeGroupRequest) String
func (x *CreateNodeGroupRequest) String() string
CreateSessionRequest
type CreateSessionRequest struct {
// Required. The parent resource where this session will be created.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. The interactive session to create.
Session *Session `protobuf:"bytes,2,opt,name=session,proto3" json:"session,omitempty"`
// Required. The ID to use for the session, which becomes the final component
// of the session's resource name.
//
// This value must be 4-63 characters. Valid characters
// are /[a-z][0-9]-/.
SessionId string `protobuf:"bytes,3,opt,name=session_id,json=sessionId,proto3" json:"session_id,omitempty"`
// Optional. A unique ID used to identify the request. If the service
// receives two
// [CreateSessionRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.CreateSessionRequest)s
// with the same ID, the second request is ignored, and the
// first [Session][google.cloud.dataproc.v1.Session] is created and stored in
// the backend.
//
// Recommendation: Set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The value must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// contains filtered or unexported fields
}
A request to create a session.
func (*CreateSessionRequest) Descriptor
func (*CreateSessionRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateSessionRequest.ProtoReflect.Descriptor instead.
func (*CreateSessionRequest) GetParent
func (x *CreateSessionRequest) GetParent() string
func (*CreateSessionRequest) GetRequestId
func (x *CreateSessionRequest) GetRequestId() string
func (*CreateSessionRequest) GetSession
func (x *CreateSessionRequest) GetSession() *Session
func (*CreateSessionRequest) GetSessionId
func (x *CreateSessionRequest) GetSessionId() string
func (*CreateSessionRequest) ProtoMessage
func (*CreateSessionRequest) ProtoMessage()
func (*CreateSessionRequest) ProtoReflect
func (x *CreateSessionRequest) ProtoReflect() protoreflect.Message
func (*CreateSessionRequest) Reset
func (x *CreateSessionRequest) Reset()
func (*CreateSessionRequest) String
func (x *CreateSessionRequest) String() string
CreateSessionTemplateRequest
type CreateSessionTemplateRequest struct {
// Required. The parent resource where this session template will be created.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. The session template to create.
SessionTemplate *SessionTemplate `protobuf:"bytes,3,opt,name=session_template,json=sessionTemplate,proto3" json:"session_template,omitempty"`
// contains filtered or unexported fields
}
A request to create a session template.
func (*CreateSessionTemplateRequest) Descriptor
func (*CreateSessionTemplateRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateSessionTemplateRequest.ProtoReflect.Descriptor instead.
func (*CreateSessionTemplateRequest) GetParent
func (x *CreateSessionTemplateRequest) GetParent() string
func (*CreateSessionTemplateRequest) GetSessionTemplate
func (x *CreateSessionTemplateRequest) GetSessionTemplate() *SessionTemplate
func (*CreateSessionTemplateRequest) ProtoMessage
func (*CreateSessionTemplateRequest) ProtoMessage()
func (*CreateSessionTemplateRequest) ProtoReflect
func (x *CreateSessionTemplateRequest) ProtoReflect() protoreflect.Message
func (*CreateSessionTemplateRequest) Reset
func (x *CreateSessionTemplateRequest) Reset()
func (*CreateSessionTemplateRequest) String
func (x *CreateSessionTemplateRequest) String() string
CreateWorkflowTemplateRequest
type CreateWorkflowTemplateRequest struct {
// Required. The resource name of the region or location, as described
// in https://cloud.google.com/apis/design/resource_names.
//
// - For `projects.regions.workflowTemplates.create`, the resource name of the
// region has the following format:
// `projects/{project_id}/regions/{region}`
//
// - For `projects.locations.workflowTemplates.create`, the resource name of
// the location has the following format:
// `projects/{project_id}/locations/{location}`
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. The Dataproc workflow template to create.
Template *WorkflowTemplate `protobuf:"bytes,2,opt,name=template,proto3" json:"template,omitempty"`
// contains filtered or unexported fields
}
A request to create a workflow template.
func (*CreateWorkflowTemplateRequest) Descriptor
func (*CreateWorkflowTemplateRequest) Descriptor() ([]byte, []int)
Deprecated: Use CreateWorkflowTemplateRequest.ProtoReflect.Descriptor instead.
func (*CreateWorkflowTemplateRequest) GetParent
func (x *CreateWorkflowTemplateRequest) GetParent() string
func (*CreateWorkflowTemplateRequest) GetTemplate
func (x *CreateWorkflowTemplateRequest) GetTemplate() *WorkflowTemplate
func (*CreateWorkflowTemplateRequest) ProtoMessage
func (*CreateWorkflowTemplateRequest) ProtoMessage()
func (*CreateWorkflowTemplateRequest) ProtoReflect
func (x *CreateWorkflowTemplateRequest) ProtoReflect() protoreflect.Message
func (*CreateWorkflowTemplateRequest) Reset
func (x *CreateWorkflowTemplateRequest) Reset()
func (*CreateWorkflowTemplateRequest) String
func (x *CreateWorkflowTemplateRequest) String() string
DataprocMetricConfig
type DataprocMetricConfig struct {
// Required. Metrics sources to enable.
Metrics []*DataprocMetricConfig_Metric `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty"`
// contains filtered or unexported fields
}
Dataproc metric config.
func (*DataprocMetricConfig) Descriptor
func (*DataprocMetricConfig) Descriptor() ([]byte, []int)
Deprecated: Use DataprocMetricConfig.ProtoReflect.Descriptor instead.
func (*DataprocMetricConfig) GetMetrics
func (x *DataprocMetricConfig) GetMetrics() []*DataprocMetricConfig_Metric
func (*DataprocMetricConfig) ProtoMessage
func (*DataprocMetricConfig) ProtoMessage()
func (*DataprocMetricConfig) ProtoReflect
func (x *DataprocMetricConfig) ProtoReflect() protoreflect.Message
func (*DataprocMetricConfig) Reset
func (x *DataprocMetricConfig) Reset()
func (*DataprocMetricConfig) String
func (x *DataprocMetricConfig) String() string
DataprocMetricConfig_Metric
type DataprocMetricConfig_Metric struct {
// Required. A standard set of metrics is collected unless `metricOverrides`
// are specified for the metric source (see [Custom metrics]
// (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics)
// for more information).
MetricSource DataprocMetricConfig_MetricSource `protobuf:"varint,1,opt,name=metric_source,json=metricSource,proto3,enum=google.cloud.dataproc.v1.DataprocMetricConfig_MetricSource" json:"metric_source,omitempty"`
// Optional. Specify one or more [Custom metrics]
// (https://cloud.google.com/dataproc/docs/guides/dataproc-metrics#custom_metrics)
// to collect for the metric source (for the `SPARK` metric source, any
// [Spark metric]
// (https://spark.apache.org/docs/latest/monitoring.html#metrics) can be
// specified).
//
// Provide metrics in the following format:
// METRIC_SOURCE:INSTANCE:GROUP:METRIC
// Use camelcase as appropriate.
//
// Examples:
//
// ```
// yarn:ResourceManager:QueueMetrics:AppsCompleted
// spark:driver:DAGScheduler:job.allJobs
// sparkHistoryServer:JVM:Memory:NonHeapMemoryUsage.committed
// hiveserver2:JVM:Memory:NonHeapMemoryUsage.used
// ```
//
// Notes:
//
// - Only the specified overridden metrics are collected for the
// metric source. For example, if one or more `spark:executor` metrics
// are listed as metric overrides, other `SPARK` metrics are not
// collected. The collection of the metrics for other enabled custom
// metric sources is unaffected. For example, if both `SPARK` and `YARN`
// metric sources are enabled, and overrides are provided for Spark
// metrics only, all YARN metrics are collected.
MetricOverrides []string `protobuf:"bytes,2,rep,name=metric_overrides,json=metricOverrides,proto3" json:"metric_overrides,omitempty"`
// contains filtered or unexported fields
}
A Dataproc custom metric.
func (*DataprocMetricConfig_Metric) Descriptor
func (*DataprocMetricConfig_Metric) Descriptor() ([]byte, []int)
Deprecated: Use DataprocMetricConfig_Metric.ProtoReflect.Descriptor instead.
func (*DataprocMetricConfig_Metric) GetMetricOverrides
func (x *DataprocMetricConfig_Metric) GetMetricOverrides() []string
func (*DataprocMetricConfig_Metric) GetMetricSource
func (x *DataprocMetricConfig_Metric) GetMetricSource() DataprocMetricConfig_MetricSource
func (*DataprocMetricConfig_Metric) ProtoMessage
func (*DataprocMetricConfig_Metric) ProtoMessage()
func (*DataprocMetricConfig_Metric) ProtoReflect
func (x *DataprocMetricConfig_Metric) ProtoReflect() protoreflect.Message
func (*DataprocMetricConfig_Metric) Reset
func (x *DataprocMetricConfig_Metric) Reset()
func (*DataprocMetricConfig_Metric) String
func (x *DataprocMetricConfig_Metric) String() string
DataprocMetricConfig_MetricSource
type DataprocMetricConfig_MetricSource int32
A source for the collection of Dataproc custom metrics (see Custom metrics).
DataprocMetricConfig_METRIC_SOURCE_UNSPECIFIED, DataprocMetricConfig_MONITORING_AGENT_DEFAULTS, DataprocMetricConfig_HDFS, DataprocMetricConfig_SPARK, DataprocMetricConfig_YARN, DataprocMetricConfig_SPARK_HISTORY_SERVER, DataprocMetricConfig_HIVESERVER2, DataprocMetricConfig_HIVEMETASTORE, DataprocMetricConfig_FLINK
const (
// Required unspecified metric source.
DataprocMetricConfig_METRIC_SOURCE_UNSPECIFIED DataprocMetricConfig_MetricSource = 0
// Monitoring agent metrics. If this source is enabled,
// Dataproc enables the monitoring agent in Compute Engine,
// and collects monitoring agent metrics, which are published
// with an `agent.googleapis.com` prefix.
DataprocMetricConfig_MONITORING_AGENT_DEFAULTS DataprocMetricConfig_MetricSource = 1
// HDFS metric source.
DataprocMetricConfig_HDFS DataprocMetricConfig_MetricSource = 2
// Spark metric source.
DataprocMetricConfig_SPARK DataprocMetricConfig_MetricSource = 3
// YARN metric source.
DataprocMetricConfig_YARN DataprocMetricConfig_MetricSource = 4
// Spark History Server metric source.
DataprocMetricConfig_SPARK_HISTORY_SERVER DataprocMetricConfig_MetricSource = 5
// Hiveserver2 metric source.
DataprocMetricConfig_HIVESERVER2 DataprocMetricConfig_MetricSource = 6
// Hivemetastore metric source.
DataprocMetricConfig_HIVEMETASTORE DataprocMetricConfig_MetricSource = 7
// Flink metric source.
DataprocMetricConfig_FLINK DataprocMetricConfig_MetricSource = 8
)
func (DataprocMetricConfig_MetricSource) Descriptor
func (DataprocMetricConfig_MetricSource) Descriptor() protoreflect.EnumDescriptor
func (DataprocMetricConfig_MetricSource) Enum
func (x DataprocMetricConfig_MetricSource) Enum() *DataprocMetricConfig_MetricSource
func (DataprocMetricConfig_MetricSource) EnumDescriptor
func (DataprocMetricConfig_MetricSource) EnumDescriptor() ([]byte, []int)
Deprecated: Use DataprocMetricConfig_MetricSource.Descriptor instead.
func (DataprocMetricConfig_MetricSource) Number
func (x DataprocMetricConfig_MetricSource) Number() protoreflect.EnumNumber
func (DataprocMetricConfig_MetricSource) String
func (x DataprocMetricConfig_MetricSource) String() string
func (DataprocMetricConfig_MetricSource) Type
func (DataprocMetricConfig_MetricSource) Type() protoreflect.EnumType
DeleteAutoscalingPolicyRequest
type DeleteAutoscalingPolicyRequest struct {
// Required. The "resource name" of the autoscaling policy, as described
// in https://cloud.google.com/apis/design/resource_names.
//
// - For `projects.regions.autoscalingPolicies.delete`, the resource name
// of the policy has the following format:
// `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`
//
// - For `projects.locations.autoscalingPolicies.delete`, the resource name
// of the policy has the following format:
// `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
A request to delete an autoscaling policy.
Autoscaling policies in use by one or more clusters will not be deleted.
func (*DeleteAutoscalingPolicyRequest) Descriptor
func (*DeleteAutoscalingPolicyRequest) Descriptor() ([]byte, []int)
Deprecated: Use DeleteAutoscalingPolicyRequest.ProtoReflect.Descriptor instead.
func (*DeleteAutoscalingPolicyRequest) GetName
func (x *DeleteAutoscalingPolicyRequest) GetName() string
func (*DeleteAutoscalingPolicyRequest) ProtoMessage
func (*DeleteAutoscalingPolicyRequest) ProtoMessage()
func (*DeleteAutoscalingPolicyRequest) ProtoReflect
func (x *DeleteAutoscalingPolicyRequest) ProtoReflect() protoreflect.Message
func (*DeleteAutoscalingPolicyRequest) Reset
func (x *DeleteAutoscalingPolicyRequest) Reset()
func (*DeleteAutoscalingPolicyRequest) String
func (x *DeleteAutoscalingPolicyRequest) String() string
DeleteBatchRequest
type DeleteBatchRequest struct {
// Required. The fully qualified name of the batch to delete
// in the format
// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID"
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
A request to delete a batch workload.
func (*DeleteBatchRequest) Descriptor
func (*DeleteBatchRequest) Descriptor() ([]byte, []int)
Deprecated: Use DeleteBatchRequest.ProtoReflect.Descriptor instead.
func (*DeleteBatchRequest) GetName
func (x *DeleteBatchRequest) GetName() string
func (*DeleteBatchRequest) ProtoMessage
func (*DeleteBatchRequest) ProtoMessage()
func (*DeleteBatchRequest) ProtoReflect
func (x *DeleteBatchRequest) ProtoReflect() protoreflect.Message
func (*DeleteBatchRequest) Reset
func (x *DeleteBatchRequest) Reset()
func (*DeleteBatchRequest) String
func (x *DeleteBatchRequest) String() string
DeleteClusterRequest
type DeleteClusterRequest struct {
// Required. The ID of the Google Cloud Platform project that the cluster
// belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
// Required. The cluster name.
ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
// Optional. Specifying the `cluster_uuid` means the RPC should fail
// (with error NOT_FOUND) if cluster with specified UUID does not exist.
ClusterUuid string `protobuf:"bytes,4,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
// Optional. A unique ID used to identify the request. If the server
// receives two
// [DeleteClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteClusterRequest)s
// with the same id, then the second request will be ignored and the
// first [google.longrunning.Operation][google.longrunning.Operation] created
// and stored in the backend is returned.
//
// It is recommended to always set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The ID must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// contains filtered or unexported fields
}
A request to delete a cluster.
func (*DeleteClusterRequest) Descriptor
func (*DeleteClusterRequest) Descriptor() ([]byte, []int)
Deprecated: Use DeleteClusterRequest.ProtoReflect.Descriptor instead.
func (*DeleteClusterRequest) GetClusterName
func (x *DeleteClusterRequest) GetClusterName() string
func (*DeleteClusterRequest) GetClusterUuid
func (x *DeleteClusterRequest) GetClusterUuid() string
func (*DeleteClusterRequest) GetProjectId
func (x *DeleteClusterRequest) GetProjectId() string
func (*DeleteClusterRequest) GetRegion
func (x *DeleteClusterRequest) GetRegion() string
func (*DeleteClusterRequest) GetRequestId
func (x *DeleteClusterRequest) GetRequestId() string
func (*DeleteClusterRequest) ProtoMessage
func (*DeleteClusterRequest) ProtoMessage()
func (*DeleteClusterRequest) ProtoReflect
func (x *DeleteClusterRequest) ProtoReflect() protoreflect.Message
func (*DeleteClusterRequest) Reset
func (x *DeleteClusterRequest) Reset()
func (*DeleteClusterRequest) String
func (x *DeleteClusterRequest) String() string
DeleteJobRequest
type DeleteJobRequest struct {
// Required. The ID of the Google Cloud Platform project that the job
// belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
// Required. The job ID.
JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
// contains filtered or unexported fields
}
A request to delete a job.
func (*DeleteJobRequest) Descriptor
func (*DeleteJobRequest) Descriptor() ([]byte, []int)
Deprecated: Use DeleteJobRequest.ProtoReflect.Descriptor instead.
func (*DeleteJobRequest) GetJobId
func (x *DeleteJobRequest) GetJobId() string
func (*DeleteJobRequest) GetProjectId
func (x *DeleteJobRequest) GetProjectId() string
func (*DeleteJobRequest) GetRegion
func (x *DeleteJobRequest) GetRegion() string
func (*DeleteJobRequest) ProtoMessage
func (*DeleteJobRequest) ProtoMessage()
func (*DeleteJobRequest) ProtoReflect
func (x *DeleteJobRequest) ProtoReflect() protoreflect.Message
func (*DeleteJobRequest) Reset
func (x *DeleteJobRequest) Reset()
func (*DeleteJobRequest) String
func (x *DeleteJobRequest) String() string
DeleteSessionRequest
type DeleteSessionRequest struct {
// Required. The name of the session resource to delete.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Optional. A unique ID used to identify the request. If the service
// receives two
// [DeleteSessionRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.DeleteSessionRequest)s
// with the same ID, the second request is ignored.
//
// Recommendation: Set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The value must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// contains filtered or unexported fields
}
A request to delete a session.
func (*DeleteSessionRequest) Descriptor
func (*DeleteSessionRequest) Descriptor() ([]byte, []int)
Deprecated: Use DeleteSessionRequest.ProtoReflect.Descriptor instead.
func (*DeleteSessionRequest) GetName
func (x *DeleteSessionRequest) GetName() string
func (*DeleteSessionRequest) GetRequestId
func (x *DeleteSessionRequest) GetRequestId() string
func (*DeleteSessionRequest) ProtoMessage
func (*DeleteSessionRequest) ProtoMessage()
func (*DeleteSessionRequest) ProtoReflect
func (x *DeleteSessionRequest) ProtoReflect() protoreflect.Message
func (*DeleteSessionRequest) Reset
func (x *DeleteSessionRequest) Reset()
func (*DeleteSessionRequest) String
func (x *DeleteSessionRequest) String() string
DeleteSessionTemplateRequest
type DeleteSessionTemplateRequest struct {
// Required. The name of the session template resource to delete.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
A request to delete a session template.
func (*DeleteSessionTemplateRequest) Descriptor
func (*DeleteSessionTemplateRequest) Descriptor() ([]byte, []int)
Deprecated: Use DeleteSessionTemplateRequest.ProtoReflect.Descriptor instead.
func (*DeleteSessionTemplateRequest) GetName
func (x *DeleteSessionTemplateRequest) GetName() string
func (*DeleteSessionTemplateRequest) ProtoMessage
func (*DeleteSessionTemplateRequest) ProtoMessage()
func (*DeleteSessionTemplateRequest) ProtoReflect
func (x *DeleteSessionTemplateRequest) ProtoReflect() protoreflect.Message
func (*DeleteSessionTemplateRequest) Reset
func (x *DeleteSessionTemplateRequest) Reset()
func (*DeleteSessionTemplateRequest) String
func (x *DeleteSessionTemplateRequest) String() string
DeleteWorkflowTemplateRequest
type DeleteWorkflowTemplateRequest struct {
// Required. The resource name of the workflow template, as described
// in https://cloud.google.com/apis/design/resource_names.
//
// * For `projects.regions.workflowTemplates.delete`, the resource name
// of the template has the following format:
//
// `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
//
// - For `projects.locations.workflowTemplates.delete`, the resource name
// of the template has the following format:
// `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Optional. The version of workflow template to delete. If specified,
// will only delete the template if the current server version matches
// specified version.
Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
A request to delete a workflow template.
Currently started workflows will remain running.
func (*DeleteWorkflowTemplateRequest) Descriptor
func (*DeleteWorkflowTemplateRequest) Descriptor() ([]byte, []int)
Deprecated: Use DeleteWorkflowTemplateRequest.ProtoReflect.Descriptor instead.
func (*DeleteWorkflowTemplateRequest) GetName
func (x *DeleteWorkflowTemplateRequest) GetName() string
func (*DeleteWorkflowTemplateRequest) GetVersion
func (x *DeleteWorkflowTemplateRequest) GetVersion() int32
func (*DeleteWorkflowTemplateRequest) ProtoMessage
func (*DeleteWorkflowTemplateRequest) ProtoMessage()
func (*DeleteWorkflowTemplateRequest) ProtoReflect
func (x *DeleteWorkflowTemplateRequest) ProtoReflect() protoreflect.Message
func (*DeleteWorkflowTemplateRequest) Reset
func (x *DeleteWorkflowTemplateRequest) Reset()
func (*DeleteWorkflowTemplateRequest) String
func (x *DeleteWorkflowTemplateRequest) String() string
DiagnoseClusterRequest
type DiagnoseClusterRequest struct {
// Required. The ID of the Google Cloud Platform project that the cluster
// belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
// Required. The cluster name.
ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
// Optional. The output Cloud Storage directory for the diagnostic
// tarball. If not specified, a task-specific directory in the cluster's
// staging bucket will be used.
TarballGcsDir string `protobuf:"bytes,4,opt,name=tarball_gcs_dir,json=tarballGcsDir,proto3" json:"tarball_gcs_dir,omitempty"`
// Optional. The access type to the diagnostic tarball. If not
// specified, falls back to default access of the bucket.
TarballAccess DiagnoseClusterRequest_TarballAccess `protobuf:"varint,5,opt,name=tarball_access,json=tarballAccess,proto3,enum=google.cloud.dataproc.v1.DiagnoseClusterRequest_TarballAccess" json:"tarball_access,omitempty"`
// Optional. Time interval in which diagnosis should be carried out on the
// cluster.
DiagnosisInterval *interval.Interval `protobuf:"bytes,6,opt,name=diagnosis_interval,json=diagnosisInterval,proto3" json:"diagnosis_interval,omitempty"`
// Optional. Specifies a list of jobs on which diagnosis is to be performed.
// Format: projects/{project}/regions/{region}/jobs/{job}
Jobs []string `protobuf:"bytes,10,rep,name=jobs,proto3" json:"jobs,omitempty"`
// Optional. Specifies a list of yarn applications on which diagnosis is to be
// performed.
YarnApplicationIds []string `protobuf:"bytes,11,rep,name=yarn_application_ids,json=yarnApplicationIds,proto3" json:"yarn_application_ids,omitempty"`
// contains filtered or unexported fields
}
A request to collect cluster diagnostic information.
func (*DiagnoseClusterRequest) Descriptor
func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int)
Deprecated: Use DiagnoseClusterRequest.ProtoReflect.Descriptor instead.
func (*DiagnoseClusterRequest) GetClusterName
func (x *DiagnoseClusterRequest) GetClusterName() string
func (*DiagnoseClusterRequest) GetDiagnosisInterval
func (x *DiagnoseClusterRequest) GetDiagnosisInterval() *interval.Interval
func (*DiagnoseClusterRequest) GetJobs
func (x *DiagnoseClusterRequest) GetJobs() []string
func (*DiagnoseClusterRequest) GetProjectId
func (x *DiagnoseClusterRequest) GetProjectId() string
func (*DiagnoseClusterRequest) GetRegion
func (x *DiagnoseClusterRequest) GetRegion() string
func (*DiagnoseClusterRequest) GetTarballAccess
func (x *DiagnoseClusterRequest) GetTarballAccess() DiagnoseClusterRequest_TarballAccess
func (*DiagnoseClusterRequest) GetTarballGcsDir
func (x *DiagnoseClusterRequest) GetTarballGcsDir() string
func (*DiagnoseClusterRequest) GetYarnApplicationIds
func (x *DiagnoseClusterRequest) GetYarnApplicationIds() []string
func (*DiagnoseClusterRequest) ProtoMessage
func (*DiagnoseClusterRequest) ProtoMessage()
func (*DiagnoseClusterRequest) ProtoReflect
func (x *DiagnoseClusterRequest) ProtoReflect() protoreflect.Message
func (*DiagnoseClusterRequest) Reset
func (x *DiagnoseClusterRequest) Reset()
func (*DiagnoseClusterRequest) String
func (x *DiagnoseClusterRequest) String() string
DiagnoseClusterRequest_TarballAccess
type DiagnoseClusterRequest_TarballAccess int32
Defines who has access to the diagnostic tarball
DiagnoseClusterRequest_TARBALL_ACCESS_UNSPECIFIED, DiagnoseClusterRequest_GOOGLE_CLOUD_SUPPORT, DiagnoseClusterRequest_GOOGLE_DATAPROC_DIAGNOSE
const (
// Tarball Access unspecified. Falls back to default access of the bucket
DiagnoseClusterRequest_TARBALL_ACCESS_UNSPECIFIED DiagnoseClusterRequest_TarballAccess = 0
// Google Cloud Support group has read access to the
// diagnostic tarball
DiagnoseClusterRequest_GOOGLE_CLOUD_SUPPORT DiagnoseClusterRequest_TarballAccess = 1
// Google Cloud Dataproc Diagnose service account has read access to the
// diagnostic tarball
DiagnoseClusterRequest_GOOGLE_DATAPROC_DIAGNOSE DiagnoseClusterRequest_TarballAccess = 2
)
func (DiagnoseClusterRequest_TarballAccess) Descriptor
func (DiagnoseClusterRequest_TarballAccess) Descriptor() protoreflect.EnumDescriptor
func (DiagnoseClusterRequest_TarballAccess) Enum
func (x DiagnoseClusterRequest_TarballAccess) Enum() *DiagnoseClusterRequest_TarballAccess
func (DiagnoseClusterRequest_TarballAccess) EnumDescriptor
func (DiagnoseClusterRequest_TarballAccess) EnumDescriptor() ([]byte, []int)
Deprecated: Use DiagnoseClusterRequest_TarballAccess.Descriptor instead.
func (DiagnoseClusterRequest_TarballAccess) Number
func (x DiagnoseClusterRequest_TarballAccess) Number() protoreflect.EnumNumber
func (DiagnoseClusterRequest_TarballAccess) String
func (x DiagnoseClusterRequest_TarballAccess) String() string
func (DiagnoseClusterRequest_TarballAccess) Type
func (DiagnoseClusterRequest_TarballAccess) Type() protoreflect.EnumType
DiagnoseClusterResults
type DiagnoseClusterResults struct {
// Output only. The Cloud Storage URI of the diagnostic output.
// The output report is a plain text file with a summary of collected
// diagnostics.
OutputUri string `protobuf:"bytes,1,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
// contains filtered or unexported fields
}
The location of diagnostic output.
func (*DiagnoseClusterResults) Descriptor
func (*DiagnoseClusterResults) Descriptor() ([]byte, []int)
Deprecated: Use DiagnoseClusterResults.ProtoReflect.Descriptor instead.
func (*DiagnoseClusterResults) GetOutputUri
func (x *DiagnoseClusterResults) GetOutputUri() string
func (*DiagnoseClusterResults) ProtoMessage
func (*DiagnoseClusterResults) ProtoMessage()
func (*DiagnoseClusterResults) ProtoReflect
func (x *DiagnoseClusterResults) ProtoReflect() protoreflect.Message
func (*DiagnoseClusterResults) Reset
func (x *DiagnoseClusterResults) Reset()
func (*DiagnoseClusterResults) String
func (x *DiagnoseClusterResults) String() string
DiskConfig
type DiskConfig struct {
// Optional. Type of the boot disk (default is "pd-standard").
// Valid values: "pd-balanced" (Persistent Disk Balanced Solid State Drive),
// "pd-ssd" (Persistent Disk Solid State Drive),
// or "pd-standard" (Persistent Disk Hard Disk Drive).
// See [Disk types](https://cloud.google.com/compute/docs/disks#disk-types).
BootDiskType string `protobuf:"bytes,3,opt,name=boot_disk_type,json=bootDiskType,proto3" json:"boot_disk_type,omitempty"`
// Optional. Size in GB of the boot disk (default is 500GB).
BootDiskSizeGb int32 `protobuf:"varint,1,opt,name=boot_disk_size_gb,json=bootDiskSizeGb,proto3" json:"boot_disk_size_gb,omitempty"`
// Optional. Number of attached SSDs, from 0 to 8 (default is 0).
// If SSDs are not attached, the boot disk is used to store runtime logs and
// [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
// If one or more SSDs are attached, this runtime bulk
// data is spread across them, and the boot disk contains only basic
// config and installed binaries.
//
// Note: Local SSD options may vary by machine type and number of vCPUs
// selected.
NumLocalSsds int32 `protobuf:"varint,2,opt,name=num_local_ssds,json=numLocalSsds,proto3" json:"num_local_ssds,omitempty"`
// Optional. Interface type of local SSDs (default is "scsi").
// Valid values: "scsi" (Small Computer System Interface),
// "nvme" (Non-Volatile Memory Express).
// See [local SSD
// performance](https://cloud.google.com/compute/docs/disks/local-ssd#performance).
LocalSsdInterface string `protobuf:"bytes,4,opt,name=local_ssd_interface,json=localSsdInterface,proto3" json:"local_ssd_interface,omitempty"`
// Optional. Indicates how many IOPS to provision for the disk. This sets the
// number of I/O operations per second that the disk can handle. Note: This
// field is only supported if boot_disk_type is hyperdisk-balanced.
BootDiskProvisionedIops *int64 `protobuf:"varint,5,opt,name=boot_disk_provisioned_iops,json=bootDiskProvisionedIops,proto3,oneof" json:"boot_disk_provisioned_iops,omitempty"`
// Optional. Indicates how much throughput to provision for the disk. This
// sets the number of throughput mb per second that the disk can handle.
// Values must be greater than or equal to 1. Note: This field is only
// supported if boot_disk_type is hyperdisk-balanced.
BootDiskProvisionedThroughput *int64 `protobuf:"varint,6,opt,name=boot_disk_provisioned_throughput,json=bootDiskProvisionedThroughput,proto3,oneof" json:"boot_disk_provisioned_throughput,omitempty"`
// contains filtered or unexported fields
}
Specifies the config of disk options for a group of VM instances.
func (*DiskConfig) Descriptor
func (*DiskConfig) Descriptor() ([]byte, []int)
Deprecated: Use DiskConfig.ProtoReflect.Descriptor instead.
func (*DiskConfig) GetBootDiskProvisionedIops
func (x *DiskConfig) GetBootDiskProvisionedIops() int64
func (*DiskConfig) GetBootDiskProvisionedThroughput
func (x *DiskConfig) GetBootDiskProvisionedThroughput() int64
func (*DiskConfig) GetBootDiskSizeGb
func (x *DiskConfig) GetBootDiskSizeGb() int32
func (*DiskConfig) GetBootDiskType
func (x *DiskConfig) GetBootDiskType() string
func (*DiskConfig) GetLocalSsdInterface
func (x *DiskConfig) GetLocalSsdInterface() string
func (*DiskConfig) GetNumLocalSsds
func (x *DiskConfig) GetNumLocalSsds() int32
func (*DiskConfig) ProtoMessage
func (*DiskConfig) ProtoMessage()
func (*DiskConfig) ProtoReflect
func (x *DiskConfig) ProtoReflect() protoreflect.Message
func (*DiskConfig) Reset
func (x *DiskConfig) Reset()
func (*DiskConfig) String
func (x *DiskConfig) String() string
DriverSchedulingConfig
type DriverSchedulingConfig struct {
// Required. The amount of memory in MB the driver is requesting.
MemoryMb int32 `protobuf:"varint,1,opt,name=memory_mb,json=memoryMb,proto3" json:"memory_mb,omitempty"`
// Required. The number of vCPUs the driver is requesting.
Vcores int32 `protobuf:"varint,2,opt,name=vcores,proto3" json:"vcores,omitempty"`
// contains filtered or unexported fields
}
Driver scheduling configuration.
func (*DriverSchedulingConfig) Descriptor
func (*DriverSchedulingConfig) Descriptor() ([]byte, []int)
Deprecated: Use DriverSchedulingConfig.ProtoReflect.Descriptor instead.
func (*DriverSchedulingConfig) GetMemoryMb
func (x *DriverSchedulingConfig) GetMemoryMb() int32
func (*DriverSchedulingConfig) GetVcores
func (x *DriverSchedulingConfig) GetVcores() int32
func (*DriverSchedulingConfig) ProtoMessage
func (*DriverSchedulingConfig) ProtoMessage()
func (*DriverSchedulingConfig) ProtoReflect
func (x *DriverSchedulingConfig) ProtoReflect() protoreflect.Message
func (*DriverSchedulingConfig) Reset
func (x *DriverSchedulingConfig) Reset()
func (*DriverSchedulingConfig) String
func (x *DriverSchedulingConfig) String() string
EncryptionConfig
type EncryptionConfig struct {
// Optional. The Cloud KMS key resource name to use for persistent disk
// encryption for all instances in the cluster. See [Use CMEK with cluster
// data]
// (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data)
// for more information.
GcePdKmsKeyName string `protobuf:"bytes,1,opt,name=gce_pd_kms_key_name,json=gcePdKmsKeyName,proto3" json:"gce_pd_kms_key_name,omitempty"`
// Optional. The Cloud KMS key resource name to use for cluster persistent
// disk and job argument encryption. See [Use CMEK with cluster data]
// (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_cluster_data)
// for more information.
//
// When this key resource name is provided, job arguments of the following
// job types submitted to the cluster are encrypted using CMEK:
//
// * [FlinkJob
// args](https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob)
// * [HadoopJob
// args](https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob)
// * [SparkJob
// args](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob)
// * [SparkRJob
// args](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob)
// * [PySparkJob
// args](https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob)
// - [SparkSqlJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob)
// scriptVariables and queryList.queries
// - [HiveJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob)
// scriptVariables and queryList.queries
// - [PigJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob)
// scriptVariables and queryList.queries
// - [PrestoJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob)
// scriptVariables and queryList.queries
KmsKey string `protobuf:"bytes,2,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"`
// contains filtered or unexported fields
}
Encryption settings for the cluster.
func (*EncryptionConfig) Descriptor
func (*EncryptionConfig) Descriptor() ([]byte, []int)
Deprecated: Use EncryptionConfig.ProtoReflect.Descriptor instead.
func (*EncryptionConfig) GetGcePdKmsKeyName
func (x *EncryptionConfig) GetGcePdKmsKeyName() string
func (*EncryptionConfig) GetKmsKey
func (x *EncryptionConfig) GetKmsKey() string
func (*EncryptionConfig) ProtoMessage
func (*EncryptionConfig) ProtoMessage()
func (*EncryptionConfig) ProtoReflect
func (x *EncryptionConfig) ProtoReflect() protoreflect.Message
func (*EncryptionConfig) Reset
func (x *EncryptionConfig) Reset()
func (*EncryptionConfig) String
func (x *EncryptionConfig) String() string
EndpointConfig
type EndpointConfig struct {
// Output only. The map of port descriptions to URLs. Will only be populated
// if enable_http_port_access is true.
HttpPorts map[string]string `protobuf:"bytes,1,rep,name=http_ports,json=httpPorts,proto3" json:"http_ports,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. If true, enable http access to specific ports on the cluster
// from external sources. Defaults to false.
EnableHttpPortAccess bool `protobuf:"varint,2,opt,name=enable_http_port_access,json=enableHttpPortAccess,proto3" json:"enable_http_port_access,omitempty"`
// contains filtered or unexported fields
}
Endpoint config for this cluster
func (*EndpointConfig) Descriptor
func (*EndpointConfig) Descriptor() ([]byte, []int)
Deprecated: Use EndpointConfig.ProtoReflect.Descriptor instead.
func (*EndpointConfig) GetEnableHttpPortAccess
func (x *EndpointConfig) GetEnableHttpPortAccess() bool
func (*EndpointConfig) GetHttpPorts
func (x *EndpointConfig) GetHttpPorts() map[string]string
func (*EndpointConfig) ProtoMessage
func (*EndpointConfig) ProtoMessage()
func (*EndpointConfig) ProtoReflect
func (x *EndpointConfig) ProtoReflect() protoreflect.Message
func (*EndpointConfig) Reset
func (x *EndpointConfig) Reset()
func (*EndpointConfig) String
func (x *EndpointConfig) String() string
EnvironmentConfig
type EnvironmentConfig struct {
// Optional. Execution configuration for a workload.
ExecutionConfig *ExecutionConfig `protobuf:"bytes,1,opt,name=execution_config,json=executionConfig,proto3" json:"execution_config,omitempty"`
// Optional. Peripherals configuration that workload has access to.
PeripheralsConfig *PeripheralsConfig `protobuf:"bytes,2,opt,name=peripherals_config,json=peripheralsConfig,proto3" json:"peripherals_config,omitempty"`
// contains filtered or unexported fields
}
Environment configuration for a workload.
func (*EnvironmentConfig) Descriptor
func (*EnvironmentConfig) Descriptor() ([]byte, []int)
Deprecated: Use EnvironmentConfig.ProtoReflect.Descriptor instead.
func (*EnvironmentConfig) GetExecutionConfig
func (x *EnvironmentConfig) GetExecutionConfig() *ExecutionConfig
func (*EnvironmentConfig) GetPeripheralsConfig
func (x *EnvironmentConfig) GetPeripheralsConfig() *PeripheralsConfig
func (*EnvironmentConfig) ProtoMessage
func (*EnvironmentConfig) ProtoMessage()
func (*EnvironmentConfig) ProtoReflect
func (x *EnvironmentConfig) ProtoReflect() protoreflect.Message
func (*EnvironmentConfig) Reset
func (x *EnvironmentConfig) Reset()
func (*EnvironmentConfig) String
func (x *EnvironmentConfig) String() string
ExecutionConfig
type ExecutionConfig struct {
// Optional. Service account that is used to execute the workload.
ServiceAccount string `protobuf:"bytes,2,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"`
// Network configuration for workload execution.
//
// Types that are assignable to Network:
//
// *ExecutionConfig_NetworkUri
// *ExecutionConfig_SubnetworkUri
Network isExecutionConfig_Network `protobuf_oneof:"network"`
// Optional. Tags used for network traffic control.
NetworkTags []string `protobuf:"bytes,6,rep,name=network_tags,json=networkTags,proto3" json:"network_tags,omitempty"`
// Optional. The Cloud KMS key to use for encryption.
KmsKey string `protobuf:"bytes,7,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"`
// Optional. Applies to sessions only. The duration to keep the session alive
// while it's idling. Exceeding this threshold causes the session to
// terminate. This field cannot be set on a batch workload. Minimum value is
// 10 minutes; maximum value is 14 days (see JSON representation of
// [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
// Defaults to 1 hour if not set.
// If both `ttl` and `idle_ttl` are specified for an interactive session,
// the conditions are treated as `OR` conditions: the workload will be
// terminated when it has been idle for `idle_ttl` or when `ttl` has been
// exceeded, whichever occurs first.
IdleTtl *durationpb.Duration `protobuf:"bytes,8,opt,name=idle_ttl,json=idleTtl,proto3" json:"idle_ttl,omitempty"`
// Optional. The duration after which the workload will be terminated,
// specified as the JSON representation for
// [Duration](https://protobuf.dev/programming-guides/proto3/#json).
// When the workload exceeds this duration, it will be unconditionally
// terminated without waiting for ongoing work to finish. If `ttl` is not
// specified for a batch workload, the workload will be allowed to run until
// it exits naturally (or run forever without exiting). If `ttl` is not
// specified for an interactive session, it defaults to 24 hours. If `ttl` is
// not specified for a batch that uses 2.1+ runtime version, it defaults to 4
// hours. Minimum value is 10 minutes; maximum value is 14 days. If both `ttl`
// and `idle_ttl` are specified (for an interactive session), the conditions
// are treated as `OR` conditions: the workload will be terminated when it has
// been idle for `idle_ttl` or when `ttl` has been exceeded, whichever occurs
// first.
Ttl *durationpb.Duration `protobuf:"bytes,9,opt,name=ttl,proto3" json:"ttl,omitempty"`
// Optional. A Cloud Storage bucket used to stage workload dependencies,
// config files, and store workload output and other ephemeral data, such as
// Spark history files. If you do not specify a staging bucket, Cloud Dataproc
// will determine a Cloud Storage location according to the region where your
// workload is running, and then create and manage project-level, per-location
// staging and temporary buckets.
// **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
// a Cloud Storage bucket.**
StagingBucket string `protobuf:"bytes,10,opt,name=staging_bucket,json=stagingBucket,proto3" json:"staging_bucket,omitempty"`
// Optional. Authentication configuration used to set the default identity for
// the workload execution. The config specifies the type of identity
// (service account or user) that will be used by workloads to access
// resources on the project(s).
AuthenticationConfig *AuthenticationConfig `protobuf:"bytes,11,opt,name=authentication_config,json=authenticationConfig,proto3" json:"authentication_config,omitempty"`
// contains filtered or unexported fields
}
Execution configuration for a workload.
func (*ExecutionConfig) Descriptor
func (*ExecutionConfig) Descriptor() ([]byte, []int)
Deprecated: Use ExecutionConfig.ProtoReflect.Descriptor instead.
func (*ExecutionConfig) GetAuthenticationConfig
func (x *ExecutionConfig) GetAuthenticationConfig() *AuthenticationConfig
func (*ExecutionConfig) GetIdleTtl
func (x *ExecutionConfig) GetIdleTtl() *durationpb.Duration
func (*ExecutionConfig) GetKmsKey
func (x *ExecutionConfig) GetKmsKey() string
func (*ExecutionConfig) GetNetwork
func (m *ExecutionConfig) GetNetwork() isExecutionConfig_Network
func (*ExecutionConfig) GetNetworkTags
func (x *ExecutionConfig) GetNetworkTags() []string
func (*ExecutionConfig) GetNetworkUri
func (x *ExecutionConfig) GetNetworkUri() string
func (*ExecutionConfig) GetServiceAccount
func (x *ExecutionConfig) GetServiceAccount() string
func (*ExecutionConfig) GetStagingBucket
func (x *ExecutionConfig) GetStagingBucket() string
func (*ExecutionConfig) GetSubnetworkUri
func (x *ExecutionConfig) GetSubnetworkUri() string
func (*ExecutionConfig) GetTtl
func (x *ExecutionConfig) GetTtl() *durationpb.Duration
func (*ExecutionConfig) ProtoMessage
func (*ExecutionConfig) ProtoMessage()
func (*ExecutionConfig) ProtoReflect
func (x *ExecutionConfig) ProtoReflect() protoreflect.Message
func (*ExecutionConfig) Reset
func (x *ExecutionConfig) Reset()
func (*ExecutionConfig) String
func (x *ExecutionConfig) String() string
ExecutionConfig_NetworkUri
type ExecutionConfig_NetworkUri struct {
// Optional. Network URI to connect workload to.
NetworkUri string `protobuf:"bytes,4,opt,name=network_uri,json=networkUri,proto3,oneof"`
}
ExecutionConfig_SubnetworkUri
type ExecutionConfig_SubnetworkUri struct {
// Optional. Subnetwork URI to connect workload to.
SubnetworkUri string `protobuf:"bytes,5,opt,name=subnetwork_uri,json=subnetworkUri,proto3,oneof"`
}
FailureAction
type FailureAction int32
Actions in response to failure of a resource associated with a cluster.
FailureAction_FAILURE_ACTION_UNSPECIFIED, FailureAction_NO_ACTION, FailureAction_DELETE
const (
// When FailureAction is unspecified, failure action defaults to NO_ACTION.
FailureAction_FAILURE_ACTION_UNSPECIFIED FailureAction = 0
// Take no action on failure to create a cluster resource. NO_ACTION is the
// default.
FailureAction_NO_ACTION FailureAction = 1
// Delete the failed cluster resource.
FailureAction_DELETE FailureAction = 2
)
func (FailureAction) Descriptor
func (FailureAction) Descriptor() protoreflect.EnumDescriptor
func (FailureAction) Enum
func (x FailureAction) Enum() *FailureAction
func (FailureAction) EnumDescriptor
func (FailureAction) EnumDescriptor() ([]byte, []int)
Deprecated: Use FailureAction.Descriptor instead.
func (FailureAction) Number
func (x FailureAction) Number() protoreflect.EnumNumber
func (FailureAction) String
func (x FailureAction) String() string
func (FailureAction) Type
func (FailureAction) Type() protoreflect.EnumType
FlinkJob
type FlinkJob struct {
// Required. The specification of the main method to call to drive the job.
// Specify either the jar file that contains the main class or the main class
// name. To pass both a main jar and a main class in the jar, add the jar to
// [jarFileUris][google.cloud.dataproc.v1.FlinkJob.jar_file_uris], and then
// specify the main class name in
// [mainClass][google.cloud.dataproc.v1.FlinkJob.main_class].
//
// Types that are assignable to Driver:
//
// *FlinkJob_MainJarFileUri
// *FlinkJob_MainClass
Driver isFlinkJob_Driver `protobuf_oneof:"driver"`
// Optional. The arguments to pass to the driver. Do not include arguments,
// such as `--conf`, that can be set as job properties, since a collision
// might occur that causes an incorrect job submission.
Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
// Flink driver and tasks.
JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
// Optional. HCFS URI of the savepoint, which contains the last saved progress
// for starting the current job.
SavepointUri string `protobuf:"bytes,9,opt,name=savepoint_uri,json=savepointUri,proto3" json:"savepoint_uri,omitempty"`
// Optional. A mapping of property names to values, used to configure Flink.
// Properties that conflict with values set by the Dataproc API might be
// overwritten. Can include properties set in
// `/etc/flink/conf/flink-defaults.conf` and classes in user code.
Properties map[string]string `protobuf:"bytes,7,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. The runtime log config for job execution.
LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
// contains filtered or unexported fields
}
A Dataproc job for running Apache Flink applications on YARN.
func (*FlinkJob) Descriptor
Deprecated: Use FlinkJob.ProtoReflect.Descriptor instead.
func (*FlinkJob) GetArgs
func (*FlinkJob) GetDriver
func (m *FlinkJob) GetDriver() isFlinkJob_Driver
func (*FlinkJob) GetJarFileUris
func (*FlinkJob) GetLoggingConfig
func (x *FlinkJob) GetLoggingConfig() *LoggingConfig
func (*FlinkJob) GetMainClass
func (*FlinkJob) GetMainJarFileUri
func (*FlinkJob) GetProperties
func (*FlinkJob) GetSavepointUri
func (*FlinkJob) ProtoMessage
func (*FlinkJob) ProtoMessage()
func (*FlinkJob) ProtoReflect
func (x *FlinkJob) ProtoReflect() protoreflect.Message
func (*FlinkJob) Reset
func (x *FlinkJob) Reset()
func (*FlinkJob) String
FlinkJob_MainClass
type FlinkJob_MainClass struct {
// The name of the driver's main class. The jar file that contains the class
// must be in the default CLASSPATH or specified in
// [jarFileUris][google.cloud.dataproc.v1.FlinkJob.jar_file_uris].
MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"`
}
FlinkJob_MainJarFileUri
type FlinkJob_MainJarFileUri struct {
// The HCFS URI of the jar file that contains the main class.
MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"`
}
GceClusterConfig
type GceClusterConfig struct {
// Optional. The Compute Engine zone where the Dataproc cluster will be
// located. If omitted, the service will pick a zone in the cluster's Compute
// Engine region. On a get request, zone will always be present.
//
// A full URL, partial URI, or short name are valid. Examples:
//
// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
// * `projects/[project_id]/zones/[zone]`
// * `[zone]`
ZoneUri string `protobuf:"bytes,1,opt,name=zone_uri,json=zoneUri,proto3" json:"zone_uri,omitempty"`
// Optional. The Compute Engine network to be used for machine
// communications. Cannot be specified with subnetwork_uri. If neither
// `network_uri` nor `subnetwork_uri` is specified, the "default" network of
// the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
// [Using Subnetworks](https://cloud.google.com/compute/docs/subnetworks) for
// more information).
//
// A full URL, partial URI, or short name are valid. Examples:
//
// * `https://www.googleapis.com/compute/v1/projects/[project_id]/global/networks/default`
// * `projects/[project_id]/global/networks/default`
// * `default`
NetworkUri string `protobuf:"bytes,2,opt,name=network_uri,json=networkUri,proto3" json:"network_uri,omitempty"`
// Optional. The Compute Engine subnetwork to be used for machine
// communications. Cannot be specified with network_uri.
//
// A full URL, partial URI, or short name are valid. Examples:
//
// * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/[region]/subnetworks/sub0`
// * `projects/[project_id]/regions/[region]/subnetworks/sub0`
// * `sub0`
SubnetworkUri string `protobuf:"bytes,6,opt,name=subnetwork_uri,json=subnetworkUri,proto3" json:"subnetwork_uri,omitempty"`
// Optional. This setting applies to subnetwork-enabled networks. It is set to
// `true` by default in clusters created with image versions 2.2.x.
//
// When set to `true`:
//
// * All cluster VMs have internal IP addresses.
// * [Google Private Access]
// (https://cloud.google.com/vpc/docs/private-google-access)
// must be enabled to access Dataproc and other Google Cloud APIs.
// * Off-cluster dependencies must be configured to be accessible
// without external IP addresses.
//
// When set to `false`:
//
// * Cluster VMs are not restricted to internal IP addresses.
// * Ephemeral external IP addresses are assigned to each cluster VM.
InternalIpOnly *bool `protobuf:"varint,7,opt,name=internal_ip_only,json=internalIpOnly,proto3,oneof" json:"internal_ip_only,omitempty"`
// Optional. The type of IPv6 access for a cluster.
PrivateIpv6GoogleAccess GceClusterConfig_PrivateIpv6GoogleAccess `protobuf:"varint,12,opt,name=private_ipv6_google_access,json=privateIpv6GoogleAccess,proto3,enum=google.cloud.dataproc.v1.GceClusterConfig_PrivateIpv6GoogleAccess" json:"private_ipv6_google_access,omitempty"`
// Optional. The [Dataproc service
// account](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/service-accounts#service_accounts_in_dataproc)
// (also see [VM Data Plane
// identity](https://cloud.google.com/dataproc/docs/concepts/iam/dataproc-principals#vm_service_account_data_plane_identity))
// used by Dataproc cluster VM instances to access Google Cloud Platform
// services.
//
// If not specified, the
// [Compute Engine default service
// account](https://cloud.google.com/compute/docs/access/service-accounts#default_service_account)
// is used.
ServiceAccount string `protobuf:"bytes,8,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"`
// Optional. The URIs of service account scopes to be included in
// Compute Engine instances. The following base set of scopes is always
// included:
//
// * https://www.googleapis.com/auth/cloud.useraccounts.readonly
// * https://www.googleapis.com/auth/devstorage.read_write
// * https://www.googleapis.com/auth/logging.write
//
// If no scopes are specified, the following defaults are also provided:
//
// * https://www.googleapis.com/auth/bigquery
// * https://www.googleapis.com/auth/bigtable.admin.table
// * https://www.googleapis.com/auth/bigtable.data
// * https://www.googleapis.com/auth/devstorage.full_control
ServiceAccountScopes []string `protobuf:"bytes,3,rep,name=service_account_scopes,json=serviceAccountScopes,proto3" json:"service_account_scopes,omitempty"`
// The Compute Engine network tags to add to all instances (see [Tagging
// instances](https://cloud.google.com/vpc/docs/add-remove-network-tags)).
Tags []string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty"`
// Optional. The Compute Engine metadata entries to add to all instances (see
// [Project and instance
// metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
Metadata map[string]string `protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. Reservation Affinity for consuming Zonal reservation.
ReservationAffinity *ReservationAffinity `protobuf:"bytes,11,opt,name=reservation_affinity,json=reservationAffinity,proto3" json:"reservation_affinity,omitempty"`
// Optional. Node Group Affinity for sole-tenant clusters.
NodeGroupAffinity *NodeGroupAffinity `protobuf:"bytes,13,opt,name=node_group_affinity,json=nodeGroupAffinity,proto3" json:"node_group_affinity,omitempty"`
// Optional. Shielded Instance Config for clusters using [Compute Engine
// Shielded
// VMs](https://cloud.google.com/security/shielded-cloud/shielded-vm).
ShieldedInstanceConfig *ShieldedInstanceConfig `protobuf:"bytes,14,opt,name=shielded_instance_config,json=shieldedInstanceConfig,proto3" json:"shielded_instance_config,omitempty"`
// Optional. Confidential Instance Config for clusters using [Confidential
// VMs](https://cloud.google.com/compute/confidential-vm/docs).
ConfidentialInstanceConfig *ConfidentialInstanceConfig `protobuf:"bytes,15,opt,name=confidential_instance_config,json=confidentialInstanceConfig,proto3" json:"confidential_instance_config,omitempty"`
// contains filtered or unexported fields
}
Common config settings for resources of Compute Engine cluster instances, applicable to all instances in the cluster.
func (*GceClusterConfig) Descriptor
func (*GceClusterConfig) Descriptor() ([]byte, []int)
Deprecated: Use GceClusterConfig.ProtoReflect.Descriptor instead.
func (*GceClusterConfig) GetConfidentialInstanceConfig
func (x *GceClusterConfig) GetConfidentialInstanceConfig() *ConfidentialInstanceConfig
func (*GceClusterConfig) GetInternalIpOnly
func (x *GceClusterConfig) GetInternalIpOnly() bool
func (*GceClusterConfig) GetMetadata
func (x *GceClusterConfig) GetMetadata() map[string]string
func (*GceClusterConfig) GetNetworkUri
func (x *GceClusterConfig) GetNetworkUri() string
func (*GceClusterConfig) GetNodeGroupAffinity
func (x *GceClusterConfig) GetNodeGroupAffinity() *NodeGroupAffinity
func (*GceClusterConfig) GetPrivateIpv6GoogleAccess
func (x *GceClusterConfig) GetPrivateIpv6GoogleAccess() GceClusterConfig_PrivateIpv6GoogleAccess
func (*GceClusterConfig) GetReservationAffinity
func (x *GceClusterConfig) GetReservationAffinity() *ReservationAffinity
func (*GceClusterConfig) GetServiceAccount
func (x *GceClusterConfig) GetServiceAccount() string
func (*GceClusterConfig) GetServiceAccountScopes
func (x *GceClusterConfig) GetServiceAccountScopes() []string
func (*GceClusterConfig) GetShieldedInstanceConfig
func (x *GceClusterConfig) GetShieldedInstanceConfig() *ShieldedInstanceConfig
func (*GceClusterConfig) GetSubnetworkUri
func (x *GceClusterConfig) GetSubnetworkUri() string
func (*GceClusterConfig) GetTags
func (x *GceClusterConfig) GetTags() []string
func (*GceClusterConfig) GetZoneUri
func (x *GceClusterConfig) GetZoneUri() string
func (*GceClusterConfig) ProtoMessage
func (*GceClusterConfig) ProtoMessage()
func (*GceClusterConfig) ProtoReflect
func (x *GceClusterConfig) ProtoReflect() protoreflect.Message
func (*GceClusterConfig) Reset
func (x *GceClusterConfig) Reset()
func (*GceClusterConfig) String
func (x *GceClusterConfig) String() string
GceClusterConfig_PrivateIpv6GoogleAccess
type GceClusterConfig_PrivateIpv6GoogleAccess int32
PrivateIpv6GoogleAccess controls whether and how Dataproc cluster nodes
can communicate with Google Services through gRPC over IPv6.
These values are directly mapped to the corresponding values in the
Compute Engine Instance fields.
GceClusterConfig_PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED, GceClusterConfig_INHERIT_FROM_SUBNETWORK, GceClusterConfig_OUTBOUND, GceClusterConfig_BIDIRECTIONAL
const (
// If unspecified, Compute Engine default behavior will apply, which
// is the same as
// [INHERIT_FROM_SUBNETWORK][google.cloud.dataproc.v1.GceClusterConfig.PrivateIpv6GoogleAccess.INHERIT_FROM_SUBNETWORK].
GceClusterConfig_PRIVATE_IPV6_GOOGLE_ACCESS_UNSPECIFIED GceClusterConfig_PrivateIpv6GoogleAccess = 0
// Private access to and from Google Services configuration
// inherited from the subnetwork configuration. This is the
// default Compute Engine behavior.
GceClusterConfig_INHERIT_FROM_SUBNETWORK GceClusterConfig_PrivateIpv6GoogleAccess = 1
// Enables outbound private IPv6 access to Google Services from the Dataproc
// cluster.
GceClusterConfig_OUTBOUND GceClusterConfig_PrivateIpv6GoogleAccess = 2
// Enables bidirectional private IPv6 access between Google Services and the
// Dataproc cluster.
GceClusterConfig_BIDIRECTIONAL GceClusterConfig_PrivateIpv6GoogleAccess = 3
)
func (GceClusterConfig_PrivateIpv6GoogleAccess) Descriptor
func (GceClusterConfig_PrivateIpv6GoogleAccess) Descriptor() protoreflect.EnumDescriptor
func (GceClusterConfig_PrivateIpv6GoogleAccess) Enum
func (GceClusterConfig_PrivateIpv6GoogleAccess) EnumDescriptor
func (GceClusterConfig_PrivateIpv6GoogleAccess) EnumDescriptor() ([]byte, []int)
Deprecated: Use GceClusterConfig_PrivateIpv6GoogleAccess.Descriptor instead.
func (GceClusterConfig_PrivateIpv6GoogleAccess) Number
func (x GceClusterConfig_PrivateIpv6GoogleAccess) Number() protoreflect.EnumNumber
func (GceClusterConfig_PrivateIpv6GoogleAccess) String
func (x GceClusterConfig_PrivateIpv6GoogleAccess) String() string
func (GceClusterConfig_PrivateIpv6GoogleAccess) Type
func (GceClusterConfig_PrivateIpv6GoogleAccess) Type() protoreflect.EnumType
GetAutoscalingPolicyRequest
type GetAutoscalingPolicyRequest struct {
// Required. The "resource name" of the autoscaling policy, as described
// in https://cloud.google.com/apis/design/resource_names.
//
// - For `projects.regions.autoscalingPolicies.get`, the resource name
// of the policy has the following format:
// `projects/{project_id}/regions/{region}/autoscalingPolicies/{policy_id}`
//
// - For `projects.locations.autoscalingPolicies.get`, the resource name
// of the policy has the following format:
// `projects/{project_id}/locations/{location}/autoscalingPolicies/{policy_id}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
A request to fetch an autoscaling policy.
func (*GetAutoscalingPolicyRequest) Descriptor
func (*GetAutoscalingPolicyRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetAutoscalingPolicyRequest.ProtoReflect.Descriptor instead.
func (*GetAutoscalingPolicyRequest) GetName
func (x *GetAutoscalingPolicyRequest) GetName() string
func (*GetAutoscalingPolicyRequest) ProtoMessage
func (*GetAutoscalingPolicyRequest) ProtoMessage()
func (*GetAutoscalingPolicyRequest) ProtoReflect
func (x *GetAutoscalingPolicyRequest) ProtoReflect() protoreflect.Message
func (*GetAutoscalingPolicyRequest) Reset
func (x *GetAutoscalingPolicyRequest) Reset()
func (*GetAutoscalingPolicyRequest) String
func (x *GetAutoscalingPolicyRequest) String() string
GetBatchRequest
type GetBatchRequest struct {
// Required. The fully qualified name of the batch to retrieve
// in the format
// "projects/PROJECT_ID/locations/DATAPROC_REGION/batches/BATCH_ID"
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
A request to get the resource representation for a batch workload.
func (*GetBatchRequest) Descriptor
func (*GetBatchRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetBatchRequest.ProtoReflect.Descriptor instead.
func (*GetBatchRequest) GetName
func (x *GetBatchRequest) GetName() string
func (*GetBatchRequest) ProtoMessage
func (*GetBatchRequest) ProtoMessage()
func (*GetBatchRequest) ProtoReflect
func (x *GetBatchRequest) ProtoReflect() protoreflect.Message
func (*GetBatchRequest) Reset
func (x *GetBatchRequest) Reset()
func (*GetBatchRequest) String
func (x *GetBatchRequest) String() string
GetClusterRequest
type GetClusterRequest struct {
// Required. The ID of the Google Cloud Platform project that the cluster
// belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
// Required. The cluster name.
ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
// contains filtered or unexported fields
}
Request to get the resource representation for a cluster in a project.
func (*GetClusterRequest) Descriptor
func (*GetClusterRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetClusterRequest.ProtoReflect.Descriptor instead.
func (*GetClusterRequest) GetClusterName
func (x *GetClusterRequest) GetClusterName() string
func (*GetClusterRequest) GetProjectId
func (x *GetClusterRequest) GetProjectId() string
func (*GetClusterRequest) GetRegion
func (x *GetClusterRequest) GetRegion() string
func (*GetClusterRequest) ProtoMessage
func (*GetClusterRequest) ProtoMessage()
func (*GetClusterRequest) ProtoReflect
func (x *GetClusterRequest) ProtoReflect() protoreflect.Message
func (*GetClusterRequest) Reset
func (x *GetClusterRequest) Reset()
func (*GetClusterRequest) String
func (x *GetClusterRequest) String() string
GetJobRequest
type GetJobRequest struct {
// Required. The ID of the Google Cloud Platform project that the job
// belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
// Required. The job ID.
JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
// contains filtered or unexported fields
}
A request to get the resource representation for a job in a project.
func (*GetJobRequest) Descriptor
func (*GetJobRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetJobRequest.ProtoReflect.Descriptor instead.
func (*GetJobRequest) GetJobId
func (x *GetJobRequest) GetJobId() string
func (*GetJobRequest) GetProjectId
func (x *GetJobRequest) GetProjectId() string
func (*GetJobRequest) GetRegion
func (x *GetJobRequest) GetRegion() string
func (*GetJobRequest) ProtoMessage
func (*GetJobRequest) ProtoMessage()
func (*GetJobRequest) ProtoReflect
func (x *GetJobRequest) ProtoReflect() protoreflect.Message
func (*GetJobRequest) Reset
func (x *GetJobRequest) Reset()
func (*GetJobRequest) String
func (x *GetJobRequest) String() string
GetNodeGroupRequest
type GetNodeGroupRequest struct {
// Required. The name of the node group to retrieve.
// Format:
// `projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
A request to get a node group.
func (*GetNodeGroupRequest) Descriptor
func (*GetNodeGroupRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetNodeGroupRequest.ProtoReflect.Descriptor instead.
func (*GetNodeGroupRequest) GetName
func (x *GetNodeGroupRequest) GetName() string
func (*GetNodeGroupRequest) ProtoMessage
func (*GetNodeGroupRequest) ProtoMessage()
func (*GetNodeGroupRequest) ProtoReflect
func (x *GetNodeGroupRequest) ProtoReflect() protoreflect.Message
func (*GetNodeGroupRequest) Reset
func (x *GetNodeGroupRequest) Reset()
func (*GetNodeGroupRequest) String
func (x *GetNodeGroupRequest) String() string
GetSessionRequest
type GetSessionRequest struct {
// Required. The name of the session to retrieve.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
A request to get the resource representation for a session.
func (*GetSessionRequest) Descriptor
func (*GetSessionRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetSessionRequest.ProtoReflect.Descriptor instead.
func (*GetSessionRequest) GetName
func (x *GetSessionRequest) GetName() string
func (*GetSessionRequest) ProtoMessage
func (*GetSessionRequest) ProtoMessage()
func (*GetSessionRequest) ProtoReflect
func (x *GetSessionRequest) ProtoReflect() protoreflect.Message
func (*GetSessionRequest) Reset
func (x *GetSessionRequest) Reset()
func (*GetSessionRequest) String
func (x *GetSessionRequest) String() string
GetSessionTemplateRequest
type GetSessionTemplateRequest struct {
// Required. The name of the session template to retrieve.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// contains filtered or unexported fields
}
A request to get the resource representation for a session template.
func (*GetSessionTemplateRequest) Descriptor
func (*GetSessionTemplateRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetSessionTemplateRequest.ProtoReflect.Descriptor instead.
func (*GetSessionTemplateRequest) GetName
func (x *GetSessionTemplateRequest) GetName() string
func (*GetSessionTemplateRequest) ProtoMessage
func (*GetSessionTemplateRequest) ProtoMessage()
func (*GetSessionTemplateRequest) ProtoReflect
func (x *GetSessionTemplateRequest) ProtoReflect() protoreflect.Message
func (*GetSessionTemplateRequest) Reset
func (x *GetSessionTemplateRequest) Reset()
func (*GetSessionTemplateRequest) String
func (x *GetSessionTemplateRequest) String() string
GetWorkflowTemplateRequest
type GetWorkflowTemplateRequest struct {
// Required. The resource name of the workflow template, as described
// in https://cloud.google.com/apis/design/resource_names.
//
// - For `projects.regions.workflowTemplates.get`, the resource name of the
// template has the following format:
// `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
//
// - For `projects.locations.workflowTemplates.get`, the resource name of the
// template has the following format:
// `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Optional. The version of workflow template to retrieve. Only previously
// instantiated versions can be retrieved.
//
// If unspecified, retrieves the current version.
Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
// contains filtered or unexported fields
}
A request to fetch a workflow template.
func (*GetWorkflowTemplateRequest) Descriptor
func (*GetWorkflowTemplateRequest) Descriptor() ([]byte, []int)
Deprecated: Use GetWorkflowTemplateRequest.ProtoReflect.Descriptor instead.
func (*GetWorkflowTemplateRequest) GetName
func (x *GetWorkflowTemplateRequest) GetName() string
func (*GetWorkflowTemplateRequest) GetVersion
func (x *GetWorkflowTemplateRequest) GetVersion() int32
func (*GetWorkflowTemplateRequest) ProtoMessage
func (*GetWorkflowTemplateRequest) ProtoMessage()
func (*GetWorkflowTemplateRequest) ProtoReflect
func (x *GetWorkflowTemplateRequest) ProtoReflect() protoreflect.Message
func (*GetWorkflowTemplateRequest) Reset
func (x *GetWorkflowTemplateRequest) Reset()
func (*GetWorkflowTemplateRequest) String
func (x *GetWorkflowTemplateRequest) String() string
GkeClusterConfig
type GkeClusterConfig struct {
// Optional. A target GKE cluster to deploy to. It must be in the same project
// and region as the Dataproc cluster (the GKE cluster can be zonal or
// regional). Format:
// 'projects/{project}/locations/{location}/clusters/{cluster_id}'
GkeClusterTarget string `protobuf:"bytes,2,opt,name=gke_cluster_target,json=gkeClusterTarget,proto3" json:"gke_cluster_target,omitempty"`
// Optional. GKE node pools where workloads will be scheduled. At least one
// node pool must be assigned the `DEFAULT`
// [GkeNodePoolTarget.Role][google.cloud.dataproc.v1.GkeNodePoolTarget.Role].
// If a `GkeNodePoolTarget` is not specified, Dataproc constructs a `DEFAULT`
// `GkeNodePoolTarget`. Each role can be given to only one
// `GkeNodePoolTarget`. All node pools must have the same location settings.
NodePoolTarget []*GkeNodePoolTarget `protobuf:"bytes,3,rep,name=node_pool_target,json=nodePoolTarget,proto3" json:"node_pool_target,omitempty"`
// contains filtered or unexported fields
}
The cluster's GKE config.
func (*GkeClusterConfig) Descriptor
func (*GkeClusterConfig) Descriptor() ([]byte, []int)
Deprecated: Use GkeClusterConfig.ProtoReflect.Descriptor instead.
func (*GkeClusterConfig) GetGkeClusterTarget
func (x *GkeClusterConfig) GetGkeClusterTarget() string
func (*GkeClusterConfig) GetNodePoolTarget
func (x *GkeClusterConfig) GetNodePoolTarget() []*GkeNodePoolTarget
func (*GkeClusterConfig) ProtoMessage
func (*GkeClusterConfig) ProtoMessage()
func (*GkeClusterConfig) ProtoReflect
func (x *GkeClusterConfig) ProtoReflect() protoreflect.Message
func (*GkeClusterConfig) Reset
func (x *GkeClusterConfig) Reset()
func (*GkeClusterConfig) String
func (x *GkeClusterConfig) String() string
GkeNodePoolConfig
type GkeNodePoolConfig struct {
// Optional. The node pool configuration.
Config *GkeNodePoolConfig_GkeNodeConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
// Optional. The list of Compute Engine
// [zones](https://cloud.google.com/compute/docs/zones#available) where
// node pool nodes associated with a Dataproc on GKE virtual cluster
// will be located.
//
// **Note:** All node pools associated with a virtual cluster
// must be located in the same region as the virtual cluster, and they must
// be located in the same zone within that region.
//
// If a location is not specified during node pool creation, Dataproc on GKE
// will choose the zone.
Locations []string `protobuf:"bytes,13,rep,name=locations,proto3" json:"locations,omitempty"`
// Optional. The autoscaler configuration for this node pool. The autoscaler
// is enabled only when a valid configuration is present.
Autoscaling *GkeNodePoolConfig_GkeNodePoolAutoscalingConfig `protobuf:"bytes,4,opt,name=autoscaling,proto3" json:"autoscaling,omitempty"`
// contains filtered or unexported fields
}
The configuration of a GKE node pool used by a Dataproc-on-GKE cluster.
func (*GkeNodePoolConfig) Descriptor
func (*GkeNodePoolConfig) Descriptor() ([]byte, []int)
Deprecated: Use GkeNodePoolConfig.ProtoReflect.Descriptor instead.
func (*GkeNodePoolConfig) GetAutoscaling
func (x *GkeNodePoolConfig) GetAutoscaling() *GkeNodePoolConfig_GkeNodePoolAutoscalingConfig
func (*GkeNodePoolConfig) GetConfig
func (x *GkeNodePoolConfig) GetConfig() *GkeNodePoolConfig_GkeNodeConfig
func (*GkeNodePoolConfig) GetLocations
func (x *GkeNodePoolConfig) GetLocations() []string
func (*GkeNodePoolConfig) ProtoMessage
func (*GkeNodePoolConfig) ProtoMessage()
func (*GkeNodePoolConfig) ProtoReflect
func (x *GkeNodePoolConfig) ProtoReflect() protoreflect.Message
func (*GkeNodePoolConfig) Reset
func (x *GkeNodePoolConfig) Reset()
func (*GkeNodePoolConfig) String
func (x *GkeNodePoolConfig) String() string
GkeNodePoolConfig_GkeNodeConfig
type GkeNodePoolConfig_GkeNodeConfig struct {
// Optional. The name of a Compute Engine [machine
// type](https://cloud.google.com/compute/docs/machine-types).
MachineType string `protobuf:"bytes,1,opt,name=machine_type,json=machineType,proto3" json:"machine_type,omitempty"`
// Optional. The number of local SSD disks to attach to the node, which is
// limited by the maximum number of disks allowable per zone (see [Adding
// Local SSDs](https://cloud.google.com/compute/docs/disks/local-ssd)).
LocalSsdCount int32 `protobuf:"varint,7,opt,name=local_ssd_count,json=localSsdCount,proto3" json:"local_ssd_count,omitempty"`
// Optional. Whether the nodes are created as legacy [preemptible VM
// instances] (https://cloud.google.com/compute/docs/instances/preemptible).
// Also see
// [Spot][google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.spot]
// VMs, preemptible VM instances without a maximum lifetime. Legacy and Spot
// preemptible nodes cannot be used in a node pool with the `CONTROLLER`
// [role]
// (/dataproc/docs/reference/rest/v1/projects.regions.clusters#role)
// or in the DEFAULT node pool if the CONTROLLER role is not assigned (the
// DEFAULT node pool will assume the CONTROLLER role).
Preemptible bool `protobuf:"varint,10,opt,name=preemptible,proto3" json:"preemptible,omitempty"`
// Optional. A list of [hardware
// accelerators](https://cloud.google.com/compute/docs/gpus) to attach to
// each node.
Accelerators []*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig `protobuf:"bytes,11,rep,name=accelerators,proto3" json:"accelerators,omitempty"`
// Optional. [Minimum CPU
// platform](https://cloud.google.com/compute/docs/instances/specify-min-cpu-platform)
// to be used by this instance. The instance may be scheduled on the
// specified or a newer CPU platform. Specify the friendly names of CPU
// platforms, such as "Intel Haswell" or "Intel Sandy Bridge".
MinCpuPlatform string `protobuf:"bytes,13,opt,name=min_cpu_platform,json=minCpuPlatform,proto3" json:"min_cpu_platform,omitempty"`
// Optional. The [Customer Managed Encryption Key (CMEK)]
// (https://cloud.google.com/kubernetes-engine/docs/how-to/using-cmek)
// used to encrypt the boot disk attached to each node in the node pool.
// Specify the key using the following format:
// projects/KEY_PROJECT_ID/locations/LOCATION/keyRings/RING_NAME/cryptoKeys/KEY_NAME.
BootDiskKmsKey string `protobuf:"bytes,23,opt,name=boot_disk_kms_key,json=bootDiskKmsKey,proto3" json:"boot_disk_kms_key,omitempty"`
// Optional. Whether the nodes are created as [Spot VM instances]
// (https://cloud.google.com/compute/docs/instances/spot).
// Spot VMs are the latest update to legacy
// [preemptible
// VMs][google.cloud.dataproc.v1.GkeNodePoolConfig.GkeNodeConfig.preemptible].
// Spot VMs do not have a maximum lifetime. Legacy and Spot preemptible
// nodes cannot be used in a node pool with the `CONTROLLER`
// [role](/dataproc/docs/reference/rest/v1/projects.regions.clusters#role)
// or in the DEFAULT node pool if the CONTROLLER role is not assigned (the
// DEFAULT node pool will assume the CONTROLLER role).
Spot bool `protobuf:"varint,32,opt,name=spot,proto3" json:"spot,omitempty"`
// contains filtered or unexported fields
}
Parameters that describe cluster nodes.
func (*GkeNodePoolConfig_GkeNodeConfig) Descriptor
func (*GkeNodePoolConfig_GkeNodeConfig) Descriptor() ([]byte, []int)
Deprecated: Use GkeNodePoolConfig_GkeNodeConfig.ProtoReflect.Descriptor instead.
func (*GkeNodePoolConfig_GkeNodeConfig) GetAccelerators
func (x *GkeNodePoolConfig_GkeNodeConfig) GetAccelerators() []*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig
func (*GkeNodePoolConfig_GkeNodeConfig) GetBootDiskKmsKey
func (x *GkeNodePoolConfig_GkeNodeConfig) GetBootDiskKmsKey() string
func (*GkeNodePoolConfig_GkeNodeConfig) GetLocalSsdCount
func (x *GkeNodePoolConfig_GkeNodeConfig) GetLocalSsdCount() int32
func (*GkeNodePoolConfig_GkeNodeConfig) GetMachineType
func (x *GkeNodePoolConfig_GkeNodeConfig) GetMachineType() string
func (*GkeNodePoolConfig_GkeNodeConfig) GetMinCpuPlatform
func (x *GkeNodePoolConfig_GkeNodeConfig) GetMinCpuPlatform() string
func (*GkeNodePoolConfig_GkeNodeConfig) GetPreemptible
func (x *GkeNodePoolConfig_GkeNodeConfig) GetPreemptible() bool
func (*GkeNodePoolConfig_GkeNodeConfig) GetSpot
func (x *GkeNodePoolConfig_GkeNodeConfig) GetSpot() bool
func (*GkeNodePoolConfig_GkeNodeConfig) ProtoMessage
func (*GkeNodePoolConfig_GkeNodeConfig) ProtoMessage()
func (*GkeNodePoolConfig_GkeNodeConfig) ProtoReflect
func (x *GkeNodePoolConfig_GkeNodeConfig) ProtoReflect() protoreflect.Message
func (*GkeNodePoolConfig_GkeNodeConfig) Reset
func (x *GkeNodePoolConfig_GkeNodeConfig) Reset()
func (*GkeNodePoolConfig_GkeNodeConfig) String
func (x *GkeNodePoolConfig_GkeNodeConfig) String() string
GkeNodePoolConfig_GkeNodePoolAcceleratorConfig
type GkeNodePoolConfig_GkeNodePoolAcceleratorConfig struct {
// The number of accelerator cards exposed to an instance.
AcceleratorCount int64 `protobuf:"varint,1,opt,name=accelerator_count,json=acceleratorCount,proto3" json:"accelerator_count,omitempty"`
// The accelerator type resource name (see GPUs on Compute Engine).
AcceleratorType string `protobuf:"bytes,2,opt,name=accelerator_type,json=acceleratorType,proto3" json:"accelerator_type,omitempty"`
// Size of partitions to create on the GPU. Valid values are described in
// the NVIDIA [mig user
// guide](https://docs.nvidia.com/datacenter/tesla/mig-user-guide/#partitioning).
GpuPartitionSize string `protobuf:"bytes,3,opt,name=gpu_partition_size,json=gpuPartitionSize,proto3" json:"gpu_partition_size,omitempty"`
// contains filtered or unexported fields
}
A GkeNodeConfigAcceleratorConfig represents a Hardware Accelerator request for a node pool.
func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) Descriptor
func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) Descriptor() ([]byte, []int)
Deprecated: Use GkeNodePoolConfig_GkeNodePoolAcceleratorConfig.ProtoReflect.Descriptor instead.
func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) GetAcceleratorCount
func (x *GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) GetAcceleratorCount() int64
func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) GetAcceleratorType
func (x *GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) GetAcceleratorType() string
func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) GetGpuPartitionSize
func (x *GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) GetGpuPartitionSize() string
func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) ProtoMessage
func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) ProtoMessage()
func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) ProtoReflect
func (x *GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) ProtoReflect() protoreflect.Message
func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) Reset
func (x *GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) Reset()
func (*GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) String
func (x *GkeNodePoolConfig_GkeNodePoolAcceleratorConfig) String() string
GkeNodePoolConfig_GkeNodePoolAutoscalingConfig
type GkeNodePoolConfig_GkeNodePoolAutoscalingConfig struct {
// The minimum number of nodes in the node pool. Must be >= 0 and <=
// max_node_count.
MinNodeCount int32 `protobuf:"varint,2,opt,name=min_node_count,json=minNodeCount,proto3" json:"min_node_count,omitempty"`
// The maximum number of nodes in the node pool. Must be >= min_node_count,
// and must be > 0.
// **Note:** Quota must be sufficient to scale up the cluster.
MaxNodeCount int32 `protobuf:"varint,3,opt,name=max_node_count,json=maxNodeCount,proto3" json:"max_node_count,omitempty"`
// contains filtered or unexported fields
}
GkeNodePoolAutoscaling contains information the cluster autoscaler needs to adjust the size of the node pool to the current cluster usage.
func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) Descriptor
func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) Descriptor() ([]byte, []int)
Deprecated: Use GkeNodePoolConfig_GkeNodePoolAutoscalingConfig.ProtoReflect.Descriptor instead.
func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) GetMaxNodeCount
func (x *GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) GetMaxNodeCount() int32
func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) GetMinNodeCount
func (x *GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) GetMinNodeCount() int32
func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) ProtoMessage
func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) ProtoMessage()
func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) ProtoReflect
func (x *GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) ProtoReflect() protoreflect.Message
func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) Reset
func (x *GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) Reset()
func (*GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) String
func (x *GkeNodePoolConfig_GkeNodePoolAutoscalingConfig) String() string
GkeNodePoolTarget
type GkeNodePoolTarget struct {
// Required. The target GKE node pool.
// Format:
// 'projects/{project}/locations/{location}/clusters/{cluster}/nodePools/{node_pool}'
NodePool string `protobuf:"bytes,1,opt,name=node_pool,json=nodePool,proto3" json:"node_pool,omitempty"`
// Required. The roles associated with the GKE node pool.
Roles []GkeNodePoolTarget_Role `protobuf:"varint,2,rep,packed,name=roles,proto3,enum=google.cloud.dataproc.v1.GkeNodePoolTarget_Role" json:"roles,omitempty"`
// Input only. The configuration for the GKE node pool.
//
// If specified, Dataproc attempts to create a node pool with the
// specified shape. If one with the same name already exists, it is
// verified against all specified fields. If a field differs, the
// virtual cluster creation will fail.
//
// If omitted, any node pool with the specified name is used. If a
// node pool with the specified name does not exist, Dataproc creates a
// node pool with default values.
//
// This is an input only field. It will not be returned by the API.
NodePoolConfig *GkeNodePoolConfig `protobuf:"bytes,3,opt,name=node_pool_config,json=nodePoolConfig,proto3" json:"node_pool_config,omitempty"`
// contains filtered or unexported fields
}
GKE node pools that Dataproc workloads run on.
func (*GkeNodePoolTarget) Descriptor
func (*GkeNodePoolTarget) Descriptor() ([]byte, []int)
Deprecated: Use GkeNodePoolTarget.ProtoReflect.Descriptor instead.
func (*GkeNodePoolTarget) GetNodePool
func (x *GkeNodePoolTarget) GetNodePool() string
func (*GkeNodePoolTarget) GetNodePoolConfig
func (x *GkeNodePoolTarget) GetNodePoolConfig() *GkeNodePoolConfig
func (*GkeNodePoolTarget) GetRoles
func (x *GkeNodePoolTarget) GetRoles() []GkeNodePoolTarget_Role
func (*GkeNodePoolTarget) ProtoMessage
func (*GkeNodePoolTarget) ProtoMessage()
func (*GkeNodePoolTarget) ProtoReflect
func (x *GkeNodePoolTarget) ProtoReflect() protoreflect.Message
func (*GkeNodePoolTarget) Reset
func (x *GkeNodePoolTarget) Reset()
func (*GkeNodePoolTarget) String
func (x *GkeNodePoolTarget) String() string
GkeNodePoolTarget_Role
type GkeNodePoolTarget_Role int32
Role
specifies the tasks that will run on the node pool. Roles can be
specific to workloads. Exactly one
[GkeNodePoolTarget][google.cloud.dataproc.v1.GkeNodePoolTarget] within the
virtual cluster must have the DEFAULT
role, which is used to run all
workloads that are not associated with a node pool.
GkeNodePoolTarget_ROLE_UNSPECIFIED, GkeNodePoolTarget_DEFAULT, GkeNodePoolTarget_CONTROLLER, GkeNodePoolTarget_SPARK_DRIVER, GkeNodePoolTarget_SPARK_EXECUTOR
const (
// Role is unspecified.
GkeNodePoolTarget_ROLE_UNSPECIFIED GkeNodePoolTarget_Role = 0
// At least one node pool must have the `DEFAULT` role.
// Work assigned to a role that is not associated with a node pool
// is assigned to the node pool with the `DEFAULT` role. For example,
// work assigned to the `CONTROLLER` role will be assigned to the node pool
// with the `DEFAULT` role if no node pool has the `CONTROLLER` role.
GkeNodePoolTarget_DEFAULT GkeNodePoolTarget_Role = 1
// Run work associated with the Dataproc control plane (for example,
// controllers and webhooks). Very low resource requirements.
GkeNodePoolTarget_CONTROLLER GkeNodePoolTarget_Role = 2
// Run work associated with a Spark driver of a job.
GkeNodePoolTarget_SPARK_DRIVER GkeNodePoolTarget_Role = 3
// Run work associated with a Spark executor of a job.
GkeNodePoolTarget_SPARK_EXECUTOR GkeNodePoolTarget_Role = 4
)
func (GkeNodePoolTarget_Role) Descriptor
func (GkeNodePoolTarget_Role) Descriptor() protoreflect.EnumDescriptor
func (GkeNodePoolTarget_Role) Enum
func (x GkeNodePoolTarget_Role) Enum() *GkeNodePoolTarget_Role
func (GkeNodePoolTarget_Role) EnumDescriptor
func (GkeNodePoolTarget_Role) EnumDescriptor() ([]byte, []int)
Deprecated: Use GkeNodePoolTarget_Role.Descriptor instead.
func (GkeNodePoolTarget_Role) Number
func (x GkeNodePoolTarget_Role) Number() protoreflect.EnumNumber
func (GkeNodePoolTarget_Role) String
func (x GkeNodePoolTarget_Role) String() string
func (GkeNodePoolTarget_Role) Type
func (GkeNodePoolTarget_Role) Type() protoreflect.EnumType
HadoopJob
type HadoopJob struct {
// Required. Indicates the location of the driver's main class. Specify
// either the jar file that contains the main class or the main class name.
// To specify both, add the jar file to `jar_file_uris`, and then specify
// the main class name in this property.
//
// Types that are assignable to Driver:
//
// *HadoopJob_MainJarFileUri
// *HadoopJob_MainClass
Driver isHadoopJob_Driver `protobuf_oneof:"driver"`
// Optional. The arguments to pass to the driver. Do not
// include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as
// job properties, since a collision might occur that causes an incorrect job
// submission.
Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
// Optional. Jar file URIs to add to the CLASSPATHs of the
// Hadoop driver and tasks.
JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
// Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied
// to the working directory of Hadoop drivers and distributed tasks. Useful
// for naively parallel tasks.
FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
// Optional. HCFS URIs of archives to be extracted in the working directory of
// Hadoop drivers and tasks. Supported file types:
// .jar, .tar, .tar.gz, .tgz, or .zip.
ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
// Optional. A mapping of property names to values, used to configure Hadoop.
// Properties that conflict with values set by the Dataproc API might be
// overwritten. Can include properties set in `/etc/hadoop/conf/*-site` and
// classes in user code.
Properties map[string]string `protobuf:"bytes,7,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. The runtime log config for job execution.
LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
// contains filtered or unexported fields
}
A Dataproc job for running Apache Hadoop MapReduce jobs on Apache Hadoop YARN.
func (*HadoopJob) Descriptor
Deprecated: Use HadoopJob.ProtoReflect.Descriptor instead.
func (*HadoopJob) GetArchiveUris
func (*HadoopJob) GetArgs
func (*HadoopJob) GetDriver
func (m *HadoopJob) GetDriver() isHadoopJob_Driver
func (*HadoopJob) GetFileUris
func (*HadoopJob) GetJarFileUris
func (*HadoopJob) GetLoggingConfig
func (x *HadoopJob) GetLoggingConfig() *LoggingConfig
func (*HadoopJob) GetMainClass
func (*HadoopJob) GetMainJarFileUri
func (*HadoopJob) GetProperties
func (*HadoopJob) ProtoMessage
func (*HadoopJob) ProtoMessage()
func (*HadoopJob) ProtoReflect
func (x *HadoopJob) ProtoReflect() protoreflect.Message
func (*HadoopJob) Reset
func (x *HadoopJob) Reset()
func (*HadoopJob) String
HadoopJob_MainClass
type HadoopJob_MainClass struct {
// The name of the driver's main class. The jar file containing the class
// must be in the default CLASSPATH or specified in `jar_file_uris`.
MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"`
}
HadoopJob_MainJarFileUri
type HadoopJob_MainJarFileUri struct {
// The HCFS URI of the jar file containing the main class.
// Examples:
//
// 'gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar'
// 'hdfs:/tmp/test-samples/custom-wordcount.jar'
// 'file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar'
MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"`
}
HiveJob
type HiveJob struct {
// Required. The sequence of Hive queries to execute, specified as either
// an HCFS file URI or a list of queries.
//
// Types that are assignable to Queries:
//
// *HiveJob_QueryFileUri
// *HiveJob_QueryList
Queries isHiveJob_Queries `protobuf_oneof:"queries"`
// Optional. Whether to continue executing queries if a query fails.
// The default value is `false`. Setting to `true` can be useful when
// executing independent parallel queries.
ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`
// Optional. Mapping of query variable names to values (equivalent to the
// Hive command: `SET name="value";`).
ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables,proto3" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. A mapping of property names and values, used to configure Hive.
// Properties that conflict with values set by the Dataproc API might be
// overwritten. Can include properties set in `/etc/hadoop/conf/*-site.xml`,
// /etc/hive/conf/hive-site.xml, and classes in user code.
Properties map[string]string `protobuf:"bytes,5,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. HCFS URIs of jar files to add to the CLASSPATH of the
// Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes
// and UDFs.
JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
// contains filtered or unexported fields
}
A Dataproc job for running Apache Hive queries on YARN.
func (*HiveJob) Descriptor
Deprecated: Use HiveJob.ProtoReflect.Descriptor instead.
func (*HiveJob) GetContinueOnFailure
func (*HiveJob) GetJarFileUris
func (*HiveJob) GetProperties
func (*HiveJob) GetQueries
func (m *HiveJob) GetQueries() isHiveJob_Queries
func (*HiveJob) GetQueryFileUri
func (*HiveJob) GetQueryList
func (*HiveJob) GetScriptVariables
func (*HiveJob) ProtoMessage
func (*HiveJob) ProtoMessage()
func (*HiveJob) ProtoReflect
func (x *HiveJob) ProtoReflect() protoreflect.Message
func (*HiveJob) Reset
func (x *HiveJob) Reset()
func (*HiveJob) String
HiveJob_QueryFileUri
type HiveJob_QueryFileUri struct {
// The HCFS URI of the script that contains Hive queries.
QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}
HiveJob_QueryList
type HiveJob_QueryList struct {
// A list of queries.
QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}
IdentityConfig
type IdentityConfig struct {
// Required. Map of user to service account.
UserServiceAccountMapping map[string]string `protobuf:"bytes,1,rep,name=user_service_account_mapping,json=userServiceAccountMapping,proto3" json:"user_service_account_mapping,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// contains filtered or unexported fields
}
Identity related configuration, including service account based secure multi-tenancy user mappings.
func (*IdentityConfig) Descriptor
func (*IdentityConfig) Descriptor() ([]byte, []int)
Deprecated: Use IdentityConfig.ProtoReflect.Descriptor instead.
func (*IdentityConfig) GetUserServiceAccountMapping
func (x *IdentityConfig) GetUserServiceAccountMapping() map[string]string
func (*IdentityConfig) ProtoMessage
func (*IdentityConfig) ProtoMessage()
func (*IdentityConfig) ProtoReflect
func (x *IdentityConfig) ProtoReflect() protoreflect.Message
func (*IdentityConfig) Reset
func (x *IdentityConfig) Reset()
func (*IdentityConfig) String
func (x *IdentityConfig) String() string
InstanceFlexibilityPolicy
type InstanceFlexibilityPolicy struct {
// Optional. Defines how the Group selects the provisioning model to ensure
// required reliability.
ProvisioningModelMix *InstanceFlexibilityPolicy_ProvisioningModelMix `protobuf:"bytes,1,opt,name=provisioning_model_mix,json=provisioningModelMix,proto3" json:"provisioning_model_mix,omitempty"`
// Optional. List of instance selection options that the group will use when
// creating new VMs.
InstanceSelectionList []*InstanceFlexibilityPolicy_InstanceSelection `protobuf:"bytes,2,rep,name=instance_selection_list,json=instanceSelectionList,proto3" json:"instance_selection_list,omitempty"`
// Output only. A list of instance selection results in the group.
InstanceSelectionResults []*InstanceFlexibilityPolicy_InstanceSelectionResult `protobuf:"bytes,3,rep,name=instance_selection_results,json=instanceSelectionResults,proto3" json:"instance_selection_results,omitempty"`
// contains filtered or unexported fields
}
Instance flexibility Policy allowing a mixture of VM shapes and provisioning models.
func (*InstanceFlexibilityPolicy) Descriptor
func (*InstanceFlexibilityPolicy) Descriptor() ([]byte, []int)
Deprecated: Use InstanceFlexibilityPolicy.ProtoReflect.Descriptor instead.
func (*InstanceFlexibilityPolicy) GetInstanceSelectionList
func (x *InstanceFlexibilityPolicy) GetInstanceSelectionList() []*InstanceFlexibilityPolicy_InstanceSelection
func (*InstanceFlexibilityPolicy) GetInstanceSelectionResults
func (x *InstanceFlexibilityPolicy) GetInstanceSelectionResults() []*InstanceFlexibilityPolicy_InstanceSelectionResult
func (*InstanceFlexibilityPolicy) GetProvisioningModelMix
func (x *InstanceFlexibilityPolicy) GetProvisioningModelMix() *InstanceFlexibilityPolicy_ProvisioningModelMix
func (*InstanceFlexibilityPolicy) ProtoMessage
func (*InstanceFlexibilityPolicy) ProtoMessage()
func (*InstanceFlexibilityPolicy) ProtoReflect
func (x *InstanceFlexibilityPolicy) ProtoReflect() protoreflect.Message
func (*InstanceFlexibilityPolicy) Reset
func (x *InstanceFlexibilityPolicy) Reset()
func (*InstanceFlexibilityPolicy) String
func (x *InstanceFlexibilityPolicy) String() string
InstanceFlexibilityPolicy_InstanceSelection
type InstanceFlexibilityPolicy_InstanceSelection struct {
// Optional. Full machine-type names, e.g. "n1-standard-16".
MachineTypes []string `protobuf:"bytes,1,rep,name=machine_types,json=machineTypes,proto3" json:"machine_types,omitempty"`
// Optional. Preference of this instance selection. Lower number means
// higher preference. Dataproc will first try to create a VM based on the
// machine-type with priority rank and fallback to next rank based on
// availability. Machine types and instance selections with the same
// priority have the same preference.
Rank int32 `protobuf:"varint,2,opt,name=rank,proto3" json:"rank,omitempty"`
// contains filtered or unexported fields
}
Defines machines types and a rank to which the machines types belong.
func (*InstanceFlexibilityPolicy_InstanceSelection) Descriptor
func (*InstanceFlexibilityPolicy_InstanceSelection) Descriptor() ([]byte, []int)
Deprecated: Use InstanceFlexibilityPolicy_InstanceSelection.ProtoReflect.Descriptor instead.
func (*InstanceFlexibilityPolicy_InstanceSelection) GetMachineTypes
func (x *InstanceFlexibilityPolicy_InstanceSelection) GetMachineTypes() []string
func (*InstanceFlexibilityPolicy_InstanceSelection) GetRank
func (x *InstanceFlexibilityPolicy_InstanceSelection) GetRank() int32
func (*InstanceFlexibilityPolicy_InstanceSelection) ProtoMessage
func (*InstanceFlexibilityPolicy_InstanceSelection) ProtoMessage()
func (*InstanceFlexibilityPolicy_InstanceSelection) ProtoReflect
func (x *InstanceFlexibilityPolicy_InstanceSelection) ProtoReflect() protoreflect.Message
func (*InstanceFlexibilityPolicy_InstanceSelection) Reset
func (x *InstanceFlexibilityPolicy_InstanceSelection) Reset()
func (*InstanceFlexibilityPolicy_InstanceSelection) String
func (x *InstanceFlexibilityPolicy_InstanceSelection) String() string
InstanceFlexibilityPolicy_InstanceSelectionResult
type InstanceFlexibilityPolicy_InstanceSelectionResult struct {
// Output only. Full machine-type names, e.g. "n1-standard-16".
MachineType *string `protobuf:"bytes,1,opt,name=machine_type,json=machineType,proto3,oneof" json:"machine_type,omitempty"`
// Output only. Number of VM provisioned with the machine_type.
VmCount *int32 `protobuf:"varint,2,opt,name=vm_count,json=vmCount,proto3,oneof" json:"vm_count,omitempty"`
// contains filtered or unexported fields
}
Defines a mapping from machine types to the number of VMs that are created with each machine type.
func (*InstanceFlexibilityPolicy_InstanceSelectionResult) Descriptor
func (*InstanceFlexibilityPolicy_InstanceSelectionResult) Descriptor() ([]byte, []int)
Deprecated: Use InstanceFlexibilityPolicy_InstanceSelectionResult.ProtoReflect.Descriptor instead.
func (*InstanceFlexibilityPolicy_InstanceSelectionResult) GetMachineType
func (x *InstanceFlexibilityPolicy_InstanceSelectionResult) GetMachineType() string
func (*InstanceFlexibilityPolicy_InstanceSelectionResult) GetVmCount
func (x *InstanceFlexibilityPolicy_InstanceSelectionResult) GetVmCount() int32
func (*InstanceFlexibilityPolicy_InstanceSelectionResult) ProtoMessage
func (*InstanceFlexibilityPolicy_InstanceSelectionResult) ProtoMessage()
func (*InstanceFlexibilityPolicy_InstanceSelectionResult) ProtoReflect
func (x *InstanceFlexibilityPolicy_InstanceSelectionResult) ProtoReflect() protoreflect.Message
func (*InstanceFlexibilityPolicy_InstanceSelectionResult) Reset
func (x *InstanceFlexibilityPolicy_InstanceSelectionResult) Reset()
func (*InstanceFlexibilityPolicy_InstanceSelectionResult) String
func (x *InstanceFlexibilityPolicy_InstanceSelectionResult) String() string
InstanceFlexibilityPolicy_ProvisioningModelMix
type InstanceFlexibilityPolicy_ProvisioningModelMix struct {
// Optional. The base capacity that will always use Standard VMs to avoid
// risk of more preemption than the minimum capacity you need. Dataproc will
// create only standard VMs until it reaches standard_capacity_base, then it
// will start using standard_capacity_percent_above_base to mix Spot with
// Standard VMs. eg. If 15 instances are requested and
// standard_capacity_base is 5, Dataproc will create 5 standard VMs and then
// start mixing spot and standard VMs for remaining 10 instances.
StandardCapacityBase *int32 `protobuf:"varint,1,opt,name=standard_capacity_base,json=standardCapacityBase,proto3,oneof" json:"standard_capacity_base,omitempty"`
// Optional. The percentage of target capacity that should use Standard VM.
// The remaining percentage will use Spot VMs. The percentage applies only
// to the capacity above standard_capacity_base. eg. If 15 instances are
// requested and standard_capacity_base is 5 and
// standard_capacity_percent_above_base is 30, Dataproc will create 5
// standard VMs and then start mixing spot and standard VMs for remaining 10
// instances. The mix will be 30% standard and 70% spot.
StandardCapacityPercentAboveBase *int32 `protobuf:"varint,2,opt,name=standard_capacity_percent_above_base,json=standardCapacityPercentAboveBase,proto3,oneof" json:"standard_capacity_percent_above_base,omitempty"`
// contains filtered or unexported fields
}
Defines how Dataproc should create VMs with a mixture of provisioning models.
func (*InstanceFlexibilityPolicy_ProvisioningModelMix) Descriptor
func (*InstanceFlexibilityPolicy_ProvisioningModelMix) Descriptor() ([]byte, []int)
Deprecated: Use InstanceFlexibilityPolicy_ProvisioningModelMix.ProtoReflect.Descriptor instead.
func (*InstanceFlexibilityPolicy_ProvisioningModelMix) GetStandardCapacityBase
func (x *InstanceFlexibilityPolicy_ProvisioningModelMix) GetStandardCapacityBase() int32
func (*InstanceFlexibilityPolicy_ProvisioningModelMix) GetStandardCapacityPercentAboveBase
func (x *InstanceFlexibilityPolicy_ProvisioningModelMix) GetStandardCapacityPercentAboveBase() int32
func (*InstanceFlexibilityPolicy_ProvisioningModelMix) ProtoMessage
func (*InstanceFlexibilityPolicy_ProvisioningModelMix) ProtoMessage()
func (*InstanceFlexibilityPolicy_ProvisioningModelMix) ProtoReflect
func (x *InstanceFlexibilityPolicy_ProvisioningModelMix) ProtoReflect() protoreflect.Message
func (*InstanceFlexibilityPolicy_ProvisioningModelMix) Reset
func (x *InstanceFlexibilityPolicy_ProvisioningModelMix) Reset()
func (*InstanceFlexibilityPolicy_ProvisioningModelMix) String
func (x *InstanceFlexibilityPolicy_ProvisioningModelMix) String() string
InstanceGroupAutoscalingPolicyConfig
type InstanceGroupAutoscalingPolicyConfig struct {
// Optional. Minimum number of instances for this group.
//
// Primary workers - Bounds: [2, max_instances]. Default: 2.
// Secondary workers - Bounds: [0, max_instances]. Default: 0.
MinInstances int32 `protobuf:"varint,1,opt,name=min_instances,json=minInstances,proto3" json:"min_instances,omitempty"`
// Required. Maximum number of instances for this group. Required for primary
// workers. Note that by default, clusters will not use secondary workers.
// Required for secondary workers if the minimum secondary instances is set.
//
// Primary workers - Bounds: [min_instances, ).
// Secondary workers - Bounds: [min_instances, ). Default: 0.
MaxInstances int32 `protobuf:"varint,2,opt,name=max_instances,json=maxInstances,proto3" json:"max_instances,omitempty"`
// Optional. Weight for the instance group, which is used to determine the
// fraction of total workers in the cluster from this instance group.
// For example, if primary workers have weight 2, and secondary workers have
// weight 1, the cluster will have approximately 2 primary workers for each
// secondary worker.
//
// The cluster may not reach the specified balance if constrained
// by min/max bounds or other autoscaling settings. For example, if
// `max_instances` for secondary workers is 0, then only primary workers will
// be added. The cluster can also be out of balance when created.
//
// If weight is not set on any instance group, the cluster will default to
// equal weight for all groups: the cluster will attempt to maintain an equal
// number of workers in each group within the configured size bounds for each
// group. If weight is set for one group only, the cluster will default to
// zero weight on the unset group. For example if weight is set only on
// primary workers, the cluster will use primary workers only and no
// secondary workers.
Weight int32 `protobuf:"varint,3,opt,name=weight,proto3" json:"weight,omitempty"`
// contains filtered or unexported fields
}
Configuration for the size bounds of an instance group, including its proportional size to other groups.
func (*InstanceGroupAutoscalingPolicyConfig) Descriptor
func (*InstanceGroupAutoscalingPolicyConfig) Descriptor() ([]byte, []int)
Deprecated: Use InstanceGroupAutoscalingPolicyConfig.ProtoReflect.Descriptor instead.
func (*InstanceGroupAutoscalingPolicyConfig) GetMaxInstances
func (x *InstanceGroupAutoscalingPolicyConfig) GetMaxInstances() int32
func (*InstanceGroupAutoscalingPolicyConfig) GetMinInstances
func (x *InstanceGroupAutoscalingPolicyConfig) GetMinInstances() int32
func (*InstanceGroupAutoscalingPolicyConfig) GetWeight
func (x *InstanceGroupAutoscalingPolicyConfig) GetWeight() int32
func (*InstanceGroupAutoscalingPolicyConfig) ProtoMessage
func (*InstanceGroupAutoscalingPolicyConfig) ProtoMessage()
func (*InstanceGroupAutoscalingPolicyConfig) ProtoReflect
func (x *InstanceGroupAutoscalingPolicyConfig) ProtoReflect() protoreflect.Message
func (*InstanceGroupAutoscalingPolicyConfig) Reset
func (x *InstanceGroupAutoscalingPolicyConfig) Reset()
func (*InstanceGroupAutoscalingPolicyConfig) String
func (x *InstanceGroupAutoscalingPolicyConfig) String() string
InstanceGroupConfig
type InstanceGroupConfig struct {
// Optional. The number of VM instances in the instance group.
// For [HA
// cluster](/dataproc/docs/concepts/configuring-clusters/high-availability)
// [master_config](#FIELDS.master_config) groups, **must be set to 3**.
// For standard cluster [master_config](#FIELDS.master_config) groups,
// **must be set to 1**.
NumInstances int32 `protobuf:"varint,1,opt,name=num_instances,json=numInstances,proto3" json:"num_instances,omitempty"`
// Output only. The list of instance names. Dataproc derives the names
// from `cluster_name`, `num_instances`, and the instance group.
InstanceNames []string `protobuf:"bytes,2,rep,name=instance_names,json=instanceNames,proto3" json:"instance_names,omitempty"`
// Output only. List of references to Compute Engine instances.
InstanceReferences []*InstanceReference `protobuf:"bytes,11,rep,name=instance_references,json=instanceReferences,proto3" json:"instance_references,omitempty"`
// Optional. The Compute Engine image resource used for cluster instances.
//
// The URI can represent an image or image family.
//
// Image examples:
//
// * `https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/[image-id]`
// * `projects/[project_id]/global/images/[image-id]`
// * `image-id`
//
// Image family examples. Dataproc will use the most recent
// image from the family:
//
// * `https://www.googleapis.com/compute/v1/projects/[project_id]/global/images/family/[custom-image-family-name]`
// * `projects/[project_id]/global/images/family/[custom-image-family-name]`
//
// If the URI is unspecified, it will be inferred from
// `SoftwareConfig.image_version` or the system default.
ImageUri string `protobuf:"bytes,3,opt,name=image_uri,json=imageUri,proto3" json:"image_uri,omitempty"`
// Optional. The Compute Engine machine type used for cluster instances.
//
// A full URL, partial URI, or short name are valid. Examples:
//
// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2`
// * `projects/[project_id]/zones/[zone]/machineTypes/n1-standard-2`
// * `n1-standard-2`
//
// **Auto Zone Exception**: If you are using the Dataproc
// [Auto Zone
// Placement](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
// feature, you must use the short name of the machine type
// resource, for example, `n1-standard-2`.
MachineTypeUri string `protobuf:"bytes,4,opt,name=machine_type_uri,json=machineTypeUri,proto3" json:"machine_type_uri,omitempty"`
// Optional. Disk option config settings.
DiskConfig *DiskConfig `protobuf:"bytes,5,opt,name=disk_config,json=diskConfig,proto3" json:"disk_config,omitempty"`
// Output only. Specifies that this instance group contains preemptible
// instances.
IsPreemptible bool `protobuf:"varint,6,opt,name=is_preemptible,json=isPreemptible,proto3" json:"is_preemptible,omitempty"`
// Optional. Specifies the preemptibility of the instance group.
//
// The default value for master and worker groups is
// `NON_PREEMPTIBLE`. This default cannot be changed.
//
// The default value for secondary instances is
// `PREEMPTIBLE`.
Preemptibility InstanceGroupConfig_Preemptibility `protobuf:"varint,10,opt,name=preemptibility,proto3,enum=google.cloud.dataproc.v1.InstanceGroupConfig_Preemptibility" json:"preemptibility,omitempty"`
// Output only. The config for Compute Engine Instance Group
// Manager that manages this group.
// This is only used for preemptible instance groups.
ManagedGroupConfig *ManagedGroupConfig `protobuf:"bytes,7,opt,name=managed_group_config,json=managedGroupConfig,proto3" json:"managed_group_config,omitempty"`
// Optional. The Compute Engine accelerator configuration for these
// instances.
Accelerators []*AcceleratorConfig `protobuf:"bytes,8,rep,name=accelerators,proto3" json:"accelerators,omitempty"`
// Optional. Specifies the minimum cpu platform for the Instance Group.
// See [Dataproc -> Minimum CPU
// Platform](https://cloud.google.com/dataproc/docs/concepts/compute/dataproc-min-cpu).
MinCpuPlatform string `protobuf:"bytes,9,opt,name=min_cpu_platform,json=minCpuPlatform,proto3" json:"min_cpu_platform,omitempty"`
// Optional. The minimum number of primary worker instances to create.
// If `min_num_instances` is set, cluster creation will succeed if
// the number of primary workers created is at least equal to the
// `min_num_instances` number.
//
// Example: Cluster creation request with `num_instances` = `5` and
// `min_num_instances` = `3`:
//
// - If 4 VMs are created and 1 instance fails,
// the failed VM is deleted. The cluster is
// resized to 4 instances and placed in a `RUNNING` state.
// - If 2 instances are created and 3 instances fail,
// the cluster is placed in an `ERROR` state. The failed VMs
// are not deleted.
MinNumInstances int32 `protobuf:"varint,12,opt,name=min_num_instances,json=minNumInstances,proto3" json:"min_num_instances,omitempty"`
// Optional. Instance flexibility Policy allowing a mixture of VM shapes and
// provisioning models.
InstanceFlexibilityPolicy *InstanceFlexibilityPolicy `protobuf:"bytes,13,opt,name=instance_flexibility_policy,json=instanceFlexibilityPolicy,proto3" json:"instance_flexibility_policy,omitempty"`
// Optional. Configuration to handle the startup of instances during cluster
// create and update process.
StartupConfig *StartupConfig `protobuf:"bytes,14,opt,name=startup_config,json=startupConfig,proto3" json:"startup_config,omitempty"`
// contains filtered or unexported fields
}
The config settings for Compute Engine resources in an instance group, such as a master or worker group.
func (*InstanceGroupConfig) Descriptor
func (*InstanceGroupConfig) Descriptor() ([]byte, []int)
Deprecated: Use InstanceGroupConfig.ProtoReflect.Descriptor instead.
func (*InstanceGroupConfig) GetAccelerators
func (x *InstanceGroupConfig) GetAccelerators() []*AcceleratorConfig
func (*InstanceGroupConfig) GetDiskConfig
func (x *InstanceGroupConfig) GetDiskConfig() *DiskConfig
func (*InstanceGroupConfig) GetImageUri
func (x *InstanceGroupConfig) GetImageUri() string
func (*InstanceGroupConfig) GetInstanceFlexibilityPolicy
func (x *InstanceGroupConfig) GetInstanceFlexibilityPolicy() *InstanceFlexibilityPolicy
func (*InstanceGroupConfig) GetInstanceNames
func (x *InstanceGroupConfig) GetInstanceNames() []string
func (*InstanceGroupConfig) GetInstanceReferences
func (x *InstanceGroupConfig) GetInstanceReferences() []*InstanceReference
func (*InstanceGroupConfig) GetIsPreemptible
func (x *InstanceGroupConfig) GetIsPreemptible() bool
func (*InstanceGroupConfig) GetMachineTypeUri
func (x *InstanceGroupConfig) GetMachineTypeUri() string
func (*InstanceGroupConfig) GetManagedGroupConfig
func (x *InstanceGroupConfig) GetManagedGroupConfig() *ManagedGroupConfig
func (*InstanceGroupConfig) GetMinCpuPlatform
func (x *InstanceGroupConfig) GetMinCpuPlatform() string
func (*InstanceGroupConfig) GetMinNumInstances
func (x *InstanceGroupConfig) GetMinNumInstances() int32
func (*InstanceGroupConfig) GetNumInstances
func (x *InstanceGroupConfig) GetNumInstances() int32
func (*InstanceGroupConfig) GetPreemptibility
func (x *InstanceGroupConfig) GetPreemptibility() InstanceGroupConfig_Preemptibility
func (*InstanceGroupConfig) GetStartupConfig
func (x *InstanceGroupConfig) GetStartupConfig() *StartupConfig
func (*InstanceGroupConfig) ProtoMessage
func (*InstanceGroupConfig) ProtoMessage()
func (*InstanceGroupConfig) ProtoReflect
func (x *InstanceGroupConfig) ProtoReflect() protoreflect.Message
func (*InstanceGroupConfig) Reset
func (x *InstanceGroupConfig) Reset()
func (*InstanceGroupConfig) String
func (x *InstanceGroupConfig) String() string
InstanceGroupConfig_Preemptibility
type InstanceGroupConfig_Preemptibility int32
Controls the use of preemptible instances within the group.
InstanceGroupConfig_PREEMPTIBILITY_UNSPECIFIED, InstanceGroupConfig_NON_PREEMPTIBLE, InstanceGroupConfig_PREEMPTIBLE, InstanceGroupConfig_SPOT
const (
// Preemptibility is unspecified, the system will choose the
// appropriate setting for each instance group.
InstanceGroupConfig_PREEMPTIBILITY_UNSPECIFIED InstanceGroupConfig_Preemptibility = 0
// Instances are non-preemptible.
//
// This option is allowed for all instance groups and is the only valid
// value for Master and Worker instance groups.
InstanceGroupConfig_NON_PREEMPTIBLE InstanceGroupConfig_Preemptibility = 1
// Instances are [preemptible]
// (https://cloud.google.com/compute/docs/instances/preemptible).
//
// This option is allowed only for [secondary worker]
// (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms)
// groups.
InstanceGroupConfig_PREEMPTIBLE InstanceGroupConfig_Preemptibility = 2
// Instances are [Spot VMs]
// (https://cloud.google.com/compute/docs/instances/spot).
//
// This option is allowed only for [secondary worker]
// (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms)
// groups. Spot VMs are the latest version of [preemptible VMs]
// (https://cloud.google.com/compute/docs/instances/preemptible), and
// provide additional features.
InstanceGroupConfig_SPOT InstanceGroupConfig_Preemptibility = 3
)
func (InstanceGroupConfig_Preemptibility) Descriptor
func (InstanceGroupConfig_Preemptibility) Descriptor() protoreflect.EnumDescriptor
func (InstanceGroupConfig_Preemptibility) Enum
func (x InstanceGroupConfig_Preemptibility) Enum() *InstanceGroupConfig_Preemptibility
func (InstanceGroupConfig_Preemptibility) EnumDescriptor
func (InstanceGroupConfig_Preemptibility) EnumDescriptor() ([]byte, []int)
Deprecated: Use InstanceGroupConfig_Preemptibility.Descriptor instead.
func (InstanceGroupConfig_Preemptibility) Number
func (x InstanceGroupConfig_Preemptibility) Number() protoreflect.EnumNumber
func (InstanceGroupConfig_Preemptibility) String
func (x InstanceGroupConfig_Preemptibility) String() string
func (InstanceGroupConfig_Preemptibility) Type
func (InstanceGroupConfig_Preemptibility) Type() protoreflect.EnumType
InstanceReference
type InstanceReference struct {
// The user-friendly name of the Compute Engine instance.
InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty"`
// The unique identifier of the Compute Engine instance.
InstanceId string `protobuf:"bytes,2,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
// The public RSA key used for sharing data with this instance.
PublicKey string `protobuf:"bytes,3,opt,name=public_key,json=publicKey,proto3" json:"public_key,omitempty"`
// The public ECIES key used for sharing data with this instance.
PublicEciesKey string `protobuf:"bytes,4,opt,name=public_ecies_key,json=publicEciesKey,proto3" json:"public_ecies_key,omitempty"`
// contains filtered or unexported fields
}
A reference to a Compute Engine instance.
func (*InstanceReference) Descriptor
func (*InstanceReference) Descriptor() ([]byte, []int)
Deprecated: Use InstanceReference.ProtoReflect.Descriptor instead.
func (*InstanceReference) GetInstanceId
func (x *InstanceReference) GetInstanceId() string
func (*InstanceReference) GetInstanceName
func (x *InstanceReference) GetInstanceName() string
func (*InstanceReference) GetPublicEciesKey
func (x *InstanceReference) GetPublicEciesKey() string
func (*InstanceReference) GetPublicKey
func (x *InstanceReference) GetPublicKey() string
func (*InstanceReference) ProtoMessage
func (*InstanceReference) ProtoMessage()
func (*InstanceReference) ProtoReflect
func (x *InstanceReference) ProtoReflect() protoreflect.Message
func (*InstanceReference) Reset
func (x *InstanceReference) Reset()
func (*InstanceReference) String
func (x *InstanceReference) String() string
InstantiateInlineWorkflowTemplateRequest
type InstantiateInlineWorkflowTemplateRequest struct {
// Required. The resource name of the region or location, as described
// in https://cloud.google.com/apis/design/resource_names.
//
// - For `projects.regions.workflowTemplates.instantiateInline`, the resource
// name of the region has the following format:
// `projects/{project_id}/regions/{region}`
//
// - For `projects.locations.workflowTemplates.instantiateinline`, the
// resource name of the location has the following format:
// `projects/{project_id}/locations/{location}`
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Required. The workflow template to instantiate.
Template *WorkflowTemplate `protobuf:"bytes,2,opt,name=template,proto3" json:"template,omitempty"`
// Optional. A tag that prevents multiple concurrent workflow
// instances with the same tag from running. This mitigates risk of
// concurrent instances started due to retries.
//
// It is recommended to always set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The tag must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// contains filtered or unexported fields
}
A request to instantiate an inline workflow template.
func (*InstantiateInlineWorkflowTemplateRequest) Descriptor
func (*InstantiateInlineWorkflowTemplateRequest) Descriptor() ([]byte, []int)
Deprecated: Use InstantiateInlineWorkflowTemplateRequest.ProtoReflect.Descriptor instead.
func (*InstantiateInlineWorkflowTemplateRequest) GetParent
func (x *InstantiateInlineWorkflowTemplateRequest) GetParent() string
func (*InstantiateInlineWorkflowTemplateRequest) GetRequestId
func (x *InstantiateInlineWorkflowTemplateRequest) GetRequestId() string
func (*InstantiateInlineWorkflowTemplateRequest) GetTemplate
func (x *InstantiateInlineWorkflowTemplateRequest) GetTemplate() *WorkflowTemplate
func (*InstantiateInlineWorkflowTemplateRequest) ProtoMessage
func (*InstantiateInlineWorkflowTemplateRequest) ProtoMessage()
func (*InstantiateInlineWorkflowTemplateRequest) ProtoReflect
func (x *InstantiateInlineWorkflowTemplateRequest) ProtoReflect() protoreflect.Message
func (*InstantiateInlineWorkflowTemplateRequest) Reset
func (x *InstantiateInlineWorkflowTemplateRequest) Reset()
func (*InstantiateInlineWorkflowTemplateRequest) String
func (x *InstantiateInlineWorkflowTemplateRequest) String() string
InstantiateWorkflowTemplateRequest
type InstantiateWorkflowTemplateRequest struct {
// Required. The resource name of the workflow template, as described
// in https://cloud.google.com/apis/design/resource_names.
//
// * For `projects.regions.workflowTemplates.instantiate`, the resource name
// of the template has the following format:
//
// `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
//
// - For `projects.locations.workflowTemplates.instantiate`, the resource name
// of the template has the following format:
// `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Optional. The version of workflow template to instantiate. If specified,
// the workflow will be instantiated only if the current version of
// the workflow template has the supplied version.
//
// This option cannot be used to instantiate a previous version of
// workflow template.
Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
// Optional. A tag that prevents multiple concurrent workflow
// instances with the same tag from running. This mitigates risk of
// concurrent instances started due to retries.
//
// It is recommended to always set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The tag must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// Optional. Map from parameter names to values that should be used for those
// parameters. Values may not exceed 1000 characters.
Parameters map[string]string `protobuf:"bytes,6,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// contains filtered or unexported fields
}
A request to instantiate a workflow template.
func (*InstantiateWorkflowTemplateRequest) Descriptor
func (*InstantiateWorkflowTemplateRequest) Descriptor() ([]byte, []int)
Deprecated: Use InstantiateWorkflowTemplateRequest.ProtoReflect.Descriptor instead.
func (*InstantiateWorkflowTemplateRequest) GetName
func (x *InstantiateWorkflowTemplateRequest) GetName() string
func (*InstantiateWorkflowTemplateRequest) GetParameters
func (x *InstantiateWorkflowTemplateRequest) GetParameters() map[string]string
func (*InstantiateWorkflowTemplateRequest) GetRequestId
func (x *InstantiateWorkflowTemplateRequest) GetRequestId() string
func (*InstantiateWorkflowTemplateRequest) GetVersion
func (x *InstantiateWorkflowTemplateRequest) GetVersion() int32
func (*InstantiateWorkflowTemplateRequest) ProtoMessage
func (*InstantiateWorkflowTemplateRequest) ProtoMessage()
func (*InstantiateWorkflowTemplateRequest) ProtoReflect
func (x *InstantiateWorkflowTemplateRequest) ProtoReflect() protoreflect.Message
func (*InstantiateWorkflowTemplateRequest) Reset
func (x *InstantiateWorkflowTemplateRequest) Reset()
func (*InstantiateWorkflowTemplateRequest) String
func (x *InstantiateWorkflowTemplateRequest) String() string
Job
type Job struct {
// Optional. The fully qualified reference to the job, which can be used to
// obtain the equivalent REST path of the job resource. If this property
// is not specified when a job is created, the server generates a
// `job_id`.
Reference *JobReference `protobuf:"bytes,1,opt,name=reference,proto3" json:"reference,omitempty"`
// Required. Job information, including how, when, and where to
// run the job.
Placement *JobPlacement `protobuf:"bytes,2,opt,name=placement,proto3" json:"placement,omitempty"`
// Required. The application/framework-specific portion of the job.
//
// Types that are assignable to TypeJob:
//
//	*Job_HadoopJob
//	*Job_SparkJob
//	*Job_PysparkJob
//	*Job_HiveJob
//	*Job_PigJob
//	*Job_SparkRJob
//	*Job_SparkSqlJob
//	*Job_PrestoJob
//	*Job_TrinoJob
//	*Job_FlinkJob
TypeJob isJob_TypeJob `protobuf_oneof:"type_job"`
// Output only. The job status. Additional application-specific
// status information might be contained in the `type_job`
// and `yarn_applications` fields.
Status *JobStatus `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"`
// Output only. The previous job status.
StatusHistory []*JobStatus `protobuf:"bytes,13,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
// Output only. The collection of YARN applications spun up by this job.
//
// **Beta** Feature: This report is available for testing purposes only. It
// might be changed before final release.
YarnApplications []*YarnApplication `protobuf:"bytes,9,rep,name=yarn_applications,json=yarnApplications,proto3" json:"yarn_applications,omitempty"`
// Output only. A URI pointing to the location of the stdout of the job's
// driver program.
DriverOutputResourceUri string `protobuf:"bytes,17,opt,name=driver_output_resource_uri,json=driverOutputResourceUri,proto3" json:"driver_output_resource_uri,omitempty"`
// Output only. If present, the location of miscellaneous control files
// which can be used as part of job setup and handling. If not present,
// control files might be placed in the same location as `driver_output_uri`.
DriverControlFilesUri string `protobuf:"bytes,15,opt,name=driver_control_files_uri,json=driverControlFilesUri,proto3" json:"driver_control_files_uri,omitempty"`
// Optional. The labels to associate with this job.
// Label **keys** must contain 1 to 63 characters, and must conform to
// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
// Label **values** can be empty, but, if present, must contain 1 to 63
// characters, and must conform to [RFC
// 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
// associated with a job.
Labels map[string]string `protobuf:"bytes,18,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. Job scheduling configuration.
Scheduling *JobScheduling `protobuf:"bytes,20,opt,name=scheduling,proto3" json:"scheduling,omitempty"`
// Output only. A UUID that uniquely identifies a job within the project
// over time. This is in contrast to a user-settable reference.job_id that
// might be reused over time.
JobUuid string `protobuf:"bytes,22,opt,name=job_uuid,json=jobUuid,proto3" json:"job_uuid,omitempty"`
// Output only. Indicates whether the job is completed. If the value is
// `false`, the job is still in progress. If `true`, the job is completed, and
// `status.state` field will indicate if it was successful, failed,
// or cancelled.
Done bool `protobuf:"varint,24,opt,name=done,proto3" json:"done,omitempty"`
// Optional. Driver scheduling configuration.
DriverSchedulingConfig *DriverSchedulingConfig `protobuf:"bytes,27,opt,name=driver_scheduling_config,json=driverSchedulingConfig,proto3" json:"driver_scheduling_config,omitempty"`
// contains filtered or unexported fields
}
A Dataproc job resource.
func (*Job) Descriptor
Deprecated: Use Job.ProtoReflect.Descriptor instead.
func (*Job) GetDone
func (*Job) GetDriverControlFilesUri
func (*Job) GetDriverOutputResourceUri
func (*Job) GetDriverSchedulingConfig
func (x *Job) GetDriverSchedulingConfig() *DriverSchedulingConfig
func (*Job) GetFlinkJob
func (*Job) GetHadoopJob
func (*Job) GetHiveJob
func (*Job) GetJobUuid
func (*Job) GetLabels
func (*Job) GetPigJob
func (*Job) GetPlacement
func (x *Job) GetPlacement() *JobPlacement
func (*Job) GetPrestoJob
func (*Job) GetPysparkJob
func (x *Job) GetPysparkJob() *PySparkJob
func (*Job) GetReference
func (x *Job) GetReference() *JobReference
func (*Job) GetScheduling
func (x *Job) GetScheduling() *JobScheduling
func (*Job) GetSparkJob
func (*Job) GetSparkRJob
func (*Job) GetSparkSqlJob
func (x *Job) GetSparkSqlJob() *SparkSqlJob
func (*Job) GetStatus
func (*Job) GetStatusHistory
func (*Job) GetTrinoJob
func (*Job) GetTypeJob
func (m *Job) GetTypeJob() isJob_TypeJob
func (*Job) GetYarnApplications
func (x *Job) GetYarnApplications() []*YarnApplication
func (*Job) ProtoMessage
func (*Job) ProtoMessage()
func (*Job) ProtoReflect
func (x *Job) ProtoReflect() protoreflect.Message
func (*Job) Reset
func (x *Job) Reset()
func (*Job) String
JobControllerClient
type JobControllerClient interface {
// Submits a job to a cluster.
SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error)
// Submits a job to a cluster.
SubmitJobAsOperation(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
// Gets the resource representation for a job in a project.
GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error)
// Lists regions/{region}/jobs in a project.
ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error)
// Updates a job in a project.
UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error)
// Starts a job cancellation request. To access the job resource
// after cancellation, call
// [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
// or
// [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error)
// Deletes the job from the project. If the job is active, the delete fails,
// and the response returns `FAILED_PRECONDITION`.
DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
JobControllerClient is the client API for JobController service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewJobControllerClient
func NewJobControllerClient(cc grpc.ClientConnInterface) JobControllerClient
JobControllerServer
type JobControllerServer interface {
// Submits a job to a cluster.
SubmitJob(context.Context, *SubmitJobRequest) (*Job, error)
// Submits a job to a cluster.
SubmitJobAsOperation(context.Context, *SubmitJobRequest) (*longrunningpb.Operation, error)
// Gets the resource representation for a job in a project.
GetJob(context.Context, *GetJobRequest) (*Job, error)
// Lists regions/{region}/jobs in a project.
ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error)
// Updates a job in a project.
UpdateJob(context.Context, *UpdateJobRequest) (*Job, error)
// Starts a job cancellation request. To access the job resource
// after cancellation, call
// [regions/{region}/jobs.list](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/list)
// or
// [regions/{region}/jobs.get](https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
CancelJob(context.Context, *CancelJobRequest) (*Job, error)
// Deletes the job from the project. If the job is active, the delete fails,
// and the response returns `FAILED_PRECONDITION`.
DeleteJob(context.Context, *DeleteJobRequest) (*emptypb.Empty, error)
}
JobControllerServer is the server API for JobController service.
JobMetadata
type JobMetadata struct {
// Output only. The job id.
JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
// Output only. Most recent job status.
Status *JobStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
// Output only. Operation type.
OperationType string `protobuf:"bytes,3,opt,name=operation_type,json=operationType,proto3" json:"operation_type,omitempty"`
// Output only. Job submission time.
StartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// contains filtered or unexported fields
}
Job Operation metadata.
func (*JobMetadata) Descriptor
func (*JobMetadata) Descriptor() ([]byte, []int)
Deprecated: Use JobMetadata.ProtoReflect.Descriptor instead.
func (*JobMetadata) GetJobId
func (x *JobMetadata) GetJobId() string
func (*JobMetadata) GetOperationType
func (x *JobMetadata) GetOperationType() string
func (*JobMetadata) GetStartTime
func (x *JobMetadata) GetStartTime() *timestamppb.Timestamp
func (*JobMetadata) GetStatus
func (x *JobMetadata) GetStatus() *JobStatus
func (*JobMetadata) ProtoMessage
func (*JobMetadata) ProtoMessage()
func (*JobMetadata) ProtoReflect
func (x *JobMetadata) ProtoReflect() protoreflect.Message
func (*JobMetadata) Reset
func (x *JobMetadata) Reset()
func (*JobMetadata) String
func (x *JobMetadata) String() string
JobPlacement
type JobPlacement struct {
// Required. The name of the cluster where the job will be submitted.
ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
// Output only. A cluster UUID generated by the Dataproc service when
// the job is submitted.
ClusterUuid string `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
// Optional. Cluster labels to identify a cluster where the job will be
// submitted.
ClusterLabels map[string]string `protobuf:"bytes,3,rep,name=cluster_labels,json=clusterLabels,proto3" json:"cluster_labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// contains filtered or unexported fields
}
Dataproc job placement configuration: identifies the cluster (by name, UUID, or labels) where a job will be submitted.
func (*JobPlacement) Descriptor
func (*JobPlacement) Descriptor() ([]byte, []int)
Deprecated: Use JobPlacement.ProtoReflect.Descriptor instead.
func (*JobPlacement) GetClusterLabels
func (x *JobPlacement) GetClusterLabels() map[string]string
func (*JobPlacement) GetClusterName
func (x *JobPlacement) GetClusterName() string
func (*JobPlacement) GetClusterUuid
func (x *JobPlacement) GetClusterUuid() string
func (*JobPlacement) ProtoMessage
func (*JobPlacement) ProtoMessage()
func (*JobPlacement) ProtoReflect
func (x *JobPlacement) ProtoReflect() protoreflect.Message
func (*JobPlacement) Reset
func (x *JobPlacement) Reset()
func (*JobPlacement) String
func (x *JobPlacement) String() string
JobReference
type JobReference struct {
// Optional. The ID of the Google Cloud Platform project that the job belongs
// to. If specified, must match the request project ID.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Optional. The job ID, which must be unique within the project.
//
// The ID must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), or hyphens (-). The maximum length is 100 characters.
//
// If not specified by the caller, the job ID will be provided by the server.
JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
// contains filtered or unexported fields
}
Encapsulates the full scoping used to reference a job.
func (*JobReference) Descriptor
func (*JobReference) Descriptor() ([]byte, []int)
Deprecated: Use JobReference.ProtoReflect.Descriptor instead.
func (*JobReference) GetJobId
func (x *JobReference) GetJobId() string
func (*JobReference) GetProjectId
func (x *JobReference) GetProjectId() string
func (*JobReference) ProtoMessage
func (*JobReference) ProtoMessage()
func (*JobReference) ProtoReflect
func (x *JobReference) ProtoReflect() protoreflect.Message
func (*JobReference) Reset
func (x *JobReference) Reset()
func (*JobReference) String
func (x *JobReference) String() string
JobScheduling
type JobScheduling struct {
// Optional. Maximum number of times per hour a driver can be restarted as
// a result of driver exiting with non-zero code before job is
// reported failed.
//
// A job might be reported as thrashing if the driver exits with a non-zero
// code four times within a 10-minute window.
//
// Maximum value is 10.
//
// **Note:** This restartable job option is not supported in Dataproc
// [workflow templates]
// (https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).
MaxFailuresPerHour int32 `protobuf:"varint,1,opt,name=max_failures_per_hour,json=maxFailuresPerHour,proto3" json:"max_failures_per_hour,omitempty"`
// Optional. Maximum total number of times a driver can be restarted as a
// result of the driver exiting with a non-zero code. After the maximum number
// is reached, the job will be reported as failed.
//
// Maximum value is 240.
//
// **Note:** Currently, this restartable job option is
// not supported in Dataproc
// [workflow
// templates](https://cloud.google.com/dataproc/docs/concepts/workflows/using-workflows#adding_jobs_to_a_template).
MaxFailuresTotal int32 `protobuf:"varint,2,opt,name=max_failures_total,json=maxFailuresTotal,proto3" json:"max_failures_total,omitempty"`
// contains filtered or unexported fields
}
Job scheduling options.
func (*JobScheduling) Descriptor
func (*JobScheduling) Descriptor() ([]byte, []int)
Deprecated: Use JobScheduling.ProtoReflect.Descriptor instead.
func (*JobScheduling) GetMaxFailuresPerHour
func (x *JobScheduling) GetMaxFailuresPerHour() int32
func (*JobScheduling) GetMaxFailuresTotal
func (x *JobScheduling) GetMaxFailuresTotal() int32
func (*JobScheduling) ProtoMessage
func (*JobScheduling) ProtoMessage()
func (*JobScheduling) ProtoReflect
func (x *JobScheduling) ProtoReflect() protoreflect.Message
func (*JobScheduling) Reset
func (x *JobScheduling) Reset()
func (*JobScheduling) String
func (x *JobScheduling) String() string
JobStatus
type JobStatus struct {
// Output only. A state message specifying the overall job state.
State JobStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1.JobStatus_State" json:"state,omitempty"`
// Optional. Output only. Job state details, such as an error
// description if the state is `ERROR`.
Details string `protobuf:"bytes,2,opt,name=details,proto3" json:"details,omitempty"`
// Output only. The time when this state was entered.
StateStartTime *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
// Output only. Additional state information, which includes
// status reported by the agent.
Substate JobStatus_Substate `protobuf:"varint,7,opt,name=substate,proto3,enum=google.cloud.dataproc.v1.JobStatus_Substate" json:"substate,omitempty"`
// contains filtered or unexported fields
}
Dataproc job status.
func (*JobStatus) Descriptor
Deprecated: Use JobStatus.ProtoReflect.Descriptor instead.
func (*JobStatus) GetDetails
func (*JobStatus) GetState
func (x *JobStatus) GetState() JobStatus_State
func (*JobStatus) GetStateStartTime
func (x *JobStatus) GetStateStartTime() *timestamppb.Timestamp
func (*JobStatus) GetSubstate
func (x *JobStatus) GetSubstate() JobStatus_Substate
func (*JobStatus) ProtoMessage
func (*JobStatus) ProtoMessage()
func (*JobStatus) ProtoReflect
func (x *JobStatus) ProtoReflect() protoreflect.Message
func (*JobStatus) Reset
func (x *JobStatus) Reset()
func (*JobStatus) String
JobStatus_State
type JobStatus_State int32
The job state.
JobStatus_STATE_UNSPECIFIED, JobStatus_PENDING, JobStatus_SETUP_DONE, JobStatus_RUNNING, JobStatus_CANCEL_PENDING, JobStatus_CANCEL_STARTED, JobStatus_CANCELLED, JobStatus_DONE, JobStatus_ERROR, JobStatus_ATTEMPT_FAILURE
const (
// The job state is unknown.
JobStatus_STATE_UNSPECIFIED JobStatus_State = 0
// The job is pending; it has been submitted, but is not yet running.
JobStatus_PENDING JobStatus_State = 1
// Job has been received by the service and completed initial setup;
// it will soon be submitted to the cluster.
JobStatus_SETUP_DONE JobStatus_State = 8
// The job is running on the cluster.
JobStatus_RUNNING JobStatus_State = 2
// A CancelJob request has been received, but is pending.
JobStatus_CANCEL_PENDING JobStatus_State = 3
// Transient in-flight resources have been canceled, and the request to
// cancel the running job has been issued to the cluster.
JobStatus_CANCEL_STARTED JobStatus_State = 7
// The job cancellation was successful.
JobStatus_CANCELLED JobStatus_State = 4
// The job has completed successfully.
JobStatus_DONE JobStatus_State = 5
// The job has completed, but encountered an error.
JobStatus_ERROR JobStatus_State = 6
// Job attempt has failed. The detail field contains failure details for
// this attempt.
//
// Applies to restartable jobs only.
JobStatus_ATTEMPT_FAILURE JobStatus_State = 9
)
func (JobStatus_State) Descriptor
func (JobStatus_State) Descriptor() protoreflect.EnumDescriptor
func (JobStatus_State) Enum
func (x JobStatus_State) Enum() *JobStatus_State
func (JobStatus_State) EnumDescriptor
func (JobStatus_State) EnumDescriptor() ([]byte, []int)
Deprecated: Use JobStatus_State.Descriptor instead.
func (JobStatus_State) Number
func (x JobStatus_State) Number() protoreflect.EnumNumber
func (JobStatus_State) String
func (x JobStatus_State) String() string
func (JobStatus_State) Type
func (JobStatus_State) Type() protoreflect.EnumType
JobStatus_Substate
type JobStatus_Substate int32
The job substate.
JobStatus_UNSPECIFIED, JobStatus_SUBMITTED, JobStatus_QUEUED, JobStatus_STALE_STATUS
const (
// The job substate is unknown.
JobStatus_UNSPECIFIED JobStatus_Substate = 0
// The Job is submitted to the agent.
//
// Applies to RUNNING state.
JobStatus_SUBMITTED JobStatus_Substate = 1
// The Job has been received and is awaiting execution (it might be waiting
// for a condition to be met). See the "details" field for the reason for
// the delay.
//
// Applies to RUNNING state.
JobStatus_QUEUED JobStatus_Substate = 2
// The agent-reported status is out of date, which can be caused by a
// loss of communication between the agent and Dataproc. If the
// agent does not send a timely update, the job will fail.
//
// Applies to RUNNING state.
JobStatus_STALE_STATUS JobStatus_Substate = 3
)
func (JobStatus_Substate) Descriptor
func (JobStatus_Substate) Descriptor() protoreflect.EnumDescriptor
func (JobStatus_Substate) Enum
func (x JobStatus_Substate) Enum() *JobStatus_Substate
func (JobStatus_Substate) EnumDescriptor
func (JobStatus_Substate) EnumDescriptor() ([]byte, []int)
Deprecated: Use JobStatus_Substate.Descriptor instead.
func (JobStatus_Substate) Number
func (x JobStatus_Substate) Number() protoreflect.EnumNumber
func (JobStatus_Substate) String
func (x JobStatus_Substate) String() string
func (JobStatus_Substate) Type
func (JobStatus_Substate) Type() protoreflect.EnumType
Job_FlinkJob
type Job_FlinkJob struct {
// Optional. Job is a Flink job.
FlinkJob *FlinkJob `protobuf:"bytes,29,opt,name=flink_job,json=flinkJob,proto3,oneof"`
}
Job_HadoopJob
type Job_HadoopJob struct {
// Optional. Job is a Hadoop job.
HadoopJob *HadoopJob `protobuf:"bytes,3,opt,name=hadoop_job,json=hadoopJob,proto3,oneof"`
}
Job_HiveJob
type Job_HiveJob struct {
// Optional. Job is a Hive job.
HiveJob *HiveJob `protobuf:"bytes,6,opt,name=hive_job,json=hiveJob,proto3,oneof"`
}
Job_PigJob
type Job_PigJob struct {
// Optional. Job is a Pig job.
PigJob *PigJob `protobuf:"bytes,7,opt,name=pig_job,json=pigJob,proto3,oneof"`
}
Job_PrestoJob
type Job_PrestoJob struct {
// Optional. Job is a Presto job.
PrestoJob *PrestoJob `protobuf:"bytes,23,opt,name=presto_job,json=prestoJob,proto3,oneof"`
}
Job_PysparkJob
type Job_PysparkJob struct {
// Optional. Job is a PySpark job.
PysparkJob *PySparkJob `protobuf:"bytes,5,opt,name=pyspark_job,json=pysparkJob,proto3,oneof"`
}
Job_SparkJob
type Job_SparkJob struct {
// Optional. Job is a Spark job.
SparkJob *SparkJob `protobuf:"bytes,4,opt,name=spark_job,json=sparkJob,proto3,oneof"`
}
Job_SparkRJob
type Job_SparkRJob struct {
// Optional. Job is a SparkR job.
SparkRJob *SparkRJob `protobuf:"bytes,21,opt,name=spark_r_job,json=sparkRJob,proto3,oneof"`
}
Job_SparkSqlJob
type Job_SparkSqlJob struct {
// Optional. Job is a SparkSql job.
SparkSqlJob *SparkSqlJob `protobuf:"bytes,12,opt,name=spark_sql_job,json=sparkSqlJob,proto3,oneof"`
}
Job_TrinoJob
type Job_TrinoJob struct {
// Optional. Job is a Trino job.
TrinoJob *TrinoJob `protobuf:"bytes,28,opt,name=trino_job,json=trinoJob,proto3,oneof"`
}
JupyterConfig
type JupyterConfig struct {
// Optional. Kernel type.
Kernel JupyterConfig_Kernel `protobuf:"varint,1,opt,name=kernel,proto3,enum=google.cloud.dataproc.v1.JupyterConfig_Kernel" json:"kernel,omitempty"`
// Optional. Display name, shown in the Jupyter kernelspec card.
DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName,proto3" json:"display_name,omitempty"`
// contains filtered or unexported fields
}
Jupyter configuration for an interactive session.
func (*JupyterConfig) Descriptor
func (*JupyterConfig) Descriptor() ([]byte, []int)
Deprecated: Use JupyterConfig.ProtoReflect.Descriptor instead.
func (*JupyterConfig) GetDisplayName
func (x *JupyterConfig) GetDisplayName() string
func (*JupyterConfig) GetKernel
func (x *JupyterConfig) GetKernel() JupyterConfig_Kernel
func (*JupyterConfig) ProtoMessage
func (*JupyterConfig) ProtoMessage()
func (*JupyterConfig) ProtoReflect
func (x *JupyterConfig) ProtoReflect() protoreflect.Message
func (*JupyterConfig) Reset
func (x *JupyterConfig) Reset()
func (*JupyterConfig) String
func (x *JupyterConfig) String() string
JupyterConfig_Kernel
type JupyterConfig_Kernel int32
Jupyter kernel types.
JupyterConfig_KERNEL_UNSPECIFIED, JupyterConfig_PYTHON, JupyterConfig_SCALA
const (
// The kernel is unknown.
JupyterConfig_KERNEL_UNSPECIFIED JupyterConfig_Kernel = 0
// Python kernel.
JupyterConfig_PYTHON JupyterConfig_Kernel = 1
// Scala kernel.
JupyterConfig_SCALA JupyterConfig_Kernel = 2
)
func (JupyterConfig_Kernel) Descriptor
func (JupyterConfig_Kernel) Descriptor() protoreflect.EnumDescriptor
func (JupyterConfig_Kernel) Enum
func (x JupyterConfig_Kernel) Enum() *JupyterConfig_Kernel
func (JupyterConfig_Kernel) EnumDescriptor
func (JupyterConfig_Kernel) EnumDescriptor() ([]byte, []int)
Deprecated: Use JupyterConfig_Kernel.Descriptor instead.
func (JupyterConfig_Kernel) Number
func (x JupyterConfig_Kernel) Number() protoreflect.EnumNumber
func (JupyterConfig_Kernel) String
func (x JupyterConfig_Kernel) String() string
func (JupyterConfig_Kernel) Type
func (JupyterConfig_Kernel) Type() protoreflect.EnumType
KerberosConfig
type KerberosConfig struct {
// Optional. Flag to indicate whether to Kerberize the cluster (default:
// false). Set this field to true to enable Kerberos on a cluster.
EnableKerberos bool `protobuf:"varint,1,opt,name=enable_kerberos,json=enableKerberos,proto3" json:"enable_kerberos,omitempty"`
// Optional. The Cloud Storage URI of a KMS encrypted file containing the root
// principal password.
RootPrincipalPasswordUri string `protobuf:"bytes,2,opt,name=root_principal_password_uri,json=rootPrincipalPasswordUri,proto3" json:"root_principal_password_uri,omitempty"`
// Optional. The URI of the KMS key used to encrypt sensitive
// files.
KmsKeyUri string `protobuf:"bytes,3,opt,name=kms_key_uri,json=kmsKeyUri,proto3" json:"kms_key_uri,omitempty"`
// Optional. The Cloud Storage URI of the keystore file used for SSL
// encryption. If not provided, Dataproc will provide a self-signed
// certificate.
KeystoreUri string `protobuf:"bytes,4,opt,name=keystore_uri,json=keystoreUri,proto3" json:"keystore_uri,omitempty"`
// Optional. The Cloud Storage URI of the truststore file used for SSL
// encryption. If not provided, Dataproc will provide a self-signed
// certificate.
TruststoreUri string `protobuf:"bytes,5,opt,name=truststore_uri,json=truststoreUri,proto3" json:"truststore_uri,omitempty"`
// Optional. The Cloud Storage URI of a KMS encrypted file containing the
// password to the user provided keystore. For the self-signed certificate,
// this password is generated by Dataproc.
KeystorePasswordUri string `protobuf:"bytes,6,opt,name=keystore_password_uri,json=keystorePasswordUri,proto3" json:"keystore_password_uri,omitempty"`
// Optional. The Cloud Storage URI of a KMS encrypted file containing the
// password to the user provided key. For the self-signed certificate, this
// password is generated by Dataproc.
KeyPasswordUri string `protobuf:"bytes,7,opt,name=key_password_uri,json=keyPasswordUri,proto3" json:"key_password_uri,omitempty"`
// Optional. The Cloud Storage URI of a KMS encrypted file containing the
// password to the user provided truststore. For the self-signed certificate,
// this password is generated by Dataproc.
TruststorePasswordUri string `protobuf:"bytes,8,opt,name=truststore_password_uri,json=truststorePasswordUri,proto3" json:"truststore_password_uri,omitempty"`
// Optional. The remote realm the Dataproc on-cluster KDC will trust, should
// the user enable cross realm trust.
CrossRealmTrustRealm string `protobuf:"bytes,9,opt,name=cross_realm_trust_realm,json=crossRealmTrustRealm,proto3" json:"cross_realm_trust_realm,omitempty"`
// Optional. The KDC (IP or hostname) for the remote trusted realm in a cross
// realm trust relationship.
CrossRealmTrustKdc string `protobuf:"bytes,10,opt,name=cross_realm_trust_kdc,json=crossRealmTrustKdc,proto3" json:"cross_realm_trust_kdc,omitempty"`
// Optional. The admin server (IP or hostname) for the remote trusted realm in
// a cross realm trust relationship.
CrossRealmTrustAdminServer string `protobuf:"bytes,11,opt,name=cross_realm_trust_admin_server,json=crossRealmTrustAdminServer,proto3" json:"cross_realm_trust_admin_server,omitempty"`
// Optional. The Cloud Storage URI of a KMS encrypted file containing the
// shared password between the on-cluster Kerberos realm and the remote
// trusted realm, in a cross realm trust relationship.
CrossRealmTrustSharedPasswordUri string `protobuf:"bytes,12,opt,name=cross_realm_trust_shared_password_uri,json=crossRealmTrustSharedPasswordUri,proto3" json:"cross_realm_trust_shared_password_uri,omitempty"`
// Optional. The Cloud Storage URI of a KMS encrypted file containing the
// master key of the KDC database.
KdcDbKeyUri string `protobuf:"bytes,13,opt,name=kdc_db_key_uri,json=kdcDbKeyUri,proto3" json:"kdc_db_key_uri,omitempty"`
// Optional. The lifetime of the ticket granting ticket, in hours.
// If not specified, or user specifies 0, then default value 10
// will be used.
TgtLifetimeHours int32 `protobuf:"varint,14,opt,name=tgt_lifetime_hours,json=tgtLifetimeHours,proto3" json:"tgt_lifetime_hours,omitempty"`
// Optional. The name of the on-cluster Kerberos realm.
// If not specified, the uppercased domain of hostnames will be the realm.
Realm string `protobuf:"bytes,15,opt,name=realm,proto3" json:"realm,omitempty"`
// contains filtered or unexported fields
}
Specifies Kerberos related configuration.
func (*KerberosConfig) Descriptor
func (*KerberosConfig) Descriptor() ([]byte, []int)
Deprecated: Use KerberosConfig.ProtoReflect.Descriptor instead.
func (*KerberosConfig) GetCrossRealmTrustAdminServer
func (x *KerberosConfig) GetCrossRealmTrustAdminServer() string
func (*KerberosConfig) GetCrossRealmTrustKdc
func (x *KerberosConfig) GetCrossRealmTrustKdc() string
func (*KerberosConfig) GetCrossRealmTrustRealm
func (x *KerberosConfig) GetCrossRealmTrustRealm() string
func (*KerberosConfig) GetCrossRealmTrustSharedPasswordUri
func (x *KerberosConfig) GetCrossRealmTrustSharedPasswordUri() string
func (*KerberosConfig) GetEnableKerberos
func (x *KerberosConfig) GetEnableKerberos() bool
func (*KerberosConfig) GetKdcDbKeyUri
func (x *KerberosConfig) GetKdcDbKeyUri() string
func (*KerberosConfig) GetKeyPasswordUri
func (x *KerberosConfig) GetKeyPasswordUri() string
func (*KerberosConfig) GetKeystorePasswordUri
func (x *KerberosConfig) GetKeystorePasswordUri() string
func (*KerberosConfig) GetKeystoreUri
func (x *KerberosConfig) GetKeystoreUri() string
func (*KerberosConfig) GetKmsKeyUri
func (x *KerberosConfig) GetKmsKeyUri() string
func (*KerberosConfig) GetRealm
func (x *KerberosConfig) GetRealm() string
func (*KerberosConfig) GetRootPrincipalPasswordUri
func (x *KerberosConfig) GetRootPrincipalPasswordUri() string
func (*KerberosConfig) GetTgtLifetimeHours
func (x *KerberosConfig) GetTgtLifetimeHours() int32
func (*KerberosConfig) GetTruststorePasswordUri
func (x *KerberosConfig) GetTruststorePasswordUri() string
func (*KerberosConfig) GetTruststoreUri
func (x *KerberosConfig) GetTruststoreUri() string
func (*KerberosConfig) ProtoMessage
func (*KerberosConfig) ProtoMessage()
func (*KerberosConfig) ProtoReflect
func (x *KerberosConfig) ProtoReflect() protoreflect.Message
func (*KerberosConfig) Reset
func (x *KerberosConfig) Reset()
func (*KerberosConfig) String
func (x *KerberosConfig) String() string
KubernetesClusterConfig
type KubernetesClusterConfig struct {
// Optional. A namespace within the Kubernetes cluster to deploy into. If this
// namespace does not exist, it is created. If it exists, Dataproc verifies
// that another Dataproc VirtualCluster is not installed into it. If not
// specified, the name of the Dataproc Cluster is used.
KubernetesNamespace string `protobuf:"bytes,1,opt,name=kubernetes_namespace,json=kubernetesNamespace,proto3" json:"kubernetes_namespace,omitempty"`
// Types that are assignable to Config:
//
// *KubernetesClusterConfig_GkeClusterConfig
Config isKubernetesClusterConfig_Config `protobuf_oneof:"config"`
// Optional. The software configuration for this Dataproc cluster running on
// Kubernetes.
KubernetesSoftwareConfig *KubernetesSoftwareConfig `protobuf:"bytes,3,opt,name=kubernetes_software_config,json=kubernetesSoftwareConfig,proto3" json:"kubernetes_software_config,omitempty"`
// contains filtered or unexported fields
}
The configuration for running the Dataproc cluster on Kubernetes.
func (*KubernetesClusterConfig) Descriptor
func (*KubernetesClusterConfig) Descriptor() ([]byte, []int)
Deprecated: Use KubernetesClusterConfig.ProtoReflect.Descriptor instead.
func (*KubernetesClusterConfig) GetConfig
func (m *KubernetesClusterConfig) GetConfig() isKubernetesClusterConfig_Config
func (*KubernetesClusterConfig) GetGkeClusterConfig
func (x *KubernetesClusterConfig) GetGkeClusterConfig() *GkeClusterConfig
func (*KubernetesClusterConfig) GetKubernetesNamespace
func (x *KubernetesClusterConfig) GetKubernetesNamespace() string
func (*KubernetesClusterConfig) GetKubernetesSoftwareConfig
func (x *KubernetesClusterConfig) GetKubernetesSoftwareConfig() *KubernetesSoftwareConfig
func (*KubernetesClusterConfig) ProtoMessage
func (*KubernetesClusterConfig) ProtoMessage()
func (*KubernetesClusterConfig) ProtoReflect
func (x *KubernetesClusterConfig) ProtoReflect() protoreflect.Message
func (*KubernetesClusterConfig) Reset
func (x *KubernetesClusterConfig) Reset()
func (*KubernetesClusterConfig) String
func (x *KubernetesClusterConfig) String() string
KubernetesClusterConfig_GkeClusterConfig
type KubernetesClusterConfig_GkeClusterConfig struct {
// Required. The configuration for running the Dataproc cluster on GKE.
GkeClusterConfig *GkeClusterConfig `protobuf:"bytes,2,opt,name=gke_cluster_config,json=gkeClusterConfig,proto3,oneof"`
}
KubernetesSoftwareConfig
type KubernetesSoftwareConfig struct {
// The components that should be installed in this Dataproc cluster. The key
// must be a string from the KubernetesComponent enumeration. The value is
// the version of the software to be installed.
// At least one entry must be specified.
ComponentVersion map[string]string `protobuf:"bytes,1,rep,name=component_version,json=componentVersion,proto3" json:"component_version,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// The properties to set on daemon config files.
//
// Property keys are specified in `prefix:property` format, for example
// `spark:spark.kubernetes.container.image`. The following are supported
// prefixes and their mappings:
//
// * spark: `spark-defaults.conf`
//
// For more information, see [Cluster
// properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
Properties map[string]string `protobuf:"bytes,2,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// contains filtered or unexported fields
}
The software configuration for this Dataproc cluster running on Kubernetes.
func (*KubernetesSoftwareConfig) Descriptor
func (*KubernetesSoftwareConfig) Descriptor() ([]byte, []int)
Deprecated: Use KubernetesSoftwareConfig.ProtoReflect.Descriptor instead.
func (*KubernetesSoftwareConfig) GetComponentVersion
func (x *KubernetesSoftwareConfig) GetComponentVersion() map[string]string
func (*KubernetesSoftwareConfig) GetProperties
func (x *KubernetesSoftwareConfig) GetProperties() map[string]string
func (*KubernetesSoftwareConfig) ProtoMessage
func (*KubernetesSoftwareConfig) ProtoMessage()
func (*KubernetesSoftwareConfig) ProtoReflect
func (x *KubernetesSoftwareConfig) ProtoReflect() protoreflect.Message
func (*KubernetesSoftwareConfig) Reset
func (x *KubernetesSoftwareConfig) Reset()
func (*KubernetesSoftwareConfig) String
func (x *KubernetesSoftwareConfig) String() string
LifecycleConfig
type LifecycleConfig struct {
// Optional. The duration to keep the cluster alive while idling (when no jobs
// are running). Passing this threshold will cause the cluster to be
// deleted. Minimum value is 5 minutes; maximum value is 14 days (see JSON
// representation of
// [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
IdleDeleteTtl *durationpb.Duration `protobuf:"bytes,1,opt,name=idle_delete_ttl,json=idleDeleteTtl,proto3" json:"idle_delete_ttl,omitempty"`
// Either the exact time the cluster should be deleted at or
// the cluster maximum age.
//
// Types that are assignable to Ttl:
//
// *LifecycleConfig_AutoDeleteTime
// *LifecycleConfig_AutoDeleteTtl
Ttl isLifecycleConfig_Ttl `protobuf_oneof:"ttl"`
// Output only. The time when cluster became idle (most recent job finished)
// and became eligible for deletion due to idleness (see JSON representation
// of
// [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
IdleStartTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=idle_start_time,json=idleStartTime,proto3" json:"idle_start_time,omitempty"`
// contains filtered or unexported fields
}
Specifies the cluster auto-delete schedule configuration.
func (*LifecycleConfig) Descriptor
func (*LifecycleConfig) Descriptor() ([]byte, []int)
Deprecated: Use LifecycleConfig.ProtoReflect.Descriptor instead.
func (*LifecycleConfig) GetAutoDeleteTime
func (x *LifecycleConfig) GetAutoDeleteTime() *timestamppb.Timestamp
func (*LifecycleConfig) GetAutoDeleteTtl
func (x *LifecycleConfig) GetAutoDeleteTtl() *durationpb.Duration
func (*LifecycleConfig) GetIdleDeleteTtl
func (x *LifecycleConfig) GetIdleDeleteTtl() *durationpb.Duration
func (*LifecycleConfig) GetIdleStartTime
func (x *LifecycleConfig) GetIdleStartTime() *timestamppb.Timestamp
func (*LifecycleConfig) GetTtl
func (m *LifecycleConfig) GetTtl() isLifecycleConfig_Ttl
func (*LifecycleConfig) ProtoMessage
func (*LifecycleConfig) ProtoMessage()
func (*LifecycleConfig) ProtoReflect
func (x *LifecycleConfig) ProtoReflect() protoreflect.Message
func (*LifecycleConfig) Reset
func (x *LifecycleConfig) Reset()
func (*LifecycleConfig) String
func (x *LifecycleConfig) String() string
LifecycleConfig_AutoDeleteTime
type LifecycleConfig_AutoDeleteTime struct {
// Optional. The time when cluster will be auto-deleted (see JSON
// representation of
// [Timestamp](https://developers.google.com/protocol-buffers/docs/proto3#json)).
AutoDeleteTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=auto_delete_time,json=autoDeleteTime,proto3,oneof"`
}
LifecycleConfig_AutoDeleteTtl
type LifecycleConfig_AutoDeleteTtl struct {
// Optional. The lifetime duration of cluster. The cluster will be
// auto-deleted at the end of this period. Minimum value is 10 minutes;
// maximum value is 14 days (see JSON representation of
// [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
AutoDeleteTtl *durationpb.Duration `protobuf:"bytes,3,opt,name=auto_delete_ttl,json=autoDeleteTtl,proto3,oneof"`
}
ListAutoscalingPoliciesRequest
type ListAutoscalingPoliciesRequest struct {
// Required. The "resource name" of the region or location, as described
// in https://cloud.google.com/apis/design/resource_names.
//
// - For `projects.regions.autoscalingPolicies.list`, the resource name
// of the region has the following format:
// `projects/{project_id}/regions/{region}`
//
// - For `projects.locations.autoscalingPolicies.list`, the resource name
// of the location has the following format:
// `projects/{project_id}/locations/{location}`
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Optional. The maximum number of results to return in each response.
// Must be less than or equal to 1000. Defaults to 100.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// Optional. The page token, returned by a previous call, to request the
// next page of results.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// contains filtered or unexported fields
}
A request to list autoscaling policies in a project.
func (*ListAutoscalingPoliciesRequest) Descriptor
func (*ListAutoscalingPoliciesRequest) Descriptor() ([]byte, []int)
Deprecated: Use ListAutoscalingPoliciesRequest.ProtoReflect.Descriptor instead.
func (*ListAutoscalingPoliciesRequest) GetPageSize
func (x *ListAutoscalingPoliciesRequest) GetPageSize() int32
func (*ListAutoscalingPoliciesRequest) GetPageToken
func (x *ListAutoscalingPoliciesRequest) GetPageToken() string
func (*ListAutoscalingPoliciesRequest) GetParent
func (x *ListAutoscalingPoliciesRequest) GetParent() string
func (*ListAutoscalingPoliciesRequest) ProtoMessage
func (*ListAutoscalingPoliciesRequest) ProtoMessage()
func (*ListAutoscalingPoliciesRequest) ProtoReflect
func (x *ListAutoscalingPoliciesRequest) ProtoReflect() protoreflect.Message
func (*ListAutoscalingPoliciesRequest) Reset
func (x *ListAutoscalingPoliciesRequest) Reset()
func (*ListAutoscalingPoliciesRequest) String
func (x *ListAutoscalingPoliciesRequest) String() string
ListAutoscalingPoliciesResponse
type ListAutoscalingPoliciesResponse struct {
// Output only. Autoscaling policies list.
Policies []*AutoscalingPolicy `protobuf:"bytes,1,rep,name=policies,proto3" json:"policies,omitempty"`
// Output only. This token is included in the response if there are more
// results to fetch.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
// contains filtered or unexported fields
}
A response to a request to list autoscaling policies in a project.
func (*ListAutoscalingPoliciesResponse) Descriptor
func (*ListAutoscalingPoliciesResponse) Descriptor() ([]byte, []int)
Deprecated: Use ListAutoscalingPoliciesResponse.ProtoReflect.Descriptor instead.
func (*ListAutoscalingPoliciesResponse) GetNextPageToken
func (x *ListAutoscalingPoliciesResponse) GetNextPageToken() string
func (*ListAutoscalingPoliciesResponse) GetPolicies
func (x *ListAutoscalingPoliciesResponse) GetPolicies() []*AutoscalingPolicy
func (*ListAutoscalingPoliciesResponse) ProtoMessage
func (*ListAutoscalingPoliciesResponse) ProtoMessage()
func (*ListAutoscalingPoliciesResponse) ProtoReflect
func (x *ListAutoscalingPoliciesResponse) ProtoReflect() protoreflect.Message
func (*ListAutoscalingPoliciesResponse) Reset
func (x *ListAutoscalingPoliciesResponse) Reset()
func (*ListAutoscalingPoliciesResponse) String
func (x *ListAutoscalingPoliciesResponse) String() string
ListBatchesRequest
type ListBatchesRequest struct {
// Required. The parent, which owns this collection of batches.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Optional. The maximum number of batches to return in each response.
// The service may return fewer than this value.
// The default page size is 20; the maximum page size is 1000.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// Optional. A page token received from a previous `ListBatches` call.
// Provide this token to retrieve the subsequent page.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// Optional. A filter for the batches to return in the response.
//
// A filter is a logical expression constraining the values of various fields
// in each batch resource. Filters are case sensitive, and may contain
// multiple clauses combined with logical operators (AND/OR).
// Supported fields are `batch_id`, `batch_uuid`, `state`, and `create_time`.
//
// e.g. `state = RUNNING and create_time < "2023-01-01T00:00:00Z"`
// filters for batches in state RUNNING that were created before 2023-01-01.
//
// See https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed
// description of the filter syntax and a list of supported comparisons.
Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"`
// Optional. Field(s) on which to sort the list of batches.
//
// Currently the only supported sort orders are unspecified (empty) and
// `create_time desc` to sort by most recently created batches first.
//
// See https://google.aip.dev/132#ordering for more details.
OrderBy string `protobuf:"bytes,5,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"`
// contains filtered or unexported fields
}
A request to list batch workloads in a project.
func (*ListBatchesRequest) Descriptor
func (*ListBatchesRequest) Descriptor() ([]byte, []int)
Deprecated: Use ListBatchesRequest.ProtoReflect.Descriptor instead.
func (*ListBatchesRequest) GetFilter
func (x *ListBatchesRequest) GetFilter() string
func (*ListBatchesRequest) GetOrderBy
func (x *ListBatchesRequest) GetOrderBy() string
func (*ListBatchesRequest) GetPageSize
func (x *ListBatchesRequest) GetPageSize() int32
func (*ListBatchesRequest) GetPageToken
func (x *ListBatchesRequest) GetPageToken() string
func (*ListBatchesRequest) GetParent
func (x *ListBatchesRequest) GetParent() string
func (*ListBatchesRequest) ProtoMessage
func (*ListBatchesRequest) ProtoMessage()
func (*ListBatchesRequest) ProtoReflect
func (x *ListBatchesRequest) ProtoReflect() protoreflect.Message
func (*ListBatchesRequest) Reset
func (x *ListBatchesRequest) Reset()
func (*ListBatchesRequest) String
func (x *ListBatchesRequest) String() string
ListBatchesResponse
type ListBatchesResponse struct {
// The batches from the specified collection.
Batches []*Batch `protobuf:"bytes,1,rep,name=batches,proto3" json:"batches,omitempty"`
// A token, which can be sent as `page_token` to retrieve the next page.
// If this field is omitted, there are no subsequent pages.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
// Output only. List of Batches that could not be included in the response.
// Attempting to get one of these resources may indicate why it was not
// included in the list response.
Unreachable []string `protobuf:"bytes,3,rep,name=unreachable,proto3" json:"unreachable,omitempty"`
// contains filtered or unexported fields
}
A list of batch workloads.
func (*ListBatchesResponse) Descriptor
func (*ListBatchesResponse) Descriptor() ([]byte, []int)
Deprecated: Use ListBatchesResponse.ProtoReflect.Descriptor instead.
func (*ListBatchesResponse) GetBatches
func (x *ListBatchesResponse) GetBatches() []*Batch
func (*ListBatchesResponse) GetNextPageToken
func (x *ListBatchesResponse) GetNextPageToken() string
func (*ListBatchesResponse) GetUnreachable
func (x *ListBatchesResponse) GetUnreachable() []string
func (*ListBatchesResponse) ProtoMessage
func (*ListBatchesResponse) ProtoMessage()
func (*ListBatchesResponse) ProtoReflect
func (x *ListBatchesResponse) ProtoReflect() protoreflect.Message
func (*ListBatchesResponse) Reset
func (x *ListBatchesResponse) Reset()
func (*ListBatchesResponse) String
func (x *ListBatchesResponse) String() string
ListClustersRequest
type ListClustersRequest struct {
// Required. The ID of the Google Cloud Platform project that the cluster
// belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,4,opt,name=region,proto3" json:"region,omitempty"`
// Optional. A filter constraining the clusters to list. Filters are
// case-sensitive and have the following syntax:
//
// field = value [AND [field = value]] ...
//
// where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
// and `[KEY]` is a label key. **value** can be `*` to match all values.
// `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
// `CREATING`, `RUNNING`, `ERROR`, `DELETING`, `UPDATING`, `STOPPING`, or
// `STOPPED`. `ACTIVE` contains the `CREATING`, `UPDATING`, and `RUNNING`
// states. `INACTIVE` contains the `DELETING`, `ERROR`, `STOPPING`, and
// `STOPPED` states. `clusterName` is the name of the cluster provided at
// creation time. Only the logical `AND` operator is supported;
// space-separated items are treated as having an implicit `AND` operator.
//
// Example filter:
//
// status.state = ACTIVE AND clusterName = mycluster
// AND labels.env = staging AND labels.starred = *
Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
// Optional. The standard List page size.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// Optional. The standard List page token.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// contains filtered or unexported fields
}
A request to list the clusters in a project.
func (*ListClustersRequest) Descriptor
func (*ListClustersRequest) Descriptor() ([]byte, []int)
Deprecated: Use ListClustersRequest.ProtoReflect.Descriptor instead.
func (*ListClustersRequest) GetFilter
func (x *ListClustersRequest) GetFilter() string
func (*ListClustersRequest) GetPageSize
func (x *ListClustersRequest) GetPageSize() int32
func (*ListClustersRequest) GetPageToken
func (x *ListClustersRequest) GetPageToken() string
func (*ListClustersRequest) GetProjectId
func (x *ListClustersRequest) GetProjectId() string
func (*ListClustersRequest) GetRegion
func (x *ListClustersRequest) GetRegion() string
func (*ListClustersRequest) ProtoMessage
func (*ListClustersRequest) ProtoMessage()
func (*ListClustersRequest) ProtoReflect
func (x *ListClustersRequest) ProtoReflect() protoreflect.Message
func (*ListClustersRequest) Reset
func (x *ListClustersRequest) Reset()
func (*ListClustersRequest) String
func (x *ListClustersRequest) String() string
ListClustersResponse
type ListClustersResponse struct {
// Output only. The clusters in the project.
Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"`
// Output only. This token is included in the response if there are more
// results to fetch. To fetch additional results, provide this value as the
// `page_token` in a subsequent `ListClustersRequest`.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
// contains filtered or unexported fields
}
The list of all clusters in a project.
func (*ListClustersResponse) Descriptor
func (*ListClustersResponse) Descriptor() ([]byte, []int)
Deprecated: Use ListClustersResponse.ProtoReflect.Descriptor instead.
func (*ListClustersResponse) GetClusters
func (x *ListClustersResponse) GetClusters() []*Cluster
func (*ListClustersResponse) GetNextPageToken
func (x *ListClustersResponse) GetNextPageToken() string
func (*ListClustersResponse) ProtoMessage
func (*ListClustersResponse) ProtoMessage()
func (*ListClustersResponse) ProtoReflect
func (x *ListClustersResponse) ProtoReflect() protoreflect.Message
func (*ListClustersResponse) Reset
func (x *ListClustersResponse) Reset()
func (*ListClustersResponse) String
func (x *ListClustersResponse) String() string
ListJobsRequest
type ListJobsRequest struct {
// Required. The ID of the Google Cloud Platform project that the job
// belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,6,opt,name=region,proto3" json:"region,omitempty"`
// Optional. The number of results to return in each response.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// Optional. The page token, returned by a previous call, to request the
// next page of results.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// Optional. If set, the returned jobs list includes only jobs that were
// submitted to the named cluster.
ClusterName string `protobuf:"bytes,4,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
// Optional. Specifies enumerated categories of jobs to list.
// (default = match ALL jobs).
//
// If `filter` is provided, `jobStateMatcher` will be ignored.
JobStateMatcher ListJobsRequest_JobStateMatcher `protobuf:"varint,5,opt,name=job_state_matcher,json=jobStateMatcher,proto3,enum=google.cloud.dataproc.v1.ListJobsRequest_JobStateMatcher" json:"job_state_matcher,omitempty"`
// Optional. A filter constraining the jobs to list. Filters are
// case-sensitive and have the following syntax:
//
// [field = value] AND [field [= value]] ...
//
// where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label
// key. **value** can be `*` to match all values.
// `status.state` can be either `ACTIVE` or `NON_ACTIVE`.
// Only the logical `AND` operator is supported; space-separated items are
// treated as having an implicit `AND` operator.
//
// Example filter:
//
// status.state = ACTIVE AND labels.env = staging AND labels.starred = *
Filter string `protobuf:"bytes,7,opt,name=filter,proto3" json:"filter,omitempty"`
// contains filtered or unexported fields
}
A request to list jobs in a project.
func (*ListJobsRequest) Descriptor
func (*ListJobsRequest) Descriptor() ([]byte, []int)
Deprecated: Use ListJobsRequest.ProtoReflect.Descriptor instead.
func (*ListJobsRequest) GetClusterName
func (x *ListJobsRequest) GetClusterName() string
func (*ListJobsRequest) GetFilter
func (x *ListJobsRequest) GetFilter() string
func (*ListJobsRequest) GetJobStateMatcher
func (x *ListJobsRequest) GetJobStateMatcher() ListJobsRequest_JobStateMatcher
func (*ListJobsRequest) GetPageSize
func (x *ListJobsRequest) GetPageSize() int32
func (*ListJobsRequest) GetPageToken
func (x *ListJobsRequest) GetPageToken() string
func (*ListJobsRequest) GetProjectId
func (x *ListJobsRequest) GetProjectId() string
func (*ListJobsRequest) GetRegion
func (x *ListJobsRequest) GetRegion() string
func (*ListJobsRequest) ProtoMessage
func (*ListJobsRequest) ProtoMessage()
func (*ListJobsRequest) ProtoReflect
func (x *ListJobsRequest) ProtoReflect() protoreflect.Message
func (*ListJobsRequest) Reset
func (x *ListJobsRequest) Reset()
func (*ListJobsRequest) String
func (x *ListJobsRequest) String() string
ListJobsRequest_JobStateMatcher
type ListJobsRequest_JobStateMatcher int32
A matcher that specifies categories of job states.
ListJobsRequest_ALL, ListJobsRequest_ACTIVE, ListJobsRequest_NON_ACTIVE
const (
// Match all jobs, regardless of state.
ListJobsRequest_ALL ListJobsRequest_JobStateMatcher = 0
// Only match jobs in non-terminal states: PENDING, RUNNING, or
// CANCEL_PENDING.
ListJobsRequest_ACTIVE ListJobsRequest_JobStateMatcher = 1
// Only match jobs in terminal states: CANCELLED, DONE, or ERROR.
ListJobsRequest_NON_ACTIVE ListJobsRequest_JobStateMatcher = 2
)
func (ListJobsRequest_JobStateMatcher) Descriptor
func (ListJobsRequest_JobStateMatcher) Descriptor() protoreflect.EnumDescriptor
func (ListJobsRequest_JobStateMatcher) Enum
func (x ListJobsRequest_JobStateMatcher) Enum() *ListJobsRequest_JobStateMatcher
func (ListJobsRequest_JobStateMatcher) EnumDescriptor
func (ListJobsRequest_JobStateMatcher) EnumDescriptor() ([]byte, []int)
Deprecated: Use ListJobsRequest_JobStateMatcher.Descriptor instead.
func (ListJobsRequest_JobStateMatcher) Number
func (x ListJobsRequest_JobStateMatcher) Number() protoreflect.EnumNumber
func (ListJobsRequest_JobStateMatcher) String
func (x ListJobsRequest_JobStateMatcher) String() string
func (ListJobsRequest_JobStateMatcher) Type
func (ListJobsRequest_JobStateMatcher) Type() protoreflect.EnumType
ListJobsResponse
type ListJobsResponse struct {
// Output only. Jobs list.
Jobs []*Job `protobuf:"bytes,1,rep,name=jobs,proto3" json:"jobs,omitempty"`
// Optional. This token is included in the response if there are more results
// to fetch. To fetch additional results, provide this value as the
// `page_token` in a subsequent ListJobsRequest.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
// Output only. List of jobs with
// [kms_key][google.cloud.dataproc.v1.EncryptionConfig.kms_key]-encrypted
// parameters that could not be decrypted. A response to a `jobs.get` request
// may indicate the reason for the decryption failure for a specific job.
Unreachable []string `protobuf:"bytes,3,rep,name=unreachable,proto3" json:"unreachable,omitempty"`
// contains filtered or unexported fields
}
A list of jobs in a project.
func (*ListJobsResponse) Descriptor
func (*ListJobsResponse) Descriptor() ([]byte, []int)
Deprecated: Use ListJobsResponse.ProtoReflect.Descriptor instead.
func (*ListJobsResponse) GetJobs
func (x *ListJobsResponse) GetJobs() []*Job
func (*ListJobsResponse) GetNextPageToken
func (x *ListJobsResponse) GetNextPageToken() string
func (*ListJobsResponse) GetUnreachable
func (x *ListJobsResponse) GetUnreachable() []string
func (*ListJobsResponse) ProtoMessage
func (*ListJobsResponse) ProtoMessage()
func (*ListJobsResponse) ProtoReflect
func (x *ListJobsResponse) ProtoReflect() protoreflect.Message
func (*ListJobsResponse) Reset
func (x *ListJobsResponse) Reset()
func (*ListJobsResponse) String
func (x *ListJobsResponse) String() string
ListSessionTemplatesRequest
type ListSessionTemplatesRequest struct {
// Required. The parent that owns this collection of session templates.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Optional. The maximum number of sessions to return in each response.
// The service may return fewer than this value.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// Optional. A page token received from a previous `ListSessions` call.
// Provide this token to retrieve the subsequent page.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// Optional. A filter for the session templates to return in the response.
// Filters are case sensitive and have the following syntax:
//
// [field = value] AND [field [= value]] ...
Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"`
// contains filtered or unexported fields
}
A request to list session templates in a project.
func (*ListSessionTemplatesRequest) Descriptor
func (*ListSessionTemplatesRequest) Descriptor() ([]byte, []int)
Deprecated: Use ListSessionTemplatesRequest.ProtoReflect.Descriptor instead.
func (*ListSessionTemplatesRequest) GetFilter
func (x *ListSessionTemplatesRequest) GetFilter() string
func (*ListSessionTemplatesRequest) GetPageSize
func (x *ListSessionTemplatesRequest) GetPageSize() int32
func (*ListSessionTemplatesRequest) GetPageToken
func (x *ListSessionTemplatesRequest) GetPageToken() string
func (*ListSessionTemplatesRequest) GetParent
func (x *ListSessionTemplatesRequest) GetParent() string
func (*ListSessionTemplatesRequest) ProtoMessage
func (*ListSessionTemplatesRequest) ProtoMessage()
func (*ListSessionTemplatesRequest) ProtoReflect
func (x *ListSessionTemplatesRequest) ProtoReflect() protoreflect.Message
func (*ListSessionTemplatesRequest) Reset
func (x *ListSessionTemplatesRequest) Reset()
func (*ListSessionTemplatesRequest) String
func (x *ListSessionTemplatesRequest) String() string
ListSessionTemplatesResponse
type ListSessionTemplatesResponse struct {
// Output only. Session template list
SessionTemplates []*SessionTemplate `protobuf:"bytes,1,rep,name=session_templates,json=sessionTemplates,proto3" json:"session_templates,omitempty"`
// A token, which can be sent as `page_token` to retrieve the next page.
// If this field is omitted, there are no subsequent pages.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
// contains filtered or unexported fields
}
A list of session templates.
func (*ListSessionTemplatesResponse) Descriptor
func (*ListSessionTemplatesResponse) Descriptor() ([]byte, []int)
Deprecated: Use ListSessionTemplatesResponse.ProtoReflect.Descriptor instead.
func (*ListSessionTemplatesResponse) GetNextPageToken
func (x *ListSessionTemplatesResponse) GetNextPageToken() string
func (*ListSessionTemplatesResponse) GetSessionTemplates
func (x *ListSessionTemplatesResponse) GetSessionTemplates() []*SessionTemplate
func (*ListSessionTemplatesResponse) ProtoMessage
func (*ListSessionTemplatesResponse) ProtoMessage()
func (*ListSessionTemplatesResponse) ProtoReflect
func (x *ListSessionTemplatesResponse) ProtoReflect() protoreflect.Message
func (*ListSessionTemplatesResponse) Reset
func (x *ListSessionTemplatesResponse) Reset()
func (*ListSessionTemplatesResponse) String
func (x *ListSessionTemplatesResponse) String() string
ListSessionsRequest
type ListSessionsRequest struct {
// Required. The parent, which owns this collection of sessions.
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Optional. The maximum number of sessions to return in each response.
// The service may return fewer than this value.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// Optional. A page token received from a previous `ListSessions` call.
// Provide this token to retrieve the subsequent page.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// Optional. A filter for the sessions to return in the response.
//
// A filter is a logical expression constraining the values of various fields
// in each session resource. Filters are case sensitive, and may contain
// multiple clauses combined with logical operators (AND, OR).
// Supported fields are `session_id`, `session_uuid`, `state`, `create_time`,
// and `labels`.
//
// Example: `state = ACTIVE and create_time < "2023-01-01T00:00:00Z"`
// is a filter for sessions in an ACTIVE state that were created before
// 2023-01-01. `state = ACTIVE and labels.environment=production` is a filter
// for sessions in an ACTIVE state that have a production environment label.
//
// See https://google.aip.dev/assets/misc/ebnf-filtering.txt for a detailed
// description of the filter syntax and a list of supported comparators.
Filter string `protobuf:"bytes,4,opt,name=filter,proto3" json:"filter,omitempty"`
// contains filtered or unexported fields
}
A request to list sessions in a project.
func (*ListSessionsRequest) Descriptor
func (*ListSessionsRequest) Descriptor() ([]byte, []int)
Deprecated: Use ListSessionsRequest.ProtoReflect.Descriptor instead.
func (*ListSessionsRequest) GetFilter
func (x *ListSessionsRequest) GetFilter() string
func (*ListSessionsRequest) GetPageSize
func (x *ListSessionsRequest) GetPageSize() int32
func (*ListSessionsRequest) GetPageToken
func (x *ListSessionsRequest) GetPageToken() string
func (*ListSessionsRequest) GetParent
func (x *ListSessionsRequest) GetParent() string
func (*ListSessionsRequest) ProtoMessage
func (*ListSessionsRequest) ProtoMessage()
func (*ListSessionsRequest) ProtoReflect
func (x *ListSessionsRequest) ProtoReflect() protoreflect.Message
func (*ListSessionsRequest) Reset
func (x *ListSessionsRequest) Reset()
func (*ListSessionsRequest) String
func (x *ListSessionsRequest) String() string
ListSessionsResponse
type ListSessionsResponse struct {
// Output only. The sessions from the specified collection.
Sessions []*Session `protobuf:"bytes,1,rep,name=sessions,proto3" json:"sessions,omitempty"`
// A token, which can be sent as `page_token`, to retrieve the next page.
// If this field is omitted, there are no subsequent pages.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
// contains filtered or unexported fields
}
A list of interactive sessions.
func (*ListSessionsResponse) Descriptor
func (*ListSessionsResponse) Descriptor() ([]byte, []int)
Deprecated: Use ListSessionsResponse.ProtoReflect.Descriptor instead.
func (*ListSessionsResponse) GetNextPageToken
func (x *ListSessionsResponse) GetNextPageToken() string
func (*ListSessionsResponse) GetSessions
func (x *ListSessionsResponse) GetSessions() []*Session
func (*ListSessionsResponse) ProtoMessage
func (*ListSessionsResponse) ProtoMessage()
func (*ListSessionsResponse) ProtoReflect
func (x *ListSessionsResponse) ProtoReflect() protoreflect.Message
func (*ListSessionsResponse) Reset
func (x *ListSessionsResponse) Reset()
func (*ListSessionsResponse) String
func (x *ListSessionsResponse) String() string
ListWorkflowTemplatesRequest
type ListWorkflowTemplatesRequest struct {
// Required. The resource name of the region or location, as described
// in https://cloud.google.com/apis/design/resource_names.
//
// - For `projects.regions.workflowTemplates.list`, the resource
// name of the region has the following format:
// `projects/{project_id}/regions/{region}`
//
// - For `projects.locations.workflowTemplates.list`, the
// resource name of the location has the following format:
// `projects/{project_id}/locations/{location}`
Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
// Optional. The maximum number of results to return in each response.
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
// Optional. The page token, returned by a previous call, to request the
// next page of results.
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
// contains filtered or unexported fields
}
A request to list workflow templates in a project.
func (*ListWorkflowTemplatesRequest) Descriptor
func (*ListWorkflowTemplatesRequest) Descriptor() ([]byte, []int)
Deprecated: Use ListWorkflowTemplatesRequest.ProtoReflect.Descriptor instead.
func (*ListWorkflowTemplatesRequest) GetPageSize
func (x *ListWorkflowTemplatesRequest) GetPageSize() int32
func (*ListWorkflowTemplatesRequest) GetPageToken
func (x *ListWorkflowTemplatesRequest) GetPageToken() string
func (*ListWorkflowTemplatesRequest) GetParent
func (x *ListWorkflowTemplatesRequest) GetParent() string
func (*ListWorkflowTemplatesRequest) ProtoMessage
func (*ListWorkflowTemplatesRequest) ProtoMessage()
func (*ListWorkflowTemplatesRequest) ProtoReflect
func (x *ListWorkflowTemplatesRequest) ProtoReflect() protoreflect.Message
func (*ListWorkflowTemplatesRequest) Reset
func (x *ListWorkflowTemplatesRequest) Reset()
func (*ListWorkflowTemplatesRequest) String
func (x *ListWorkflowTemplatesRequest) String() string
ListWorkflowTemplatesResponse
type ListWorkflowTemplatesResponse struct {
// Output only. WorkflowTemplates list.
Templates []*WorkflowTemplate `protobuf:"bytes,1,rep,name=templates,proto3" json:"templates,omitempty"`
// Output only. This token is included in the response if there are more
// results to fetch. To fetch additional results, provide this value as the
// page_token in a subsequent ListWorkflowTemplatesRequest.
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
// Output only. List of workflow templates that could not be included in the
// response. Attempting to get one of these resources may indicate why it was
// not included in the list response.
Unreachable []string `protobuf:"bytes,3,rep,name=unreachable,proto3" json:"unreachable,omitempty"`
// contains filtered or unexported fields
}
A response to a request to list workflow templates in a project.
func (*ListWorkflowTemplatesResponse) Descriptor
func (*ListWorkflowTemplatesResponse) Descriptor() ([]byte, []int)
Deprecated: Use ListWorkflowTemplatesResponse.ProtoReflect.Descriptor instead.
func (*ListWorkflowTemplatesResponse) GetNextPageToken
func (x *ListWorkflowTemplatesResponse) GetNextPageToken() string
func (*ListWorkflowTemplatesResponse) GetTemplates
func (x *ListWorkflowTemplatesResponse) GetTemplates() []*WorkflowTemplate
func (*ListWorkflowTemplatesResponse) GetUnreachable
func (x *ListWorkflowTemplatesResponse) GetUnreachable() []string
func (*ListWorkflowTemplatesResponse) ProtoMessage
func (*ListWorkflowTemplatesResponse) ProtoMessage()
func (*ListWorkflowTemplatesResponse) ProtoReflect
func (x *ListWorkflowTemplatesResponse) ProtoReflect() protoreflect.Message
func (*ListWorkflowTemplatesResponse) Reset
func (x *ListWorkflowTemplatesResponse) Reset()
func (*ListWorkflowTemplatesResponse) String
func (x *ListWorkflowTemplatesResponse) String() string
LoggingConfig
type LoggingConfig struct {
// The per-package log levels for the driver. This can include
// "root" package name to configure rootLogger.
// Examples:
// - 'com.google = FATAL'
// - 'root = INFO'
// - 'org.apache = DEBUG'
DriverLogLevels map[string]LoggingConfig_Level `protobuf:"bytes,2,rep,name=driver_log_levels,json=driverLogLevels,proto3" json:"driver_log_levels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3,enum=google.cloud.dataproc.v1.LoggingConfig_Level"`
// contains filtered or unexported fields
}
The runtime logging config of the job.
func (*LoggingConfig) Descriptor
func (*LoggingConfig) Descriptor() ([]byte, []int)
Deprecated: Use LoggingConfig.ProtoReflect.Descriptor instead.
func (*LoggingConfig) GetDriverLogLevels
func (x *LoggingConfig) GetDriverLogLevels() map[string]LoggingConfig_Level
func (*LoggingConfig) ProtoMessage
func (*LoggingConfig) ProtoMessage()
func (*LoggingConfig) ProtoReflect
func (x *LoggingConfig) ProtoReflect() protoreflect.Message
func (*LoggingConfig) Reset
func (x *LoggingConfig) Reset()
func (*LoggingConfig) String
func (x *LoggingConfig) String() string
LoggingConfig_Level
type LoggingConfig_Level int32
The Log4j level for job execution. When running an Apache Hive job, Cloud Dataproc configures the Hive client to an equivalent verbosity level.
LoggingConfig_LEVEL_UNSPECIFIED, LoggingConfig_ALL, LoggingConfig_TRACE, LoggingConfig_DEBUG, LoggingConfig_INFO, LoggingConfig_WARN, LoggingConfig_ERROR, LoggingConfig_FATAL, LoggingConfig_OFF
const (
// Level is unspecified. Use default level for log4j.
LoggingConfig_LEVEL_UNSPECIFIED LoggingConfig_Level = 0
// Use ALL level for log4j.
LoggingConfig_ALL LoggingConfig_Level = 1
// Use TRACE level for log4j.
LoggingConfig_TRACE LoggingConfig_Level = 2
// Use DEBUG level for log4j.
LoggingConfig_DEBUG LoggingConfig_Level = 3
// Use INFO level for log4j.
LoggingConfig_INFO LoggingConfig_Level = 4
// Use WARN level for log4j.
LoggingConfig_WARN LoggingConfig_Level = 5
// Use ERROR level for log4j.
LoggingConfig_ERROR LoggingConfig_Level = 6
// Use FATAL level for log4j.
LoggingConfig_FATAL LoggingConfig_Level = 7
// Turn off log4j.
LoggingConfig_OFF LoggingConfig_Level = 8
)
func (LoggingConfig_Level) Descriptor
func (LoggingConfig_Level) Descriptor() protoreflect.EnumDescriptor
func (LoggingConfig_Level) Enum
func (x LoggingConfig_Level) Enum() *LoggingConfig_Level
func (LoggingConfig_Level) EnumDescriptor
func (LoggingConfig_Level) EnumDescriptor() ([]byte, []int)
Deprecated: Use LoggingConfig_Level.Descriptor instead.
func (LoggingConfig_Level) Number
func (x LoggingConfig_Level) Number() protoreflect.EnumNumber
func (LoggingConfig_Level) String
func (x LoggingConfig_Level) String() string
func (LoggingConfig_Level) Type
func (LoggingConfig_Level) Type() protoreflect.EnumType
ManagedCluster
type ManagedCluster struct {
// Required. The cluster name prefix. A unique cluster name will be formed by
// appending a random suffix.
//
// The name must contain only lower-case letters (a-z), numbers (0-9),
// and hyphens (-). Must begin with a letter. Cannot begin or end with
// hyphen. Must consist of between 2 and 35 characters.
ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
// Required. The cluster configuration.
Config *ClusterConfig `protobuf:"bytes,3,opt,name=config,proto3" json:"config,omitempty"`
// Optional. The labels to associate with this cluster.
//
// Label keys must be between 1 and 63 characters long, and must conform to
// the following PCRE regular expression:
// [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
//
// Label values must be between 1 and 63 characters long, and must conform to
// the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
//
// No more than 32 labels can be associated with a given cluster.
Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// contains filtered or unexported fields
}
Cluster that is managed by the workflow.
func (*ManagedCluster) Descriptor
func (*ManagedCluster) Descriptor() ([]byte, []int)
Deprecated: Use ManagedCluster.ProtoReflect.Descriptor instead.
func (*ManagedCluster) GetClusterName
func (x *ManagedCluster) GetClusterName() string
func (*ManagedCluster) GetConfig
func (x *ManagedCluster) GetConfig() *ClusterConfig
func (*ManagedCluster) GetLabels
func (x *ManagedCluster) GetLabels() map[string]string
func (*ManagedCluster) ProtoMessage
func (*ManagedCluster) ProtoMessage()
func (*ManagedCluster) ProtoReflect
func (x *ManagedCluster) ProtoReflect() protoreflect.Message
func (*ManagedCluster) Reset
func (x *ManagedCluster) Reset()
func (*ManagedCluster) String
func (x *ManagedCluster) String() string
ManagedGroupConfig
type ManagedGroupConfig struct {
// Output only. The name of the Instance Template used for the Managed
// Instance Group.
InstanceTemplateName string `protobuf:"bytes,1,opt,name=instance_template_name,json=instanceTemplateName,proto3" json:"instance_template_name,omitempty"`
// Output only. The name of the Instance Group Manager for this group.
InstanceGroupManagerName string `protobuf:"bytes,2,opt,name=instance_group_manager_name,json=instanceGroupManagerName,proto3" json:"instance_group_manager_name,omitempty"`
// Output only. The partial URI to the instance group manager for this group.
// E.g. projects/my-project/regions/us-central1/instanceGroupManagers/my-igm.
InstanceGroupManagerUri string `protobuf:"bytes,3,opt,name=instance_group_manager_uri,json=instanceGroupManagerUri,proto3" json:"instance_group_manager_uri,omitempty"`
// contains filtered or unexported fields
}
Specifies the resources used to actively manage an instance group.
func (*ManagedGroupConfig) Descriptor
func (*ManagedGroupConfig) Descriptor() ([]byte, []int)
Deprecated: Use ManagedGroupConfig.ProtoReflect.Descriptor instead.
func (*ManagedGroupConfig) GetInstanceGroupManagerName
func (x *ManagedGroupConfig) GetInstanceGroupManagerName() string
func (*ManagedGroupConfig) GetInstanceGroupManagerUri
func (x *ManagedGroupConfig) GetInstanceGroupManagerUri() string
func (*ManagedGroupConfig) GetInstanceTemplateName
func (x *ManagedGroupConfig) GetInstanceTemplateName() string
func (*ManagedGroupConfig) ProtoMessage
func (*ManagedGroupConfig) ProtoMessage()
func (*ManagedGroupConfig) ProtoReflect
func (x *ManagedGroupConfig) ProtoReflect() protoreflect.Message
func (*ManagedGroupConfig) Reset
func (x *ManagedGroupConfig) Reset()
func (*ManagedGroupConfig) String
func (x *ManagedGroupConfig) String() string
MetastoreConfig
type MetastoreConfig struct {
// Required. Resource name of an existing Dataproc Metastore service.
//
// Example:
//
// * `projects/[project_id]/locations/[dataproc_region]/services/[service-name]`
DataprocMetastoreService string `protobuf:"bytes,1,opt,name=dataproc_metastore_service,json=dataprocMetastoreService,proto3" json:"dataproc_metastore_service,omitempty"`
// contains filtered or unexported fields
}
Specifies a Metastore configuration.
func (*MetastoreConfig) Descriptor
func (*MetastoreConfig) Descriptor() ([]byte, []int)
Deprecated: Use MetastoreConfig.ProtoReflect.Descriptor instead.
func (*MetastoreConfig) GetDataprocMetastoreService
func (x *MetastoreConfig) GetDataprocMetastoreService() string
func (*MetastoreConfig) ProtoMessage
func (*MetastoreConfig) ProtoMessage()
func (*MetastoreConfig) ProtoReflect
func (x *MetastoreConfig) ProtoReflect() protoreflect.Message
func (*MetastoreConfig) Reset
func (x *MetastoreConfig) Reset()
func (*MetastoreConfig) String
func (x *MetastoreConfig) String() string
NodeGroup
type NodeGroup struct {
// The Node group [resource name](https://aip.dev/122).
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Required. Node group roles.
Roles []NodeGroup_Role `protobuf:"varint,2,rep,packed,name=roles,proto3,enum=google.cloud.dataproc.v1.NodeGroup_Role" json:"roles,omitempty"`
// Optional. The node group instance group configuration.
NodeGroupConfig *InstanceGroupConfig `protobuf:"bytes,3,opt,name=node_group_config,json=nodeGroupConfig,proto3" json:"node_group_config,omitempty"`
// Optional. Node group labels.
//
// - Label **keys** must consist of from 1 to 63 characters and conform to
// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
// - Label **values** can be empty. If specified, they must consist of from
// 1 to 63 characters and conform to [RFC 1035]
// (https://www.ietf.org/rfc/rfc1035.txt).
// - The node group must have no more than 32 labels.
Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// contains filtered or unexported fields
}
Dataproc Node Group.
The Dataproc NodeGroup
resource is not related to the
Dataproc [NodeGroupAffinity][google.cloud.dataproc.v1.NodeGroupAffinity]
resource.
func (*NodeGroup) Descriptor
Deprecated: Use NodeGroup.ProtoReflect.Descriptor instead.
func (*NodeGroup) GetLabels
func (*NodeGroup) GetName
func (*NodeGroup) GetNodeGroupConfig
func (x *NodeGroup) GetNodeGroupConfig() *InstanceGroupConfig
func (*NodeGroup) GetRoles
func (x *NodeGroup) GetRoles() []NodeGroup_Role
func (*NodeGroup) ProtoMessage
func (*NodeGroup) ProtoMessage()
func (*NodeGroup) ProtoReflect
func (x *NodeGroup) ProtoReflect() protoreflect.Message
func (*NodeGroup) Reset
func (x *NodeGroup) Reset()
func (*NodeGroup) String
NodeGroupAffinity
type NodeGroupAffinity struct {
// Required. The URI of a
// sole-tenant [node group
// resource](https://cloud.google.com/compute/docs/reference/rest/v1/nodeGroups)
// that the cluster will be created on.
//
// A full URL, partial URI, or node group name are valid. Examples:
//
// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]/nodeGroups/node-group-1`
// * `projects/[project_id]/zones/[zone]/nodeGroups/node-group-1`
// * `node-group-1`
NodeGroupUri string `protobuf:"bytes,1,opt,name=node_group_uri,json=nodeGroupUri,proto3" json:"node_group_uri,omitempty"`
// contains filtered or unexported fields
}
Node Group Affinity for clusters using sole-tenant node groups.
The Dataproc NodeGroupAffinity
resource is not related to the
Dataproc [NodeGroup][google.cloud.dataproc.v1.NodeGroup] resource.
func (*NodeGroupAffinity) Descriptor
func (*NodeGroupAffinity) Descriptor() ([]byte, []int)
Deprecated: Use NodeGroupAffinity.ProtoReflect.Descriptor instead.
func (*NodeGroupAffinity) GetNodeGroupUri
func (x *NodeGroupAffinity) GetNodeGroupUri() string
func (*NodeGroupAffinity) ProtoMessage
func (*NodeGroupAffinity) ProtoMessage()
func (*NodeGroupAffinity) ProtoReflect
func (x *NodeGroupAffinity) ProtoReflect() protoreflect.Message
func (*NodeGroupAffinity) Reset
func (x *NodeGroupAffinity) Reset()
func (*NodeGroupAffinity) String
func (x *NodeGroupAffinity) String() string
NodeGroupControllerClient
type NodeGroupControllerClient interface {
// Creates a node group in a cluster. The returned
// [Operation.metadata][google.longrunning.Operation.metadata] is
// [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).
CreateNodeGroup(ctx context.Context, in *CreateNodeGroupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
// Resizes a node group in a cluster. The returned
// [Operation.metadata][google.longrunning.Operation.metadata] is
// [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).
ResizeNodeGroup(ctx context.Context, in *ResizeNodeGroupRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
// Gets the resource representation for a node group in a
// cluster.
GetNodeGroup(ctx context.Context, in *GetNodeGroupRequest, opts ...grpc.CallOption) (*NodeGroup, error)
}
NodeGroupControllerClient is the client API for NodeGroupController service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewNodeGroupControllerClient
func NewNodeGroupControllerClient(cc grpc.ClientConnInterface) NodeGroupControllerClient
NodeGroupControllerServer
type NodeGroupControllerServer interface {
// Creates a node group in a cluster. The returned
// [Operation.metadata][google.longrunning.Operation.metadata] is
// [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).
CreateNodeGroup(context.Context, *CreateNodeGroupRequest) (*longrunningpb.Operation, error)
// Resizes a node group in a cluster. The returned
// [Operation.metadata][google.longrunning.Operation.metadata] is
// [NodeGroupOperationMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#nodegroupoperationmetadata).
ResizeNodeGroup(context.Context, *ResizeNodeGroupRequest) (*longrunningpb.Operation, error)
// Gets the resource representation for a node group in a
// cluster.
GetNodeGroup(context.Context, *GetNodeGroupRequest) (*NodeGroup, error)
}
NodeGroupControllerServer is the server API for NodeGroupController service.
NodeGroupOperationMetadata
type NodeGroupOperationMetadata struct {
// Output only. Node group ID for the operation.
NodeGroupId string `protobuf:"bytes,1,opt,name=node_group_id,json=nodeGroupId,proto3" json:"node_group_id,omitempty"`
// Output only. Cluster UUID associated with the node group operation.
ClusterUuid string `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
// Output only. Current operation status.
Status *ClusterOperationStatus `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
// Output only. The previous operation status.
StatusHistory []*ClusterOperationStatus `protobuf:"bytes,4,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
// The operation type.
OperationType NodeGroupOperationMetadata_NodeGroupOperationType `protobuf:"varint,5,opt,name=operation_type,json=operationType,proto3,enum=google.cloud.dataproc.v1.NodeGroupOperationMetadata_NodeGroupOperationType" json:"operation_type,omitempty"`
// Output only. Short description of operation.
Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
// Output only. Labels associated with the operation.
Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Output only. Warnings (non-fatal errors) encountered during operation execution.
Warnings []string `protobuf:"bytes,8,rep,name=warnings,proto3" json:"warnings,omitempty"`
// contains filtered or unexported fields
}
Metadata describing the node group operation.
func (*NodeGroupOperationMetadata) Descriptor
func (*NodeGroupOperationMetadata) Descriptor() ([]byte, []int)
Deprecated: Use NodeGroupOperationMetadata.ProtoReflect.Descriptor instead.
func (*NodeGroupOperationMetadata) GetClusterUuid
func (x *NodeGroupOperationMetadata) GetClusterUuid() string
func (*NodeGroupOperationMetadata) GetDescription
func (x *NodeGroupOperationMetadata) GetDescription() string
func (*NodeGroupOperationMetadata) GetLabels
func (x *NodeGroupOperationMetadata) GetLabels() map[string]string
func (*NodeGroupOperationMetadata) GetNodeGroupId
func (x *NodeGroupOperationMetadata) GetNodeGroupId() string
func (*NodeGroupOperationMetadata) GetOperationType
func (x *NodeGroupOperationMetadata) GetOperationType() NodeGroupOperationMetadata_NodeGroupOperationType
func (*NodeGroupOperationMetadata) GetStatus
func (x *NodeGroupOperationMetadata) GetStatus() *ClusterOperationStatus
func (*NodeGroupOperationMetadata) GetStatusHistory
func (x *NodeGroupOperationMetadata) GetStatusHistory() []*ClusterOperationStatus
func (*NodeGroupOperationMetadata) GetWarnings
func (x *NodeGroupOperationMetadata) GetWarnings() []string
func (*NodeGroupOperationMetadata) ProtoMessage
func (*NodeGroupOperationMetadata) ProtoMessage()
func (*NodeGroupOperationMetadata) ProtoReflect
func (x *NodeGroupOperationMetadata) ProtoReflect() protoreflect.Message
func (*NodeGroupOperationMetadata) Reset
func (x *NodeGroupOperationMetadata) Reset()
func (*NodeGroupOperationMetadata) String
func (x *NodeGroupOperationMetadata) String() string
NodeGroupOperationMetadata_NodeGroupOperationType
type NodeGroupOperationMetadata_NodeGroupOperationType int32
Operation type for node group resources.
NodeGroupOperationMetadata_NODE_GROUP_OPERATION_TYPE_UNSPECIFIED, NodeGroupOperationMetadata_CREATE, NodeGroupOperationMetadata_UPDATE, NodeGroupOperationMetadata_DELETE, NodeGroupOperationMetadata_RESIZE
const (
// Node group operation type is unknown.
NodeGroupOperationMetadata_NODE_GROUP_OPERATION_TYPE_UNSPECIFIED NodeGroupOperationMetadata_NodeGroupOperationType = 0
// Create node group operation type.
NodeGroupOperationMetadata_CREATE NodeGroupOperationMetadata_NodeGroupOperationType = 1
// Update node group operation type.
NodeGroupOperationMetadata_UPDATE NodeGroupOperationMetadata_NodeGroupOperationType = 2
// Delete node group operation type.
NodeGroupOperationMetadata_DELETE NodeGroupOperationMetadata_NodeGroupOperationType = 3
// Resize node group operation type.
NodeGroupOperationMetadata_RESIZE NodeGroupOperationMetadata_NodeGroupOperationType = 4
)
func (NodeGroupOperationMetadata_NodeGroupOperationType) Descriptor
func (NodeGroupOperationMetadata_NodeGroupOperationType) Descriptor() protoreflect.EnumDescriptor
func (NodeGroupOperationMetadata_NodeGroupOperationType) Enum
func (x NodeGroupOperationMetadata_NodeGroupOperationType) Enum() *NodeGroupOperationMetadata_NodeGroupOperationType
func (NodeGroupOperationMetadata_NodeGroupOperationType) EnumDescriptor
func (NodeGroupOperationMetadata_NodeGroupOperationType) EnumDescriptor() ([]byte, []int)
Deprecated: Use NodeGroupOperationMetadata_NodeGroupOperationType.Descriptor instead.
func (NodeGroupOperationMetadata_NodeGroupOperationType) Number
func (x NodeGroupOperationMetadata_NodeGroupOperationType) Number() protoreflect.EnumNumber
func (NodeGroupOperationMetadata_NodeGroupOperationType) String
func (x NodeGroupOperationMetadata_NodeGroupOperationType) String() string
func (NodeGroupOperationMetadata_NodeGroupOperationType) Type
NodeGroup_Role
type NodeGroup_Role int32
Node pool roles.
NodeGroup_ROLE_UNSPECIFIED, NodeGroup_DRIVER
const (
// Required unspecified role.
NodeGroup_ROLE_UNSPECIFIED NodeGroup_Role = 0
// Job drivers run on the node pool.
NodeGroup_DRIVER NodeGroup_Role = 1
)
func (NodeGroup_Role) Descriptor
func (NodeGroup_Role) Descriptor() protoreflect.EnumDescriptor
func (NodeGroup_Role) Enum
func (x NodeGroup_Role) Enum() *NodeGroup_Role
func (NodeGroup_Role) EnumDescriptor
func (NodeGroup_Role) EnumDescriptor() ([]byte, []int)
Deprecated: Use NodeGroup_Role.Descriptor instead.
func (NodeGroup_Role) Number
func (x NodeGroup_Role) Number() protoreflect.EnumNumber
func (NodeGroup_Role) String
func (x NodeGroup_Role) String() string
func (NodeGroup_Role) Type
func (NodeGroup_Role) Type() protoreflect.EnumType
NodeInitializationAction
type NodeInitializationAction struct {
// Required. Cloud Storage URI of executable file.
ExecutableFile string `protobuf:"bytes,1,opt,name=executable_file,json=executableFile,proto3" json:"executable_file,omitempty"`
// Optional. Amount of time executable has to complete. Default is
// 10 minutes (see JSON representation of
// [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
//
// Cluster creation fails with an explanatory error message (the
// name of the executable that caused the error and the exceeded timeout
// period) if the executable is not completed at end of the timeout period.
ExecutionTimeout *durationpb.Duration `protobuf:"bytes,2,opt,name=execution_timeout,json=executionTimeout,proto3" json:"execution_timeout,omitempty"`
// contains filtered or unexported fields
}
Specifies an executable to run on a fully configured node and a timeout period for executable completion.
func (*NodeInitializationAction) Descriptor
func (*NodeInitializationAction) Descriptor() ([]byte, []int)
Deprecated: Use NodeInitializationAction.ProtoReflect.Descriptor instead.
func (*NodeInitializationAction) GetExecutableFile
func (x *NodeInitializationAction) GetExecutableFile() string
func (*NodeInitializationAction) GetExecutionTimeout
func (x *NodeInitializationAction) GetExecutionTimeout() *durationpb.Duration
func (*NodeInitializationAction) ProtoMessage
func (*NodeInitializationAction) ProtoMessage()
func (*NodeInitializationAction) ProtoReflect
func (x *NodeInitializationAction) ProtoReflect() protoreflect.Message
func (*NodeInitializationAction) Reset
func (x *NodeInitializationAction) Reset()
func (*NodeInitializationAction) String
func (x *NodeInitializationAction) String() string
OrderedJob
type OrderedJob struct {
// Required. The step id. The id must be unique among all jobs
// within the template.
//
// The step id is used as prefix for job id, as job
// `goog-dataproc-workflow-step-id` label, and in
// [prerequisiteStepIds][google.cloud.dataproc.v1.OrderedJob.prerequisite_step_ids]
// field from other steps.
//
// The id must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). Cannot begin or end with underscore
// or hyphen. Must consist of between 3 and 50 characters.
StepId string `protobuf:"bytes,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"`
// Required. The job definition.
//
// Types that are assignable to JobType:
//
// *OrderedJob_HadoopJob
// *OrderedJob_SparkJob
// *OrderedJob_PysparkJob
// *OrderedJob_HiveJob
// *OrderedJob_PigJob
// *OrderedJob_SparkRJob
// *OrderedJob_SparkSqlJob
// *OrderedJob_PrestoJob
// *OrderedJob_TrinoJob
// *OrderedJob_FlinkJob
JobType isOrderedJob_JobType `protobuf_oneof:"job_type"`
// Optional. The labels to associate with this job.
//
// Label keys must be between 1 and 63 characters long, and must conform to
// the following regular expression:
// [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}
//
// Label values must be between 1 and 63 characters long, and must conform to
// the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63}
//
// No more than 32 labels can be associated with a given job.
Labels map[string]string `protobuf:"bytes,8,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. Job scheduling configuration.
Scheduling *JobScheduling `protobuf:"bytes,9,opt,name=scheduling,proto3" json:"scheduling,omitempty"`
// Optional. The optional list of prerequisite job step_ids.
// If not specified, the job will start at the beginning of workflow.
PrerequisiteStepIds []string `protobuf:"bytes,10,rep,name=prerequisite_step_ids,json=prerequisiteStepIds,proto3" json:"prerequisite_step_ids,omitempty"`
// contains filtered or unexported fields
}
A job executed by the workflow.
func (*OrderedJob) Descriptor
func (*OrderedJob) Descriptor() ([]byte, []int)
Deprecated: Use OrderedJob.ProtoReflect.Descriptor instead.
func (*OrderedJob) GetFlinkJob
func (x *OrderedJob) GetFlinkJob() *FlinkJob
func (*OrderedJob) GetHadoopJob
func (x *OrderedJob) GetHadoopJob() *HadoopJob
func (*OrderedJob) GetHiveJob
func (x *OrderedJob) GetHiveJob() *HiveJob
func (*OrderedJob) GetJobType
func (m *OrderedJob) GetJobType() isOrderedJob_JobType
func (*OrderedJob) GetLabels
func (x *OrderedJob) GetLabels() map[string]string
func (*OrderedJob) GetPigJob
func (x *OrderedJob) GetPigJob() *PigJob
func (*OrderedJob) GetPrerequisiteStepIds
func (x *OrderedJob) GetPrerequisiteStepIds() []string
func (*OrderedJob) GetPrestoJob
func (x *OrderedJob) GetPrestoJob() *PrestoJob
func (*OrderedJob) GetPysparkJob
func (x *OrderedJob) GetPysparkJob() *PySparkJob
func (*OrderedJob) GetScheduling
func (x *OrderedJob) GetScheduling() *JobScheduling
func (*OrderedJob) GetSparkJob
func (x *OrderedJob) GetSparkJob() *SparkJob
func (*OrderedJob) GetSparkRJob
func (x *OrderedJob) GetSparkRJob() *SparkRJob
func (*OrderedJob) GetSparkSqlJob
func (x *OrderedJob) GetSparkSqlJob() *SparkSqlJob
func (*OrderedJob) GetStepId
func (x *OrderedJob) GetStepId() string
func (*OrderedJob) GetTrinoJob
func (x *OrderedJob) GetTrinoJob() *TrinoJob
func (*OrderedJob) ProtoMessage
func (*OrderedJob) ProtoMessage()
func (*OrderedJob) ProtoReflect
func (x *OrderedJob) ProtoReflect() protoreflect.Message
func (*OrderedJob) Reset
func (x *OrderedJob) Reset()
func (*OrderedJob) String
func (x *OrderedJob) String() string
OrderedJob_FlinkJob
type OrderedJob_FlinkJob struct {
// Optional. Job is a Flink job.
FlinkJob *FlinkJob `protobuf:"bytes,14,opt,name=flink_job,json=flinkJob,proto3,oneof"`
}
OrderedJob_HadoopJob
type OrderedJob_HadoopJob struct {
// Optional. Job is a Hadoop job.
HadoopJob *HadoopJob `protobuf:"bytes,2,opt,name=hadoop_job,json=hadoopJob,proto3,oneof"`
}
OrderedJob_HiveJob
type OrderedJob_HiveJob struct {
// Optional. Job is a Hive job.
HiveJob *HiveJob `protobuf:"bytes,5,opt,name=hive_job,json=hiveJob,proto3,oneof"`
}
OrderedJob_PigJob
type OrderedJob_PigJob struct {
// Optional. Job is a Pig job.
PigJob *PigJob `protobuf:"bytes,6,opt,name=pig_job,json=pigJob,proto3,oneof"`
}
OrderedJob_PrestoJob
type OrderedJob_PrestoJob struct {
// Optional. Job is a Presto job.
PrestoJob *PrestoJob `protobuf:"bytes,12,opt,name=presto_job,json=prestoJob,proto3,oneof"`
}
OrderedJob_PysparkJob
type OrderedJob_PysparkJob struct {
// Optional. Job is a PySpark job.
PysparkJob *PySparkJob `protobuf:"bytes,4,opt,name=pyspark_job,json=pysparkJob,proto3,oneof"`
}
OrderedJob_SparkJob
type OrderedJob_SparkJob struct {
// Optional. Job is a Spark job.
SparkJob *SparkJob `protobuf:"bytes,3,opt,name=spark_job,json=sparkJob,proto3,oneof"`
}
OrderedJob_SparkRJob
type OrderedJob_SparkRJob struct {
// Optional. Job is a SparkR job.
SparkRJob *SparkRJob `protobuf:"bytes,11,opt,name=spark_r_job,json=sparkRJob,proto3,oneof"`
}
OrderedJob_SparkSqlJob
type OrderedJob_SparkSqlJob struct {
// Optional. Job is a SparkSql job.
SparkSqlJob *SparkSqlJob `protobuf:"bytes,7,opt,name=spark_sql_job,json=sparkSqlJob,proto3,oneof"`
}
OrderedJob_TrinoJob
type OrderedJob_TrinoJob struct {
// Optional. Job is a Trino job.
TrinoJob *TrinoJob `protobuf:"bytes,13,opt,name=trino_job,json=trinoJob,proto3,oneof"`
}
ParameterValidation
type ParameterValidation struct {
// Required. The type of validation to be performed.
//
// Types that are assignable to ValidationType:
//
// *ParameterValidation_Regex
// *ParameterValidation_Values
ValidationType isParameterValidation_ValidationType `protobuf_oneof:"validation_type"`
// contains filtered or unexported fields
}
Configuration for parameter validation.
func (*ParameterValidation) Descriptor
func (*ParameterValidation) Descriptor() ([]byte, []int)
Deprecated: Use ParameterValidation.ProtoReflect.Descriptor instead.
func (*ParameterValidation) GetRegex
func (x *ParameterValidation) GetRegex() *RegexValidation
func (*ParameterValidation) GetValidationType
func (m *ParameterValidation) GetValidationType() isParameterValidation_ValidationType
func (*ParameterValidation) GetValues
func (x *ParameterValidation) GetValues() *ValueValidation
func (*ParameterValidation) ProtoMessage
func (*ParameterValidation) ProtoMessage()
func (*ParameterValidation) ProtoReflect
func (x *ParameterValidation) ProtoReflect() protoreflect.Message
func (*ParameterValidation) Reset
func (x *ParameterValidation) Reset()
func (*ParameterValidation) String
func (x *ParameterValidation) String() string
ParameterValidation_Regex
type ParameterValidation_Regex struct {
// Validation based on regular expressions.
Regex *RegexValidation `protobuf:"bytes,1,opt,name=regex,proto3,oneof"`
}
ParameterValidation_Values
type ParameterValidation_Values struct {
// Validation based on a list of allowed values.
Values *ValueValidation `protobuf:"bytes,2,opt,name=values,proto3,oneof"`
}
PeripheralsConfig
type PeripheralsConfig struct {
// Optional. Resource name of an existing Dataproc Metastore service.
//
// Example:
//
// * `projects/[project_id]/locations/[region]/services/[service_id]`
MetastoreService string `protobuf:"bytes,1,opt,name=metastore_service,json=metastoreService,proto3" json:"metastore_service,omitempty"`
// Optional. The Spark History Server configuration for the workload.
SparkHistoryServerConfig *SparkHistoryServerConfig `protobuf:"bytes,2,opt,name=spark_history_server_config,json=sparkHistoryServerConfig,proto3" json:"spark_history_server_config,omitempty"`
// contains filtered or unexported fields
}
Auxiliary services configuration for a workload.
func (*PeripheralsConfig) Descriptor
func (*PeripheralsConfig) Descriptor() ([]byte, []int)
Deprecated: Use PeripheralsConfig.ProtoReflect.Descriptor instead.
func (*PeripheralsConfig) GetMetastoreService
func (x *PeripheralsConfig) GetMetastoreService() string
func (*PeripheralsConfig) GetSparkHistoryServerConfig
func (x *PeripheralsConfig) GetSparkHistoryServerConfig() *SparkHistoryServerConfig
func (*PeripheralsConfig) ProtoMessage
func (*PeripheralsConfig) ProtoMessage()
func (*PeripheralsConfig) ProtoReflect
func (x *PeripheralsConfig) ProtoReflect() protoreflect.Message
func (*PeripheralsConfig) Reset
func (x *PeripheralsConfig) Reset()
func (*PeripheralsConfig) String
func (x *PeripheralsConfig) String() string
PigJob
type PigJob struct {
// Required. The sequence of Pig queries to execute, specified as an HCFS
// file URI or a list of queries.
//
// Types that are assignable to Queries:
//
// *PigJob_QueryFileUri
// *PigJob_QueryList
Queries isPigJob_Queries `protobuf_oneof:"queries"`
// Optional. Whether to continue executing queries if a query fails.
// The default value is `false`. Setting to `true` can be useful when
// executing independent parallel queries.
ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`
// Optional. Mapping of query variable names to values (equivalent to the Pig
// command: `name=[value]`).
ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables,proto3" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. A mapping of property names to values, used to configure Pig.
// Properties that conflict with values set by the Dataproc API might be
// overwritten. Can include properties set in `/etc/hadoop/conf/*-site.xml`,
// `/etc/pig/conf/pig.properties`, and classes in user code.
Properties map[string]string `protobuf:"bytes,5,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. HCFS URIs of jar files to add to the CLASSPATH of
// the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs.
JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
// Optional. The runtime log config for job execution.
LoggingConfig *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
// contains filtered or unexported fields
}
A Dataproc job for running Apache Pig queries on YARN.
func (*PigJob) Descriptor
Deprecated: Use PigJob.ProtoReflect.Descriptor instead.
func (*PigJob) GetContinueOnFailure
func (*PigJob) GetJarFileUris
func (*PigJob) GetLoggingConfig
func (x *PigJob) GetLoggingConfig() *LoggingConfig
func (*PigJob) GetProperties
func (*PigJob) GetQueries
func (m *PigJob) GetQueries() isPigJob_Queries
func (*PigJob) GetQueryFileUri
func (*PigJob) GetQueryList
func (*PigJob) GetScriptVariables
func (*PigJob) ProtoMessage
func (*PigJob) ProtoMessage()
func (*PigJob) ProtoReflect
func (x *PigJob) ProtoReflect() protoreflect.Message
func (*PigJob) Reset
func (x *PigJob) Reset()
func (*PigJob) String
PigJob_QueryFileUri
type PigJob_QueryFileUri struct {
// The HCFS URI of the script that contains the Pig queries.
QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}
PigJob_QueryList
type PigJob_QueryList struct {
// A list of queries.
QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}
PrestoJob
type PrestoJob struct {
// Required. The sequence of Presto queries to execute, specified as
// either an HCFS file URI or as a list of queries.
//
// Types that are assignable to Queries:
//
// *PrestoJob_QueryFileUri
// *PrestoJob_QueryList
Queries isPrestoJob_Queries `protobuf_oneof:"queries"`
// Optional. Whether to continue executing queries if a query fails.
// The default value is `false`. Setting to `true` can be useful when
// executing independent parallel queries.
ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`
// Optional. The format in which query output will be displayed. See the
// Presto documentation for supported output formats.
OutputFormat string `protobuf:"bytes,4,opt,name=output_format,json=outputFormat,proto3" json:"output_format,omitempty"`
// Optional. Presto client tags to attach to this query.
ClientTags []string `protobuf:"bytes,5,rep,name=client_tags,json=clientTags,proto3" json:"client_tags,omitempty"`
// Optional. A mapping of property names to values. Used to set Presto
// [session properties](https://prestodb.io/docs/current/sql/set-session.html)
// Equivalent to using the --session flag in the Presto CLI.
Properties map[string]string `protobuf:"bytes,6,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. The runtime log config for job execution.
LoggingConfig *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
// contains filtered or unexported fields
}
A Dataproc job for running Presto queries. IMPORTANT: The Dataproc Presto Optional Component must be enabled when the cluster is created to submit a Presto job to the cluster.
func (*PrestoJob) Descriptor
Deprecated: Use PrestoJob.ProtoReflect.Descriptor instead.
func (*PrestoJob) GetClientTags
func (*PrestoJob) GetContinueOnFailure
func (*PrestoJob) GetLoggingConfig
func (x *PrestoJob) GetLoggingConfig() *LoggingConfig
func (*PrestoJob) GetOutputFormat
func (*PrestoJob) GetProperties
func (*PrestoJob) GetQueries
func (m *PrestoJob) GetQueries() isPrestoJob_Queries
func (*PrestoJob) GetQueryFileUri
func (*PrestoJob) GetQueryList
func (*PrestoJob) ProtoMessage
func (*PrestoJob) ProtoMessage()
func (*PrestoJob) ProtoReflect
func (x *PrestoJob) ProtoReflect() protoreflect.Message
func (*PrestoJob) Reset
func (x *PrestoJob) Reset()
func (*PrestoJob) String
PrestoJob_QueryFileUri
type PrestoJob_QueryFileUri struct {
// The HCFS URI of the script that contains SQL queries.
QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}
PrestoJob_QueryList
type PrestoJob_QueryList struct {
// A list of queries.
QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}
PyPiRepositoryConfig
type PyPiRepositoryConfig struct {
// Optional. PyPi repository address.
PypiRepository string `protobuf:"bytes,1,opt,name=pypi_repository,json=pypiRepository,proto3" json:"pypi_repository,omitempty"`
// contains filtered or unexported fields
}
Configuration for PyPi repository.
func (*PyPiRepositoryConfig) Descriptor
func (*PyPiRepositoryConfig) Descriptor() ([]byte, []int)
Deprecated: Use PyPiRepositoryConfig.ProtoReflect.Descriptor instead.
func (*PyPiRepositoryConfig) GetPypiRepository
func (x *PyPiRepositoryConfig) GetPypiRepository() string
func (*PyPiRepositoryConfig) ProtoMessage
func (*PyPiRepositoryConfig) ProtoMessage()
func (*PyPiRepositoryConfig) ProtoReflect
func (x *PyPiRepositoryConfig) ProtoReflect() protoreflect.Message
func (*PyPiRepositoryConfig) Reset
func (x *PyPiRepositoryConfig) Reset()
func (*PyPiRepositoryConfig) String
func (x *PyPiRepositoryConfig) String() string
PySparkBatch
type PySparkBatch struct {
// Required. The HCFS URI of the main Python file to use as the Spark driver.
// Must be a .py file.
MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri,proto3" json:"main_python_file_uri,omitempty"`
// Optional. The arguments to pass to the driver. Do not include arguments
// that can be set as batch properties, such as `--conf`, since a collision
// can occur that causes an incorrect batch submission.
Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`
// Optional. HCFS file URIs of Python files to pass to the PySpark
// framework. Supported file types: `.py`, `.egg`, and `.zip`.
PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris,proto3" json:"python_file_uris,omitempty"`
// Optional. HCFS URIs of jar files to add to the classpath of the
// Spark driver and tasks.
JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
// Optional. HCFS URIs of files to be placed in the working directory of
// each executor.
FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
// Optional. HCFS URIs of archives to be extracted into the working directory
// of each executor. Supported file types:
// `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
// contains filtered or unexported fields
}
A configuration for running an Apache PySpark batch workload.
func (*PySparkBatch) Descriptor
func (*PySparkBatch) Descriptor() ([]byte, []int)
Deprecated: Use PySparkBatch.ProtoReflect.Descriptor instead.
func (*PySparkBatch) GetArchiveUris
func (x *PySparkBatch) GetArchiveUris() []string
func (*PySparkBatch) GetArgs
func (x *PySparkBatch) GetArgs() []string
func (*PySparkBatch) GetFileUris
func (x *PySparkBatch) GetFileUris() []string
func (*PySparkBatch) GetJarFileUris
func (x *PySparkBatch) GetJarFileUris() []string
func (*PySparkBatch) GetMainPythonFileUri
func (x *PySparkBatch) GetMainPythonFileUri() string
func (*PySparkBatch) GetPythonFileUris
func (x *PySparkBatch) GetPythonFileUris() []string
func (*PySparkBatch) ProtoMessage
func (*PySparkBatch) ProtoMessage()
func (*PySparkBatch) ProtoReflect
func (x *PySparkBatch) ProtoReflect() protoreflect.Message
func (*PySparkBatch) Reset
func (x *PySparkBatch) Reset()
func (*PySparkBatch) String
func (x *PySparkBatch) String() string
PySparkJob
type PySparkJob struct {
// Required. The HCFS URI of the main Python file to use as the driver. Must
// be a .py file.
MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri,proto3" json:"main_python_file_uri,omitempty"`
// Optional. The arguments to pass to the driver. Do not include arguments,
// such as `--conf`, that can be set as job properties, since a collision may
// occur that causes an incorrect job submission.
Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`
// Optional. HCFS file URIs of Python files to pass to the PySpark
// framework. Supported file types: .py, .egg, and .zip.
PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris,proto3" json:"python_file_uris,omitempty"`
// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
// Python driver and tasks.
JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
// Optional. HCFS URIs of files to be placed in the working directory of
// each executor. Useful for naively parallel tasks.
FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
// Optional. HCFS URIs of archives to be extracted into the working directory
// of each executor. Supported file types:
// .jar, .tar, .tar.gz, .tgz, and .zip.
ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
// Optional. A mapping of property names to values, used to configure PySpark.
// Properties that conflict with values set by the Dataproc API might be
// overwritten. Can include properties set in
// /etc/spark/conf/spark-defaults.conf and classes in user code.
Properties map[string]string `protobuf:"bytes,7,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. The runtime log config for job execution.
LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
// contains filtered or unexported fields
}
A Dataproc job for running Apache PySpark applications on YARN.
func (*PySparkJob) Descriptor
func (*PySparkJob) Descriptor() ([]byte, []int)
Deprecated: Use PySparkJob.ProtoReflect.Descriptor instead.
func (*PySparkJob) GetArchiveUris
func (x *PySparkJob) GetArchiveUris() []string
func (*PySparkJob) GetArgs
func (x *PySparkJob) GetArgs() []string
func (*PySparkJob) GetFileUris
func (x *PySparkJob) GetFileUris() []string
func (*PySparkJob) GetJarFileUris
func (x *PySparkJob) GetJarFileUris() []string
func (*PySparkJob) GetLoggingConfig
func (x *PySparkJob) GetLoggingConfig() *LoggingConfig
func (*PySparkJob) GetMainPythonFileUri
func (x *PySparkJob) GetMainPythonFileUri() string
func (*PySparkJob) GetProperties
func (x *PySparkJob) GetProperties() map[string]string
func (*PySparkJob) GetPythonFileUris
func (x *PySparkJob) GetPythonFileUris() []string
func (*PySparkJob) ProtoMessage
func (*PySparkJob) ProtoMessage()
func (*PySparkJob) ProtoReflect
func (x *PySparkJob) ProtoReflect() protoreflect.Message
func (*PySparkJob) Reset
func (x *PySparkJob) Reset()
func (*PySparkJob) String
func (x *PySparkJob) String() string
QueryList
type QueryList struct {
// Required. The queries to execute. You do not need to end a query expression
// with a semicolon. Multiple queries can be specified in one
// string by separating each with a semicolon. Here is an example of a
// Dataproc API snippet that uses a QueryList to specify a HiveJob:
//
// "hiveJob": {
// "queryList": {
// "queries": [
// "query1",
// "query2",
// "query3;query4",
// ]
// }
// }
Queries []string `protobuf:"bytes,1,rep,name=queries,proto3" json:"queries,omitempty"`
// contains filtered or unexported fields
}
A list of queries to run on a cluster.
func (*QueryList) Descriptor
Deprecated: Use QueryList.ProtoReflect.Descriptor instead.
func (*QueryList) GetQueries
func (*QueryList) ProtoMessage
func (*QueryList) ProtoMessage()
func (*QueryList) ProtoReflect
func (x *QueryList) ProtoReflect() protoreflect.Message
func (*QueryList) Reset
func (x *QueryList) Reset()
func (*QueryList) String
RegexValidation
type RegexValidation struct {
// Required. RE2 regular expressions used to validate the parameter's value.
// The value must match the regex in its entirety (substring
// matches are not sufficient).
Regexes []string `protobuf:"bytes,1,rep,name=regexes,proto3" json:"regexes,omitempty"`
// contains filtered or unexported fields
}
Validation based on regular expressions.
func (*RegexValidation) Descriptor
func (*RegexValidation) Descriptor() ([]byte, []int)
Deprecated: Use RegexValidation.ProtoReflect.Descriptor instead.
func (*RegexValidation) GetRegexes
func (x *RegexValidation) GetRegexes() []string
func (*RegexValidation) ProtoMessage
func (*RegexValidation) ProtoMessage()
func (*RegexValidation) ProtoReflect
func (x *RegexValidation) ProtoReflect() protoreflect.Message
func (*RegexValidation) Reset
func (x *RegexValidation) Reset()
func (*RegexValidation) String
func (x *RegexValidation) String() string
RepositoryConfig
type RepositoryConfig struct {
// Optional. Configuration for PyPi repository.
PypiRepositoryConfig *PyPiRepositoryConfig `protobuf:"bytes,1,opt,name=pypi_repository_config,json=pypiRepositoryConfig,proto3" json:"pypi_repository_config,omitempty"`
// contains filtered or unexported fields
}
Configuration for dependency repositories.
func (*RepositoryConfig) Descriptor
func (*RepositoryConfig) Descriptor() ([]byte, []int)
Deprecated: Use RepositoryConfig.ProtoReflect.Descriptor instead.
func (*RepositoryConfig) GetPypiRepositoryConfig
func (x *RepositoryConfig) GetPypiRepositoryConfig() *PyPiRepositoryConfig
func (*RepositoryConfig) ProtoMessage
func (*RepositoryConfig) ProtoMessage()
func (*RepositoryConfig) ProtoReflect
func (x *RepositoryConfig) ProtoReflect() protoreflect.Message
func (*RepositoryConfig) Reset
func (x *RepositoryConfig) Reset()
func (*RepositoryConfig) String
func (x *RepositoryConfig) String() string
ReservationAffinity
type ReservationAffinity struct {
// Optional. Type of reservation to consume.
ConsumeReservationType ReservationAffinity_Type `protobuf:"varint,1,opt,name=consume_reservation_type,json=consumeReservationType,proto3,enum=google.cloud.dataproc.v1.ReservationAffinity_Type" json:"consume_reservation_type,omitempty"`
// Optional. Corresponds to the label key of reservation resource.
Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
// Optional. Corresponds to the label values of reservation resource.
Values []string `protobuf:"bytes,3,rep,name=values,proto3" json:"values,omitempty"`
// contains filtered or unexported fields
}
Reservation Affinity for consuming Zonal reservation.
func (*ReservationAffinity) Descriptor
func (*ReservationAffinity) Descriptor() ([]byte, []int)
Deprecated: Use ReservationAffinity.ProtoReflect.Descriptor instead.
func (*ReservationAffinity) GetConsumeReservationType
func (x *ReservationAffinity) GetConsumeReservationType() ReservationAffinity_Type
func (*ReservationAffinity) GetKey
func (x *ReservationAffinity) GetKey() string
func (*ReservationAffinity) GetValues
func (x *ReservationAffinity) GetValues() []string
func (*ReservationAffinity) ProtoMessage
func (*ReservationAffinity) ProtoMessage()
func (*ReservationAffinity) ProtoReflect
func (x *ReservationAffinity) ProtoReflect() protoreflect.Message
func (*ReservationAffinity) Reset
func (x *ReservationAffinity) Reset()
func (*ReservationAffinity) String
func (x *ReservationAffinity) String() string
ReservationAffinity_Type
type ReservationAffinity_Type int32
Indicates whether to consume capacity from a reservation or not.
ReservationAffinity_TYPE_UNSPECIFIED, ReservationAffinity_NO_RESERVATION, ReservationAffinity_ANY_RESERVATION, ReservationAffinity_SPECIFIC_RESERVATION
const (
ReservationAffinity_TYPE_UNSPECIFIED ReservationAffinity_Type = 0
// Do not consume from any allocated capacity.
ReservationAffinity_NO_RESERVATION ReservationAffinity_Type = 1
// Consume any reservation available.
ReservationAffinity_ANY_RESERVATION ReservationAffinity_Type = 2
// Must consume from a specific reservation. Must specify key value fields
// for specifying the reservations.
ReservationAffinity_SPECIFIC_RESERVATION ReservationAffinity_Type = 3
)
func (ReservationAffinity_Type) Descriptor
func (ReservationAffinity_Type) Descriptor() protoreflect.EnumDescriptor
func (ReservationAffinity_Type) Enum
func (x ReservationAffinity_Type) Enum() *ReservationAffinity_Type
func (ReservationAffinity_Type) EnumDescriptor
func (ReservationAffinity_Type) EnumDescriptor() ([]byte, []int)
Deprecated: Use ReservationAffinity_Type.Descriptor instead.
func (ReservationAffinity_Type) Number
func (x ReservationAffinity_Type) Number() protoreflect.EnumNumber
func (ReservationAffinity_Type) String
func (x ReservationAffinity_Type) String() string
func (ReservationAffinity_Type) Type
func (ReservationAffinity_Type) Type() protoreflect.EnumType
ResizeNodeGroupRequest
type ResizeNodeGroupRequest struct {
// Required. The name of the node group to resize.
// Format:
// `projects/{project}/regions/{region}/clusters/{cluster}/nodeGroups/{nodeGroup}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Required. The number of running instances for the node group to maintain.
// The group adds or removes instances to maintain the number of instances
// specified by this parameter.
Size int32 `protobuf:"varint,2,opt,name=size,proto3" json:"size,omitempty"`
// Optional. A unique ID used to identify the request. If the server receives
// two
// [ResizeNodeGroupRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.ResizeNodeGroupRequests)
// with the same ID, the second request is ignored and the
// first [google.longrunning.Operation][google.longrunning.Operation] created
// and stored in the backend is returned.
//
// Recommendation: Set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The ID must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,3,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// Optional. Timeout for graceful YARN decommissioning. [Graceful
// decommissioning]
// (https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/scaling-clusters#graceful_decommissioning)
// allows the removal of nodes from the Compute Engine node group
// without interrupting jobs in progress. This timeout specifies how long to
// wait for jobs in progress to finish before forcefully removing nodes (and
// potentially interrupting jobs). Default timeout is 0 (for forceful
// decommission), and the maximum allowed timeout is 1 day. (see JSON
// representation of
// [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
//
// Only supported on Dataproc image versions 1.2 and higher.
GracefulDecommissionTimeout *durationpb.Duration `protobuf:"bytes,4,opt,name=graceful_decommission_timeout,json=gracefulDecommissionTimeout,proto3" json:"graceful_decommission_timeout,omitempty"`
// contains filtered or unexported fields
}
A request to resize a node group.
func (*ResizeNodeGroupRequest) Descriptor
func (*ResizeNodeGroupRequest) Descriptor() ([]byte, []int)
Deprecated: Use ResizeNodeGroupRequest.ProtoReflect.Descriptor instead.
func (*ResizeNodeGroupRequest) GetGracefulDecommissionTimeout
func (x *ResizeNodeGroupRequest) GetGracefulDecommissionTimeout() *durationpb.Duration
func (*ResizeNodeGroupRequest) GetName
func (x *ResizeNodeGroupRequest) GetName() string
func (*ResizeNodeGroupRequest) GetRequestId
func (x *ResizeNodeGroupRequest) GetRequestId() string
func (*ResizeNodeGroupRequest) GetSize
func (x *ResizeNodeGroupRequest) GetSize() int32
func (*ResizeNodeGroupRequest) ProtoMessage
func (*ResizeNodeGroupRequest) ProtoMessage()
func (*ResizeNodeGroupRequest) ProtoReflect
func (x *ResizeNodeGroupRequest) ProtoReflect() protoreflect.Message
func (*ResizeNodeGroupRequest) Reset
func (x *ResizeNodeGroupRequest) Reset()
func (*ResizeNodeGroupRequest) String
func (x *ResizeNodeGroupRequest) String() string
RuntimeConfig
type RuntimeConfig struct {
// Optional. Version of the batch runtime.
Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
// Optional. Optional custom container image for the job runtime environment.
// If not specified, a default container image will be used.
ContainerImage string `protobuf:"bytes,2,opt,name=container_image,json=containerImage,proto3" json:"container_image,omitempty"`
// Optional. A mapping of property names to values, which are used to
// configure workload execution.
Properties map[string]string `protobuf:"bytes,3,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. Dependency repository configuration.
RepositoryConfig *RepositoryConfig `protobuf:"bytes,5,opt,name=repository_config,json=repositoryConfig,proto3" json:"repository_config,omitempty"`
// Optional. Autotuning configuration of the workload.
AutotuningConfig *AutotuningConfig `protobuf:"bytes,6,opt,name=autotuning_config,json=autotuningConfig,proto3" json:"autotuning_config,omitempty"`
// Optional. Cohort identifier. Identifies families of the workloads having
// the same shape, e.g. daily ETL jobs.
Cohort string `protobuf:"bytes,7,opt,name=cohort,proto3" json:"cohort,omitempty"`
// contains filtered or unexported fields
}
Runtime configuration for a workload.
func (*RuntimeConfig) Descriptor
func (*RuntimeConfig) Descriptor() ([]byte, []int)
Deprecated: Use RuntimeConfig.ProtoReflect.Descriptor instead.
func (*RuntimeConfig) GetAutotuningConfig
func (x *RuntimeConfig) GetAutotuningConfig() *AutotuningConfig
func (*RuntimeConfig) GetCohort
func (x *RuntimeConfig) GetCohort() string
func (*RuntimeConfig) GetContainerImage
func (x *RuntimeConfig) GetContainerImage() string
func (*RuntimeConfig) GetProperties
func (x *RuntimeConfig) GetProperties() map[string]string
func (*RuntimeConfig) GetRepositoryConfig
func (x *RuntimeConfig) GetRepositoryConfig() *RepositoryConfig
func (*RuntimeConfig) GetVersion
func (x *RuntimeConfig) GetVersion() string
func (*RuntimeConfig) ProtoMessage
func (*RuntimeConfig) ProtoMessage()
func (*RuntimeConfig) ProtoReflect
func (x *RuntimeConfig) ProtoReflect() protoreflect.Message
func (*RuntimeConfig) Reset
func (x *RuntimeConfig) Reset()
func (*RuntimeConfig) String
func (x *RuntimeConfig) String() string
RuntimeInfo
type RuntimeInfo struct {
// Output only. Map of remote access endpoints (such as web interfaces and
// APIs) to their URIs.
Endpoints map[string]string `protobuf:"bytes,1,rep,name=endpoints,proto3" json:"endpoints,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Output only. A URI pointing to the location of the stdout and stderr of the
// workload.
OutputUri string `protobuf:"bytes,2,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
// Output only. A URI pointing to the location of the diagnostics tarball.
DiagnosticOutputUri string `protobuf:"bytes,3,opt,name=diagnostic_output_uri,json=diagnosticOutputUri,proto3" json:"diagnostic_output_uri,omitempty"`
// Output only. Approximate workload resource usage, calculated when
// the workload completes (see [Dataproc Serverless pricing]
// (https://cloud.google.com/dataproc-serverless/pricing)).
//
// **Note:** This metric calculation may change in the future, for
// example, to capture cumulative workload resource
// consumption during workload execution (see the
// [Dataproc Serverless release notes]
// (https://cloud.google.com/dataproc-serverless/docs/release-notes)
// for announcements, changes, fixes
// and other Dataproc developments).
ApproximateUsage *UsageMetrics `protobuf:"bytes,6,opt,name=approximate_usage,json=approximateUsage,proto3" json:"approximate_usage,omitempty"`
// Output only. Snapshot of current workload resource usage.
CurrentUsage *UsageSnapshot `protobuf:"bytes,7,opt,name=current_usage,json=currentUsage,proto3" json:"current_usage,omitempty"`
// contains filtered or unexported fields
}
Runtime information about workload execution.
func (*RuntimeInfo) Descriptor
func (*RuntimeInfo) Descriptor() ([]byte, []int)
Deprecated: Use RuntimeInfo.ProtoReflect.Descriptor instead.
func (*RuntimeInfo) GetApproximateUsage
func (x *RuntimeInfo) GetApproximateUsage() *UsageMetrics
func (*RuntimeInfo) GetCurrentUsage
func (x *RuntimeInfo) GetCurrentUsage() *UsageSnapshot
func (*RuntimeInfo) GetDiagnosticOutputUri
func (x *RuntimeInfo) GetDiagnosticOutputUri() string
func (*RuntimeInfo) GetEndpoints
func (x *RuntimeInfo) GetEndpoints() map[string]string
func (*RuntimeInfo) GetOutputUri
func (x *RuntimeInfo) GetOutputUri() string
func (*RuntimeInfo) ProtoMessage
func (*RuntimeInfo) ProtoMessage()
func (*RuntimeInfo) ProtoReflect
func (x *RuntimeInfo) ProtoReflect() protoreflect.Message
func (*RuntimeInfo) Reset
func (x *RuntimeInfo) Reset()
func (*RuntimeInfo) String
func (x *RuntimeInfo) String() string
SecurityConfig
type SecurityConfig struct {
// Optional. Kerberos related configuration.
KerberosConfig *KerberosConfig `protobuf:"bytes,1,opt,name=kerberos_config,json=kerberosConfig,proto3" json:"kerberos_config,omitempty"`
// Optional. Identity related configuration, including service account based
// secure multi-tenancy user mappings.
IdentityConfig *IdentityConfig `protobuf:"bytes,2,opt,name=identity_config,json=identityConfig,proto3" json:"identity_config,omitempty"`
// contains filtered or unexported fields
}
Security related configuration, including encryption, Kerberos, etc.
func (*SecurityConfig) Descriptor
func (*SecurityConfig) Descriptor() ([]byte, []int)
Deprecated: Use SecurityConfig.ProtoReflect.Descriptor instead.
func (*SecurityConfig) GetIdentityConfig
func (x *SecurityConfig) GetIdentityConfig() *IdentityConfig
func (*SecurityConfig) GetKerberosConfig
func (x *SecurityConfig) GetKerberosConfig() *KerberosConfig
func (*SecurityConfig) ProtoMessage
func (*SecurityConfig) ProtoMessage()
func (*SecurityConfig) ProtoReflect
func (x *SecurityConfig) ProtoReflect() protoreflect.Message
func (*SecurityConfig) Reset
func (x *SecurityConfig) Reset()
func (*SecurityConfig) String
func (x *SecurityConfig) String() string
Session
type Session struct {
// Required. The resource name of the session.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Output only. A session UUID (Unique Universal Identifier). The service
// generates this value when it creates the session.
Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"`
// Output only. The time when the session was created.
CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// The session configuration.
//
// Types that are assignable to SessionConfig:
//
// *Session_JupyterSession
// *Session_SparkConnectSession
SessionConfig isSession_SessionConfig `protobuf_oneof:"session_config"`
// Output only. Runtime information about session execution.
RuntimeInfo *RuntimeInfo `protobuf:"bytes,6,opt,name=runtime_info,json=runtimeInfo,proto3" json:"runtime_info,omitempty"`
// Output only. A state of the session.
State Session_State `protobuf:"varint,7,opt,name=state,proto3,enum=google.cloud.dataproc.v1.Session_State" json:"state,omitempty"`
// Output only. Session state details, such as the failure
// description if the state is `FAILED`.
StateMessage string `protobuf:"bytes,8,opt,name=state_message,json=stateMessage,proto3" json:"state_message,omitempty"`
// Output only. The time when the session entered the current state.
StateTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=state_time,json=stateTime,proto3" json:"state_time,omitempty"`
// Output only. The email address of the user who created the session.
Creator string `protobuf:"bytes,10,opt,name=creator,proto3" json:"creator,omitempty"`
// Optional. The labels to associate with the session.
// Label **keys** must contain 1 to 63 characters, and must conform to
// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
// Label **values** may be empty, but, if present, must contain 1 to 63
// characters, and must conform to [RFC
// 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
// associated with a session.
Labels map[string]string `protobuf:"bytes,11,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. Runtime configuration for the session execution.
RuntimeConfig *RuntimeConfig `protobuf:"bytes,12,opt,name=runtime_config,json=runtimeConfig,proto3" json:"runtime_config,omitempty"`
// Optional. Environment configuration for the session execution.
EnvironmentConfig *EnvironmentConfig `protobuf:"bytes,13,opt,name=environment_config,json=environmentConfig,proto3" json:"environment_config,omitempty"`
// Optional. The email address of the user who owns the session.
User string `protobuf:"bytes,14,opt,name=user,proto3" json:"user,omitempty"`
// Output only. Historical state information for the session.
StateHistory []*Session_SessionStateHistory `protobuf:"bytes,15,rep,name=state_history,json=stateHistory,proto3" json:"state_history,omitempty"`
// Optional. The session template used by the session.
//
// Only resource names, including project ID and location, are valid.
//
// Example:
// * `https://www.googleapis.com/compute/v1/projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]`
// * `projects/[project_id]/locations/[dataproc_region]/sessionTemplates/[template_id]`
//
// The template must be in the same project and Dataproc region as the
// session.
SessionTemplate string `protobuf:"bytes,16,opt,name=session_template,json=sessionTemplate,proto3" json:"session_template,omitempty"`
// contains filtered or unexported fields
}
A representation of a session.
func (*Session) Descriptor
Deprecated: Use Session.ProtoReflect.Descriptor instead.
func (*Session) GetCreateTime
func (x *Session) GetCreateTime() *timestamppb.Timestamp
func (*Session) GetCreator
func (*Session) GetEnvironmentConfig
func (x *Session) GetEnvironmentConfig() *EnvironmentConfig
func (*Session) GetJupyterSession
func (x *Session) GetJupyterSession() *JupyterConfig
func (*Session) GetLabels
func (*Session) GetName
func (*Session) GetRuntimeConfig
func (x *Session) GetRuntimeConfig() *RuntimeConfig
func (*Session) GetRuntimeInfo
func (x *Session) GetRuntimeInfo() *RuntimeInfo
func (*Session) GetSessionConfig
func (m *Session) GetSessionConfig() isSession_SessionConfig
func (*Session) GetSessionTemplate
func (*Session) GetSparkConnectSession
func (x *Session) GetSparkConnectSession() *SparkConnectConfig
func (*Session) GetState
func (x *Session) GetState() Session_State
func (*Session) GetStateHistory
func (x *Session) GetStateHistory() []*Session_SessionStateHistory
func (*Session) GetStateMessage
func (*Session) GetStateTime
func (x *Session) GetStateTime() *timestamppb.Timestamp
func (*Session) GetUser
func (*Session) GetUuid
func (*Session) ProtoMessage
func (*Session) ProtoMessage()
func (*Session) ProtoReflect
func (x *Session) ProtoReflect() protoreflect.Message
func (*Session) Reset
func (x *Session) Reset()
func (*Session) String
SessionControllerClient
type SessionControllerClient interface {
// Create an interactive session asynchronously.
CreateSession(ctx context.Context, in *CreateSessionRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
// Gets the resource representation for an interactive session.
GetSession(ctx context.Context, in *GetSessionRequest, opts ...grpc.CallOption) (*Session, error)
// Lists interactive sessions.
ListSessions(ctx context.Context, in *ListSessionsRequest, opts ...grpc.CallOption) (*ListSessionsResponse, error)
// Terminates the interactive session.
TerminateSession(ctx context.Context, in *TerminateSessionRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
// Deletes the interactive session resource. If the session is not in terminal
// state, it is terminated, and then deleted.
DeleteSession(ctx context.Context, in *DeleteSessionRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
}
SessionControllerClient is the client API for SessionController service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewSessionControllerClient
func NewSessionControllerClient(cc grpc.ClientConnInterface) SessionControllerClient
SessionControllerServer
type SessionControllerServer interface {
// Create an interactive session asynchronously.
CreateSession(context.Context, *CreateSessionRequest) (*longrunningpb.Operation, error)
// Gets the resource representation for an interactive session.
GetSession(context.Context, *GetSessionRequest) (*Session, error)
// Lists interactive sessions.
ListSessions(context.Context, *ListSessionsRequest) (*ListSessionsResponse, error)
// Terminates the interactive session.
TerminateSession(context.Context, *TerminateSessionRequest) (*longrunningpb.Operation, error)
// Deletes the interactive session resource. If the session is not in terminal
// state, it is terminated, and then deleted.
DeleteSession(context.Context, *DeleteSessionRequest) (*longrunningpb.Operation, error)
}
SessionControllerServer is the server API for SessionController service.
SessionOperationMetadata
type SessionOperationMetadata struct {
// Name of the session for the operation.
Session string `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
// Session UUID for the operation.
SessionUuid string `protobuf:"bytes,2,opt,name=session_uuid,json=sessionUuid,proto3" json:"session_uuid,omitempty"`
// The time when the operation was created.
CreateTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// The time when the operation was finished.
DoneTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=done_time,json=doneTime,proto3" json:"done_time,omitempty"`
// The operation type.
OperationType SessionOperationMetadata_SessionOperationType `protobuf:"varint,6,opt,name=operation_type,json=operationType,proto3,enum=google.cloud.dataproc.v1.SessionOperationMetadata_SessionOperationType" json:"operation_type,omitempty"`
// Short description of the operation.
Description string `protobuf:"bytes,7,opt,name=description,proto3" json:"description,omitempty"`
// Labels associated with the operation.
Labels map[string]string `protobuf:"bytes,8,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Warnings encountered during operation execution.
Warnings []string `protobuf:"bytes,9,rep,name=warnings,proto3" json:"warnings,omitempty"`
// contains filtered or unexported fields
}
Metadata describing the Session operation.
func (*SessionOperationMetadata) Descriptor
func (*SessionOperationMetadata) Descriptor() ([]byte, []int)
Deprecated: Use SessionOperationMetadata.ProtoReflect.Descriptor instead.
func (*SessionOperationMetadata) GetCreateTime
func (x *SessionOperationMetadata) GetCreateTime() *timestamppb.Timestamp
func (*SessionOperationMetadata) GetDescription
func (x *SessionOperationMetadata) GetDescription() string
func (*SessionOperationMetadata) GetDoneTime
func (x *SessionOperationMetadata) GetDoneTime() *timestamppb.Timestamp
func (*SessionOperationMetadata) GetLabels
func (x *SessionOperationMetadata) GetLabels() map[string]string
func (*SessionOperationMetadata) GetOperationType
func (x *SessionOperationMetadata) GetOperationType() SessionOperationMetadata_SessionOperationType
func (*SessionOperationMetadata) GetSession
func (x *SessionOperationMetadata) GetSession() string
func (*SessionOperationMetadata) GetSessionUuid
func (x *SessionOperationMetadata) GetSessionUuid() string
func (*SessionOperationMetadata) GetWarnings
func (x *SessionOperationMetadata) GetWarnings() []string
func (*SessionOperationMetadata) ProtoMessage
func (*SessionOperationMetadata) ProtoMessage()
func (*SessionOperationMetadata) ProtoReflect
func (x *SessionOperationMetadata) ProtoReflect() protoreflect.Message
func (*SessionOperationMetadata) Reset
func (x *SessionOperationMetadata) Reset()
func (*SessionOperationMetadata) String
func (x *SessionOperationMetadata) String() string
SessionOperationMetadata_SessionOperationType
type SessionOperationMetadata_SessionOperationType int32
Operation type for Session resources.
SessionOperationMetadata_SESSION_OPERATION_TYPE_UNSPECIFIED, SessionOperationMetadata_CREATE, SessionOperationMetadata_TERMINATE, SessionOperationMetadata_DELETE
const (
// Session operation type is unknown.
SessionOperationMetadata_SESSION_OPERATION_TYPE_UNSPECIFIED SessionOperationMetadata_SessionOperationType = 0
// Create Session operation type.
SessionOperationMetadata_CREATE SessionOperationMetadata_SessionOperationType = 1
// Terminate Session operation type.
SessionOperationMetadata_TERMINATE SessionOperationMetadata_SessionOperationType = 2
// Delete Session operation type.
SessionOperationMetadata_DELETE SessionOperationMetadata_SessionOperationType = 3
)
func (SessionOperationMetadata_SessionOperationType) Descriptor
func (SessionOperationMetadata_SessionOperationType) Descriptor() protoreflect.EnumDescriptor
func (SessionOperationMetadata_SessionOperationType) Enum
func (x SessionOperationMetadata_SessionOperationType) Enum() *SessionOperationMetadata_SessionOperationType
func (SessionOperationMetadata_SessionOperationType) EnumDescriptor
func (SessionOperationMetadata_SessionOperationType) EnumDescriptor() ([]byte, []int)
Deprecated: Use SessionOperationMetadata_SessionOperationType.Descriptor instead.
func (SessionOperationMetadata_SessionOperationType) Number
func (x SessionOperationMetadata_SessionOperationType) Number() protoreflect.EnumNumber
func (SessionOperationMetadata_SessionOperationType) String
func (x SessionOperationMetadata_SessionOperationType) String() string
func (SessionOperationMetadata_SessionOperationType) Type
func (SessionOperationMetadata_SessionOperationType) Type() protoreflect.EnumType
SessionTemplate
type SessionTemplate struct {
// Required. The resource name of the session template.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Optional. Brief description of the template.
Description string `protobuf:"bytes,9,opt,name=description,proto3" json:"description,omitempty"`
// Output only. The time when the template was created.
CreateTime *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// The session configuration.
//
// Types that are assignable to SessionConfig:
//
// *SessionTemplate_JupyterSession
// *SessionTemplate_SparkConnectSession
SessionConfig isSessionTemplate_SessionConfig `protobuf_oneof:"session_config"`
// Output only. The email address of the user who created the template.
Creator string `protobuf:"bytes,5,opt,name=creator,proto3" json:"creator,omitempty"`
// Optional. Labels to associate with sessions created using this template.
// Label **keys** must contain 1 to 63 characters, and must conform to
// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
// Label **values** can be empty, but, if present, must contain 1 to 63
// characters and conform to [RFC
// 1035](https://www.ietf.org/rfc/rfc1035.txt). No more than 32 labels can be
// associated with a session.
Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. Runtime configuration for session execution.
RuntimeConfig *RuntimeConfig `protobuf:"bytes,7,opt,name=runtime_config,json=runtimeConfig,proto3" json:"runtime_config,omitempty"`
// Optional. Environment configuration for session execution.
EnvironmentConfig *EnvironmentConfig `protobuf:"bytes,8,opt,name=environment_config,json=environmentConfig,proto3" json:"environment_config,omitempty"`
// Output only. The time the template was last updated.
UpdateTime *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
// Output only. A session template UUID (Unique Universal Identifier). The
// service generates this value when it creates the session template.
Uuid string `protobuf:"bytes,12,opt,name=uuid,proto3" json:"uuid,omitempty"`
// contains filtered or unexported fields
}
A representation of a session template.
func (*SessionTemplate) Descriptor
func (*SessionTemplate) Descriptor() ([]byte, []int)
Deprecated: Use SessionTemplate.ProtoReflect.Descriptor instead.
func (*SessionTemplate) GetCreateTime
func (x *SessionTemplate) GetCreateTime() *timestamppb.Timestamp
func (*SessionTemplate) GetCreator
func (x *SessionTemplate) GetCreator() string
func (*SessionTemplate) GetDescription
func (x *SessionTemplate) GetDescription() string
func (*SessionTemplate) GetEnvironmentConfig
func (x *SessionTemplate) GetEnvironmentConfig() *EnvironmentConfig
func (*SessionTemplate) GetJupyterSession
func (x *SessionTemplate) GetJupyterSession() *JupyterConfig
func (*SessionTemplate) GetLabels
func (x *SessionTemplate) GetLabels() map[string]string
func (*SessionTemplate) GetName
func (x *SessionTemplate) GetName() string
func (*SessionTemplate) GetRuntimeConfig
func (x *SessionTemplate) GetRuntimeConfig() *RuntimeConfig
func (*SessionTemplate) GetSessionConfig
func (m *SessionTemplate) GetSessionConfig() isSessionTemplate_SessionConfig
func (*SessionTemplate) GetSparkConnectSession
func (x *SessionTemplate) GetSparkConnectSession() *SparkConnectConfig
func (*SessionTemplate) GetUpdateTime
func (x *SessionTemplate) GetUpdateTime() *timestamppb.Timestamp
func (*SessionTemplate) GetUuid
func (x *SessionTemplate) GetUuid() string
func (*SessionTemplate) ProtoMessage
func (*SessionTemplate) ProtoMessage()
func (*SessionTemplate) ProtoReflect
func (x *SessionTemplate) ProtoReflect() protoreflect.Message
func (*SessionTemplate) Reset
func (x *SessionTemplate) Reset()
func (*SessionTemplate) String
func (x *SessionTemplate) String() string
SessionTemplateControllerClient
type SessionTemplateControllerClient interface {
// Create a session template synchronously.
CreateSessionTemplate(ctx context.Context, in *CreateSessionTemplateRequest, opts ...grpc.CallOption) (*SessionTemplate, error)
// Updates the session template synchronously.
UpdateSessionTemplate(ctx context.Context, in *UpdateSessionTemplateRequest, opts ...grpc.CallOption) (*SessionTemplate, error)
// Gets the resource representation for a session template.
GetSessionTemplate(ctx context.Context, in *GetSessionTemplateRequest, opts ...grpc.CallOption) (*SessionTemplate, error)
// Lists session templates.
ListSessionTemplates(ctx context.Context, in *ListSessionTemplatesRequest, opts ...grpc.CallOption) (*ListSessionTemplatesResponse, error)
// Deletes a session template.
DeleteSessionTemplate(ctx context.Context, in *DeleteSessionTemplateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
SessionTemplateControllerClient is the client API for SessionTemplateController service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewSessionTemplateControllerClient
func NewSessionTemplateControllerClient(cc grpc.ClientConnInterface) SessionTemplateControllerClient
SessionTemplateControllerServer
type SessionTemplateControllerServer interface {
// Create a session template synchronously.
CreateSessionTemplate(context.Context, *CreateSessionTemplateRequest) (*SessionTemplate, error)
// Updates the session template synchronously.
UpdateSessionTemplate(context.Context, *UpdateSessionTemplateRequest) (*SessionTemplate, error)
// Gets the resource representation for a session template.
GetSessionTemplate(context.Context, *GetSessionTemplateRequest) (*SessionTemplate, error)
// Lists session templates.
ListSessionTemplates(context.Context, *ListSessionTemplatesRequest) (*ListSessionTemplatesResponse, error)
// Deletes a session template.
DeleteSessionTemplate(context.Context, *DeleteSessionTemplateRequest) (*emptypb.Empty, error)
}
SessionTemplateControllerServer is the server API for SessionTemplateController service.
SessionTemplate_JupyterSession
type SessionTemplate_JupyterSession struct {
// Optional. Jupyter session config.
JupyterSession *JupyterConfig `protobuf:"bytes,3,opt,name=jupyter_session,json=jupyterSession,proto3,oneof"`
}
SessionTemplate_SparkConnectSession
type SessionTemplate_SparkConnectSession struct {
// Optional. Spark Connect session config.
SparkConnectSession *SparkConnectConfig `protobuf:"bytes,11,opt,name=spark_connect_session,json=sparkConnectSession,proto3,oneof"`
}
Session_JupyterSession
type Session_JupyterSession struct {
// Optional. Jupyter session config.
JupyterSession *JupyterConfig `protobuf:"bytes,4,opt,name=jupyter_session,json=jupyterSession,proto3,oneof"`
}
Session_SessionStateHistory
type Session_SessionStateHistory struct {
// Output only. The state of the session at this point in the session
// history.
State Session_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1.Session_State" json:"state,omitempty"`
// Output only. Details about the state at this point in the session
// history.
StateMessage string `protobuf:"bytes,2,opt,name=state_message,json=stateMessage,proto3" json:"state_message,omitempty"`
// Output only. The time when the session entered the historical state.
StateStartTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
// contains filtered or unexported fields
}
Historical state information.
func (*Session_SessionStateHistory) Descriptor
func (*Session_SessionStateHistory) Descriptor() ([]byte, []int)
Deprecated: Use Session_SessionStateHistory.ProtoReflect.Descriptor instead.
func (*Session_SessionStateHistory) GetState
func (x *Session_SessionStateHistory) GetState() Session_State
func (*Session_SessionStateHistory) GetStateMessage
func (x *Session_SessionStateHistory) GetStateMessage() string
func (*Session_SessionStateHistory) GetStateStartTime
func (x *Session_SessionStateHistory) GetStateStartTime() *timestamppb.Timestamp
func (*Session_SessionStateHistory) ProtoMessage
func (*Session_SessionStateHistory) ProtoMessage()
func (*Session_SessionStateHistory) ProtoReflect
func (x *Session_SessionStateHistory) ProtoReflect() protoreflect.Message
func (*Session_SessionStateHistory) Reset
func (x *Session_SessionStateHistory) Reset()
func (*Session_SessionStateHistory) String
func (x *Session_SessionStateHistory) String() string
Session_SparkConnectSession
type Session_SparkConnectSession struct {
// Optional. Spark Connect session config.
SparkConnectSession *SparkConnectConfig `protobuf:"bytes,17,opt,name=spark_connect_session,json=sparkConnectSession,proto3,oneof"`
}
Session_State
type Session_State int32
The session state.
Session_STATE_UNSPECIFIED, Session_CREATING, Session_ACTIVE, Session_TERMINATING, Session_TERMINATED, Session_FAILED
const (
// The session state is unknown.
Session_STATE_UNSPECIFIED Session_State = 0
// The session is created prior to running.
Session_CREATING Session_State = 1
// The session is running.
Session_ACTIVE Session_State = 2
// The session is terminating.
Session_TERMINATING Session_State = 3
// The session is terminated successfully.
Session_TERMINATED Session_State = 4
// The session is no longer running due to an error.
Session_FAILED Session_State = 5
)
func (Session_State) Descriptor
func (Session_State) Descriptor() protoreflect.EnumDescriptor
func (Session_State) Enum
func (x Session_State) Enum() *Session_State
func (Session_State) EnumDescriptor
func (Session_State) EnumDescriptor() ([]byte, []int)
Deprecated: Use Session_State.Descriptor instead.
func (Session_State) Number
func (x Session_State) Number() protoreflect.EnumNumber
func (Session_State) String
func (x Session_State) String() string
func (Session_State) Type
func (Session_State) Type() protoreflect.EnumType
ShieldedInstanceConfig
type ShieldedInstanceConfig struct {
// Optional. Defines whether instances have Secure Boot enabled.
EnableSecureBoot *bool `protobuf:"varint,1,opt,name=enable_secure_boot,json=enableSecureBoot,proto3,oneof" json:"enable_secure_boot,omitempty"`
// Optional. Defines whether instances have the vTPM enabled.
EnableVtpm *bool `protobuf:"varint,2,opt,name=enable_vtpm,json=enableVtpm,proto3,oneof" json:"enable_vtpm,omitempty"`
// Optional. Defines whether instances have integrity monitoring enabled.
EnableIntegrityMonitoring *bool `protobuf:"varint,3,opt,name=enable_integrity_monitoring,json=enableIntegrityMonitoring,proto3,oneof" json:"enable_integrity_monitoring,omitempty"`
// contains filtered or unexported fields
}
Shielded Instance Config for clusters using Compute Engine Shielded VMs.
func (*ShieldedInstanceConfig) Descriptor
func (*ShieldedInstanceConfig) Descriptor() ([]byte, []int)
Deprecated: Use ShieldedInstanceConfig.ProtoReflect.Descriptor instead.
func (*ShieldedInstanceConfig) GetEnableIntegrityMonitoring
func (x *ShieldedInstanceConfig) GetEnableIntegrityMonitoring() bool
func (*ShieldedInstanceConfig) GetEnableSecureBoot
func (x *ShieldedInstanceConfig) GetEnableSecureBoot() bool
func (*ShieldedInstanceConfig) GetEnableVtpm
func (x *ShieldedInstanceConfig) GetEnableVtpm() bool
func (*ShieldedInstanceConfig) ProtoMessage
func (*ShieldedInstanceConfig) ProtoMessage()
func (*ShieldedInstanceConfig) ProtoReflect
func (x *ShieldedInstanceConfig) ProtoReflect() protoreflect.Message
func (*ShieldedInstanceConfig) Reset
func (x *ShieldedInstanceConfig) Reset()
func (*ShieldedInstanceConfig) String
func (x *ShieldedInstanceConfig) String() string
SoftwareConfig
type SoftwareConfig struct {
// Optional. The version of software inside the cluster. It must be one of the
// supported [Dataproc
// Versions](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#supported-dataproc-image-versions),
// such as "1.2" (including a subminor version, such as "1.2.29"), or the
// ["preview"
// version](https://cloud.google.com/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
// If unspecified, it defaults to the latest Debian version.
ImageVersion string `protobuf:"bytes,1,opt,name=image_version,json=imageVersion,proto3" json:"image_version,omitempty"`
// Optional. The properties to set on daemon config files.
//
// Property keys are specified in `prefix:property` format, for example
// `core:hadoop.tmp.dir`. The following are supported prefixes
// and their mappings:
//
// * capacity-scheduler: `capacity-scheduler.xml`
// * core: `core-site.xml`
// * distcp: `distcp-default.xml`
// * hdfs: `hdfs-site.xml`
// * hive: `hive-site.xml`
// * mapred: `mapred-site.xml`
// * pig: `pig.properties`
// * spark: `spark-defaults.conf`
// * yarn: `yarn-site.xml`
//
// For more information, see [Cluster
// properties](https://cloud.google.com/dataproc/docs/concepts/cluster-properties).
Properties map[string]string `protobuf:"bytes,2,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. The set of components to activate on the cluster.
OptionalComponents []Component `protobuf:"varint,3,rep,packed,name=optional_components,json=optionalComponents,proto3,enum=google.cloud.dataproc.v1.Component" json:"optional_components,omitempty"`
// contains filtered or unexported fields
}
Specifies the selection and config of software inside the cluster.
func (*SoftwareConfig) Descriptor
func (*SoftwareConfig) Descriptor() ([]byte, []int)
Deprecated: Use SoftwareConfig.ProtoReflect.Descriptor instead.
func (*SoftwareConfig) GetImageVersion
func (x *SoftwareConfig) GetImageVersion() string
func (*SoftwareConfig) GetOptionalComponents
func (x *SoftwareConfig) GetOptionalComponents() []Component
func (*SoftwareConfig) GetProperties
func (x *SoftwareConfig) GetProperties() map[string]string
func (*SoftwareConfig) ProtoMessage
func (*SoftwareConfig) ProtoMessage()
func (*SoftwareConfig) ProtoReflect
func (x *SoftwareConfig) ProtoReflect() protoreflect.Message
func (*SoftwareConfig) Reset
func (x *SoftwareConfig) Reset()
func (*SoftwareConfig) String
func (x *SoftwareConfig) String() string
SparkBatch
type SparkBatch struct {
// The specification of the main method to call to drive the Spark
// workload. Specify either the jar file that contains the main class or the
// main class name. To pass both a main jar and a main class in that jar, add
// the jar to `jar_file_uris`, and then specify the main class
// name in `main_class`.
//
// Types that are assignable to Driver:
//
// *SparkBatch_MainJarFileUri
// *SparkBatch_MainClass
Driver isSparkBatch_Driver `protobuf_oneof:"driver"`
// Optional. The arguments to pass to the driver. Do not include arguments
// that can be set as batch properties, such as `--conf`, since a collision
// can occur that causes an incorrect batch submission.
Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
// Optional. HCFS URIs of jar files to add to the classpath of the
// Spark driver and tasks.
JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
// Optional. HCFS URIs of files to be placed in the working directory of
// each executor.
FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
// Optional. HCFS URIs of archives to be extracted into the working directory
// of each executor. Supported file types:
// `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
// contains filtered or unexported fields
}
A configuration for running an Apache Spark batch workload.
func (*SparkBatch) Descriptor
func (*SparkBatch) Descriptor() ([]byte, []int)
Deprecated: Use SparkBatch.ProtoReflect.Descriptor instead.
func (*SparkBatch) GetArchiveUris
func (x *SparkBatch) GetArchiveUris() []string
func (*SparkBatch) GetArgs
func (x *SparkBatch) GetArgs() []string
func (*SparkBatch) GetDriver
func (m *SparkBatch) GetDriver() isSparkBatch_Driver
func (*SparkBatch) GetFileUris
func (x *SparkBatch) GetFileUris() []string
func (*SparkBatch) GetJarFileUris
func (x *SparkBatch) GetJarFileUris() []string
func (*SparkBatch) GetMainClass
func (x *SparkBatch) GetMainClass() string
func (*SparkBatch) GetMainJarFileUri
func (x *SparkBatch) GetMainJarFileUri() string
func (*SparkBatch) ProtoMessage
func (*SparkBatch) ProtoMessage()
func (*SparkBatch) ProtoReflect
func (x *SparkBatch) ProtoReflect() protoreflect.Message
func (*SparkBatch) Reset
func (x *SparkBatch) Reset()
func (*SparkBatch) String
func (x *SparkBatch) String() string
SparkBatch_MainClass
type SparkBatch_MainClass struct {
// Optional. The name of the driver main class. The jar file that contains
// the class must be in the classpath or specified in `jar_file_uris`.
MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"`
}
SparkBatch_MainJarFileUri
type SparkBatch_MainJarFileUri struct {
// Optional. The HCFS URI of the jar file that contains the main class.
MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"`
}
SparkConnectConfig
type SparkConnectConfig struct {
// contains filtered or unexported fields
}
Spark Connect configuration for an interactive session.
func (*SparkConnectConfig) Descriptor
func (*SparkConnectConfig) Descriptor() ([]byte, []int)
Deprecated: Use SparkConnectConfig.ProtoReflect.Descriptor instead.
func (*SparkConnectConfig) ProtoMessage
func (*SparkConnectConfig) ProtoMessage()
func (*SparkConnectConfig) ProtoReflect
func (x *SparkConnectConfig) ProtoReflect() protoreflect.Message
func (*SparkConnectConfig) Reset
func (x *SparkConnectConfig) Reset()
func (*SparkConnectConfig) String
func (x *SparkConnectConfig) String() string
SparkHistoryServerConfig
type SparkHistoryServerConfig struct {
// Optional. Resource name of an existing Dataproc Cluster to act as a Spark
// History Server for the workload.
//
// Example:
//
// * `projects/[project_id]/regions/[region]/clusters/[cluster_name]`
DataprocCluster string `protobuf:"bytes,1,opt,name=dataproc_cluster,json=dataprocCluster,proto3" json:"dataproc_cluster,omitempty"`
// contains filtered or unexported fields
}
Spark History Server configuration for the workload.
func (*SparkHistoryServerConfig) Descriptor
func (*SparkHistoryServerConfig) Descriptor() ([]byte, []int)
Deprecated: Use SparkHistoryServerConfig.ProtoReflect.Descriptor instead.
func (*SparkHistoryServerConfig) GetDataprocCluster
func (x *SparkHistoryServerConfig) GetDataprocCluster() string
func (*SparkHistoryServerConfig) ProtoMessage
func (*SparkHistoryServerConfig) ProtoMessage()
func (*SparkHistoryServerConfig) ProtoReflect
func (x *SparkHistoryServerConfig) ProtoReflect() protoreflect.Message
func (*SparkHistoryServerConfig) Reset
func (x *SparkHistoryServerConfig) Reset()
func (*SparkHistoryServerConfig) String
func (x *SparkHistoryServerConfig) String() string
SparkJob
type SparkJob struct {
// Required. The specification of the main method to call to drive the job.
// Specify either the jar file that contains the main class or the main class
// name. To pass both a main jar and a main class in that jar, add the jar to
// [jarFileUris][google.cloud.dataproc.v1.SparkJob.jar_file_uris], and then
// specify the main class name in
// [mainClass][google.cloud.dataproc.v1.SparkJob.main_class].
//
// Types that are assignable to Driver:
//
// *SparkJob_MainJarFileUri
// *SparkJob_MainClass
Driver isSparkJob_Driver `protobuf_oneof:"driver"`
// Optional. The arguments to pass to the driver. Do not include arguments,
// such as `--conf`, that can be set as job properties, since a collision may
// occur that causes an incorrect job submission.
Args []string `protobuf:"bytes,3,rep,name=args,proto3" json:"args,omitempty"`
// Optional. HCFS URIs of jar files to add to the CLASSPATHs of the
// Spark driver and tasks.
JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
// Optional. HCFS URIs of files to be placed in the working directory of
// each executor. Useful for naively parallel tasks.
FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
// Optional. HCFS URIs of archives to be extracted into the working directory
// of each executor. Supported file types:
// .jar, .tar, .tar.gz, .tgz, and .zip.
ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
// Optional. A mapping of property names to values, used to configure Spark.
// Properties that conflict with values set by the Dataproc API might be
// overwritten. Can include properties set in
// /etc/spark/conf/spark-defaults.conf and classes in user code.
Properties map[string]string `protobuf:"bytes,7,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. The runtime log config for job execution.
LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
// contains filtered or unexported fields
}
A Dataproc job for running Apache Spark applications on YARN.
func (*SparkJob) Descriptor
Deprecated: Use SparkJob.ProtoReflect.Descriptor instead.
func (*SparkJob) GetArchiveUris
func (*SparkJob) GetArgs
func (*SparkJob) GetDriver
func (m *SparkJob) GetDriver() isSparkJob_Driver
func (*SparkJob) GetFileUris
func (*SparkJob) GetJarFileUris
func (*SparkJob) GetLoggingConfig
func (x *SparkJob) GetLoggingConfig() *LoggingConfig
func (*SparkJob) GetMainClass
func (*SparkJob) GetMainJarFileUri
func (*SparkJob) GetProperties
func (*SparkJob) ProtoMessage
func (*SparkJob) ProtoMessage()
func (*SparkJob) ProtoReflect
func (x *SparkJob) ProtoReflect() protoreflect.Message
func (*SparkJob) Reset
func (x *SparkJob) Reset()
func (*SparkJob) String
SparkJob_MainClass
type SparkJob_MainClass struct {
// The name of the driver's main class. The jar file that contains the class
// must be in the default CLASSPATH or specified in
// SparkJob.jar_file_uris.
MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,proto3,oneof"`
}
SparkJob_MainJarFileUri
type SparkJob_MainJarFileUri struct {
// The HCFS URI of the jar file that contains the main class.
MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,proto3,oneof"`
}
SparkRBatch
type SparkRBatch struct {
// Required. The HCFS URI of the main R file to use as the driver.
// Must be a `.R` or `.r` file.
MainRFileUri string `protobuf:"bytes,1,opt,name=main_r_file_uri,json=mainRFileUri,proto3" json:"main_r_file_uri,omitempty"`
// Optional. The arguments to pass to the Spark driver. Do not include
// arguments that can be set as batch properties, such as `--conf`, since a
// collision can occur that causes an incorrect batch submission.
Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`
// Optional. HCFS URIs of files to be placed in the working directory of
// each executor.
FileUris []string `protobuf:"bytes,3,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
// Optional. HCFS URIs of archives to be extracted into the working directory
// of each executor. Supported file types:
// `.jar`, `.tar`, `.tar.gz`, `.tgz`, and `.zip`.
ArchiveUris []string `protobuf:"bytes,4,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
// contains filtered or unexported fields
}
A configuration for running an Apache SparkR batch workload.
func (*SparkRBatch) Descriptor
func (*SparkRBatch) Descriptor() ([]byte, []int)
Deprecated: Use SparkRBatch.ProtoReflect.Descriptor instead.
func (*SparkRBatch) GetArchiveUris
func (x *SparkRBatch) GetArchiveUris() []string
func (*SparkRBatch) GetArgs
func (x *SparkRBatch) GetArgs() []string
func (*SparkRBatch) GetFileUris
func (x *SparkRBatch) GetFileUris() []string
func (*SparkRBatch) GetMainRFileUri
func (x *SparkRBatch) GetMainRFileUri() string
func (*SparkRBatch) ProtoMessage
func (*SparkRBatch) ProtoMessage()
func (*SparkRBatch) ProtoReflect
func (x *SparkRBatch) ProtoReflect() protoreflect.Message
func (*SparkRBatch) Reset
func (x *SparkRBatch) Reset()
func (*SparkRBatch) String
func (x *SparkRBatch) String() string
SparkRJob
type SparkRJob struct {
// Required. The HCFS URI of the main R file to use as the driver.
// Must be a .R file.
MainRFileUri string `protobuf:"bytes,1,opt,name=main_r_file_uri,json=mainRFileUri,proto3" json:"main_r_file_uri,omitempty"`
// Optional. The arguments to pass to the driver. Do not include arguments,
// such as `--conf`, that can be set as job properties, since a collision may
// occur that causes an incorrect job submission.
Args []string `protobuf:"bytes,2,rep,name=args,proto3" json:"args,omitempty"`
// Optional. HCFS URIs of files to be placed in the working directory of
// each executor. Useful for naively parallel tasks.
FileUris []string `protobuf:"bytes,3,rep,name=file_uris,json=fileUris,proto3" json:"file_uris,omitempty"`
// Optional. HCFS URIs of archives to be extracted into the working directory
// of each executor. Supported file types:
// .jar, .tar, .tar.gz, .tgz, and .zip.
ArchiveUris []string `protobuf:"bytes,4,rep,name=archive_uris,json=archiveUris,proto3" json:"archive_uris,omitempty"`
// Optional. A mapping of property names to values, used to configure SparkR.
// Properties that conflict with values set by the Dataproc API might be
// overwritten. Can include properties set in
// /etc/spark/conf/spark-defaults.conf and classes in user code.
Properties map[string]string `protobuf:"bytes,5,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. The runtime log config for job execution.
LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
// contains filtered or unexported fields
}
A Dataproc job for running Apache SparkR applications on YARN.
func (*SparkRJob) Descriptor
Deprecated: Use SparkRJob.ProtoReflect.Descriptor instead.
func (*SparkRJob) GetArchiveUris
func (*SparkRJob) GetArgs
func (*SparkRJob) GetFileUris
func (*SparkRJob) GetLoggingConfig
func (x *SparkRJob) GetLoggingConfig() *LoggingConfig
func (*SparkRJob) GetMainRFileUri
func (*SparkRJob) GetProperties
func (*SparkRJob) ProtoMessage
func (*SparkRJob) ProtoMessage()
func (*SparkRJob) ProtoReflect
func (x *SparkRJob) ProtoReflect() protoreflect.Message
func (*SparkRJob) Reset
func (x *SparkRJob) Reset()
func (*SparkRJob) String
SparkSqlBatch
type SparkSqlBatch struct {
// Required. The HCFS URI of the script that contains Spark SQL queries to
// execute.
QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3" json:"query_file_uri,omitempty"`
// Optional. Mapping of query variable names to values (equivalent to the
// Spark SQL command: `SET name="value";`).
QueryVariables map[string]string `protobuf:"bytes,2,rep,name=query_variables,json=queryVariables,proto3" json:"query_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
JarFileUris []string `protobuf:"bytes,3,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
// contains filtered or unexported fields
}
A configuration for running Apache Spark SQL queries as a batch workload.
func (*SparkSqlBatch) Descriptor
func (*SparkSqlBatch) Descriptor() ([]byte, []int)
Deprecated: Use SparkSqlBatch.ProtoReflect.Descriptor instead.
func (*SparkSqlBatch) GetJarFileUris
func (x *SparkSqlBatch) GetJarFileUris() []string
func (*SparkSqlBatch) GetQueryFileUri
func (x *SparkSqlBatch) GetQueryFileUri() string
func (*SparkSqlBatch) GetQueryVariables
func (x *SparkSqlBatch) GetQueryVariables() map[string]string
func (*SparkSqlBatch) ProtoMessage
func (*SparkSqlBatch) ProtoMessage()
func (*SparkSqlBatch) ProtoReflect
func (x *SparkSqlBatch) ProtoReflect() protoreflect.Message
func (*SparkSqlBatch) Reset
func (x *SparkSqlBatch) Reset()
func (*SparkSqlBatch) String
func (x *SparkSqlBatch) String() string
SparkSqlJob
type SparkSqlJob struct {
// Required. The sequence of Spark SQL queries to execute, specified as
// either an HCFS file URI or as a list of queries.
//
// Types that are assignable to Queries:
//
// *SparkSqlJob_QueryFileUri
// *SparkSqlJob_QueryList
Queries isSparkSqlJob_Queries `protobuf_oneof:"queries"`
// Optional. Mapping of query variable names to values (equivalent to the
// Spark SQL command: `SET name="value";`).
ScriptVariables map[string]string `protobuf:"bytes,3,rep,name=script_variables,json=scriptVariables,proto3" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. A mapping of property names to values, used to configure
// Spark SQL's SparkConf. Properties that conflict with values set by the
// Dataproc API might be overwritten.
Properties map[string]string `protobuf:"bytes,4,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH.
JarFileUris []string `protobuf:"bytes,56,rep,name=jar_file_uris,json=jarFileUris,proto3" json:"jar_file_uris,omitempty"`
// Optional. The runtime log config for job execution.
LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
// contains filtered or unexported fields
}
A Dataproc job for running Apache Spark SQL queries.
func (*SparkSqlJob) Descriptor
func (*SparkSqlJob) Descriptor() ([]byte, []int)
Deprecated: Use SparkSqlJob.ProtoReflect.Descriptor instead.
func (*SparkSqlJob) GetJarFileUris
func (x *SparkSqlJob) GetJarFileUris() []string
func (*SparkSqlJob) GetLoggingConfig
func (x *SparkSqlJob) GetLoggingConfig() *LoggingConfig
func (*SparkSqlJob) GetProperties
func (x *SparkSqlJob) GetProperties() map[string]string
func (*SparkSqlJob) GetQueries
func (m *SparkSqlJob) GetQueries() isSparkSqlJob_Queries
func (*SparkSqlJob) GetQueryFileUri
func (x *SparkSqlJob) GetQueryFileUri() string
func (*SparkSqlJob) GetQueryList
func (x *SparkSqlJob) GetQueryList() *QueryList
func (*SparkSqlJob) GetScriptVariables
func (x *SparkSqlJob) GetScriptVariables() map[string]string
func (*SparkSqlJob) ProtoMessage
func (*SparkSqlJob) ProtoMessage()
func (*SparkSqlJob) ProtoReflect
func (x *SparkSqlJob) ProtoReflect() protoreflect.Message
func (*SparkSqlJob) Reset
func (x *SparkSqlJob) Reset()
func (*SparkSqlJob) String
func (x *SparkSqlJob) String() string
SparkSqlJob_QueryFileUri
type SparkSqlJob_QueryFileUri struct {
// The HCFS URI of the script that contains SQL queries.
QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}
SparkSqlJob_QueryList
type SparkSqlJob_QueryList struct {
// A list of queries.
QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}
StartClusterRequest
type StartClusterRequest struct {
// Required. The ID of the Google Cloud Platform project the
// cluster belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,2,opt,name=region,proto3" json:"region,omitempty"`
// Required. The cluster name.
ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
// Optional. Specifying the `cluster_uuid` means the RPC will fail
// (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
ClusterUuid string `protobuf:"bytes,4,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
// Optional. A unique ID used to identify the request. If the server
// receives two
// [StartClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StartClusterRequest)s
// with the same id, then the second request will be ignored and the
// first [google.longrunning.Operation][google.longrunning.Operation] created
// and stored in the backend is returned.
//
// Recommendation: Set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The ID must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// contains filtered or unexported fields
}
A request to start a cluster.
func (*StartClusterRequest) Descriptor
func (*StartClusterRequest) Descriptor() ([]byte, []int)
Deprecated: Use StartClusterRequest.ProtoReflect.Descriptor instead.
func (*StartClusterRequest) GetClusterName
func (x *StartClusterRequest) GetClusterName() string
func (*StartClusterRequest) GetClusterUuid
func (x *StartClusterRequest) GetClusterUuid() string
func (*StartClusterRequest) GetProjectId
func (x *StartClusterRequest) GetProjectId() string
func (*StartClusterRequest) GetRegion
func (x *StartClusterRequest) GetRegion() string
func (*StartClusterRequest) GetRequestId
func (x *StartClusterRequest) GetRequestId() string
func (*StartClusterRequest) ProtoMessage
func (*StartClusterRequest) ProtoMessage()
func (*StartClusterRequest) ProtoReflect
func (x *StartClusterRequest) ProtoReflect() protoreflect.Message
func (*StartClusterRequest) Reset
func (x *StartClusterRequest) Reset()
func (*StartClusterRequest) String
func (x *StartClusterRequest) String() string
StartupConfig
type StartupConfig struct {
// Optional. The config setting to allow cluster creation or update to
// succeed only after required_registration_fraction of instances are up
// and running. This configuration is currently applicable only to
// secondary workers. The cluster will fail if
// required_registration_fraction of instances are not available. This
// includes instance creation, agent registration, and service
// registration (if enabled).
RequiredRegistrationFraction *float64 `protobuf:"fixed64,1,opt,name=required_registration_fraction,json=requiredRegistrationFraction,proto3,oneof" json:"required_registration_fraction,omitempty"`
// contains filtered or unexported fields
}
Configuration to handle the startup of instances during cluster create and update process.
func (*StartupConfig) Descriptor
func (*StartupConfig) Descriptor() ([]byte, []int)
Deprecated: Use StartupConfig.ProtoReflect.Descriptor instead.
func (*StartupConfig) GetRequiredRegistrationFraction
func (x *StartupConfig) GetRequiredRegistrationFraction() float64
func (*StartupConfig) ProtoMessage
func (*StartupConfig) ProtoMessage()
func (*StartupConfig) ProtoReflect
func (x *StartupConfig) ProtoReflect() protoreflect.Message
func (*StartupConfig) Reset
func (x *StartupConfig) Reset()
func (*StartupConfig) String
func (x *StartupConfig) String() string
StopClusterRequest
type StopClusterRequest struct {
// Required. The ID of the Google Cloud Platform project the
// cluster belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,2,opt,name=region,proto3" json:"region,omitempty"`
// Required. The cluster name.
ClusterName string `protobuf:"bytes,3,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
// Optional. Specifying the `cluster_uuid` means the RPC will fail
// (with error NOT_FOUND) if a cluster with the specified UUID does not exist.
ClusterUuid string `protobuf:"bytes,4,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
// Optional. A unique ID used to identify the request. If the server
// receives two
// [StopClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.StopClusterRequest)s
// with the same id, then the second request will be ignored and the
// first [google.longrunning.Operation][google.longrunning.Operation] created
// and stored in the backend is returned.
//
// Recommendation: Set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The ID must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// contains filtered or unexported fields
}
A request to stop a cluster.
func (*StopClusterRequest) Descriptor
func (*StopClusterRequest) Descriptor() ([]byte, []int)
Deprecated: Use StopClusterRequest.ProtoReflect.Descriptor instead.
func (*StopClusterRequest) GetClusterName
func (x *StopClusterRequest) GetClusterName() string
func (*StopClusterRequest) GetClusterUuid
func (x *StopClusterRequest) GetClusterUuid() string
func (*StopClusterRequest) GetProjectId
func (x *StopClusterRequest) GetProjectId() string
func (*StopClusterRequest) GetRegion
func (x *StopClusterRequest) GetRegion() string
func (*StopClusterRequest) GetRequestId
func (x *StopClusterRequest) GetRequestId() string
func (*StopClusterRequest) ProtoMessage
func (*StopClusterRequest) ProtoMessage()
func (*StopClusterRequest) ProtoReflect
func (x *StopClusterRequest) ProtoReflect() protoreflect.Message
func (*StopClusterRequest) Reset
func (x *StopClusterRequest) Reset()
func (*StopClusterRequest) String
func (x *StopClusterRequest) String() string
SubmitJobRequest
type SubmitJobRequest struct {
// Required. The ID of the Google Cloud Platform project that the job
// belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
// Required. The job resource.
Job *Job `protobuf:"bytes,2,opt,name=job,proto3" json:"job,omitempty"`
// Optional. A unique id used to identify the request. If the server
// receives two
// [SubmitJobRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.SubmitJobRequest)s
// with the same id, then the second request will be ignored and the
// first [Job][google.cloud.dataproc.v1.Job] created and stored in the backend
// is returned.
//
// It is recommended to always set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The id must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// contains filtered or unexported fields
}
A request to submit a job.
func (*SubmitJobRequest) Descriptor
func (*SubmitJobRequest) Descriptor() ([]byte, []int)
Deprecated: Use SubmitJobRequest.ProtoReflect.Descriptor instead.
func (*SubmitJobRequest) GetJob
func (x *SubmitJobRequest) GetJob() *Job
func (*SubmitJobRequest) GetProjectId
func (x *SubmitJobRequest) GetProjectId() string
func (*SubmitJobRequest) GetRegion
func (x *SubmitJobRequest) GetRegion() string
func (*SubmitJobRequest) GetRequestId
func (x *SubmitJobRequest) GetRequestId() string
func (*SubmitJobRequest) ProtoMessage
func (*SubmitJobRequest) ProtoMessage()
func (*SubmitJobRequest) ProtoReflect
func (x *SubmitJobRequest) ProtoReflect() protoreflect.Message
func (*SubmitJobRequest) Reset
func (x *SubmitJobRequest) Reset()
func (*SubmitJobRequest) String
func (x *SubmitJobRequest) String() string
TemplateParameter
type TemplateParameter struct {
// Required. Parameter name.
// The parameter name is used as the key, and paired with the
// parameter value, which are passed to the template when the template
// is instantiated.
// The name must contain only capital letters (A-Z), numbers (0-9), and
// underscores (_), and must not start with a number. The maximum length is
// 40 characters.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Required. Paths to all fields that the parameter replaces.
// A field is allowed to appear in at most one parameter's list of field
// paths.
//
// A field path is similar in syntax to a
// [google.protobuf.FieldMask][google.protobuf.FieldMask]. For example, a
// field path that references the zone field of a workflow template's cluster
// selector would be specified as `placement.clusterSelector.zone`.
//
// Also, field paths can reference fields using the following syntax:
//
// * Values in maps can be referenced by key:
// - labels['key']
// - placement.clusterSelector.clusterLabels['key']
// - placement.managedCluster.labels['key']
// - placement.clusterSelector.clusterLabels['key']
// - jobs['step-id'].labels['key']
//
// * Jobs in the jobs list can be referenced by step-id:
// - jobs['step-id'].hadoopJob.mainJarFileUri
// - jobs['step-id'].hiveJob.queryFileUri
// - jobs['step-id'].pySparkJob.mainPythonFileUri
// - jobs['step-id'].hadoopJob.jarFileUris[0]
// - jobs['step-id'].hadoopJob.archiveUris[0]
// - jobs['step-id'].hadoopJob.fileUris[0]
// - jobs['step-id'].pySparkJob.pythonFileUris[0]
//
// * Items in repeated fields can be referenced by a zero-based index:
// - jobs['step-id'].sparkJob.args[0]
//
// * Other examples:
// - jobs['step-id'].hadoopJob.properties['key']
// - jobs['step-id'].hadoopJob.args[0]
// - jobs['step-id'].hiveJob.scriptVariables['key']
// - jobs['step-id'].hadoopJob.mainJarFileUri
// - placement.clusterSelector.zone
//
// It may not be possible to parameterize maps and repeated fields in their
// entirety since only individual map values and individual items in repeated
// fields can be referenced. For example, the following field paths are
// invalid:
//
// - placement.clusterSelector.clusterLabels
// - jobs['step-id'].sparkJob.args
Fields []string `protobuf:"bytes,2,rep,name=fields,proto3" json:"fields,omitempty"`
// Optional. Brief description of the parameter.
// Must not exceed 1024 characters.
Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
// Optional. Validation rules to be applied to this parameter's value.
Validation *ParameterValidation `protobuf:"bytes,4,opt,name=validation,proto3" json:"validation,omitempty"`
// contains filtered or unexported fields
}
A configurable parameter that replaces one or more fields in the template. Parameterizable fields:
- Labels
- File uris
- Job properties
- Job arguments
- Script variables
- Main class (in HadoopJob and SparkJob)
- Zone (in ClusterSelector)
func (*TemplateParameter) Descriptor
func (*TemplateParameter) Descriptor() ([]byte, []int)
Deprecated: Use TemplateParameter.ProtoReflect.Descriptor instead.
func (*TemplateParameter) GetDescription
func (x *TemplateParameter) GetDescription() string
func (*TemplateParameter) GetFields
func (x *TemplateParameter) GetFields() []string
func (*TemplateParameter) GetName
func (x *TemplateParameter) GetName() string
func (*TemplateParameter) GetValidation
func (x *TemplateParameter) GetValidation() *ParameterValidation
func (*TemplateParameter) ProtoMessage
func (*TemplateParameter) ProtoMessage()
func (*TemplateParameter) ProtoReflect
func (x *TemplateParameter) ProtoReflect() protoreflect.Message
func (*TemplateParameter) Reset
func (x *TemplateParameter) Reset()
func (*TemplateParameter) String
func (x *TemplateParameter) String() string
TerminateSessionRequest
type TerminateSessionRequest struct {
// Required. The name of the session resource to terminate.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Optional. A unique ID used to identify the request. If the service
// receives two
// [TerminateSessionRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.TerminateSessionRequest)s
// with the same ID, the second request is ignored.
//
// Recommendation: Set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The value must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,2,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// contains filtered or unexported fields
}
A request to terminate an interactive session.
func (*TerminateSessionRequest) Descriptor
func (*TerminateSessionRequest) Descriptor() ([]byte, []int)
Deprecated: Use TerminateSessionRequest.ProtoReflect.Descriptor instead.
func (*TerminateSessionRequest) GetName
func (x *TerminateSessionRequest) GetName() string
func (*TerminateSessionRequest) GetRequestId
func (x *TerminateSessionRequest) GetRequestId() string
func (*TerminateSessionRequest) ProtoMessage
func (*TerminateSessionRequest) ProtoMessage()
func (*TerminateSessionRequest) ProtoReflect
func (x *TerminateSessionRequest) ProtoReflect() protoreflect.Message
func (*TerminateSessionRequest) Reset
func (x *TerminateSessionRequest) Reset()
func (*TerminateSessionRequest) String
func (x *TerminateSessionRequest) String() string
TrinoJob
type TrinoJob struct {
// Required. The sequence of Trino queries to execute, specified as
// either an HCFS file URI or as a list of queries.
//
// Types that are assignable to Queries:
//
// *TrinoJob_QueryFileUri
// *TrinoJob_QueryList
Queries isTrinoJob_Queries `protobuf_oneof:"queries"`
// Optional. Whether to continue executing queries if a query fails.
// The default value is `false`. Setting to `true` can be useful when
// executing independent parallel queries.
ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure,proto3" json:"continue_on_failure,omitempty"`
// Optional. The format in which query output will be displayed. See the
// Trino documentation for supported output formats.
OutputFormat string `protobuf:"bytes,4,opt,name=output_format,json=outputFormat,proto3" json:"output_format,omitempty"`
// Optional. Trino client tags to attach to this query.
ClientTags []string `protobuf:"bytes,5,rep,name=client_tags,json=clientTags,proto3" json:"client_tags,omitempty"`
// Optional. A mapping of property names to values. Used to set Trino
// [session properties](https://trino.io/docs/current/sql/set-session.html).
// Equivalent to using the --session flag in the Trino CLI.
Properties map[string]string `protobuf:"bytes,6,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Optional. The runtime log config for job execution.
LoggingConfig *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig,proto3" json:"logging_config,omitempty"`
// contains filtered or unexported fields
}
A Dataproc job for running Trino queries. IMPORTANT: The Dataproc Trino Optional Component must be enabled when the cluster is created to submit a Trino job to the cluster.
func (*TrinoJob) Descriptor
Deprecated: Use TrinoJob.ProtoReflect.Descriptor instead.
func (*TrinoJob) GetClientTags
func (*TrinoJob) GetContinueOnFailure
func (*TrinoJob) GetLoggingConfig
func (x *TrinoJob) GetLoggingConfig() *LoggingConfig
func (*TrinoJob) GetOutputFormat
func (*TrinoJob) GetProperties
func (*TrinoJob) GetQueries
func (m *TrinoJob) GetQueries() isTrinoJob_Queries
func (*TrinoJob) GetQueryFileUri
func (*TrinoJob) GetQueryList
func (*TrinoJob) ProtoMessage
func (*TrinoJob) ProtoMessage()
func (*TrinoJob) ProtoReflect
func (x *TrinoJob) ProtoReflect() protoreflect.Message
func (*TrinoJob) Reset
func (x *TrinoJob) Reset()
func (*TrinoJob) String
TrinoJob_QueryFileUri
type TrinoJob_QueryFileUri struct {
// The HCFS URI of the script that contains SQL queries.
QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,proto3,oneof"`
}
TrinoJob_QueryList
type TrinoJob_QueryList struct {
// A list of queries.
QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,proto3,oneof"`
}
UnimplementedAutoscalingPolicyServiceServer
type UnimplementedAutoscalingPolicyServiceServer struct {
}
UnimplementedAutoscalingPolicyServiceServer can be embedded to have forward compatible implementations.
func (*UnimplementedAutoscalingPolicyServiceServer) CreateAutoscalingPolicy
func (*UnimplementedAutoscalingPolicyServiceServer) CreateAutoscalingPolicy(context.Context, *CreateAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
func (*UnimplementedAutoscalingPolicyServiceServer) DeleteAutoscalingPolicy
func (*UnimplementedAutoscalingPolicyServiceServer) DeleteAutoscalingPolicy(context.Context, *DeleteAutoscalingPolicyRequest) (*emptypb.Empty, error)
func (*UnimplementedAutoscalingPolicyServiceServer) GetAutoscalingPolicy
func (*UnimplementedAutoscalingPolicyServiceServer) GetAutoscalingPolicy(context.Context, *GetAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
func (*UnimplementedAutoscalingPolicyServiceServer) ListAutoscalingPolicies
func (*UnimplementedAutoscalingPolicyServiceServer) ListAutoscalingPolicies(context.Context, *ListAutoscalingPoliciesRequest) (*ListAutoscalingPoliciesResponse, error)
func (*UnimplementedAutoscalingPolicyServiceServer) UpdateAutoscalingPolicy
func (*UnimplementedAutoscalingPolicyServiceServer) UpdateAutoscalingPolicy(context.Context, *UpdateAutoscalingPolicyRequest) (*AutoscalingPolicy, error)
UnimplementedBatchControllerServer
type UnimplementedBatchControllerServer struct {
}
UnimplementedBatchControllerServer can be embedded to have forward compatible implementations.
func (*UnimplementedBatchControllerServer) CreateBatch
func (*UnimplementedBatchControllerServer) CreateBatch(context.Context, *CreateBatchRequest) (*longrunningpb.Operation, error)
func (*UnimplementedBatchControllerServer) DeleteBatch
func (*UnimplementedBatchControllerServer) DeleteBatch(context.Context, *DeleteBatchRequest) (*emptypb.Empty, error)
func (*UnimplementedBatchControllerServer) GetBatch
func (*UnimplementedBatchControllerServer) GetBatch(context.Context, *GetBatchRequest) (*Batch, error)
func (*UnimplementedBatchControllerServer) ListBatches
func (*UnimplementedBatchControllerServer) ListBatches(context.Context, *ListBatchesRequest) (*ListBatchesResponse, error)
UnimplementedClusterControllerServer
type UnimplementedClusterControllerServer struct {
}
UnimplementedClusterControllerServer can be embedded to have forward compatible implementations.
func (*UnimplementedClusterControllerServer) CreateCluster
func (*UnimplementedClusterControllerServer) CreateCluster(context.Context, *CreateClusterRequest) (*longrunningpb.Operation, error)
func (*UnimplementedClusterControllerServer) DeleteCluster
func (*UnimplementedClusterControllerServer) DeleteCluster(context.Context, *DeleteClusterRequest) (*longrunningpb.Operation, error)
func (*UnimplementedClusterControllerServer) DiagnoseCluster
func (*UnimplementedClusterControllerServer) DiagnoseCluster(context.Context, *DiagnoseClusterRequest) (*longrunningpb.Operation, error)
func (*UnimplementedClusterControllerServer) GetCluster
func (*UnimplementedClusterControllerServer) GetCluster(context.Context, *GetClusterRequest) (*Cluster, error)
func (*UnimplementedClusterControllerServer) ListClusters
func (*UnimplementedClusterControllerServer) ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
func (*UnimplementedClusterControllerServer) StartCluster
func (*UnimplementedClusterControllerServer) StartCluster(context.Context, *StartClusterRequest) (*longrunningpb.Operation, error)
func (*UnimplementedClusterControllerServer) StopCluster
func (*UnimplementedClusterControllerServer) StopCluster(context.Context, *StopClusterRequest) (*longrunningpb.Operation, error)
func (*UnimplementedClusterControllerServer) UpdateCluster
func (*UnimplementedClusterControllerServer) UpdateCluster(context.Context, *UpdateClusterRequest) (*longrunningpb.Operation, error)
UnimplementedJobControllerServer
type UnimplementedJobControllerServer struct {
}
UnimplementedJobControllerServer can be embedded to have forward compatible implementations.
func (*UnimplementedJobControllerServer) CancelJob
func (*UnimplementedJobControllerServer) CancelJob(context.Context, *CancelJobRequest) (*Job, error)
func (*UnimplementedJobControllerServer) DeleteJob
func (*UnimplementedJobControllerServer) DeleteJob(context.Context, *DeleteJobRequest) (*emptypb.Empty, error)
func (*UnimplementedJobControllerServer) GetJob
func (*UnimplementedJobControllerServer) GetJob(context.Context, *GetJobRequest) (*Job, error)
func (*UnimplementedJobControllerServer) ListJobs
func (*UnimplementedJobControllerServer) ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error)
func (*UnimplementedJobControllerServer) SubmitJob
func (*UnimplementedJobControllerServer) SubmitJob(context.Context, *SubmitJobRequest) (*Job, error)
func (*UnimplementedJobControllerServer) SubmitJobAsOperation
func (*UnimplementedJobControllerServer) SubmitJobAsOperation(context.Context, *SubmitJobRequest) (*longrunningpb.Operation, error)
func (*UnimplementedJobControllerServer) UpdateJob
func (*UnimplementedJobControllerServer) UpdateJob(context.Context, *UpdateJobRequest) (*Job, error)
UnimplementedNodeGroupControllerServer
type UnimplementedNodeGroupControllerServer struct {
}
UnimplementedNodeGroupControllerServer can be embedded to have forward compatible implementations.
func (*UnimplementedNodeGroupControllerServer) CreateNodeGroup
func (*UnimplementedNodeGroupControllerServer) CreateNodeGroup(context.Context, *CreateNodeGroupRequest) (*longrunningpb.Operation, error)
func (*UnimplementedNodeGroupControllerServer) GetNodeGroup
func (*UnimplementedNodeGroupControllerServer) GetNodeGroup(context.Context, *GetNodeGroupRequest) (*NodeGroup, error)
func (*UnimplementedNodeGroupControllerServer) ResizeNodeGroup
func (*UnimplementedNodeGroupControllerServer) ResizeNodeGroup(context.Context, *ResizeNodeGroupRequest) (*longrunningpb.Operation, error)
UnimplementedSessionControllerServer
type UnimplementedSessionControllerServer struct {
}
UnimplementedSessionControllerServer can be embedded to have forward compatible implementations.
func (*UnimplementedSessionControllerServer) CreateSession
func (*UnimplementedSessionControllerServer) CreateSession(context.Context, *CreateSessionRequest) (*longrunningpb.Operation, error)
func (*UnimplementedSessionControllerServer) DeleteSession
func (*UnimplementedSessionControllerServer) DeleteSession(context.Context, *DeleteSessionRequest) (*longrunningpb.Operation, error)
func (*UnimplementedSessionControllerServer) GetSession
func (*UnimplementedSessionControllerServer) GetSession(context.Context, *GetSessionRequest) (*Session, error)
func (*UnimplementedSessionControllerServer) ListSessions
func (*UnimplementedSessionControllerServer) ListSessions(context.Context, *ListSessionsRequest) (*ListSessionsResponse, error)
func (*UnimplementedSessionControllerServer) TerminateSession
func (*UnimplementedSessionControllerServer) TerminateSession(context.Context, *TerminateSessionRequest) (*longrunningpb.Operation, error)
UnimplementedSessionTemplateControllerServer
type UnimplementedSessionTemplateControllerServer struct {
}
UnimplementedSessionTemplateControllerServer can be embedded to have forward compatible implementations.
func (*UnimplementedSessionTemplateControllerServer) CreateSessionTemplate
func (*UnimplementedSessionTemplateControllerServer) CreateSessionTemplate(context.Context, *CreateSessionTemplateRequest) (*SessionTemplate, error)
func (*UnimplementedSessionTemplateControllerServer) DeleteSessionTemplate
func (*UnimplementedSessionTemplateControllerServer) DeleteSessionTemplate(context.Context, *DeleteSessionTemplateRequest) (*emptypb.Empty, error)
func (*UnimplementedSessionTemplateControllerServer) GetSessionTemplate
func (*UnimplementedSessionTemplateControllerServer) GetSessionTemplate(context.Context, *GetSessionTemplateRequest) (*SessionTemplate, error)
func (*UnimplementedSessionTemplateControllerServer) ListSessionTemplates
func (*UnimplementedSessionTemplateControllerServer) ListSessionTemplates(context.Context, *ListSessionTemplatesRequest) (*ListSessionTemplatesResponse, error)
func (*UnimplementedSessionTemplateControllerServer) UpdateSessionTemplate
func (*UnimplementedSessionTemplateControllerServer) UpdateSessionTemplate(context.Context, *UpdateSessionTemplateRequest) (*SessionTemplate, error)
UnimplementedWorkflowTemplateServiceServer
type UnimplementedWorkflowTemplateServiceServer struct {
}
UnimplementedWorkflowTemplateServiceServer can be embedded to have forward compatible implementations.
func (*UnimplementedWorkflowTemplateServiceServer) CreateWorkflowTemplate
func (*UnimplementedWorkflowTemplateServiceServer) CreateWorkflowTemplate(context.Context, *CreateWorkflowTemplateRequest) (*WorkflowTemplate, error)
func (*UnimplementedWorkflowTemplateServiceServer) DeleteWorkflowTemplate
func (*UnimplementedWorkflowTemplateServiceServer) DeleteWorkflowTemplate(context.Context, *DeleteWorkflowTemplateRequest) (*emptypb.Empty, error)
func (*UnimplementedWorkflowTemplateServiceServer) GetWorkflowTemplate
func (*UnimplementedWorkflowTemplateServiceServer) GetWorkflowTemplate(context.Context, *GetWorkflowTemplateRequest) (*WorkflowTemplate, error)
func (*UnimplementedWorkflowTemplateServiceServer) InstantiateInlineWorkflowTemplate
func (*UnimplementedWorkflowTemplateServiceServer) InstantiateInlineWorkflowTemplate(context.Context, *InstantiateInlineWorkflowTemplateRequest) (*longrunningpb.Operation, error)
func (*UnimplementedWorkflowTemplateServiceServer) InstantiateWorkflowTemplate
func (*UnimplementedWorkflowTemplateServiceServer) InstantiateWorkflowTemplate(context.Context, *InstantiateWorkflowTemplateRequest) (*longrunningpb.Operation, error)
func (*UnimplementedWorkflowTemplateServiceServer) ListWorkflowTemplates
func (*UnimplementedWorkflowTemplateServiceServer) ListWorkflowTemplates(context.Context, *ListWorkflowTemplatesRequest) (*ListWorkflowTemplatesResponse, error)
func (*UnimplementedWorkflowTemplateServiceServer) UpdateWorkflowTemplate
func (*UnimplementedWorkflowTemplateServiceServer) UpdateWorkflowTemplate(context.Context, *UpdateWorkflowTemplateRequest) (*WorkflowTemplate, error)
UpdateAutoscalingPolicyRequest
type UpdateAutoscalingPolicyRequest struct {
// Required. The updated autoscaling policy.
Policy *AutoscalingPolicy `protobuf:"bytes,1,opt,name=policy,proto3" json:"policy,omitempty"`
// contains filtered or unexported fields
}
A request to update an autoscaling policy.
func (*UpdateAutoscalingPolicyRequest) Descriptor
func (*UpdateAutoscalingPolicyRequest) Descriptor() ([]byte, []int)
Deprecated: Use UpdateAutoscalingPolicyRequest.ProtoReflect.Descriptor instead.
func (*UpdateAutoscalingPolicyRequest) GetPolicy
func (x *UpdateAutoscalingPolicyRequest) GetPolicy() *AutoscalingPolicy
func (*UpdateAutoscalingPolicyRequest) ProtoMessage
func (*UpdateAutoscalingPolicyRequest) ProtoMessage()
func (*UpdateAutoscalingPolicyRequest) ProtoReflect
func (x *UpdateAutoscalingPolicyRequest) ProtoReflect() protoreflect.Message
func (*UpdateAutoscalingPolicyRequest) Reset
func (x *UpdateAutoscalingPolicyRequest) Reset()
func (*UpdateAutoscalingPolicyRequest) String
func (x *UpdateAutoscalingPolicyRequest) String() string
UpdateClusterRequest
type UpdateClusterRequest struct {
// Required. The ID of the Google Cloud Platform project the
// cluster belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,5,opt,name=region,proto3" json:"region,omitempty"`
// Required. The cluster name.
ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
// Required. The changes to the cluster.
Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"`
// Optional. Timeout for graceful YARN decommissioning. Graceful
// decommissioning allows removing nodes from the cluster without
// interrupting jobs in progress. Timeout specifies how long to wait for jobs
// in progress to finish before forcefully removing nodes (and potentially
// interrupting jobs). Default timeout is 0 (for forceful decommission), and
// the maximum allowed timeout is 1 day. (see JSON representation of
// [Duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
//
// Only supported on Dataproc image versions 1.2 and higher.
GracefulDecommissionTimeout *durationpb.Duration `protobuf:"bytes,6,opt,name=graceful_decommission_timeout,json=gracefulDecommissionTimeout,proto3" json:"graceful_decommission_timeout,omitempty"`
// Required. Specifies the path, relative to `Cluster`, of
// the field to update. For example, to change the number of workers
// in a cluster to 5, the `update_mask` parameter would be
// specified as `config.worker_config.num_instances`,
// and the `PATCH` request body would specify the new value, as follows:
//
// {
// "config":{
// "workerConfig":{
// "numInstances":"5"
// }
// }
// }
//
// Similarly, to change the number of preemptible workers in a cluster to 5,
// the `update_mask` parameter would be
// `config.secondary_worker_config.num_instances`, and the `PATCH` request
// body would be set as follows:
//
// {
// "config":{
// "secondaryWorkerConfig":{
// "numInstances":"5"
// }
// }
// }
//
// Note: Currently, only the following fields can be updated:
//
//	Mask                                           Purpose
//	labels                                         Update labels
//	config.worker_config.num_instances             Resize primary worker group
//	config.secondary_worker_config.num_instances   Resize secondary worker group
//	config.autoscaling_config.policy_uri           Use, stop using, or change
//	                                               autoscaling policies
UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,4,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
// Optional. A unique ID used to identify the request. If the server
// receives two
// [UpdateClusterRequest](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#google.cloud.dataproc.v1.UpdateClusterRequest)s
// with the same id, then the second request will be ignored and the
// first [google.longrunning.Operation][google.longrunning.Operation] created
// and stored in the backend is returned.
//
// It is recommended to always set this value to a
// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
//
// The ID must contain only letters (a-z, A-Z), numbers (0-9),
// underscores (_), and hyphens (-). The maximum length is 40 characters.
RequestId string `protobuf:"bytes,7,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
// contains filtered or unexported fields
}
A request to update a cluster.
func (*UpdateClusterRequest) Descriptor
func (*UpdateClusterRequest) Descriptor() ([]byte, []int)
Deprecated: Use UpdateClusterRequest.ProtoReflect.Descriptor instead.
func (*UpdateClusterRequest) GetCluster
func (x *UpdateClusterRequest) GetCluster() *Cluster
func (*UpdateClusterRequest) GetClusterName
func (x *UpdateClusterRequest) GetClusterName() string
func (*UpdateClusterRequest) GetGracefulDecommissionTimeout
func (x *UpdateClusterRequest) GetGracefulDecommissionTimeout() *durationpb.Duration
func (*UpdateClusterRequest) GetProjectId
func (x *UpdateClusterRequest) GetProjectId() string
func (*UpdateClusterRequest) GetRegion
func (x *UpdateClusterRequest) GetRegion() string
func (*UpdateClusterRequest) GetRequestId
func (x *UpdateClusterRequest) GetRequestId() string
func (*UpdateClusterRequest) GetUpdateMask
func (x *UpdateClusterRequest) GetUpdateMask() *fieldmaskpb.FieldMask
func (*UpdateClusterRequest) ProtoMessage
func (*UpdateClusterRequest) ProtoMessage()
func (*UpdateClusterRequest) ProtoReflect
func (x *UpdateClusterRequest) ProtoReflect() protoreflect.Message
func (*UpdateClusterRequest) Reset
func (x *UpdateClusterRequest) Reset()
func (*UpdateClusterRequest) String
func (x *UpdateClusterRequest) String() string
UpdateJobRequest
type UpdateJobRequest struct {
// Required. The ID of the Google Cloud Platform project that the job
// belongs to.
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
// Required. The Dataproc region in which to handle the request.
Region string `protobuf:"bytes,2,opt,name=region,proto3" json:"region,omitempty"`
// Required. The job ID.
JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
// Required. The changes to the job.
Job *Job `protobuf:"bytes,4,opt,name=job,proto3" json:"job,omitempty"`
// Required. Specifies the path, relative to `Job`, of
// the field to update. For example, to update the labels of a Job the
// `update_mask` parameter would be specified as
// `labels`, and the `PATCH` request body would specify the new
// value. Note: Currently, `labels` is the only
// field that can be updated.
UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,5,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
// contains filtered or unexported fields
}
A request to update a job.
func (*UpdateJobRequest) Descriptor
func (*UpdateJobRequest) Descriptor() ([]byte, []int)
Deprecated: Use UpdateJobRequest.ProtoReflect.Descriptor instead.
func (*UpdateJobRequest) GetJob
func (x *UpdateJobRequest) GetJob() *Job
func (*UpdateJobRequest) GetJobId
func (x *UpdateJobRequest) GetJobId() string
func (*UpdateJobRequest) GetProjectId
func (x *UpdateJobRequest) GetProjectId() string
func (*UpdateJobRequest) GetRegion
func (x *UpdateJobRequest) GetRegion() string
func (*UpdateJobRequest) GetUpdateMask
func (x *UpdateJobRequest) GetUpdateMask() *fieldmaskpb.FieldMask
func (*UpdateJobRequest) ProtoMessage
func (*UpdateJobRequest) ProtoMessage()
func (*UpdateJobRequest) ProtoReflect
func (x *UpdateJobRequest) ProtoReflect() protoreflect.Message
func (*UpdateJobRequest) Reset
func (x *UpdateJobRequest) Reset()
func (*UpdateJobRequest) String
func (x *UpdateJobRequest) String() string
UpdateSessionTemplateRequest
type UpdateSessionTemplateRequest struct {
// Required. The updated session template.
SessionTemplate *SessionTemplate `protobuf:"bytes,1,opt,name=session_template,json=sessionTemplate,proto3" json:"session_template,omitempty"`
// contains filtered or unexported fields
}
A request to update a session template.
func (*UpdateSessionTemplateRequest) Descriptor
func (*UpdateSessionTemplateRequest) Descriptor() ([]byte, []int)
Deprecated: Use UpdateSessionTemplateRequest.ProtoReflect.Descriptor instead.
func (*UpdateSessionTemplateRequest) GetSessionTemplate
func (x *UpdateSessionTemplateRequest) GetSessionTemplate() *SessionTemplate
func (*UpdateSessionTemplateRequest) ProtoMessage
func (*UpdateSessionTemplateRequest) ProtoMessage()
func (*UpdateSessionTemplateRequest) ProtoReflect
func (x *UpdateSessionTemplateRequest) ProtoReflect() protoreflect.Message
func (*UpdateSessionTemplateRequest) Reset
func (x *UpdateSessionTemplateRequest) Reset()
func (*UpdateSessionTemplateRequest) String
func (x *UpdateSessionTemplateRequest) String() string
UpdateWorkflowTemplateRequest
type UpdateWorkflowTemplateRequest struct {
// Required. The updated workflow template.
//
// The `template.version` field must match the current version.
Template *WorkflowTemplate `protobuf:"bytes,1,opt,name=template,proto3" json:"template,omitempty"`
// contains filtered or unexported fields
}
A request to update a workflow template.
func (*UpdateWorkflowTemplateRequest) Descriptor
func (*UpdateWorkflowTemplateRequest) Descriptor() ([]byte, []int)
Deprecated: Use UpdateWorkflowTemplateRequest.ProtoReflect.Descriptor instead.
func (*UpdateWorkflowTemplateRequest) GetTemplate
func (x *UpdateWorkflowTemplateRequest) GetTemplate() *WorkflowTemplate
func (*UpdateWorkflowTemplateRequest) ProtoMessage
func (*UpdateWorkflowTemplateRequest) ProtoMessage()
func (*UpdateWorkflowTemplateRequest) ProtoReflect
func (x *UpdateWorkflowTemplateRequest) ProtoReflect() protoreflect.Message
func (*UpdateWorkflowTemplateRequest) Reset
func (x *UpdateWorkflowTemplateRequest) Reset()
func (*UpdateWorkflowTemplateRequest) String
func (x *UpdateWorkflowTemplateRequest) String() string
UsageMetrics
type UsageMetrics struct {
// Optional. DCU (Dataproc Compute Units) usage in (`milliDCU` x `seconds`)
// (see [Dataproc Serverless pricing]
// (https://cloud.google.com/dataproc-serverless/pricing)).
MilliDcuSeconds int64 `protobuf:"varint,1,opt,name=milli_dcu_seconds,json=milliDcuSeconds,proto3" json:"milli_dcu_seconds,omitempty"`
// Optional. Shuffle storage usage in (`GB` x `seconds`) (see
// [Dataproc Serverless pricing]
// (https://cloud.google.com/dataproc-serverless/pricing)).
ShuffleStorageGbSeconds int64 `protobuf:"varint,2,opt,name=shuffle_storage_gb_seconds,json=shuffleStorageGbSeconds,proto3" json:"shuffle_storage_gb_seconds,omitempty"`
// Optional. Accelerator usage in (`milliAccelerator` x `seconds`) (see
// [Dataproc Serverless pricing]
// (https://cloud.google.com/dataproc-serverless/pricing)).
MilliAcceleratorSeconds int64 `protobuf:"varint,3,opt,name=milli_accelerator_seconds,json=milliAcceleratorSeconds,proto3" json:"milli_accelerator_seconds,omitempty"`
// Optional. Accelerator type being used, if any
AcceleratorType string `protobuf:"bytes,4,opt,name=accelerator_type,json=acceleratorType,proto3" json:"accelerator_type,omitempty"`
// contains filtered or unexported fields
}
Usage metrics represent approximate total resources consumed by a workload.
func (*UsageMetrics) Descriptor
func (*UsageMetrics) Descriptor() ([]byte, []int)
Deprecated: Use UsageMetrics.ProtoReflect.Descriptor instead.
func (*UsageMetrics) GetAcceleratorType
func (x *UsageMetrics) GetAcceleratorType() string
func (*UsageMetrics) GetMilliAcceleratorSeconds
func (x *UsageMetrics) GetMilliAcceleratorSeconds() int64
func (*UsageMetrics) GetMilliDcuSeconds
func (x *UsageMetrics) GetMilliDcuSeconds() int64
func (*UsageMetrics) GetShuffleStorageGbSeconds
func (x *UsageMetrics) GetShuffleStorageGbSeconds() int64
func (*UsageMetrics) ProtoMessage
func (*UsageMetrics) ProtoMessage()
func (*UsageMetrics) ProtoReflect
func (x *UsageMetrics) ProtoReflect() protoreflect.Message
func (*UsageMetrics) Reset
func (x *UsageMetrics) Reset()
func (*UsageMetrics) String
func (x *UsageMetrics) String() string
UsageSnapshot
type UsageSnapshot struct {
// Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) (see
// [Dataproc Serverless pricing]
// (https://cloud.google.com/dataproc-serverless/pricing)).
MilliDcu int64 `protobuf:"varint,1,opt,name=milli_dcu,json=milliDcu,proto3" json:"milli_dcu,omitempty"`
// Optional. Shuffle Storage in gigabytes (GB). (see [Dataproc Serverless
// pricing] (https://cloud.google.com/dataproc-serverless/pricing))
ShuffleStorageGb int64 `protobuf:"varint,2,opt,name=shuffle_storage_gb,json=shuffleStorageGb,proto3" json:"shuffle_storage_gb,omitempty"`
// Optional. Milli (one-thousandth) Dataproc Compute Units (DCUs) charged at
// premium tier (see [Dataproc Serverless pricing]
// (https://cloud.google.com/dataproc-serverless/pricing)).
MilliDcuPremium int64 `protobuf:"varint,4,opt,name=milli_dcu_premium,json=milliDcuPremium,proto3" json:"milli_dcu_premium,omitempty"`
// Optional. Shuffle Storage in gigabytes (GB) charged at premium tier. (see
// [Dataproc Serverless pricing]
// (https://cloud.google.com/dataproc-serverless/pricing))
ShuffleStorageGbPremium int64 `protobuf:"varint,5,opt,name=shuffle_storage_gb_premium,json=shuffleStorageGbPremium,proto3" json:"shuffle_storage_gb_premium,omitempty"`
// Optional. Milli (one-thousandth) accelerator. (see [Dataproc
// Serverless pricing] (https://cloud.google.com/dataproc-serverless/pricing))
MilliAccelerator int64 `protobuf:"varint,6,opt,name=milli_accelerator,json=milliAccelerator,proto3" json:"milli_accelerator,omitempty"`
// Optional. Accelerator type being used, if any
AcceleratorType string `protobuf:"bytes,7,opt,name=accelerator_type,json=acceleratorType,proto3" json:"accelerator_type,omitempty"`
// Optional. The timestamp of the usage snapshot.
SnapshotTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=snapshot_time,json=snapshotTime,proto3" json:"snapshot_time,omitempty"`
// contains filtered or unexported fields
}
The usage snapshot represents the resources consumed by a workload at a specified time.
func (*UsageSnapshot) Descriptor
func (*UsageSnapshot) Descriptor() ([]byte, []int)
Deprecated: Use UsageSnapshot.ProtoReflect.Descriptor instead.
func (*UsageSnapshot) GetAcceleratorType
func (x *UsageSnapshot) GetAcceleratorType() string
func (*UsageSnapshot) GetMilliAccelerator
func (x *UsageSnapshot) GetMilliAccelerator() int64
func (*UsageSnapshot) GetMilliDcu
func (x *UsageSnapshot) GetMilliDcu() int64
func (*UsageSnapshot) GetMilliDcuPremium
func (x *UsageSnapshot) GetMilliDcuPremium() int64
func (*UsageSnapshot) GetShuffleStorageGb
func (x *UsageSnapshot) GetShuffleStorageGb() int64
func (*UsageSnapshot) GetShuffleStorageGbPremium
func (x *UsageSnapshot) GetShuffleStorageGbPremium() int64
func (*UsageSnapshot) GetSnapshotTime
func (x *UsageSnapshot) GetSnapshotTime() *timestamppb.Timestamp
func (*UsageSnapshot) ProtoMessage
func (*UsageSnapshot) ProtoMessage()
func (*UsageSnapshot) ProtoReflect
func (x *UsageSnapshot) ProtoReflect() protoreflect.Message
func (*UsageSnapshot) Reset
func (x *UsageSnapshot) Reset()
func (*UsageSnapshot) String
func (x *UsageSnapshot) String() string
ValueValidation
type ValueValidation struct {
// Required. List of allowed values for the parameter.
Values []string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
// contains filtered or unexported fields
}
Validation based on a list of allowed values.
func (*ValueValidation) Descriptor
func (*ValueValidation) Descriptor() ([]byte, []int)
Deprecated: Use ValueValidation.ProtoReflect.Descriptor instead.
func (*ValueValidation) GetValues
func (x *ValueValidation) GetValues() []string
func (*ValueValidation) ProtoMessage
func (*ValueValidation) ProtoMessage()
func (*ValueValidation) ProtoReflect
func (x *ValueValidation) ProtoReflect() protoreflect.Message
func (*ValueValidation) Reset
func (x *ValueValidation) Reset()
func (*ValueValidation) String
func (x *ValueValidation) String() string
VirtualClusterConfig
type VirtualClusterConfig struct {
// Optional. A Cloud Storage bucket used to stage job
// dependencies, config files, and job driver console output.
// If you do not specify a staging bucket, Cloud
// Dataproc will determine a Cloud Storage location (US,
// ASIA, or EU) for your cluster's staging bucket according to the
// Compute Engine zone where your cluster is deployed, and then create
// and manage this project-level, per-location bucket (see
// [Dataproc staging and temp
// buckets](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/staging-bucket)).
// **This field requires a Cloud Storage bucket name, not a `gs://...` URI to
// a Cloud Storage bucket.**
StagingBucket string `protobuf:"bytes,1,opt,name=staging_bucket,json=stagingBucket,proto3" json:"staging_bucket,omitempty"`
// Types that are assignable to InfrastructureConfig:
//
// *VirtualClusterConfig_KubernetesClusterConfig
InfrastructureConfig isVirtualClusterConfig_InfrastructureConfig `protobuf_oneof:"infrastructure_config"`
// Optional. Configuration of auxiliary services used by this cluster.
AuxiliaryServicesConfig *AuxiliaryServicesConfig `protobuf:"bytes,7,opt,name=auxiliary_services_config,json=auxiliaryServicesConfig,proto3" json:"auxiliary_services_config,omitempty"`
// contains filtered or unexported fields
}
The Dataproc cluster config for a cluster that does not directly control the underlying compute resources, such as a Dataproc-on-GKE cluster.
func (*VirtualClusterConfig) Descriptor
func (*VirtualClusterConfig) Descriptor() ([]byte, []int)
Deprecated: Use VirtualClusterConfig.ProtoReflect.Descriptor instead.
func (*VirtualClusterConfig) GetAuxiliaryServicesConfig
func (x *VirtualClusterConfig) GetAuxiliaryServicesConfig() *AuxiliaryServicesConfig
func (*VirtualClusterConfig) GetInfrastructureConfig
func (m *VirtualClusterConfig) GetInfrastructureConfig() isVirtualClusterConfig_InfrastructureConfig
func (*VirtualClusterConfig) GetKubernetesClusterConfig
func (x *VirtualClusterConfig) GetKubernetesClusterConfig() *KubernetesClusterConfig
func (*VirtualClusterConfig) GetStagingBucket
func (x *VirtualClusterConfig) GetStagingBucket() string
func (*VirtualClusterConfig) ProtoMessage
func (*VirtualClusterConfig) ProtoMessage()
func (*VirtualClusterConfig) ProtoReflect
func (x *VirtualClusterConfig) ProtoReflect() protoreflect.Message
func (*VirtualClusterConfig) Reset
func (x *VirtualClusterConfig) Reset()
func (*VirtualClusterConfig) String
func (x *VirtualClusterConfig) String() string
VirtualClusterConfig_KubernetesClusterConfig
type VirtualClusterConfig_KubernetesClusterConfig struct {
// Required. The configuration for running the Dataproc cluster on
// Kubernetes.
KubernetesClusterConfig *KubernetesClusterConfig `protobuf:"bytes,6,opt,name=kubernetes_cluster_config,json=kubernetesClusterConfig,proto3,oneof"`
}
WorkflowGraph
type WorkflowGraph struct {
// Output only. The workflow nodes.
Nodes []*WorkflowNode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
// contains filtered or unexported fields
}
The workflow graph.
func (*WorkflowGraph) Descriptor
func (*WorkflowGraph) Descriptor() ([]byte, []int)
Deprecated: Use WorkflowGraph.ProtoReflect.Descriptor instead.
func (*WorkflowGraph) GetNodes
func (x *WorkflowGraph) GetNodes() []*WorkflowNode
func (*WorkflowGraph) ProtoMessage
func (*WorkflowGraph) ProtoMessage()
func (*WorkflowGraph) ProtoReflect
func (x *WorkflowGraph) ProtoReflect() protoreflect.Message
func (*WorkflowGraph) Reset
func (x *WorkflowGraph) Reset()
func (*WorkflowGraph) String
func (x *WorkflowGraph) String() string
WorkflowMetadata
type WorkflowMetadata struct {
// Output only. The resource name of the workflow template as described
// in https://cloud.google.com/apis/design/resource_names.
//
// - For `projects.regions.workflowTemplates`, the resource name of the
// template has the following format:
// `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
//
// - For `projects.locations.workflowTemplates`, the resource name of the
// template has the following format:
// `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`
Template string `protobuf:"bytes,1,opt,name=template,proto3" json:"template,omitempty"`
// Output only. The version of template at the time of
// workflow instantiation.
Version int32 `protobuf:"varint,2,opt,name=version,proto3" json:"version,omitempty"`
// Output only. The create cluster operation metadata.
CreateCluster *ClusterOperation `protobuf:"bytes,3,opt,name=create_cluster,json=createCluster,proto3" json:"create_cluster,omitempty"`
// Output only. The workflow graph.
Graph *WorkflowGraph `protobuf:"bytes,4,opt,name=graph,proto3" json:"graph,omitempty"`
// Output only. The delete cluster operation metadata.
DeleteCluster *ClusterOperation `protobuf:"bytes,5,opt,name=delete_cluster,json=deleteCluster,proto3" json:"delete_cluster,omitempty"`
// Output only. The workflow state.
State WorkflowMetadata_State `protobuf:"varint,6,opt,name=state,proto3,enum=google.cloud.dataproc.v1.WorkflowMetadata_State" json:"state,omitempty"`
// Output only. The name of the target cluster.
ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
// Map from parameter names to values that were used for those parameters.
Parameters map[string]string `protobuf:"bytes,8,rep,name=parameters,proto3" json:"parameters,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Output only. Workflow start time.
StartTime *timestamppb.Timestamp `protobuf:"bytes,9,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
// Output only. Workflow end time.
EndTime *timestamppb.Timestamp `protobuf:"bytes,10,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
// Output only. The UUID of target cluster.
ClusterUuid string `protobuf:"bytes,11,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
// Output only. The timeout duration for the DAG of jobs, expressed in seconds
// (see [JSON representation of
// duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
DagTimeout *durationpb.Duration `protobuf:"bytes,12,opt,name=dag_timeout,json=dagTimeout,proto3" json:"dag_timeout,omitempty"`
// Output only. DAG start time, only set for workflows with
// [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
// DAG begins.
DagStartTime *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=dag_start_time,json=dagStartTime,proto3" json:"dag_start_time,omitempty"`
// Output only. DAG end time, only set for workflows with
// [dag_timeout][google.cloud.dataproc.v1.WorkflowMetadata.dag_timeout] when
// DAG ends.
DagEndTime *timestamppb.Timestamp `protobuf:"bytes,14,opt,name=dag_end_time,json=dagEndTime,proto3" json:"dag_end_time,omitempty"`
// contains filtered or unexported fields
}
Metadata describing the state and progress of a workflow (an instantiated WorkflowTemplate).
func (*WorkflowMetadata) Descriptor
func (*WorkflowMetadata) Descriptor() ([]byte, []int)
Deprecated: Use WorkflowMetadata.ProtoReflect.Descriptor instead.
func (*WorkflowMetadata) GetClusterName
func (x *WorkflowMetadata) GetClusterName() string
func (*WorkflowMetadata) GetClusterUuid
func (x *WorkflowMetadata) GetClusterUuid() string
func (*WorkflowMetadata) GetCreateCluster
func (x *WorkflowMetadata) GetCreateCluster() *ClusterOperation
func (*WorkflowMetadata) GetDagEndTime
func (x *WorkflowMetadata) GetDagEndTime() *timestamppb.Timestamp
func (*WorkflowMetadata) GetDagStartTime
func (x *WorkflowMetadata) GetDagStartTime() *timestamppb.Timestamp
func (*WorkflowMetadata) GetDagTimeout
func (x *WorkflowMetadata) GetDagTimeout() *durationpb.Duration
func (*WorkflowMetadata) GetDeleteCluster
func (x *WorkflowMetadata) GetDeleteCluster() *ClusterOperation
func (*WorkflowMetadata) GetEndTime
func (x *WorkflowMetadata) GetEndTime() *timestamppb.Timestamp
func (*WorkflowMetadata) GetGraph
func (x *WorkflowMetadata) GetGraph() *WorkflowGraph
func (*WorkflowMetadata) GetParameters
func (x *WorkflowMetadata) GetParameters() map[string]string
func (*WorkflowMetadata) GetStartTime
func (x *WorkflowMetadata) GetStartTime() *timestamppb.Timestamp
func (*WorkflowMetadata) GetState
func (x *WorkflowMetadata) GetState() WorkflowMetadata_State
func (*WorkflowMetadata) GetTemplate
func (x *WorkflowMetadata) GetTemplate() string
func (*WorkflowMetadata) GetVersion
func (x *WorkflowMetadata) GetVersion() int32
func (*WorkflowMetadata) ProtoMessage
func (*WorkflowMetadata) ProtoMessage()
func (*WorkflowMetadata) ProtoReflect
func (x *WorkflowMetadata) ProtoReflect() protoreflect.Message
func (*WorkflowMetadata) Reset
func (x *WorkflowMetadata) Reset()
func (*WorkflowMetadata) String
func (x *WorkflowMetadata) String() string
WorkflowMetadata_State
type WorkflowMetadata_State int32
The operation state.
WorkflowMetadata_UNKNOWN, WorkflowMetadata_PENDING, WorkflowMetadata_RUNNING, WorkflowMetadata_DONE
const (
// Unused.
WorkflowMetadata_UNKNOWN WorkflowMetadata_State = 0
// The operation has been created.
WorkflowMetadata_PENDING WorkflowMetadata_State = 1
// The operation is running.
WorkflowMetadata_RUNNING WorkflowMetadata_State = 2
// The operation is done; either cancelled or completed.
WorkflowMetadata_DONE WorkflowMetadata_State = 3
)
func (WorkflowMetadata_State) Descriptor
func (WorkflowMetadata_State) Descriptor() protoreflect.EnumDescriptor
func (WorkflowMetadata_State) Enum
func (x WorkflowMetadata_State) Enum() *WorkflowMetadata_State
func (WorkflowMetadata_State) EnumDescriptor
func (WorkflowMetadata_State) EnumDescriptor() ([]byte, []int)
Deprecated: Use WorkflowMetadata_State.Descriptor instead.
func (WorkflowMetadata_State) Number
func (x WorkflowMetadata_State) Number() protoreflect.EnumNumber
func (WorkflowMetadata_State) String
func (x WorkflowMetadata_State) String() string
func (WorkflowMetadata_State) Type
func (WorkflowMetadata_State) Type() protoreflect.EnumType
WorkflowNode
type WorkflowNode struct {
// Output only. The name of the node.
StepId string `protobuf:"bytes,1,opt,name=step_id,json=stepId,proto3" json:"step_id,omitempty"`
// Output only. Node's prerequisite nodes.
PrerequisiteStepIds []string `protobuf:"bytes,2,rep,name=prerequisite_step_ids,json=prerequisiteStepIds,proto3" json:"prerequisite_step_ids,omitempty"`
// Output only. The job id; populated after the node enters RUNNING state.
JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
// Output only. The node state.
State WorkflowNode_NodeState `protobuf:"varint,5,opt,name=state,proto3,enum=google.cloud.dataproc.v1.WorkflowNode_NodeState" json:"state,omitempty"`
// Output only. The error detail.
Error string `protobuf:"bytes,6,opt,name=error,proto3" json:"error,omitempty"`
// contains filtered or unexported fields
}
The workflow node.
func (*WorkflowNode) Descriptor
func (*WorkflowNode) Descriptor() ([]byte, []int)
Deprecated: Use WorkflowNode.ProtoReflect.Descriptor instead.
func (*WorkflowNode) GetError
func (x *WorkflowNode) GetError() string
func (*WorkflowNode) GetJobId
func (x *WorkflowNode) GetJobId() string
func (*WorkflowNode) GetPrerequisiteStepIds
func (x *WorkflowNode) GetPrerequisiteStepIds() []string
func (*WorkflowNode) GetState
func (x *WorkflowNode) GetState() WorkflowNode_NodeState
func (*WorkflowNode) GetStepId
func (x *WorkflowNode) GetStepId() string
func (*WorkflowNode) ProtoMessage
func (*WorkflowNode) ProtoMessage()
func (*WorkflowNode) ProtoReflect
func (x *WorkflowNode) ProtoReflect() protoreflect.Message
func (*WorkflowNode) Reset
func (x *WorkflowNode) Reset()
func (*WorkflowNode) String
func (x *WorkflowNode) String() string
WorkflowNode_NodeState
type WorkflowNode_NodeState int32
The workflow node state.
WorkflowNode_NODE_STATE_UNSPECIFIED, WorkflowNode_BLOCKED, WorkflowNode_RUNNABLE, WorkflowNode_RUNNING, WorkflowNode_COMPLETED, WorkflowNode_FAILED
const (
// State is unspecified.
WorkflowNode_NODE_STATE_UNSPECIFIED WorkflowNode_NodeState = 0
// The node is awaiting prerequisite node to finish.
WorkflowNode_BLOCKED WorkflowNode_NodeState = 1
// The node is runnable but not running.
WorkflowNode_RUNNABLE WorkflowNode_NodeState = 2
// The node is running.
WorkflowNode_RUNNING WorkflowNode_NodeState = 3
// The node completed successfully.
WorkflowNode_COMPLETED WorkflowNode_NodeState = 4
// The node failed. A node can be marked FAILED because
// its ancestor or peer failed.
WorkflowNode_FAILED WorkflowNode_NodeState = 5
)
func (WorkflowNode_NodeState) Descriptor
func (WorkflowNode_NodeState) Descriptor() protoreflect.EnumDescriptor
func (WorkflowNode_NodeState) Enum
func (x WorkflowNode_NodeState) Enum() *WorkflowNode_NodeState
func (WorkflowNode_NodeState) EnumDescriptor
func (WorkflowNode_NodeState) EnumDescriptor() ([]byte, []int)
Deprecated: Use WorkflowNode_NodeState.Descriptor instead.
func (WorkflowNode_NodeState) Number
func (x WorkflowNode_NodeState) Number() protoreflect.EnumNumber
func (WorkflowNode_NodeState) String
func (x WorkflowNode_NodeState) String() string
func (WorkflowNode_NodeState) Type
func (WorkflowNode_NodeState) Type() protoreflect.EnumType
WorkflowTemplate
type WorkflowTemplate struct {
Id string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
// Output only. The resource name of the workflow template, as described
// in https://cloud.google.com/apis/design/resource_names.
//
// - For `projects.regions.workflowTemplates`, the resource name of the
// template has the following format:
// `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}`
//
// - For `projects.locations.workflowTemplates`, the resource name of the
// template has the following format:
// `projects/{project_id}/locations/{location}/workflowTemplates/{template_id}`
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Optional. Used to perform a consistent read-modify-write.
//
// This field should be left blank for a `CreateWorkflowTemplate` request. It
// is required for an `UpdateWorkflowTemplate` request, and must match the
// current server version. A typical update template flow would fetch the
// current template with a `GetWorkflowTemplate` request, which will return
// the current template with the `version` field filled in with the
// current server version. The user updates other fields in the template,
// then returns it as part of the `UpdateWorkflowTemplate` request.
Version int32 `protobuf:"varint,3,opt,name=version,proto3" json:"version,omitempty"`
// Output only. The time template was created.
CreateTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime,proto3" json:"create_time,omitempty"`
// Output only. The time template was last updated.
UpdateTime *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=update_time,json=updateTime,proto3" json:"update_time,omitempty"`
// Optional. The labels to associate with this template. These labels
// will be propagated to all jobs and clusters created by the workflow
// instance.
//
// Label **keys** must contain 1 to 63 characters, and must conform to
// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
//
// Label **values** may be empty, but, if present, must contain 1 to 63
// characters, and must conform to
// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
//
// No more than 32 labels can be associated with a template.
Labels map[string]string `protobuf:"bytes,6,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
// Required. WorkflowTemplate scheduling information.
Placement *WorkflowTemplatePlacement `protobuf:"bytes,7,opt,name=placement,proto3" json:"placement,omitempty"`
// Required. The Directed Acyclic Graph of Jobs to submit.
Jobs []*OrderedJob `protobuf:"bytes,8,rep,name=jobs,proto3" json:"jobs,omitempty"`
// Optional. Template parameters whose values are substituted into the
// template. Values for parameters must be provided when the template is
// instantiated.
Parameters []*TemplateParameter `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"`
// Optional. Timeout duration for the DAG of jobs, expressed in seconds (see
// [JSON representation of
// duration](https://developers.google.com/protocol-buffers/docs/proto3#json)).
// The timeout duration must be from 10 minutes ("600s") to 24 hours
// ("86400s"). The timer begins when the first job is submitted. If the
// workflow is running at the end of the timeout period, any remaining jobs
// are cancelled, the workflow is ended, and if the workflow was running on a
// [managed
// cluster](/dataproc/docs/concepts/workflows/using-workflows#configuring_or_selecting_a_cluster),
// the cluster is deleted.
DagTimeout *durationpb.Duration `protobuf:"bytes,10,opt,name=dag_timeout,json=dagTimeout,proto3" json:"dag_timeout,omitempty"`
// Optional. Encryption settings for encrypting workflow template job
// arguments.
EncryptionConfig *WorkflowTemplate_EncryptionConfig `protobuf:"bytes,11,opt,name=encryption_config,json=encryptionConfig,proto3" json:"encryption_config,omitempty"`
// contains filtered or unexported fields
}
A Dataproc workflow template resource.
func (*WorkflowTemplate) Descriptor
func (*WorkflowTemplate) Descriptor() ([]byte, []int)
Deprecated: Use WorkflowTemplate.ProtoReflect.Descriptor instead.
func (*WorkflowTemplate) GetCreateTime
func (x *WorkflowTemplate) GetCreateTime() *timestamppb.Timestamp
func (*WorkflowTemplate) GetDagTimeout
func (x *WorkflowTemplate) GetDagTimeout() *durationpb.Duration
func (*WorkflowTemplate) GetEncryptionConfig
func (x *WorkflowTemplate) GetEncryptionConfig() *WorkflowTemplate_EncryptionConfig
func (*WorkflowTemplate) GetId
func (x *WorkflowTemplate) GetId() string
func (*WorkflowTemplate) GetJobs
func (x *WorkflowTemplate) GetJobs() []*OrderedJob
func (*WorkflowTemplate) GetLabels
func (x *WorkflowTemplate) GetLabels() map[string]string
func (*WorkflowTemplate) GetName
func (x *WorkflowTemplate) GetName() string
func (*WorkflowTemplate) GetParameters
func (x *WorkflowTemplate) GetParameters() []*TemplateParameter
func (*WorkflowTemplate) GetPlacement
func (x *WorkflowTemplate) GetPlacement() *WorkflowTemplatePlacement
func (*WorkflowTemplate) GetUpdateTime
func (x *WorkflowTemplate) GetUpdateTime() *timestamppb.Timestamp
func (*WorkflowTemplate) GetVersion
func (x *WorkflowTemplate) GetVersion() int32
func (*WorkflowTemplate) ProtoMessage
func (*WorkflowTemplate) ProtoMessage()
func (*WorkflowTemplate) ProtoReflect
func (x *WorkflowTemplate) ProtoReflect() protoreflect.Message
func (*WorkflowTemplate) Reset
func (x *WorkflowTemplate) Reset()
func (*WorkflowTemplate) String
func (x *WorkflowTemplate) String() string
WorkflowTemplatePlacement
type WorkflowTemplatePlacement struct {
// Required. Specifies where workflow executes; either on a managed
// cluster or an existing cluster chosen by labels.
//
// Types that are assignable to Placement:
//
// *WorkflowTemplatePlacement_ManagedCluster
// *WorkflowTemplatePlacement_ClusterSelector
Placement isWorkflowTemplatePlacement_Placement `protobuf_oneof:"placement"`
// contains filtered or unexported fields
}
Specifies workflow execution target.
Either managed_cluster
or cluster_selector
is required.
func (*WorkflowTemplatePlacement) Descriptor
func (*WorkflowTemplatePlacement) Descriptor() ([]byte, []int)
Deprecated: Use WorkflowTemplatePlacement.ProtoReflect.Descriptor instead.
func (*WorkflowTemplatePlacement) GetClusterSelector
func (x *WorkflowTemplatePlacement) GetClusterSelector() *ClusterSelector
func (*WorkflowTemplatePlacement) GetManagedCluster
func (x *WorkflowTemplatePlacement) GetManagedCluster() *ManagedCluster
func (*WorkflowTemplatePlacement) GetPlacement
func (m *WorkflowTemplatePlacement) GetPlacement() isWorkflowTemplatePlacement_Placement
func (*WorkflowTemplatePlacement) ProtoMessage
func (*WorkflowTemplatePlacement) ProtoMessage()
func (*WorkflowTemplatePlacement) ProtoReflect
func (x *WorkflowTemplatePlacement) ProtoReflect() protoreflect.Message
func (*WorkflowTemplatePlacement) Reset
func (x *WorkflowTemplatePlacement) Reset()
func (*WorkflowTemplatePlacement) String
func (x *WorkflowTemplatePlacement) String() string
WorkflowTemplatePlacement_ClusterSelector
type WorkflowTemplatePlacement_ClusterSelector struct {
// Optional. A selector that chooses target cluster for jobs based
// on metadata.
//
// The selector is evaluated at the time each job is submitted.
ClusterSelector *ClusterSelector `protobuf:"bytes,2,opt,name=cluster_selector,json=clusterSelector,proto3,oneof"`
}
WorkflowTemplatePlacement_ManagedCluster
type WorkflowTemplatePlacement_ManagedCluster struct {
// A cluster that is managed by the workflow.
ManagedCluster *ManagedCluster `protobuf:"bytes,1,opt,name=managed_cluster,json=managedCluster,proto3,oneof"`
}
WorkflowTemplateServiceClient
type WorkflowTemplateServiceClient interface {
// Creates a new workflow template.
CreateWorkflowTemplate(ctx context.Context, in *CreateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
// Retrieves the latest workflow template.
//
// Can retrieve a previously instantiated template by specifying an optional
// version parameter.
GetWorkflowTemplate(ctx context.Context, in *GetWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
// Instantiates a template and begins execution.
//
// The returned Operation can be used to track execution of
// workflow by polling
// [operations.get][google.longrunning.Operations.GetOperation].
// The Operation will complete when the entire workflow is finished.
//
// The running workflow can be aborted via
// [operations.cancel][google.longrunning.Operations.CancelOperation].
// This will cause any inflight jobs to be cancelled and workflow-owned
// clusters to be deleted.
//
// The [Operation.metadata][google.longrunning.Operation.metadata] will be
// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
// Also see [Using
// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
//
// On successful completion,
// [Operation.response][google.longrunning.Operation.response] will be
// [Empty][google.protobuf.Empty].
InstantiateWorkflowTemplate(ctx context.Context, in *InstantiateWorkflowTemplateRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
// Instantiates a template and begins execution.
//
// This method is equivalent to executing the sequence
// [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate],
// [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
// [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
//
// The returned Operation can be used to track execution of
// workflow by polling
// [operations.get][google.longrunning.Operations.GetOperation].
// The Operation will complete when the entire workflow is finished.
//
// The running workflow can be aborted via
// [operations.cancel][google.longrunning.Operations.CancelOperation].
// This will cause any inflight jobs to be cancelled and workflow-owned
// clusters to be deleted.
//
// The [Operation.metadata][google.longrunning.Operation.metadata] will be
// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
// Also see [Using
// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
//
// On successful completion,
// [Operation.response][google.longrunning.Operation.response] will be
// [Empty][google.protobuf.Empty].
InstantiateInlineWorkflowTemplate(ctx context.Context, in *InstantiateInlineWorkflowTemplateRequest, opts ...grpc.CallOption) (*longrunningpb.Operation, error)
// Updates (replaces) workflow template. The updated template
// must contain a version that matches the current server version.
UpdateWorkflowTemplate(ctx context.Context, in *UpdateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error)
// Lists workflows that match the specified filter in the request.
ListWorkflowTemplates(ctx context.Context, in *ListWorkflowTemplatesRequest, opts ...grpc.CallOption) (*ListWorkflowTemplatesResponse, error)
// Deletes a workflow template. It does not cancel in-progress workflows.
DeleteWorkflowTemplate(ctx context.Context, in *DeleteWorkflowTemplateRequest, opts ...grpc.CallOption) (*emptypb.Empty, error)
}
WorkflowTemplateServiceClient is the client API for WorkflowTemplateService service.
For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
func NewWorkflowTemplateServiceClient
func NewWorkflowTemplateServiceClient(cc grpc.ClientConnInterface) WorkflowTemplateServiceClient
WorkflowTemplateServiceServer
type WorkflowTemplateServiceServer interface {
// Creates a new workflow template.
CreateWorkflowTemplate(context.Context, *CreateWorkflowTemplateRequest) (*WorkflowTemplate, error)
// Retrieves the latest workflow template.
//
// Can retrieve a previously instantiated template by specifying an optional
// version parameter.
GetWorkflowTemplate(context.Context, *GetWorkflowTemplateRequest) (*WorkflowTemplate, error)
// Instantiates a template and begins execution.
//
// The returned Operation can be used to track execution of
// workflow by polling
// [operations.get][google.longrunning.Operations.GetOperation].
// The Operation will complete when the entire workflow is finished.
//
// The running workflow can be aborted via
// [operations.cancel][google.longrunning.Operations.CancelOperation].
// This will cause any inflight jobs to be cancelled and workflow-owned
// clusters to be deleted.
//
// The [Operation.metadata][google.longrunning.Operation.metadata] will be
// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
// Also see [Using
// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
//
// On successful completion,
// [Operation.response][google.longrunning.Operation.response] will be
// [Empty][google.protobuf.Empty].
InstantiateWorkflowTemplate(context.Context, *InstantiateWorkflowTemplateRequest) (*longrunningpb.Operation, error)
// Instantiates a template and begins execution.
//
// This method is equivalent to executing the sequence
// [CreateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.CreateWorkflowTemplate],
// [InstantiateWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.InstantiateWorkflowTemplate],
// [DeleteWorkflowTemplate][google.cloud.dataproc.v1.WorkflowTemplateService.DeleteWorkflowTemplate].
//
// The returned Operation can be used to track execution of
// workflow by polling
// [operations.get][google.longrunning.Operations.GetOperation].
// The Operation will complete when the entire workflow is finished.
//
// The running workflow can be aborted via
// [operations.cancel][google.longrunning.Operations.CancelOperation].
// This will cause any inflight jobs to be cancelled and workflow-owned
// clusters to be deleted.
//
// The [Operation.metadata][google.longrunning.Operation.metadata] will be
// [WorkflowMetadata](https://cloud.google.com/dataproc/docs/reference/rpc/google.cloud.dataproc.v1#workflowmetadata).
// Also see [Using
// WorkflowMetadata](https://cloud.google.com/dataproc/docs/concepts/workflows/debugging#using_workflowmetadata).
//
// On successful completion,
// [Operation.response][google.longrunning.Operation.response] will be
// [Empty][google.protobuf.Empty].
InstantiateInlineWorkflowTemplate(context.Context, *InstantiateInlineWorkflowTemplateRequest) (*longrunningpb.Operation, error)
// Updates (replaces) workflow template. The updated template
// must contain a version that matches the current server version.
UpdateWorkflowTemplate(context.Context, *UpdateWorkflowTemplateRequest) (*WorkflowTemplate, error)
// Lists workflows that match the specified filter in the request.
ListWorkflowTemplates(context.Context, *ListWorkflowTemplatesRequest) (*ListWorkflowTemplatesResponse, error)
// Deletes a workflow template. It does not cancel in-progress workflows.
DeleteWorkflowTemplate(context.Context, *DeleteWorkflowTemplateRequest) (*emptypb.Empty, error)
}
WorkflowTemplateServiceServer is the server API for WorkflowTemplateService service.
WorkflowTemplate_EncryptionConfig
type WorkflowTemplate_EncryptionConfig struct {
// Optional. The Cloud KMS key name to use for encrypting
// workflow template job arguments.
//
// When this key is provided, the following workflow template
// [job arguments]
// (https://cloud.google.com/dataproc/docs/concepts/workflows/use-workflows#adding_jobs_to_a_template),
// if present, are
// [CMEK
// encrypted](https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/customer-managed-encryption#use_cmek_with_workflow_template_data):
//
// * [FlinkJob
// args](https://cloud.google.com/dataproc/docs/reference/rest/v1/FlinkJob)
// * [HadoopJob
// args](https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob)
// * [SparkJob
// args](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkJob)
// * [SparkRJob
// args](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkRJob)
// * [PySparkJob
// args](https://cloud.google.com/dataproc/docs/reference/rest/v1/PySparkJob)
// - [SparkSqlJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/SparkSqlJob)
// scriptVariables and queryList.queries
// - [HiveJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/HiveJob)
// scriptVariables and queryList.queries
// - [PigJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PigJob)
// scriptVariables and queryList.queries
// - [PrestoJob](https://cloud.google.com/dataproc/docs/reference/rest/v1/PrestoJob)
// scriptVariables and queryList.queries
KmsKey string `protobuf:"bytes,1,opt,name=kms_key,json=kmsKey,proto3" json:"kms_key,omitempty"`
// contains filtered or unexported fields
}
Encryption settings for encrypting workflow template job arguments.
func (*WorkflowTemplate_EncryptionConfig) Descriptor
func (*WorkflowTemplate_EncryptionConfig) Descriptor() ([]byte, []int)
Deprecated: Use WorkflowTemplate_EncryptionConfig.ProtoReflect.Descriptor instead.
func (*WorkflowTemplate_EncryptionConfig) GetKmsKey
func (x *WorkflowTemplate_EncryptionConfig) GetKmsKey() string
func (*WorkflowTemplate_EncryptionConfig) ProtoMessage
func (*WorkflowTemplate_EncryptionConfig) ProtoMessage()
func (*WorkflowTemplate_EncryptionConfig) ProtoReflect
func (x *WorkflowTemplate_EncryptionConfig) ProtoReflect() protoreflect.Message
func (*WorkflowTemplate_EncryptionConfig) Reset
func (x *WorkflowTemplate_EncryptionConfig) Reset()
func (*WorkflowTemplate_EncryptionConfig) String
func (x *WorkflowTemplate_EncryptionConfig) String() string
YarnApplication
type YarnApplication struct {
// Required. The application name.
Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
// Required. The application state.
State YarnApplication_State `protobuf:"varint,2,opt,name=state,proto3,enum=google.cloud.dataproc.v1.YarnApplication_State" json:"state,omitempty"`
// Required. The numerical progress of the application, from 1 to 100.
Progress float32 `protobuf:"fixed32,3,opt,name=progress,proto3" json:"progress,omitempty"`
// Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or
// TimelineServer that provides application-specific information. The URL uses
// the internal hostname, and requires a proxy server for resolution and,
// possibly, access.
TrackingUrl string `protobuf:"bytes,4,opt,name=tracking_url,json=trackingUrl,proto3" json:"tracking_url,omitempty"`
// contains filtered or unexported fields
}
A YARN application created by a job. Application information is a subset of
org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto
.
Beta Feature: This report is available for testing purposes only. It may be changed before final release.
func (*YarnApplication) Descriptor
func (*YarnApplication) Descriptor() ([]byte, []int)
Deprecated: Use YarnApplication.ProtoReflect.Descriptor instead.
func (*YarnApplication) GetName
func (x *YarnApplication) GetName() string
func (*YarnApplication) GetProgress
func (x *YarnApplication) GetProgress() float32
func (*YarnApplication) GetState
func (x *YarnApplication) GetState() YarnApplication_State
func (*YarnApplication) GetTrackingUrl
func (x *YarnApplication) GetTrackingUrl() string
func (*YarnApplication) ProtoMessage
func (*YarnApplication) ProtoMessage()
func (*YarnApplication) ProtoReflect
func (x *YarnApplication) ProtoReflect() protoreflect.Message
func (*YarnApplication) Reset
func (x *YarnApplication) Reset()
func (*YarnApplication) String
func (x *YarnApplication) String() string
YarnApplication_State
type YarnApplication_State int32
The application state, corresponding to
YarnProtos.YarnApplicationStateProto
.
YarnApplication_STATE_UNSPECIFIED, YarnApplication_NEW, YarnApplication_NEW_SAVING, YarnApplication_SUBMITTED, YarnApplication_ACCEPTED, YarnApplication_RUNNING, YarnApplication_FINISHED, YarnApplication_FAILED, YarnApplication_KILLED
const (
// Status is unspecified.
YarnApplication_STATE_UNSPECIFIED YarnApplication_State = 0
// Status is NEW.
YarnApplication_NEW YarnApplication_State = 1
// Status is NEW_SAVING.
YarnApplication_NEW_SAVING YarnApplication_State = 2
// Status is SUBMITTED.
YarnApplication_SUBMITTED YarnApplication_State = 3
// Status is ACCEPTED.
YarnApplication_ACCEPTED YarnApplication_State = 4
// Status is RUNNING.
YarnApplication_RUNNING YarnApplication_State = 5
// Status is FINISHED.
YarnApplication_FINISHED YarnApplication_State = 6
// Status is FAILED.
YarnApplication_FAILED YarnApplication_State = 7
// Status is KILLED.
YarnApplication_KILLED YarnApplication_State = 8
)
func (YarnApplication_State) Descriptor
func (YarnApplication_State) Descriptor() protoreflect.EnumDescriptor
func (YarnApplication_State) Enum
func (x YarnApplication_State) Enum() *YarnApplication_State
func (YarnApplication_State) EnumDescriptor
func (YarnApplication_State) EnumDescriptor() ([]byte, []int)
Deprecated: Use YarnApplication_State.Descriptor instead.
func (YarnApplication_State) Number
func (x YarnApplication_State) Number() protoreflect.EnumNumber
func (YarnApplication_State) String
func (x YarnApplication_State) String() string
func (YarnApplication_State) Type
func (YarnApplication_State) Type() protoreflect.EnumType