mirror of
https://github.com/genuinetools/reg.git
synced 2024-09-19 16:51:01 -04:00
2278 lines
93 KiB
Go
2278 lines
93 KiB
Go
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||
|
// source: google/cloud/dataproc/v1beta2/clusters.proto
|
||
|
|
||
|
package dataproc // import "google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2"
|
||
|
|
||
|
import proto "github.com/golang/protobuf/proto"
|
||
|
import fmt "fmt"
|
||
|
import math "math"
|
||
|
import duration "github.com/golang/protobuf/ptypes/duration"
|
||
|
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||
|
import _ "google.golang.org/genproto/googleapis/api/annotations"
|
||
|
import longrunning "google.golang.org/genproto/googleapis/longrunning"
|
||
|
import field_mask "google.golang.org/genproto/protobuf/field_mask"
|
||
|
|
||
|
import (
|
||
|
context "golang.org/x/net/context"
|
||
|
grpc "google.golang.org/grpc"
|
||
|
)
|
||
|
|
||
|
// Reference imports to suppress errors if they are not otherwise used.
// (Generated code: these keep the imports live even if no message in this
// file happens to use a given package directly.)
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||
|
|
||
|
// The cluster state.
type ClusterStatus_State int32

const (
	// The cluster state is unknown.
	ClusterStatus_UNKNOWN ClusterStatus_State = 0
	// The cluster is being created and set up. It is not ready for use.
	ClusterStatus_CREATING ClusterStatus_State = 1
	// The cluster is currently running and healthy. It is ready for use.
	ClusterStatus_RUNNING ClusterStatus_State = 2
	// The cluster encountered an error. It is not ready for use.
	ClusterStatus_ERROR ClusterStatus_State = 3
	// The cluster is being deleted. It cannot be used.
	ClusterStatus_DELETING ClusterStatus_State = 4
	// The cluster is being updated. It continues to accept and process jobs.
	ClusterStatus_UPDATING ClusterStatus_State = 5
)

// ClusterStatus_State_name maps enum values to their proto names; it is
// consumed by ClusterStatus_State.String via proto.EnumName.
var ClusterStatus_State_name = map[int32]string{
	0: "UNKNOWN",
	1: "CREATING",
	2: "RUNNING",
	3: "ERROR",
	4: "DELETING",
	5: "UPDATING",
}

// ClusterStatus_State_value is the inverse mapping, from proto name back to
// the numeric enum value.
var ClusterStatus_State_value = map[string]int32{
	"UNKNOWN":  0,
	"CREATING": 1,
	"RUNNING":  2,
	"ERROR":    3,
	"DELETING": 4,
	"UPDATING": 5,
}
|
||
|
|
||
|
func (x ClusterStatus_State) String() string {
|
||
|
return proto.EnumName(ClusterStatus_State_name, int32(x))
|
||
|
}
|
||
|
func (ClusterStatus_State) EnumDescriptor() ([]byte, []int) {
|
||
|
return fileDescriptor_clusters_92d23f68998e82a3, []int{9, 0}
|
||
|
}
|
||
|
|
||
|
// The cluster substate.
type ClusterStatus_Substate int32

const (
	// The cluster substate is unknown.
	ClusterStatus_UNSPECIFIED ClusterStatus_Substate = 0
	// The cluster is known to be in an unhealthy state
	// (for example, critical daemons are not running or HDFS capacity is
	// exhausted).
	//
	// Applies to RUNNING state.
	ClusterStatus_UNHEALTHY ClusterStatus_Substate = 1
	// The agent-reported status is out of date (may occur if
	// Cloud Dataproc loses communication with Agent).
	//
	// Applies to RUNNING state.
	ClusterStatus_STALE_STATUS ClusterStatus_Substate = 2
)

// ClusterStatus_Substate_name maps enum values to their proto names; used by
// ClusterStatus_Substate.String via proto.EnumName.
var ClusterStatus_Substate_name = map[int32]string{
	0: "UNSPECIFIED",
	1: "UNHEALTHY",
	2: "STALE_STATUS",
}

// ClusterStatus_Substate_value is the inverse mapping, from proto name back
// to the numeric enum value.
var ClusterStatus_Substate_value = map[string]int32{
	"UNSPECIFIED":  0,
	"UNHEALTHY":    1,
	"STALE_STATUS": 2,
}
|
||
|
|
||
|
func (x ClusterStatus_Substate) String() string {
|
||
|
return proto.EnumName(ClusterStatus_Substate_name, int32(x))
|
||
|
}
|
||
|
func (ClusterStatus_Substate) EnumDescriptor() ([]byte, []int) {
|
||
|
return fileDescriptor_clusters_92d23f68998e82a3, []int{9, 1}
|
||
|
}
|
||
|
|
||
|
// Describes the identifying information, config, and status of
// a cluster of Compute Engine instances.
type Cluster struct {
	// Required. The Google Cloud Platform project ID that the cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The cluster name. Cluster names within a project must be
	// unique. Names of deleted clusters can be reused.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Required. The cluster config. Note that Cloud Dataproc may set
	// default values, and values may change when clusters are updated.
	Config *ClusterConfig `protobuf:"bytes,3,opt,name=config,proto3" json:"config,omitempty"`
	// Optional. The labels to associate with this cluster.
	// Label **keys** must contain 1 to 63 characters, and must conform to
	// [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// Label **values** may be empty, but, if present, must contain 1 to 63
	// characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt).
	// No more than 32 labels can be associated with a cluster.
	Labels map[string]string `protobuf:"bytes,8,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Output only. Cluster status.
	Status *ClusterStatus `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty"`
	// Output only. The previous cluster status.
	StatusHistory []*ClusterStatus `protobuf:"bytes,7,rep,name=status_history,json=statusHistory,proto3" json:"status_history,omitempty"`
	// Output only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc
	// generates this value when it creates the cluster.
	ClusterUuid string `protobuf:"bytes,6,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Contains cluster daemon metrics such as HDFS and YARN stats.
	//
	// **Beta Feature**: This report is available for testing purposes only. It may
	// be changed before final release.
	Metrics *ClusterMetrics `protobuf:"bytes,9,opt,name=metrics,proto3" json:"metrics,omitempty"`
	// Internal fields used by the proto runtime; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing; behavior is delegated to the proto
// runtime via xxx_messageInfo_Cluster.
func (m *Cluster) Reset()         { *m = Cluster{} }
func (m *Cluster) String() string { return proto.CompactTextString(m) }
func (*Cluster) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and the index path of
// this message within it.
func (*Cluster) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{0}
}
func (m *Cluster) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Cluster.Unmarshal(m, b)
}
func (m *Cluster) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Cluster.Marshal(b, m, deterministic)
}
func (dst *Cluster) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Cluster.Merge(dst, src)
}
func (m *Cluster) XXX_Size() int {
	return xxx_messageInfo_Cluster.Size(m)
}
func (m *Cluster) XXX_DiscardUnknown() {
	xxx_messageInfo_Cluster.DiscardUnknown(m)
}

// Cached message info shared by the XXX_* hooks above.
var xxx_messageInfo_Cluster proto.InternalMessageInfo
|
||
|
|
||
|
func (m *Cluster) GetProjectId() string {
|
||
|
if m != nil {
|
||
|
return m.ProjectId
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *Cluster) GetClusterName() string {
|
||
|
if m != nil {
|
||
|
return m.ClusterName
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *Cluster) GetConfig() *ClusterConfig {
|
||
|
if m != nil {
|
||
|
return m.Config
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *Cluster) GetLabels() map[string]string {
|
||
|
if m != nil {
|
||
|
return m.Labels
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *Cluster) GetStatus() *ClusterStatus {
|
||
|
if m != nil {
|
||
|
return m.Status
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *Cluster) GetStatusHistory() []*ClusterStatus {
|
||
|
if m != nil {
|
||
|
return m.StatusHistory
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *Cluster) GetClusterUuid() string {
|
||
|
if m != nil {
|
||
|
return m.ClusterUuid
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *Cluster) GetMetrics() *ClusterMetrics {
|
||
|
if m != nil {
|
||
|
return m.Metrics
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
// The cluster config.
type ClusterConfig struct {
	// Optional. A Cloud Storage staging bucket used for sharing generated
	// SSH keys and config. If you do not specify a staging bucket, Cloud
	// Dataproc will determine an appropriate Cloud Storage location (US,
	// ASIA, or EU) for your cluster's staging bucket according to the Google
	// Compute Engine zone where your cluster is deployed, and then it will create
	// and manage this project-level, per-location bucket for you.
	ConfigBucket string `protobuf:"bytes,1,opt,name=config_bucket,json=configBucket,proto3" json:"config_bucket,omitempty"`
	// Required. The shared Compute Engine config settings for
	// all instances in a cluster.
	GceClusterConfig *GceClusterConfig `protobuf:"bytes,8,opt,name=gce_cluster_config,json=gceClusterConfig,proto3" json:"gce_cluster_config,omitempty"`
	// Optional. The Compute Engine config settings for
	// the master instance in a cluster.
	MasterConfig *InstanceGroupConfig `protobuf:"bytes,9,opt,name=master_config,json=masterConfig,proto3" json:"master_config,omitempty"`
	// Optional. The Compute Engine config settings for
	// worker instances in a cluster.
	WorkerConfig *InstanceGroupConfig `protobuf:"bytes,10,opt,name=worker_config,json=workerConfig,proto3" json:"worker_config,omitempty"`
	// Optional. The Compute Engine config settings for
	// additional worker instances in a cluster.
	SecondaryWorkerConfig *InstanceGroupConfig `protobuf:"bytes,12,opt,name=secondary_worker_config,json=secondaryWorkerConfig,proto3" json:"secondary_worker_config,omitempty"`
	// Optional. The config settings for software inside the cluster.
	SoftwareConfig *SoftwareConfig `protobuf:"bytes,13,opt,name=software_config,json=softwareConfig,proto3" json:"software_config,omitempty"`
	// Optional. The config setting for auto delete cluster schedule.
	LifecycleConfig *LifecycleConfig `protobuf:"bytes,14,opt,name=lifecycle_config,json=lifecycleConfig,proto3" json:"lifecycle_config,omitempty"`
	// Optional. Commands to execute on each node after config is
	// completed. By default, executables are run on master and all worker nodes.
	// You can test a node's <code>role</code> metadata to run an executable on
	// a master or worker node, as shown below using `curl` (you can also use `wget`):
	//
	//     ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role)
	//     if [[ "${ROLE}" == 'Master' ]]; then
	//       ... master specific actions ...
	//     else
	//       ... worker specific actions ...
	//     fi
	InitializationActions []*NodeInitializationAction `protobuf:"bytes,11,rep,name=initialization_actions,json=initializationActions,proto3" json:"initialization_actions,omitempty"`
	// Internal fields used by the proto runtime; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing; behavior is delegated to the proto
// runtime via xxx_messageInfo_ClusterConfig.
func (m *ClusterConfig) Reset()         { *m = ClusterConfig{} }
func (m *ClusterConfig) String() string { return proto.CompactTextString(m) }
func (*ClusterConfig) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and the index path of
// this message within it.
func (*ClusterConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{1}
}
func (m *ClusterConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClusterConfig.Unmarshal(m, b)
}
func (m *ClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClusterConfig.Marshal(b, m, deterministic)
}
func (dst *ClusterConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClusterConfig.Merge(dst, src)
}
func (m *ClusterConfig) XXX_Size() int {
	return xxx_messageInfo_ClusterConfig.Size(m)
}
func (m *ClusterConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_ClusterConfig.DiscardUnknown(m)
}

// Cached message info shared by the XXX_* hooks above.
var xxx_messageInfo_ClusterConfig proto.InternalMessageInfo
|
||
|
|
||
|
func (m *ClusterConfig) GetConfigBucket() string {
|
||
|
if m != nil {
|
||
|
return m.ConfigBucket
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *ClusterConfig) GetGceClusterConfig() *GceClusterConfig {
|
||
|
if m != nil {
|
||
|
return m.GceClusterConfig
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *ClusterConfig) GetMasterConfig() *InstanceGroupConfig {
|
||
|
if m != nil {
|
||
|
return m.MasterConfig
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *ClusterConfig) GetWorkerConfig() *InstanceGroupConfig {
|
||
|
if m != nil {
|
||
|
return m.WorkerConfig
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *ClusterConfig) GetSecondaryWorkerConfig() *InstanceGroupConfig {
|
||
|
if m != nil {
|
||
|
return m.SecondaryWorkerConfig
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *ClusterConfig) GetSoftwareConfig() *SoftwareConfig {
|
||
|
if m != nil {
|
||
|
return m.SoftwareConfig
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *ClusterConfig) GetLifecycleConfig() *LifecycleConfig {
|
||
|
if m != nil {
|
||
|
return m.LifecycleConfig
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *ClusterConfig) GetInitializationActions() []*NodeInitializationAction {
|
||
|
if m != nil {
|
||
|
return m.InitializationActions
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
// Common config settings for resources of Compute Engine cluster
// instances, applicable to all instances in the cluster.
type GceClusterConfig struct {
	// Optional. The zone where the Compute Engine cluster will be located.
	// On a create request, it is required in the "global" region. If omitted
	// in a non-global Cloud Dataproc region, the service will pick a zone in the
	// corresponding Compute Engine region. On a get request, zone will always be
	// present.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`
	// * `projects/[project_id]/zones/[zone]`
	// * `us-central1-f`
	ZoneUri string `protobuf:"bytes,1,opt,name=zone_uri,json=zoneUri,proto3" json:"zone_uri,omitempty"`
	// Optional. The Compute Engine network to be used for machine
	// communications. Cannot be specified with subnetwork_uri. If neither
	// `network_uri` nor `subnetwork_uri` is specified, the "default" network of
	// the project is used, if it exists. Cannot be a "Custom Subnet Network" (see
	// [Using Subnetworks](/compute/docs/subnetworks) for more information).
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`
	// * `projects/[project_id]/regions/global/default`
	// * `default`
	NetworkUri string `protobuf:"bytes,2,opt,name=network_uri,json=networkUri,proto3" json:"network_uri,omitempty"`
	// Optional. The Compute Engine subnetwork to be used for machine
	// communications. Cannot be specified with network_uri.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`
	// * `projects/[project_id]/regions/us-east1/sub0`
	// * `sub0`
	SubnetworkUri string `protobuf:"bytes,6,opt,name=subnetwork_uri,json=subnetworkUri,proto3" json:"subnetwork_uri,omitempty"`
	// Optional. If true, all instances in the cluster will only have internal IP
	// addresses. By default, clusters are not restricted to internal IP addresses,
	// and will have ephemeral external IP addresses assigned to each instance.
	// This `internal_ip_only` restriction can only be enabled for subnetwork
	// enabled networks, and all off-cluster dependencies must be configured to be
	// accessible without external IP addresses.
	InternalIpOnly bool `protobuf:"varint,7,opt,name=internal_ip_only,json=internalIpOnly,proto3" json:"internal_ip_only,omitempty"`
	// Optional. The service account of the instances. Defaults to the default
	// Compute Engine service account. Custom service accounts need
	// permissions equivalent to the following IAM roles:
	//
	// * roles/logging.logWriter
	// * roles/storage.objectAdmin
	//
	// (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts
	// for more information).
	// Example: `[account_id]@[project_id].iam.gserviceaccount.com`
	ServiceAccount string `protobuf:"bytes,8,opt,name=service_account,json=serviceAccount,proto3" json:"service_account,omitempty"`
	// Optional. The URIs of service account scopes to be included in
	// Compute Engine instances. The following base set of scopes is always
	// included:
	//
	// * https://www.googleapis.com/auth/cloud.useraccounts.readonly
	// * https://www.googleapis.com/auth/devstorage.read_write
	// * https://www.googleapis.com/auth/logging.write
	//
	// If no scopes are specified, the following defaults are also provided:
	//
	// * https://www.googleapis.com/auth/bigquery
	// * https://www.googleapis.com/auth/bigtable.admin.table
	// * https://www.googleapis.com/auth/bigtable.data
	// * https://www.googleapis.com/auth/devstorage.full_control
	ServiceAccountScopes []string `protobuf:"bytes,3,rep,name=service_account_scopes,json=serviceAccountScopes,proto3" json:"service_account_scopes,omitempty"`
	// The Compute Engine tags to add to all instances (see
	// [Tagging instances](/compute/docs/label-or-tag-resources#tags)).
	Tags []string `protobuf:"bytes,4,rep,name=tags,proto3" json:"tags,omitempty"`
	// The Compute Engine metadata entries to add to all instances (see
	// [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)).
	Metadata map[string]string `protobuf:"bytes,5,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Internal fields used by the proto runtime; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing; behavior is delegated to the proto
// runtime via xxx_messageInfo_GceClusterConfig.
func (m *GceClusterConfig) Reset()         { *m = GceClusterConfig{} }
func (m *GceClusterConfig) String() string { return proto.CompactTextString(m) }
func (*GceClusterConfig) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and the index path of
// this message within it.
func (*GceClusterConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{2}
}
func (m *GceClusterConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_GceClusterConfig.Unmarshal(m, b)
}
func (m *GceClusterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_GceClusterConfig.Marshal(b, m, deterministic)
}
func (dst *GceClusterConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_GceClusterConfig.Merge(dst, src)
}
func (m *GceClusterConfig) XXX_Size() int {
	return xxx_messageInfo_GceClusterConfig.Size(m)
}
func (m *GceClusterConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_GceClusterConfig.DiscardUnknown(m)
}

// Cached message info shared by the XXX_* hooks above.
var xxx_messageInfo_GceClusterConfig proto.InternalMessageInfo
|
||
|
|
||
|
func (m *GceClusterConfig) GetZoneUri() string {
|
||
|
if m != nil {
|
||
|
return m.ZoneUri
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *GceClusterConfig) GetNetworkUri() string {
|
||
|
if m != nil {
|
||
|
return m.NetworkUri
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *GceClusterConfig) GetSubnetworkUri() string {
|
||
|
if m != nil {
|
||
|
return m.SubnetworkUri
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *GceClusterConfig) GetInternalIpOnly() bool {
|
||
|
if m != nil {
|
||
|
return m.InternalIpOnly
|
||
|
}
|
||
|
return false
|
||
|
}
|
||
|
|
||
|
func (m *GceClusterConfig) GetServiceAccount() string {
|
||
|
if m != nil {
|
||
|
return m.ServiceAccount
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *GceClusterConfig) GetServiceAccountScopes() []string {
|
||
|
if m != nil {
|
||
|
return m.ServiceAccountScopes
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *GceClusterConfig) GetTags() []string {
|
||
|
if m != nil {
|
||
|
return m.Tags
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *GceClusterConfig) GetMetadata() map[string]string {
|
||
|
if m != nil {
|
||
|
return m.Metadata
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
// Optional. The config settings for Compute Engine resources in
// an instance group, such as a master or worker group.
type InstanceGroupConfig struct {
	// Optional. The number of VM instances in the instance group.
	// For master instance groups, must be set to 1.
	NumInstances int32 `protobuf:"varint,1,opt,name=num_instances,json=numInstances,proto3" json:"num_instances,omitempty"`
	// Output only. The list of instance names. Cloud Dataproc derives the names
	// from `cluster_name`, `num_instances`, and the instance group.
	InstanceNames []string `protobuf:"bytes,2,rep,name=instance_names,json=instanceNames,proto3" json:"instance_names,omitempty"`
	// Output only. The Compute Engine image resource used for cluster
	// instances. Inferred from `SoftwareConfig.image_version`.
	ImageUri string `protobuf:"bytes,3,opt,name=image_uri,json=imageUri,proto3" json:"image_uri,omitempty"`
	// Optional. The Compute Engine machine type used for cluster instances.
	//
	// A full URL, partial URI, or short name are valid. Examples:
	//
	// * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
	// * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`
	// * `n1-standard-2`
	//
	// **Auto Zone Exception**: If you are using the Cloud Dataproc
	// [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
	// feature, you must use the short name of the machine type
	// resource, for example, `n1-standard-2`.
	MachineTypeUri string `protobuf:"bytes,4,opt,name=machine_type_uri,json=machineTypeUri,proto3" json:"machine_type_uri,omitempty"`
	// Optional. Disk option config settings.
	DiskConfig *DiskConfig `protobuf:"bytes,5,opt,name=disk_config,json=diskConfig,proto3" json:"disk_config,omitempty"`
	// Optional. Specifies that this instance group contains preemptible instances.
	IsPreemptible bool `protobuf:"varint,6,opt,name=is_preemptible,json=isPreemptible,proto3" json:"is_preemptible,omitempty"`
	// Output only. The config for Compute Engine Instance Group
	// Manager that manages this group.
	// This is only used for preemptible instance groups.
	ManagedGroupConfig *ManagedGroupConfig `protobuf:"bytes,7,opt,name=managed_group_config,json=managedGroupConfig,proto3" json:"managed_group_config,omitempty"`
	// Optional. The Compute Engine accelerator configuration for these
	// instances.
	//
	// **Beta Feature**: This feature is still under development. It may be
	// changed before final release.
	Accelerators []*AcceleratorConfig `protobuf:"bytes,8,rep,name=accelerators,proto3" json:"accelerators,omitempty"`
	// Optional. Specifies the minimum cpu platform for the Instance Group.
	// See [Cloud Dataproc→Minimum CPU Platform]
	// (/dataproc/docs/concepts/compute/dataproc-min-cpu).
	MinCpuPlatform string `protobuf:"bytes,9,opt,name=min_cpu_platform,json=minCpuPlatform,proto3" json:"min_cpu_platform,omitempty"`
	// Internal fields used by the proto runtime; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing; behavior is delegated to the proto
// runtime via xxx_messageInfo_InstanceGroupConfig.
func (m *InstanceGroupConfig) Reset()         { *m = InstanceGroupConfig{} }
func (m *InstanceGroupConfig) String() string { return proto.CompactTextString(m) }
func (*InstanceGroupConfig) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and the index path of
// this message within it.
func (*InstanceGroupConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{3}
}
func (m *InstanceGroupConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_InstanceGroupConfig.Unmarshal(m, b)
}
func (m *InstanceGroupConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_InstanceGroupConfig.Marshal(b, m, deterministic)
}
func (dst *InstanceGroupConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_InstanceGroupConfig.Merge(dst, src)
}
func (m *InstanceGroupConfig) XXX_Size() int {
	return xxx_messageInfo_InstanceGroupConfig.Size(m)
}
func (m *InstanceGroupConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_InstanceGroupConfig.DiscardUnknown(m)
}

// Cached message info shared by the XXX_* hooks above.
var xxx_messageInfo_InstanceGroupConfig proto.InternalMessageInfo
|
||
|
|
||
|
func (m *InstanceGroupConfig) GetNumInstances() int32 {
|
||
|
if m != nil {
|
||
|
return m.NumInstances
|
||
|
}
|
||
|
return 0
|
||
|
}
|
||
|
|
||
|
func (m *InstanceGroupConfig) GetInstanceNames() []string {
|
||
|
if m != nil {
|
||
|
return m.InstanceNames
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *InstanceGroupConfig) GetImageUri() string {
|
||
|
if m != nil {
|
||
|
return m.ImageUri
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *InstanceGroupConfig) GetMachineTypeUri() string {
|
||
|
if m != nil {
|
||
|
return m.MachineTypeUri
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *InstanceGroupConfig) GetDiskConfig() *DiskConfig {
|
||
|
if m != nil {
|
||
|
return m.DiskConfig
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *InstanceGroupConfig) GetIsPreemptible() bool {
|
||
|
if m != nil {
|
||
|
return m.IsPreemptible
|
||
|
}
|
||
|
return false
|
||
|
}
|
||
|
|
||
|
func (m *InstanceGroupConfig) GetManagedGroupConfig() *ManagedGroupConfig {
|
||
|
if m != nil {
|
||
|
return m.ManagedGroupConfig
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *InstanceGroupConfig) GetAccelerators() []*AcceleratorConfig {
|
||
|
if m != nil {
|
||
|
return m.Accelerators
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *InstanceGroupConfig) GetMinCpuPlatform() string {
|
||
|
if m != nil {
|
||
|
return m.MinCpuPlatform
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
// Specifies the resources used to actively manage an instance group.
type ManagedGroupConfig struct {
	// Output only. The name of the Instance Template used for the Managed
	// Instance Group.
	InstanceTemplateName string `protobuf:"bytes,1,opt,name=instance_template_name,json=instanceTemplateName,proto3" json:"instance_template_name,omitempty"`
	// Output only. The name of the Instance Group Manager for this group.
	InstanceGroupManagerName string `protobuf:"bytes,2,opt,name=instance_group_manager_name,json=instanceGroupManagerName,proto3" json:"instance_group_manager_name,omitempty"`
	// Internal fields used by the proto runtime; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing; behavior is delegated to the proto
// runtime via xxx_messageInfo_ManagedGroupConfig.
func (m *ManagedGroupConfig) Reset()         { *m = ManagedGroupConfig{} }
func (m *ManagedGroupConfig) String() string { return proto.CompactTextString(m) }
func (*ManagedGroupConfig) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and the index path of
// this message within it.
func (*ManagedGroupConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{4}
}
func (m *ManagedGroupConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ManagedGroupConfig.Unmarshal(m, b)
}
func (m *ManagedGroupConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ManagedGroupConfig.Marshal(b, m, deterministic)
}
func (dst *ManagedGroupConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ManagedGroupConfig.Merge(dst, src)
}
func (m *ManagedGroupConfig) XXX_Size() int {
	return xxx_messageInfo_ManagedGroupConfig.Size(m)
}
func (m *ManagedGroupConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_ManagedGroupConfig.DiscardUnknown(m)
}

// Cached message info shared by the XXX_* hooks above.
var xxx_messageInfo_ManagedGroupConfig proto.InternalMessageInfo
|
||
|
|
||
|
func (m *ManagedGroupConfig) GetInstanceTemplateName() string {
|
||
|
if m != nil {
|
||
|
return m.InstanceTemplateName
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *ManagedGroupConfig) GetInstanceGroupManagerName() string {
|
||
|
if m != nil {
|
||
|
return m.InstanceGroupManagerName
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
// Specifies the type and number of accelerator cards attached to the instances
// of an instance group (see [GPUs on Compute Engine](/compute/docs/gpus/)).
type AcceleratorConfig struct {
	// Full URL, partial URI, or short name of the accelerator type resource to
	// expose to this instance. See [Compute Engine AcceleratorTypes](
	// /compute/docs/reference/beta/acceleratorTypes)
	//
	// Examples
	// * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80`
	// * `nvidia-tesla-k80`
	//
	// **Auto Zone Exception**: If you are using the Cloud Dataproc
	// [Auto Zone Placement](/dataproc/docs/concepts/configuring-clusters/auto-zone#using_auto_zone_placement)
	// feature, you must use the short name of the accelerator type
	// resource, for example, `nvidia-tesla-k80`.
	AcceleratorTypeUri string `protobuf:"bytes,1,opt,name=accelerator_type_uri,json=acceleratorTypeUri,proto3" json:"accelerator_type_uri,omitempty"`
	// The number of the accelerator cards of this type exposed to this instance.
	AcceleratorCount int32 `protobuf:"varint,2,opt,name=accelerator_count,json=acceleratorCount,proto3" json:"accelerator_count,omitempty"`
	// Internal fields used by the proto runtime; do not access directly.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Generated proto.Message plumbing; behavior is delegated to the proto
// runtime via xxx_messageInfo_AcceleratorConfig.
func (m *AcceleratorConfig) Reset()         { *m = AcceleratorConfig{} }
func (m *AcceleratorConfig) String() string { return proto.CompactTextString(m) }
func (*AcceleratorConfig) ProtoMessage()    {}

// Descriptor returns the compressed file descriptor and the index path of
// this message within it.
func (*AcceleratorConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{5}
}
func (m *AcceleratorConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_AcceleratorConfig.Unmarshal(m, b)
}
func (m *AcceleratorConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_AcceleratorConfig.Marshal(b, m, deterministic)
}
func (dst *AcceleratorConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_AcceleratorConfig.Merge(dst, src)
}
func (m *AcceleratorConfig) XXX_Size() int {
	return xxx_messageInfo_AcceleratorConfig.Size(m)
}
func (m *AcceleratorConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_AcceleratorConfig.DiscardUnknown(m)
}

// Cached message info shared by the XXX_* hooks above.
var xxx_messageInfo_AcceleratorConfig proto.InternalMessageInfo
|
||
|
|
||
|
func (m *AcceleratorConfig) GetAcceleratorTypeUri() string {
|
||
|
if m != nil {
|
||
|
return m.AcceleratorTypeUri
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *AcceleratorConfig) GetAcceleratorCount() int32 {
|
||
|
if m != nil {
|
||
|
return m.AcceleratorCount
|
||
|
}
|
||
|
return 0
|
||
|
}
|
||
|
|
||
|
// Specifies the config of disk options for a group of VM instances.
type DiskConfig struct {
	// Optional. Type of the boot disk (default is "pd-standard").
	// Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or
	// "pd-standard" (Persistent Disk Hard Disk Drive).
	BootDiskType string `protobuf:"bytes,3,opt,name=boot_disk_type,json=bootDiskType,proto3" json:"boot_disk_type,omitempty"`
	// Optional. Size in GB of the boot disk (default is 500GB).
	BootDiskSizeGb int32 `protobuf:"varint,1,opt,name=boot_disk_size_gb,json=bootDiskSizeGb,proto3" json:"boot_disk_size_gb,omitempty"`
	// Optional. Number of attached SSDs, from 0 to 4 (default is 0).
	// If SSDs are not attached, the boot disk is used to store runtime logs and
	// [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data.
	// If one or more SSDs are attached, this runtime bulk
	// data is spread across them, and the boot disk contains only basic
	// config and installed binaries.
	NumLocalSsds int32 `protobuf:"varint,2,opt,name=num_local_ssds,json=numLocalSsds,proto3" json:"num_local_ssds,omitempty"`
	// Internal proto bookkeeping fields (generated; not part of the API).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protobuf message plumbing generated by protoc-gen-go.
func (m *DiskConfig) Reset()         { *m = DiskConfig{} }
func (m *DiskConfig) String() string { return proto.CompactTextString(m) }
func (*DiskConfig) ProtoMessage()    {}
func (*DiskConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{6}
}
func (m *DiskConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DiskConfig.Unmarshal(m, b)
}
func (m *DiskConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DiskConfig.Marshal(b, m, deterministic)
}
func (dst *DiskConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DiskConfig.Merge(dst, src)
}
func (m *DiskConfig) XXX_Size() int {
	return xxx_messageInfo_DiskConfig.Size(m)
}
func (m *DiskConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_DiskConfig.DiscardUnknown(m)
}

var xxx_messageInfo_DiskConfig proto.InternalMessageInfo

// GetBootDiskType returns the boot disk type, or "" when m is nil.
func (m *DiskConfig) GetBootDiskType() string {
	if m != nil {
		return m.BootDiskType
	}
	return ""
}

// GetBootDiskSizeGb returns the boot disk size in GB, or 0 when m is nil.
func (m *DiskConfig) GetBootDiskSizeGb() int32 {
	if m != nil {
		return m.BootDiskSizeGb
	}
	return 0
}

// GetNumLocalSsds returns the number of attached local SSDs, or 0 when m is nil.
func (m *DiskConfig) GetNumLocalSsds() int32 {
	if m != nil {
		return m.NumLocalSsds
	}
	return 0
}
|
||
|
|
||
|
// Specifies the cluster auto delete related schedule configuration.
type LifecycleConfig struct {
	// Optional. The longest duration that cluster would keep alive while staying
	// idle; passing this threshold will cause cluster to be auto-deleted.
	IdleDeleteTtl *duration.Duration `protobuf:"bytes,1,opt,name=idle_delete_ttl,json=idleDeleteTtl,proto3" json:"idle_delete_ttl,omitempty"`
	// Optional. Either the exact time the cluster should be deleted at or
	// the cluster maximum age.
	//
	// Types that are valid to be assigned to Ttl:
	// *LifecycleConfig_AutoDeleteTime
	// *LifecycleConfig_AutoDeleteTtl
	Ttl isLifecycleConfig_Ttl `protobuf_oneof:"ttl"`
	// Internal proto bookkeeping fields (generated; not part of the API).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protobuf message plumbing generated by protoc-gen-go.
func (m *LifecycleConfig) Reset()         { *m = LifecycleConfig{} }
func (m *LifecycleConfig) String() string { return proto.CompactTextString(m) }
func (*LifecycleConfig) ProtoMessage()    {}
func (*LifecycleConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{7}
}
func (m *LifecycleConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_LifecycleConfig.Unmarshal(m, b)
}
func (m *LifecycleConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_LifecycleConfig.Marshal(b, m, deterministic)
}
func (dst *LifecycleConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_LifecycleConfig.Merge(dst, src)
}
func (m *LifecycleConfig) XXX_Size() int {
	return xxx_messageInfo_LifecycleConfig.Size(m)
}
func (m *LifecycleConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_LifecycleConfig.DiscardUnknown(m)
}

var xxx_messageInfo_LifecycleConfig proto.InternalMessageInfo

// isLifecycleConfig_Ttl is the sealed interface implemented by the oneof
// wrapper types for the `ttl` field.
type isLifecycleConfig_Ttl interface {
	isLifecycleConfig_Ttl()
}

// LifecycleConfig_AutoDeleteTime wraps the `auto_delete_time` oneof case.
type LifecycleConfig_AutoDeleteTime struct {
	AutoDeleteTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=auto_delete_time,json=autoDeleteTime,proto3,oneof"`
}

// LifecycleConfig_AutoDeleteTtl wraps the `auto_delete_ttl` oneof case.
type LifecycleConfig_AutoDeleteTtl struct {
	AutoDeleteTtl *duration.Duration `protobuf:"bytes,3,opt,name=auto_delete_ttl,json=autoDeleteTtl,proto3,oneof"`
}

func (*LifecycleConfig_AutoDeleteTime) isLifecycleConfig_Ttl() {}

func (*LifecycleConfig_AutoDeleteTtl) isLifecycleConfig_Ttl() {}

// GetTtl returns the populated oneof wrapper, or nil when m is nil.
func (m *LifecycleConfig) GetTtl() isLifecycleConfig_Ttl {
	if m != nil {
		return m.Ttl
	}
	return nil
}

// GetIdleDeleteTtl returns the idle-delete TTL, or nil when m is nil.
func (m *LifecycleConfig) GetIdleDeleteTtl() *duration.Duration {
	if m != nil {
		return m.IdleDeleteTtl
	}
	return nil
}

// GetAutoDeleteTime returns the auto-delete timestamp when that oneof case is
// set; otherwise nil.
func (m *LifecycleConfig) GetAutoDeleteTime() *timestamp.Timestamp {
	if x, ok := m.GetTtl().(*LifecycleConfig_AutoDeleteTime); ok {
		return x.AutoDeleteTime
	}
	return nil
}

// GetAutoDeleteTtl returns the auto-delete TTL when that oneof case is set;
// otherwise nil.
func (m *LifecycleConfig) GetAutoDeleteTtl() *duration.Duration {
	if x, ok := m.GetTtl().(*LifecycleConfig_AutoDeleteTtl); ok {
		return x.AutoDeleteTtl
	}
	return nil
}
|
||
|
|
||
|
// XXX_OneofFuncs is for the internal use of the proto package.
// It registers the marshal/unmarshal/size hooks for the `ttl` oneof.
func (*LifecycleConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _LifecycleConfig_OneofMarshaler, _LifecycleConfig_OneofUnmarshaler, _LifecycleConfig_OneofSizer, []interface{}{
		(*LifecycleConfig_AutoDeleteTime)(nil),
		(*LifecycleConfig_AutoDeleteTtl)(nil),
	}
}

// _LifecycleConfig_OneofMarshaler encodes whichever `ttl` case is set:
// field 2 (auto_delete_time) or field 3 (auto_delete_ttl), both length-delimited.
func _LifecycleConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*LifecycleConfig)
	// ttl
	switch x := m.Ttl.(type) {
	case *LifecycleConfig_AutoDeleteTime:
		// Tag = field number << 3 | wire type.
		b.EncodeVarint(2<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.AutoDeleteTime); err != nil {
			return err
		}
	case *LifecycleConfig_AutoDeleteTtl:
		b.EncodeVarint(3<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.AutoDeleteTtl); err != nil {
			return err
		}
	case nil:
	default:
		return fmt.Errorf("LifecycleConfig.Ttl has unexpected type %T", x)
	}
	return nil
}

// _LifecycleConfig_OneofUnmarshaler decodes fields 2 and 3 into the matching
// oneof wrapper; it returns (false, nil) for tags it does not own.
func _LifecycleConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*LifecycleConfig)
	switch tag {
	case 2: // ttl.auto_delete_time
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(timestamp.Timestamp)
		err := b.DecodeMessage(msg)
		m.Ttl = &LifecycleConfig_AutoDeleteTime{msg}
		return true, err
	case 3: // ttl.auto_delete_ttl
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(duration.Duration)
		err := b.DecodeMessage(msg)
		m.Ttl = &LifecycleConfig_AutoDeleteTtl{msg}
		return true, err
	default:
		return false, nil
	}
}

// _LifecycleConfig_OneofSizer reports the encoded size (tag + length prefix +
// payload) of the populated `ttl` case.
func _LifecycleConfig_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*LifecycleConfig)
	// ttl
	switch x := m.Ttl.(type) {
	case *LifecycleConfig_AutoDeleteTime:
		s := proto.Size(x.AutoDeleteTime)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case *LifecycleConfig_AutoDeleteTtl:
		s := proto.Size(x.AutoDeleteTtl)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
|
||
|
|
||
|
// Specifies an executable to run on a fully configured node and a
// timeout period for executable completion.
type NodeInitializationAction struct {
	// Required. Cloud Storage URI of executable file.
	ExecutableFile string `protobuf:"bytes,1,opt,name=executable_file,json=executableFile,proto3" json:"executable_file,omitempty"`
	// Optional. Amount of time executable has to complete. Default is
	// 10 minutes. Cluster creation fails with an explanatory error message (the
	// name of the executable that caused the error and the exceeded timeout
	// period) if the executable is not completed at end of the timeout period.
	ExecutionTimeout *duration.Duration `protobuf:"bytes,2,opt,name=execution_timeout,json=executionTimeout,proto3" json:"execution_timeout,omitempty"`
	// Internal proto bookkeeping fields (generated; not part of the API).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protobuf message plumbing generated by protoc-gen-go.
func (m *NodeInitializationAction) Reset()         { *m = NodeInitializationAction{} }
func (m *NodeInitializationAction) String() string { return proto.CompactTextString(m) }
func (*NodeInitializationAction) ProtoMessage()    {}
func (*NodeInitializationAction) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{8}
}
func (m *NodeInitializationAction) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_NodeInitializationAction.Unmarshal(m, b)
}
func (m *NodeInitializationAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_NodeInitializationAction.Marshal(b, m, deterministic)
}
func (dst *NodeInitializationAction) XXX_Merge(src proto.Message) {
	xxx_messageInfo_NodeInitializationAction.Merge(dst, src)
}
func (m *NodeInitializationAction) XXX_Size() int {
	return xxx_messageInfo_NodeInitializationAction.Size(m)
}
func (m *NodeInitializationAction) XXX_DiscardUnknown() {
	xxx_messageInfo_NodeInitializationAction.DiscardUnknown(m)
}

var xxx_messageInfo_NodeInitializationAction proto.InternalMessageInfo

// GetExecutableFile returns the executable's Cloud Storage URI, or "" when m is nil.
func (m *NodeInitializationAction) GetExecutableFile() string {
	if m != nil {
		return m.ExecutableFile
	}
	return ""
}

// GetExecutionTimeout returns the execution timeout, or nil when m is nil.
func (m *NodeInitializationAction) GetExecutionTimeout() *duration.Duration {
	if m != nil {
		return m.ExecutionTimeout
	}
	return nil
}
|
||
|
|
||
|
// The status of a cluster and its instances.
type ClusterStatus struct {
	// Output only. The cluster's state.
	State ClusterStatus_State `protobuf:"varint,1,opt,name=state,proto3,enum=google.cloud.dataproc.v1beta2.ClusterStatus_State" json:"state,omitempty"`
	// Output only. Optional details of cluster's state.
	Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"`
	// Output only. Time when this state was entered.
	StateStartTime *timestamp.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime,proto3" json:"state_start_time,omitempty"`
	// Output only. Additional state information that includes
	// status reported by the agent.
	Substate ClusterStatus_Substate `protobuf:"varint,4,opt,name=substate,proto3,enum=google.cloud.dataproc.v1beta2.ClusterStatus_Substate" json:"substate,omitempty"`
	// Internal proto bookkeeping fields (generated; not part of the API).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protobuf message plumbing generated by protoc-gen-go.
func (m *ClusterStatus) Reset()         { *m = ClusterStatus{} }
func (m *ClusterStatus) String() string { return proto.CompactTextString(m) }
func (*ClusterStatus) ProtoMessage()    {}
func (*ClusterStatus) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{9}
}
func (m *ClusterStatus) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClusterStatus.Unmarshal(m, b)
}
func (m *ClusterStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClusterStatus.Marshal(b, m, deterministic)
}
func (dst *ClusterStatus) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClusterStatus.Merge(dst, src)
}
func (m *ClusterStatus) XXX_Size() int {
	return xxx_messageInfo_ClusterStatus.Size(m)
}
func (m *ClusterStatus) XXX_DiscardUnknown() {
	xxx_messageInfo_ClusterStatus.DiscardUnknown(m)
}

var xxx_messageInfo_ClusterStatus proto.InternalMessageInfo

// GetState returns the cluster state, or ClusterStatus_UNKNOWN when m is nil.
func (m *ClusterStatus) GetState() ClusterStatus_State {
	if m != nil {
		return m.State
	}
	return ClusterStatus_UNKNOWN
}

// GetDetail returns optional state detail, or "" when m is nil.
func (m *ClusterStatus) GetDetail() string {
	if m != nil {
		return m.Detail
	}
	return ""
}

// GetStateStartTime returns when the current state was entered, or nil when m is nil.
func (m *ClusterStatus) GetStateStartTime() *timestamp.Timestamp {
	if m != nil {
		return m.StateStartTime
	}
	return nil
}

// GetSubstate returns the agent-reported substate, or
// ClusterStatus_UNSPECIFIED when m is nil.
func (m *ClusterStatus) GetSubstate() ClusterStatus_Substate {
	if m != nil {
		return m.Substate
	}
	return ClusterStatus_UNSPECIFIED
}
|
||
|
|
||
|
// Specifies the selection and config of software inside the cluster.
type SoftwareConfig struct {
	// Optional. The version of software inside the cluster. It must be one of the supported
	// [Cloud Dataproc Versions](/dataproc/docs/concepts/versioning/dataproc-versions#supported_cloud_dataproc_versions),
	// such as "1.2" (including a subminor version, such as "1.2.29"), or the
	// ["preview" version](/dataproc/docs/concepts/versioning/dataproc-versions#other_versions).
	// If unspecified, it defaults to the latest version.
	ImageVersion string `protobuf:"bytes,1,opt,name=image_version,json=imageVersion,proto3" json:"image_version,omitempty"`
	// Optional. The properties to set on daemon config files.
	//
	// Property keys are specified in `prefix:property` format, such as
	// `core:fs.defaultFS`. The following are supported prefixes
	// and their mappings:
	//
	// * capacity-scheduler: `capacity-scheduler.xml`
	// * core: `core-site.xml`
	// * distcp: `distcp-default.xml`
	// * hdfs: `hdfs-site.xml`
	// * hive: `hive-site.xml`
	// * mapred: `mapred-site.xml`
	// * pig: `pig.properties`
	// * spark: `spark-defaults.conf`
	// * yarn: `yarn-site.xml`
	//
	// For more information, see
	// [Cluster properties](/dataproc/docs/concepts/cluster-properties).
	Properties map[string]string `protobuf:"bytes,2,rep,name=properties,proto3" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
	// Internal proto bookkeeping fields (generated; not part of the API).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protobuf message plumbing generated by protoc-gen-go.
func (m *SoftwareConfig) Reset()         { *m = SoftwareConfig{} }
func (m *SoftwareConfig) String() string { return proto.CompactTextString(m) }
func (*SoftwareConfig) ProtoMessage()    {}
func (*SoftwareConfig) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{10}
}
func (m *SoftwareConfig) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SoftwareConfig.Unmarshal(m, b)
}
func (m *SoftwareConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SoftwareConfig.Marshal(b, m, deterministic)
}
func (dst *SoftwareConfig) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SoftwareConfig.Merge(dst, src)
}
func (m *SoftwareConfig) XXX_Size() int {
	return xxx_messageInfo_SoftwareConfig.Size(m)
}
func (m *SoftwareConfig) XXX_DiscardUnknown() {
	xxx_messageInfo_SoftwareConfig.DiscardUnknown(m)
}

var xxx_messageInfo_SoftwareConfig proto.InternalMessageInfo

// GetImageVersion returns the image version, or "" when m is nil.
func (m *SoftwareConfig) GetImageVersion() string {
	if m != nil {
		return m.ImageVersion
	}
	return ""
}

// GetProperties returns the daemon-config property map, or nil when m is nil.
func (m *SoftwareConfig) GetProperties() map[string]string {
	if m != nil {
		return m.Properties
	}
	return nil
}
|
||
|
|
||
|
// Contains cluster daemon metrics, such as HDFS and YARN stats.
//
// **Beta Feature**: This report is available for testing purposes only. It may
// be changed before final release.
type ClusterMetrics struct {
	// The HDFS metrics.
	HdfsMetrics map[string]int64 `protobuf:"bytes,1,rep,name=hdfs_metrics,json=hdfsMetrics,proto3" json:"hdfs_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
	// The YARN metrics.
	YarnMetrics map[string]int64 `protobuf:"bytes,2,rep,name=yarn_metrics,json=yarnMetrics,proto3" json:"yarn_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"`
	// Internal proto bookkeeping fields (generated; not part of the API).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protobuf message plumbing generated by protoc-gen-go.
func (m *ClusterMetrics) Reset()         { *m = ClusterMetrics{} }
func (m *ClusterMetrics) String() string { return proto.CompactTextString(m) }
func (*ClusterMetrics) ProtoMessage()    {}
func (*ClusterMetrics) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{11}
}
func (m *ClusterMetrics) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ClusterMetrics.Unmarshal(m, b)
}
func (m *ClusterMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ClusterMetrics.Marshal(b, m, deterministic)
}
func (dst *ClusterMetrics) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ClusterMetrics.Merge(dst, src)
}
func (m *ClusterMetrics) XXX_Size() int {
	return xxx_messageInfo_ClusterMetrics.Size(m)
}
func (m *ClusterMetrics) XXX_DiscardUnknown() {
	xxx_messageInfo_ClusterMetrics.DiscardUnknown(m)
}

var xxx_messageInfo_ClusterMetrics proto.InternalMessageInfo

// GetHdfsMetrics returns the HDFS metric map, or nil when m is nil.
func (m *ClusterMetrics) GetHdfsMetrics() map[string]int64 {
	if m != nil {
		return m.HdfsMetrics
	}
	return nil
}

// GetYarnMetrics returns the YARN metric map, or nil when m is nil.
func (m *ClusterMetrics) GetYarnMetrics() map[string]int64 {
	if m != nil {
		return m.YarnMetrics
	}
	return nil
}
|
||
|
|
||
|
// A request to create a cluster.
type CreateClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster to create.
	Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"`
	// Optional. A unique id used to identify the request. If the server
	// receives two [CreateClusterRequest][google.cloud.dataproc.v1beta2.CreateClusterRequest] requests with the same
	// id, then the second request will be ignored and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the backend
	// is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,4,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// Internal proto bookkeeping fields (generated; not part of the API).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protobuf message plumbing generated by protoc-gen-go.
func (m *CreateClusterRequest) Reset()         { *m = CreateClusterRequest{} }
func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) }
func (*CreateClusterRequest) ProtoMessage()    {}
func (*CreateClusterRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{12}
}
func (m *CreateClusterRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CreateClusterRequest.Unmarshal(m, b)
}
func (m *CreateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CreateClusterRequest.Marshal(b, m, deterministic)
}
func (dst *CreateClusterRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CreateClusterRequest.Merge(dst, src)
}
func (m *CreateClusterRequest) XXX_Size() int {
	return xxx_messageInfo_CreateClusterRequest.Size(m)
}
func (m *CreateClusterRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CreateClusterRequest.DiscardUnknown(m)
}

var xxx_messageInfo_CreateClusterRequest proto.InternalMessageInfo

// GetProjectId returns the project ID, or "" when m is nil.
func (m *CreateClusterRequest) GetProjectId() string {
	if m != nil {
		return m.ProjectId
	}
	return ""
}

// GetRegion returns the Dataproc region, or "" when m is nil.
func (m *CreateClusterRequest) GetRegion() string {
	if m != nil {
		return m.Region
	}
	return ""
}

// GetCluster returns the cluster to create, or nil when m is nil.
func (m *CreateClusterRequest) GetCluster() *Cluster {
	if m != nil {
		return m.Cluster
	}
	return nil
}

// GetRequestId returns the idempotency request id, or "" when m is nil.
func (m *CreateClusterRequest) GetRequestId() string {
	if m != nil {
		return m.RequestId
	}
	return ""
}
|
||
|
|
||
|
// A request to update a cluster.
type UpdateClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project the
	// cluster belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,5,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Required. The changes to the cluster.
	Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster,proto3" json:"cluster,omitempty"`
	// Optional. Timeout for graceful YARN decomissioning. Graceful
	// decommissioning allows removing nodes from the cluster without
	// interrupting jobs in progress. Timeout specifies how long to wait for jobs
	// in progress to finish before forcefully removing nodes (and potentially
	// interrupting jobs). Default timeout is 0 (for forceful decommission), and
	// the maximum allowed timeout is 1 day.
	//
	// Only supported on Dataproc image versions 1.2 and higher.
	GracefulDecommissionTimeout *duration.Duration `protobuf:"bytes,6,opt,name=graceful_decommission_timeout,json=gracefulDecommissionTimeout,proto3" json:"graceful_decommission_timeout,omitempty"`
	// Required. Specifies the path, relative to `Cluster`, of
	// the field to update. For example, to change the number of workers
	// in a cluster to 5, the `update_mask` parameter would be
	// specified as `config.worker_config.num_instances`,
	// and the `PATCH` request body would specify the new value, as follows:
	//
	//     {
	//       "config":{
	//         "workerConfig":{
	//           "numInstances":"5"
	//         }
	//       }
	//     }
	//
	// Similarly, to change the number of preemptible workers in a cluster to 5, the
	// `update_mask` parameter would be `config.secondary_worker_config.num_instances`,
	// and the `PATCH` request body would be set as follows:
	//
	//     {
	//       "config":{
	//         "secondaryWorkerConfig":{
	//           "numInstances":"5"
	//         }
	//       }
	//     }
	// <strong>Note:</strong> currently only the following fields can be updated:
	//
	// <table>
	// <tr>
	// <td><strong>Mask</strong></td><td><strong>Purpose</strong></td>
	// </tr>
	// <tr>
	// <td>labels</td><td>Updates labels</td>
	// </tr>
	// <tr>
	// <td>config.worker_config.num_instances</td><td>Resize primary worker group</td>
	// </tr>
	// <tr>
	// <td>config.secondary_worker_config.num_instances</td><td>Resize secondary worker group</td>
	// </tr>
	// <tr>
	// <td>config.lifecycle_config.auto_delete_ttl</td><td>Reset MAX TTL duration</td>
	// </tr>
	// <tr>
	// <td>config.lifecycle_config.auto_delete_time</td><td>Update MAX TTL deletion timestamp</td>
	// </tr>
	// <tr>
	// <td>config.lifecycle_config.idle_delete_ttl</td><td>Update Idle TTL duration</td>
	// </tr>
	// </table>
	UpdateMask *field_mask.FieldMask `protobuf:"bytes,4,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"`
	// Optional. A unique id used to identify the request. If the server
	// receives two [UpdateClusterRequest][google.cloud.dataproc.v1beta2.UpdateClusterRequest] requests with the same
	// id, then the second request will be ignored and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
	// backend is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,7,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// Internal proto bookkeeping fields (generated; not part of the API).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protobuf message plumbing generated by protoc-gen-go.
func (m *UpdateClusterRequest) Reset()         { *m = UpdateClusterRequest{} }
func (m *UpdateClusterRequest) String() string { return proto.CompactTextString(m) }
func (*UpdateClusterRequest) ProtoMessage()    {}
func (*UpdateClusterRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{13}
}
func (m *UpdateClusterRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_UpdateClusterRequest.Unmarshal(m, b)
}
func (m *UpdateClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_UpdateClusterRequest.Marshal(b, m, deterministic)
}
func (dst *UpdateClusterRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_UpdateClusterRequest.Merge(dst, src)
}
func (m *UpdateClusterRequest) XXX_Size() int {
	return xxx_messageInfo_UpdateClusterRequest.Size(m)
}
func (m *UpdateClusterRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_UpdateClusterRequest.DiscardUnknown(m)
}

var xxx_messageInfo_UpdateClusterRequest proto.InternalMessageInfo

// GetProjectId returns the project ID, or "" when m is nil.
func (m *UpdateClusterRequest) GetProjectId() string {
	if m != nil {
		return m.ProjectId
	}
	return ""
}

// GetRegion returns the Dataproc region, or "" when m is nil.
func (m *UpdateClusterRequest) GetRegion() string {
	if m != nil {
		return m.Region
	}
	return ""
}

// GetClusterName returns the cluster name, or "" when m is nil.
func (m *UpdateClusterRequest) GetClusterName() string {
	if m != nil {
		return m.ClusterName
	}
	return ""
}

// GetCluster returns the cluster changes, or nil when m is nil.
func (m *UpdateClusterRequest) GetCluster() *Cluster {
	if m != nil {
		return m.Cluster
	}
	return nil
}

// GetGracefulDecommissionTimeout returns the graceful YARN decommission
// timeout, or nil when m is nil.
func (m *UpdateClusterRequest) GetGracefulDecommissionTimeout() *duration.Duration {
	if m != nil {
		return m.GracefulDecommissionTimeout
	}
	return nil
}

// GetUpdateMask returns the field mask of paths to update, or nil when m is nil.
func (m *UpdateClusterRequest) GetUpdateMask() *field_mask.FieldMask {
	if m != nil {
		return m.UpdateMask
	}
	return nil
}

// GetRequestId returns the idempotency request id, or "" when m is nil.
func (m *UpdateClusterRequest) GetRequestId() string {
	if m != nil {
		return m.RequestId
	}
	return ""
}
|
||
|
|
||
|
// A request to delete a cluster.
type DeleteClusterRequest struct {
	// Required. The ID of the Google Cloud Platform project that the cluster
	// belongs to.
	ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	// Required. The Cloud Dataproc region in which to handle the request.
	Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
	// Required. The cluster name.
	ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
	// Optional. Specifying the `cluster_uuid` means the RPC should fail
	// (with error NOT_FOUND) if cluster with specified UUID does not exist.
	ClusterUuid string `protobuf:"bytes,4,opt,name=cluster_uuid,json=clusterUuid,proto3" json:"cluster_uuid,omitempty"`
	// Optional. A unique id used to identify the request. If the server
	// receives two [DeleteClusterRequest][google.cloud.dataproc.v1beta2.DeleteClusterRequest] requests with the same
	// id, then the second request will be ignored and the
	// first [google.longrunning.Operation][google.longrunning.Operation] created and stored in the
	// backend is returned.
	//
	// It is recommended to always set this value to a
	// [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier).
	//
	// The id must contain only letters (a-z, A-Z), numbers (0-9),
	// underscores (_), and hyphens (-). The maximum length is 40 characters.
	RequestId string `protobuf:"bytes,5,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"`
	// Internal proto bookkeeping fields (generated; not part of the API).
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Standard protobuf message plumbing generated by protoc-gen-go.
func (m *DeleteClusterRequest) Reset()         { *m = DeleteClusterRequest{} }
func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) }
func (*DeleteClusterRequest) ProtoMessage()    {}
func (*DeleteClusterRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_clusters_92d23f68998e82a3, []int{14}
}
func (m *DeleteClusterRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_DeleteClusterRequest.Unmarshal(m, b)
}
func (m *DeleteClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_DeleteClusterRequest.Marshal(b, m, deterministic)
}
func (dst *DeleteClusterRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_DeleteClusterRequest.Merge(dst, src)
}
func (m *DeleteClusterRequest) XXX_Size() int {
	return xxx_messageInfo_DeleteClusterRequest.Size(m)
}
func (m *DeleteClusterRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_DeleteClusterRequest.DiscardUnknown(m)
}

var xxx_messageInfo_DeleteClusterRequest proto.InternalMessageInfo
|
||
|
|
||
|
func (m *DeleteClusterRequest) GetProjectId() string {
|
||
|
if m != nil {
|
||
|
return m.ProjectId
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *DeleteClusterRequest) GetRegion() string {
|
||
|
if m != nil {
|
||
|
return m.Region
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *DeleteClusterRequest) GetClusterName() string {
|
||
|
if m != nil {
|
||
|
return m.ClusterName
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *DeleteClusterRequest) GetClusterUuid() string {
|
||
|
if m != nil {
|
||
|
return m.ClusterUuid
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *DeleteClusterRequest) GetRequestId() string {
|
||
|
if m != nil {
|
||
|
return m.RequestId
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
// Request to get the resource representation for a cluster in a project.
|
||
|
type GetClusterRequest struct {
|
||
|
// Required. The ID of the Google Cloud Platform project that the cluster
|
||
|
// belongs to.
|
||
|
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
|
||
|
// Required. The Cloud Dataproc region in which to handle the request.
|
||
|
Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
|
||
|
// Required. The cluster name.
|
||
|
ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
|
||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||
|
XXX_unrecognized []byte `json:"-"`
|
||
|
XXX_sizecache int32 `json:"-"`
|
||
|
}
|
||
|
|
||
|
func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} }
|
||
|
func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) }
|
||
|
func (*GetClusterRequest) ProtoMessage() {}
|
||
|
func (*GetClusterRequest) Descriptor() ([]byte, []int) {
|
||
|
return fileDescriptor_clusters_92d23f68998e82a3, []int{15}
|
||
|
}
|
||
|
func (m *GetClusterRequest) XXX_Unmarshal(b []byte) error {
|
||
|
return xxx_messageInfo_GetClusterRequest.Unmarshal(m, b)
|
||
|
}
|
||
|
func (m *GetClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||
|
return xxx_messageInfo_GetClusterRequest.Marshal(b, m, deterministic)
|
||
|
}
|
||
|
func (dst *GetClusterRequest) XXX_Merge(src proto.Message) {
|
||
|
xxx_messageInfo_GetClusterRequest.Merge(dst, src)
|
||
|
}
|
||
|
func (m *GetClusterRequest) XXX_Size() int {
|
||
|
return xxx_messageInfo_GetClusterRequest.Size(m)
|
||
|
}
|
||
|
func (m *GetClusterRequest) XXX_DiscardUnknown() {
|
||
|
xxx_messageInfo_GetClusterRequest.DiscardUnknown(m)
|
||
|
}
|
||
|
|
||
|
var xxx_messageInfo_GetClusterRequest proto.InternalMessageInfo
|
||
|
|
||
|
func (m *GetClusterRequest) GetProjectId() string {
|
||
|
if m != nil {
|
||
|
return m.ProjectId
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *GetClusterRequest) GetRegion() string {
|
||
|
if m != nil {
|
||
|
return m.Region
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *GetClusterRequest) GetClusterName() string {
|
||
|
if m != nil {
|
||
|
return m.ClusterName
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
// A request to list the clusters in a project.
|
||
|
type ListClustersRequest struct {
|
||
|
// Required. The ID of the Google Cloud Platform project that the cluster
|
||
|
// belongs to.
|
||
|
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
|
||
|
// Required. The Cloud Dataproc region in which to handle the request.
|
||
|
Region string `protobuf:"bytes,4,opt,name=region,proto3" json:"region,omitempty"`
|
||
|
// Optional. A filter constraining the clusters to list. Filters are
|
||
|
// case-sensitive and have the following syntax:
|
||
|
//
|
||
|
// field = value [AND [field = value]] ...
|
||
|
//
|
||
|
// where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`,
|
||
|
// and `[KEY]` is a label key. **value** can be `*` to match all values.
|
||
|
// `status.state` can be one of the following: `ACTIVE`, `INACTIVE`,
|
||
|
// `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE`
|
||
|
// contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE`
|
||
|
// contains the `DELETING` and `ERROR` states.
|
||
|
// `clusterName` is the name of the cluster provided at creation time.
|
||
|
// Only the logical `AND` operator is supported; space-separated items are
|
||
|
// treated as having an implicit `AND` operator.
|
||
|
//
|
||
|
// Example filter:
|
||
|
//
|
||
|
// status.state = ACTIVE AND clusterName = mycluster
|
||
|
// AND labels.env = staging AND labels.starred = *
|
||
|
Filter string `protobuf:"bytes,5,opt,name=filter,proto3" json:"filter,omitempty"`
|
||
|
// Optional. The standard List page size.
|
||
|
PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"`
|
||
|
// Optional. The standard List page token.
|
||
|
PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"`
|
||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||
|
XXX_unrecognized []byte `json:"-"`
|
||
|
XXX_sizecache int32 `json:"-"`
|
||
|
}
|
||
|
|
||
|
func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} }
|
||
|
func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) }
|
||
|
func (*ListClustersRequest) ProtoMessage() {}
|
||
|
func (*ListClustersRequest) Descriptor() ([]byte, []int) {
|
||
|
return fileDescriptor_clusters_92d23f68998e82a3, []int{16}
|
||
|
}
|
||
|
func (m *ListClustersRequest) XXX_Unmarshal(b []byte) error {
|
||
|
return xxx_messageInfo_ListClustersRequest.Unmarshal(m, b)
|
||
|
}
|
||
|
func (m *ListClustersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||
|
return xxx_messageInfo_ListClustersRequest.Marshal(b, m, deterministic)
|
||
|
}
|
||
|
func (dst *ListClustersRequest) XXX_Merge(src proto.Message) {
|
||
|
xxx_messageInfo_ListClustersRequest.Merge(dst, src)
|
||
|
}
|
||
|
func (m *ListClustersRequest) XXX_Size() int {
|
||
|
return xxx_messageInfo_ListClustersRequest.Size(m)
|
||
|
}
|
||
|
func (m *ListClustersRequest) XXX_DiscardUnknown() {
|
||
|
xxx_messageInfo_ListClustersRequest.DiscardUnknown(m)
|
||
|
}
|
||
|
|
||
|
var xxx_messageInfo_ListClustersRequest proto.InternalMessageInfo
|
||
|
|
||
|
func (m *ListClustersRequest) GetProjectId() string {
|
||
|
if m != nil {
|
||
|
return m.ProjectId
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *ListClustersRequest) GetRegion() string {
|
||
|
if m != nil {
|
||
|
return m.Region
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *ListClustersRequest) GetFilter() string {
|
||
|
if m != nil {
|
||
|
return m.Filter
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *ListClustersRequest) GetPageSize() int32 {
|
||
|
if m != nil {
|
||
|
return m.PageSize
|
||
|
}
|
||
|
return 0
|
||
|
}
|
||
|
|
||
|
func (m *ListClustersRequest) GetPageToken() string {
|
||
|
if m != nil {
|
||
|
return m.PageToken
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
// The list of all clusters in a project.
|
||
|
type ListClustersResponse struct {
|
||
|
// Output only. The clusters in the project.
|
||
|
Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters,proto3" json:"clusters,omitempty"`
|
||
|
// Output only. This token is included in the response if there are more
|
||
|
// results to fetch. To fetch additional results, provide this value as the
|
||
|
// `page_token` in a subsequent <code>ListClustersRequest</code>.
|
||
|
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"`
|
||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||
|
XXX_unrecognized []byte `json:"-"`
|
||
|
XXX_sizecache int32 `json:"-"`
|
||
|
}
|
||
|
|
||
|
func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} }
|
||
|
func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) }
|
||
|
func (*ListClustersResponse) ProtoMessage() {}
|
||
|
func (*ListClustersResponse) Descriptor() ([]byte, []int) {
|
||
|
return fileDescriptor_clusters_92d23f68998e82a3, []int{17}
|
||
|
}
|
||
|
func (m *ListClustersResponse) XXX_Unmarshal(b []byte) error {
|
||
|
return xxx_messageInfo_ListClustersResponse.Unmarshal(m, b)
|
||
|
}
|
||
|
func (m *ListClustersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||
|
return xxx_messageInfo_ListClustersResponse.Marshal(b, m, deterministic)
|
||
|
}
|
||
|
func (dst *ListClustersResponse) XXX_Merge(src proto.Message) {
|
||
|
xxx_messageInfo_ListClustersResponse.Merge(dst, src)
|
||
|
}
|
||
|
func (m *ListClustersResponse) XXX_Size() int {
|
||
|
return xxx_messageInfo_ListClustersResponse.Size(m)
|
||
|
}
|
||
|
func (m *ListClustersResponse) XXX_DiscardUnknown() {
|
||
|
xxx_messageInfo_ListClustersResponse.DiscardUnknown(m)
|
||
|
}
|
||
|
|
||
|
var xxx_messageInfo_ListClustersResponse proto.InternalMessageInfo
|
||
|
|
||
|
func (m *ListClustersResponse) GetClusters() []*Cluster {
|
||
|
if m != nil {
|
||
|
return m.Clusters
|
||
|
}
|
||
|
return nil
|
||
|
}
|
||
|
|
||
|
func (m *ListClustersResponse) GetNextPageToken() string {
|
||
|
if m != nil {
|
||
|
return m.NextPageToken
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
// A request to collect cluster diagnostic information.
|
||
|
type DiagnoseClusterRequest struct {
|
||
|
// Required. The ID of the Google Cloud Platform project that the cluster
|
||
|
// belongs to.
|
||
|
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
|
||
|
// Required. The Cloud Dataproc region in which to handle the request.
|
||
|
Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"`
|
||
|
// Required. The cluster name.
|
||
|
ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName,proto3" json:"cluster_name,omitempty"`
|
||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||
|
XXX_unrecognized []byte `json:"-"`
|
||
|
XXX_sizecache int32 `json:"-"`
|
||
|
}
|
||
|
|
||
|
func (m *DiagnoseClusterRequest) Reset() { *m = DiagnoseClusterRequest{} }
|
||
|
func (m *DiagnoseClusterRequest) String() string { return proto.CompactTextString(m) }
|
||
|
func (*DiagnoseClusterRequest) ProtoMessage() {}
|
||
|
func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int) {
|
||
|
return fileDescriptor_clusters_92d23f68998e82a3, []int{18}
|
||
|
}
|
||
|
func (m *DiagnoseClusterRequest) XXX_Unmarshal(b []byte) error {
|
||
|
return xxx_messageInfo_DiagnoseClusterRequest.Unmarshal(m, b)
|
||
|
}
|
||
|
func (m *DiagnoseClusterRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||
|
return xxx_messageInfo_DiagnoseClusterRequest.Marshal(b, m, deterministic)
|
||
|
}
|
||
|
func (dst *DiagnoseClusterRequest) XXX_Merge(src proto.Message) {
|
||
|
xxx_messageInfo_DiagnoseClusterRequest.Merge(dst, src)
|
||
|
}
|
||
|
func (m *DiagnoseClusterRequest) XXX_Size() int {
|
||
|
return xxx_messageInfo_DiagnoseClusterRequest.Size(m)
|
||
|
}
|
||
|
func (m *DiagnoseClusterRequest) XXX_DiscardUnknown() {
|
||
|
xxx_messageInfo_DiagnoseClusterRequest.DiscardUnknown(m)
|
||
|
}
|
||
|
|
||
|
var xxx_messageInfo_DiagnoseClusterRequest proto.InternalMessageInfo
|
||
|
|
||
|
func (m *DiagnoseClusterRequest) GetProjectId() string {
|
||
|
if m != nil {
|
||
|
return m.ProjectId
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *DiagnoseClusterRequest) GetRegion() string {
|
||
|
if m != nil {
|
||
|
return m.Region
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func (m *DiagnoseClusterRequest) GetClusterName() string {
|
||
|
if m != nil {
|
||
|
return m.ClusterName
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
// The location of diagnostic output.
|
||
|
type DiagnoseClusterResults struct {
|
||
|
// Output only. The Cloud Storage URI of the diagnostic output.
|
||
|
// The output report is a plain text file with a summary of collected
|
||
|
// diagnostics.
|
||
|
OutputUri string `protobuf:"bytes,1,opt,name=output_uri,json=outputUri,proto3" json:"output_uri,omitempty"`
|
||
|
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||
|
XXX_unrecognized []byte `json:"-"`
|
||
|
XXX_sizecache int32 `json:"-"`
|
||
|
}
|
||
|
|
||
|
func (m *DiagnoseClusterResults) Reset() { *m = DiagnoseClusterResults{} }
|
||
|
func (m *DiagnoseClusterResults) String() string { return proto.CompactTextString(m) }
|
||
|
func (*DiagnoseClusterResults) ProtoMessage() {}
|
||
|
func (*DiagnoseClusterResults) Descriptor() ([]byte, []int) {
|
||
|
return fileDescriptor_clusters_92d23f68998e82a3, []int{19}
|
||
|
}
|
||
|
func (m *DiagnoseClusterResults) XXX_Unmarshal(b []byte) error {
|
||
|
return xxx_messageInfo_DiagnoseClusterResults.Unmarshal(m, b)
|
||
|
}
|
||
|
func (m *DiagnoseClusterResults) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||
|
return xxx_messageInfo_DiagnoseClusterResults.Marshal(b, m, deterministic)
|
||
|
}
|
||
|
func (dst *DiagnoseClusterResults) XXX_Merge(src proto.Message) {
|
||
|
xxx_messageInfo_DiagnoseClusterResults.Merge(dst, src)
|
||
|
}
|
||
|
func (m *DiagnoseClusterResults) XXX_Size() int {
|
||
|
return xxx_messageInfo_DiagnoseClusterResults.Size(m)
|
||
|
}
|
||
|
func (m *DiagnoseClusterResults) XXX_DiscardUnknown() {
|
||
|
xxx_messageInfo_DiagnoseClusterResults.DiscardUnknown(m)
|
||
|
}
|
||
|
|
||
|
var xxx_messageInfo_DiagnoseClusterResults proto.InternalMessageInfo
|
||
|
|
||
|
func (m *DiagnoseClusterResults) GetOutputUri() string {
|
||
|
if m != nil {
|
||
|
return m.OutputUri
|
||
|
}
|
||
|
return ""
|
||
|
}
|
||
|
|
||
|
func init() {
|
||
|
proto.RegisterType((*Cluster)(nil), "google.cloud.dataproc.v1beta2.Cluster")
|
||
|
proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.Cluster.LabelsEntry")
|
||
|
proto.RegisterType((*ClusterConfig)(nil), "google.cloud.dataproc.v1beta2.ClusterConfig")
|
||
|
proto.RegisterType((*GceClusterConfig)(nil), "google.cloud.dataproc.v1beta2.GceClusterConfig")
|
||
|
proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.GceClusterConfig.MetadataEntry")
|
||
|
proto.RegisterType((*InstanceGroupConfig)(nil), "google.cloud.dataproc.v1beta2.InstanceGroupConfig")
|
||
|
proto.RegisterType((*ManagedGroupConfig)(nil), "google.cloud.dataproc.v1beta2.ManagedGroupConfig")
|
||
|
proto.RegisterType((*AcceleratorConfig)(nil), "google.cloud.dataproc.v1beta2.AcceleratorConfig")
|
||
|
proto.RegisterType((*DiskConfig)(nil), "google.cloud.dataproc.v1beta2.DiskConfig")
|
||
|
proto.RegisterType((*LifecycleConfig)(nil), "google.cloud.dataproc.v1beta2.LifecycleConfig")
|
||
|
proto.RegisterType((*NodeInitializationAction)(nil), "google.cloud.dataproc.v1beta2.NodeInitializationAction")
|
||
|
proto.RegisterType((*ClusterStatus)(nil), "google.cloud.dataproc.v1beta2.ClusterStatus")
|
||
|
proto.RegisterType((*SoftwareConfig)(nil), "google.cloud.dataproc.v1beta2.SoftwareConfig")
|
||
|
proto.RegisterMapType((map[string]string)(nil), "google.cloud.dataproc.v1beta2.SoftwareConfig.PropertiesEntry")
|
||
|
proto.RegisterType((*ClusterMetrics)(nil), "google.cloud.dataproc.v1beta2.ClusterMetrics")
|
||
|
proto.RegisterMapType((map[string]int64)(nil), "google.cloud.dataproc.v1beta2.ClusterMetrics.HdfsMetricsEntry")
|
||
|
proto.RegisterMapType((map[string]int64)(nil), "google.cloud.dataproc.v1beta2.ClusterMetrics.YarnMetricsEntry")
|
||
|
proto.RegisterType((*CreateClusterRequest)(nil), "google.cloud.dataproc.v1beta2.CreateClusterRequest")
|
||
|
proto.RegisterType((*UpdateClusterRequest)(nil), "google.cloud.dataproc.v1beta2.UpdateClusterRequest")
|
||
|
proto.RegisterType((*DeleteClusterRequest)(nil), "google.cloud.dataproc.v1beta2.DeleteClusterRequest")
|
||
|
proto.RegisterType((*GetClusterRequest)(nil), "google.cloud.dataproc.v1beta2.GetClusterRequest")
|
||
|
proto.RegisterType((*ListClustersRequest)(nil), "google.cloud.dataproc.v1beta2.ListClustersRequest")
|
||
|
proto.RegisterType((*ListClustersResponse)(nil), "google.cloud.dataproc.v1beta2.ListClustersResponse")
|
||
|
proto.RegisterType((*DiagnoseClusterRequest)(nil), "google.cloud.dataproc.v1beta2.DiagnoseClusterRequest")
|
||
|
proto.RegisterType((*DiagnoseClusterResults)(nil), "google.cloud.dataproc.v1beta2.DiagnoseClusterResults")
|
||
|
proto.RegisterEnum("google.cloud.dataproc.v1beta2.ClusterStatus_State", ClusterStatus_State_name, ClusterStatus_State_value)
|
||
|
proto.RegisterEnum("google.cloud.dataproc.v1beta2.ClusterStatus_Substate", ClusterStatus_Substate_name, ClusterStatus_Substate_value)
|
||
|
}
|
||
|
|
||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||
|
var _ context.Context
|
||
|
var _ grpc.ClientConn
|
||
|
|
||
|
// This is a compile-time assertion to ensure that this generated file
|
||
|
// is compatible with the grpc package it is being compiled against.
|
||
|
const _ = grpc.SupportPackageIsVersion4
|
||
|
|
||
|
// ClusterControllerClient is the client API for ClusterController service.
|
||
|
//
|
||
|
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
|
||
|
type ClusterControllerClient interface {
|
||
|
// Creates a cluster in a project.
|
||
|
CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
|
||
|
// Updates a cluster in a project.
|
||
|
UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
|
||
|
// Deletes a cluster in a project.
|
||
|
DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
|
||
|
// Gets the resource representation for a cluster in a project.
|
||
|
GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error)
|
||
|
// Lists all regions/{region}/clusters in a project.
|
||
|
ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error)
|
||
|
// Gets cluster diagnostic information.
|
||
|
// After the operation completes, the Operation.response field
|
||
|
// contains `DiagnoseClusterOutputLocation`.
|
||
|
DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error)
|
||
|
}
|
||
|
|
||
|
type clusterControllerClient struct {
|
||
|
cc *grpc.ClientConn
|
||
|
}
|
||
|
|
||
|
func NewClusterControllerClient(cc *grpc.ClientConn) ClusterControllerClient {
|
||
|
return &clusterControllerClient{cc}
|
||
|
}
|
||
|
|
||
|
func (c *clusterControllerClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
|
||
|
out := new(longrunning.Operation)
|
||
|
err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster", in, out, opts...)
|
||
|
if err != nil {
|
||
|
return nil, err
|
||
|
}
|
||
|
return out, nil
|
||
|
}
|
||
|
|
||
|
func (c *clusterControllerClient) UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
|
||
|
out := new(longrunning.Operation)
|
||
|
err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster", in, out, opts...)
|
||
|
if err != nil {
|
||
|
return nil, err
|
||
|
}
|
||
|
return out, nil
|
||
|
}
|
||
|
|
||
|
func (c *clusterControllerClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
|
||
|
out := new(longrunning.Operation)
|
||
|
err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster", in, out, opts...)
|
||
|
if err != nil {
|
||
|
return nil, err
|
||
|
}
|
||
|
return out, nil
|
||
|
}
|
||
|
|
||
|
func (c *clusterControllerClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) {
|
||
|
out := new(Cluster)
|
||
|
err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.ClusterController/GetCluster", in, out, opts...)
|
||
|
if err != nil {
|
||
|
return nil, err
|
||
|
}
|
||
|
return out, nil
|
||
|
}
|
||
|
|
||
|
func (c *clusterControllerClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) {
|
||
|
out := new(ListClustersResponse)
|
||
|
err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.ClusterController/ListClusters", in, out, opts...)
|
||
|
if err != nil {
|
||
|
return nil, err
|
||
|
}
|
||
|
return out, nil
|
||
|
}
|
||
|
|
||
|
func (c *clusterControllerClient) DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) {
|
||
|
out := new(longrunning.Operation)
|
||
|
err := c.cc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster", in, out, opts...)
|
||
|
if err != nil {
|
||
|
return nil, err
|
||
|
}
|
||
|
return out, nil
|
||
|
}
|
||
|
|
||
|
// ClusterControllerServer is the server API for ClusterController service.
|
||
|
type ClusterControllerServer interface {
|
||
|
// Creates a cluster in a project.
|
||
|
CreateCluster(context.Context, *CreateClusterRequest) (*longrunning.Operation, error)
|
||
|
// Updates a cluster in a project.
|
||
|
UpdateCluster(context.Context, *UpdateClusterRequest) (*longrunning.Operation, error)
|
||
|
// Deletes a cluster in a project.
|
||
|
DeleteCluster(context.Context, *DeleteClusterRequest) (*longrunning.Operation, error)
|
||
|
// Gets the resource representation for a cluster in a project.
|
||
|
GetCluster(context.Context, *GetClusterRequest) (*Cluster, error)
|
||
|
// Lists all regions/{region}/clusters in a project.
|
||
|
ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error)
|
||
|
// Gets cluster diagnostic information.
|
||
|
// After the operation completes, the Operation.response field
|
||
|
// contains `DiagnoseClusterOutputLocation`.
|
||
|
DiagnoseCluster(context.Context, *DiagnoseClusterRequest) (*longrunning.Operation, error)
|
||
|
}
|
||
|
|
||
|
func RegisterClusterControllerServer(s *grpc.Server, srv ClusterControllerServer) {
|
||
|
s.RegisterService(&_ClusterController_serviceDesc, srv)
|
||
|
}
|
||
|
|
||
|
func _ClusterController_CreateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||
|
in := new(CreateClusterRequest)
|
||
|
if err := dec(in); err != nil {
|
||
|
return nil, err
|
||
|
}
|
||
|
if interceptor == nil {
|
||
|
return srv.(ClusterControllerServer).CreateCluster(ctx, in)
|
||
|
}
|
||
|
info := &grpc.UnaryServerInfo{
|
||
|
Server: srv,
|
||
|
FullMethod: "/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster",
|
||
|
}
|
||
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||
|
return srv.(ClusterControllerServer).CreateCluster(ctx, req.(*CreateClusterRequest))
|
||
|
}
|
||
|
return interceptor(ctx, in, info, handler)
|
||
|
}
|
||
|
|
||
|
func _ClusterController_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||
|
in := new(UpdateClusterRequest)
|
||
|
if err := dec(in); err != nil {
|
||
|
return nil, err
|
||
|
}
|
||
|
if interceptor == nil {
|
||
|
return srv.(ClusterControllerServer).UpdateCluster(ctx, in)
|
||
|
}
|
||
|
info := &grpc.UnaryServerInfo{
|
||
|
Server: srv,
|
||
|
FullMethod: "/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster",
|
||
|
}
|
||
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||
|
return srv.(ClusterControllerServer).UpdateCluster(ctx, req.(*UpdateClusterRequest))
|
||
|
}
|
||
|
return interceptor(ctx, in, info, handler)
|
||
|
}
|
||
|
|
||
|
func _ClusterController_DeleteCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||
|
in := new(DeleteClusterRequest)
|
||
|
if err := dec(in); err != nil {
|
||
|
return nil, err
|
||
|
}
|
||
|
if interceptor == nil {
|
||
|
return srv.(ClusterControllerServer).DeleteCluster(ctx, in)
|
||
|
}
|
||
|
info := &grpc.UnaryServerInfo{
|
||
|
Server: srv,
|
||
|
FullMethod: "/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster",
|
||
|
}
|
||
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||
|
return srv.(ClusterControllerServer).DeleteCluster(ctx, req.(*DeleteClusterRequest))
|
||
|
}
|
||
|
return interceptor(ctx, in, info, handler)
|
||
|
}
|
||
|
|
||
|
func _ClusterController_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||
|
in := new(GetClusterRequest)
|
||
|
if err := dec(in); err != nil {
|
||
|
return nil, err
|
||
|
}
|
||
|
if interceptor == nil {
|
||
|
return srv.(ClusterControllerServer).GetCluster(ctx, in)
|
||
|
}
|
||
|
info := &grpc.UnaryServerInfo{
|
||
|
Server: srv,
|
||
|
FullMethod: "/google.cloud.dataproc.v1beta2.ClusterController/GetCluster",
|
||
|
}
|
||
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||
|
return srv.(ClusterControllerServer).GetCluster(ctx, req.(*GetClusterRequest))
|
||
|
}
|
||
|
return interceptor(ctx, in, info, handler)
|
||
|
}
|
||
|
|
||
|
func _ClusterController_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||
|
in := new(ListClustersRequest)
|
||
|
if err := dec(in); err != nil {
|
||
|
return nil, err
|
||
|
}
|
||
|
if interceptor == nil {
|
||
|
return srv.(ClusterControllerServer).ListClusters(ctx, in)
|
||
|
}
|
||
|
info := &grpc.UnaryServerInfo{
|
||
|
Server: srv,
|
||
|
FullMethod: "/google.cloud.dataproc.v1beta2.ClusterController/ListClusters",
|
||
|
}
|
||
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||
|
return srv.(ClusterControllerServer).ListClusters(ctx, req.(*ListClustersRequest))
|
||
|
}
|
||
|
return interceptor(ctx, in, info, handler)
|
||
|
}
|
||
|
|
||
|
func _ClusterController_DiagnoseCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||
|
in := new(DiagnoseClusterRequest)
|
||
|
if err := dec(in); err != nil {
|
||
|
return nil, err
|
||
|
}
|
||
|
if interceptor == nil {
|
||
|
return srv.(ClusterControllerServer).DiagnoseCluster(ctx, in)
|
||
|
}
|
||
|
info := &grpc.UnaryServerInfo{
|
||
|
Server: srv,
|
||
|
FullMethod: "/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster",
|
||
|
}
|
||
|
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||
|
return srv.(ClusterControllerServer).DiagnoseCluster(ctx, req.(*DiagnoseClusterRequest))
|
||
|
}
|
||
|
return interceptor(ctx, in, info, handler)
|
||
|
}
|
||
|
|
||
|
var _ClusterController_serviceDesc = grpc.ServiceDesc{
|
||
|
ServiceName: "google.cloud.dataproc.v1beta2.ClusterController",
|
||
|
HandlerType: (*ClusterControllerServer)(nil),
|
||
|
Methods: []grpc.MethodDesc{
|
||
|
{
|
||
|
MethodName: "CreateCluster",
|
||
|
Handler: _ClusterController_CreateCluster_Handler,
|
||
|
},
|
||
|
{
|
||
|
MethodName: "UpdateCluster",
|
||
|
Handler: _ClusterController_UpdateCluster_Handler,
|
||
|
},
|
||
|
{
|
||
|
MethodName: "DeleteCluster",
|
||
|
Handler: _ClusterController_DeleteCluster_Handler,
|
||
|
},
|
||
|
{
|
||
|
MethodName: "GetCluster",
|
||
|
Handler: _ClusterController_GetCluster_Handler,
|
||
|
},
|
||
|
{
|
||
|
MethodName: "ListClusters",
|
||
|
Handler: _ClusterController_ListClusters_Handler,
|
||
|
},
|
||
|
{
|
||
|
MethodName: "DiagnoseCluster",
|
||
|
Handler: _ClusterController_DiagnoseCluster_Handler,
|
||
|
},
|
||
|
},
|
||
|
Streams: []grpc.StreamDesc{},
|
||
|
Metadata: "google/cloud/dataproc/v1beta2/clusters.proto",
|
||
|
}
|
||
|
|
||
|
func init() {
|
||
|
proto.RegisterFile("google/cloud/dataproc/v1beta2/clusters.proto", fileDescriptor_clusters_92d23f68998e82a3)
|
||
|
}
|
||
|
|
||
|
var fileDescriptor_clusters_92d23f68998e82a3 = []byte{
|
||
|
// 2165 bytes of a gzipped FileDescriptorProto
|
||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0x4f, 0x73, 0x1c, 0x47,
|
||
|
0x15, 0xf7, 0x68, 0xb5, 0xd2, 0xea, 0xed, 0x1f, 0xad, 0x3a, 0x8a, 0xd8, 0x28, 0x98, 0x38, 0x13,
|
||
|
0x08, 0x4a, 0x08, 0xbb, 0x89, 0x4c, 0x2a, 0xc1, 0xc6, 0xa9, 0xc8, 0x92, 0x2c, 0x29, 0xc8, 0xb2,
|
||
|
0x98, 0xdd, 0xb5, 0x31, 0x94, 0x6b, 0xaa, 0x77, 0xa6, 0x77, 0xdd, 0xd1, 0xfc, 0x63, 0xba, 0xc7,
|
||
|
0x89, 0xec, 0x72, 0x15, 0x15, 0x4e, 0x14, 0x47, 0x3e, 0x00, 0x39, 0x73, 0xc8, 0x81, 0xa2, 0x8a,
|
||
|
0xe2, 0xc0, 0x8d, 0x6f, 0x10, 0x8a, 0x4f, 0xc0, 0x81, 0x0b, 0x77, 0x8e, 0x54, 0xff, 0x99, 0xdd,
|
||
|
0x99, 0x95, 0xec, 0x91, 0x84, 0x2b, 0x27, 0xed, 0xbc, 0x7e, 0xef, 0xf7, 0x7e, 0xfd, 0xfa, 0xf5,
|
||
|
0xeb, 0xd7, 0x2d, 0x78, 0x67, 0x14, 0x86, 0x23, 0x8f, 0x74, 0x1c, 0x2f, 0x4c, 0xdc, 0x8e, 0x8b,
|
||
|
0x39, 0x8e, 0xe2, 0xd0, 0xe9, 0x3c, 0x7a, 0x6f, 0x40, 0x38, 0x5e, 0xef, 0x38, 0x5e, 0xc2, 0x38,
|
||
|
0x89, 0x59, 0x3b, 0x8a, 0x43, 0x1e, 0xa2, 0xcb, 0x4a, 0xbb, 0x2d, 0xb5, 0xdb, 0xa9, 0x76, 0x5b,
|
||
|
0x6b, 0xaf, 0x7e, 0x5b, 0x83, 0xe1, 0x88, 0x76, 0x70, 0x10, 0x84, 0x1c, 0x73, 0x1a, 0x06, 0xda,
|
||
|
0x78, 0xf5, 0xed, 0xe7, 0xbb, 0x62, 0x0f, 0x71, 0x4c, 0x5c, 0xad, 0xfb, 0x86, 0xd6, 0xf5, 0xc2,
|
||
|
0x60, 0x14, 0x27, 0x41, 0x40, 0x83, 0x51, 0x27, 0x8c, 0x48, 0x9c, 0x03, 0xfc, 0x8e, 0x56, 0x92,
|
||
|
0x5f, 0x83, 0x64, 0xd8, 0x71, 0x13, 0xa5, 0xa0, 0xc7, 0xaf, 0x4c, 0x8f, 0x0f, 0x29, 0xf1, 0x5c,
|
||
|
0xdb, 0xc7, 0xec, 0x48, 0x6b, 0xbc, 0x36, 0xad, 0xc1, 0xa9, 0x4f, 0x18, 0xc7, 0x7e, 0xa4, 0x14,
|
||
|
0xcc, 0x3f, 0xcc, 0xc2, 0xfc, 0xa6, 0x8a, 0x01, 0xba, 0x0c, 0x10, 0xc5, 0xe1, 0xa7, 0xc4, 0xe1,
|
||
|
0x36, 0x75, 0x5b, 0xc6, 0x15, 0x63, 0x6d, 0xc1, 0x5a, 0xd0, 0x92, 0x3d, 0x17, 0xbd, 0x0e, 0x35,
|
||
|
0x1d, 0x2d, 0x3b, 0xc0, 0x3e, 0x69, 0xcd, 0x48, 0x85, 0xaa, 0x96, 0x1d, 0x60, 0x9f, 0xa0, 0x2d,
|
||
|
0x98, 0x73, 0xc2, 0x60, 0x48, 0x47, 0xad, 0xd2, 0x15, 0x63, 0xad, 0xba, 0xfe, 0x4e, 0xfb, 0xb9,
|
||
|
0xf1, 0x6c, 0x6b, 0xcf, 0x9b, 0xd2, 0xc6, 0xd2, 0xb6, 0xe8, 0x13, 0x98, 0xf3, 0xf0, 0x80, 0x78,
|
||
|
0xac, 0x55, 0xb9, 0x52, 0x5a, 0xab, 0xae, 0xaf, 0x9f, 0x0d, 0xa5, 0xbd, 0x2f, 0x8d, 0xb6, 0x03,
|
||
|
0x1e, 0x1f, 0x5b, 0x1a, 0x41, 0x30, 0x62, 0x1c, 0xf3, 0x84, 0xb5, 0x66, 0xcf, 0xc3, 0xa8, 0x2b,
|
||
|
0x6d, 0x2c, 0x6d, 0x8b, 0xba, 0xd0, 0x50, 0xbf, 0xec, 0x87, 0x94, 0xf1, 0x30, 0x3e, 0x6e, 0xcd,
|
||
|
0x4b, 0x66, 0xe7, 0x43, 0xab, 0x2b, 0x8c, 0x5d, 0x05, 0x91, 0x8d, 0x67, 0x92, 0x50, 0xb7, 0x35,
|
||
|
0x97, 0x8b, 0x67, 0x3f, 0xa1, 0x2e, 0xda, 0x81, 0x79, 0x9f, 0xf0, 0x98, 0x3a, 0xac, 0xb5, 0x20,
|
||
|
0xe9, 0xff, 0xf0, 0x6c, 0x0e, 0x6f, 0x2b, 0x23, 0x2b, 0xb5, 0x5e, 0xfd, 0x31, 0x54, 0x33, 0xd1,
|
||
|
0x41, 0x4d, 0x28, 0x1d, 0x91, 0x63, 0xbd, 0xc4, 0xe2, 0x27, 0x5a, 0x86, 0xf2, 0x23, 0xec, 0x25,
|
||
|
0xe9, 0xaa, 0xaa, 0x8f, 0x6b, 0x33, 0x1f, 0x1a, 0xe6, 0x3f, 0xca, 0x50, 0xcf, 0xad, 0x13, 0x7a,
|
||
|
0x03, 0xea, 0x6a, 0xa5, 0xec, 0x41, 0xe2, 0x1c, 0x11, 0xae, 0x71, 0x6a, 0x4a, 0x78, 0x53, 0xca,
|
||
|
0xd0, 0x03, 0x40, 0x23, 0x87, 0xd8, 0xe9, 0x0c, 0x75, 0x5a, 0x54, 0xe4, 0x2c, 0x3a, 0x05, 0xb3,
|
||
|
0xd8, 0x71, 0x48, 0x3e, 0x33, 0x9a, 0xa3, 0x29, 0x09, 0xba, 0x07, 0x75, 0x1f, 0x67, 0x91, 0x55,
|
||
|
0x7c, 0x8a, 0x52, 0x65, 0x2f, 0x60, 0x1c, 0x07, 0x0e, 0xd9, 0x89, 0xc3, 0x24, 0xd2, 0xe0, 0x35,
|
||
|
0x05, 0x34, 0x01, 0xfe, 0x2c, 0x8c, 0x8f, 0x26, 0xc0, 0x70, 0x71, 0x60, 0x05, 0xa4, 0x81, 0x3f,
|
||
|
0x85, 0x6f, 0x31, 0xe2, 0x84, 0x81, 0x8b, 0xe3, 0x63, 0x3b, 0xef, 0xa2, 0x76, 0x61, 0x17, 0x2f,
|
||
|
0x8f, 0x21, 0xef, 0x65, 0x7d, 0xdd, 0x85, 0x45, 0x16, 0x0e, 0xf9, 0x67, 0x38, 0x26, 0xa9, 0x8f,
|
||
|
0xfa, 0x99, 0xf2, 0xa7, 0xab, 0xad, 0x34, 0x7c, 0x83, 0xe5, 0xbe, 0xd1, 0x7d, 0x68, 0x7a, 0x74,
|
||
|
0x48, 0x9c, 0x63, 0xc7, 0x1b, 0x03, 0x37, 0x24, 0x70, 0xbb, 0x00, 0x78, 0x3f, 0x35, 0xd3, 0xc8,
|
||
|
0x8b, 0x5e, 0x5e, 0x80, 0x02, 0x58, 0xa1, 0x01, 0xe5, 0x14, 0x7b, 0xf4, 0xb1, 0xac, 0x71, 0x36,
|
||
|
0x76, 0x64, 0x2d, 0x6c, 0x55, 0xe5, 0x56, 0xfb, 0xa0, 0xc0, 0xc1, 0x41, 0xe8, 0x92, 0xbd, 0x1c,
|
||
|
0xc0, 0x86, 0xb4, 0xb7, 0x5e, 0xa6, 0xa7, 0x48, 0x99, 0xf9, 0xc7, 0x12, 0x34, 0xa7, 0xf3, 0x0c,
|
||
|
0xbd, 0x02, 0x95, 0xc7, 0x61, 0x40, 0xec, 0x24, 0xa6, 0x3a, 0xa9, 0xe7, 0xc5, 0x77, 0x3f, 0xa6,
|
||
|
0xe8, 0x35, 0xa8, 0x06, 0x84, 0x8b, 0x75, 0x93, 0xa3, 0x6a, 0x9b, 0x80, 0x16, 0x09, 0x85, 0xef,
|
||
|
0x41, 0x83, 0x25, 0x83, 0xac, 0x8e, 0xda, 0xd0, 0xf5, 0x89, 0x54, 0xa8, 0xad, 0x41, 0x93, 0x06,
|
||
|
0x9c, 0xc4, 0x01, 0xf6, 0x6c, 0x1a, 0xd9, 0x61, 0xe0, 0x89, 0x62, 0x62, 0xac, 0x55, 0xac, 0x46,
|
||
|
0x2a, 0xdf, 0x8b, 0xee, 0x04, 0xde, 0x31, 0xfa, 0x3e, 0x2c, 0x32, 0x12, 0x3f, 0xa2, 0x0e, 0xb1,
|
||
|
0xb1, 0xe3, 0x84, 0x49, 0xc0, 0xe5, 0xf6, 0x59, 0xb0, 0x1a, 0x5a, 0xbc, 0xa1, 0xa4, 0xe8, 0x47,
|
||
|
0xb0, 0x32, 0xa5, 0x68, 0x33, 0x27, 0x8c, 0x08, 0x6b, 0x95, 0xae, 0x94, 0xd6, 0x16, 0xac, 0xe5,
|
||
|
0xbc, 0x7e, 0x57, 0x8e, 0x21, 0x04, 0xb3, 0x1c, 0x8f, 0x44, 0x5d, 0x14, 0x3a, 0xf2, 0x37, 0xba,
|
||
|
0x0f, 0x15, 0x9f, 0x70, 0x2c, 0x82, 0xdb, 0x2a, 0xcb, 0xb0, 0xdf, 0x38, 0xe7, 0x56, 0x6d, 0xdf,
|
||
|
0xd6, 0xf6, 0xaa, 0x0c, 0x8f, 0xe1, 0x56, 0xaf, 0x43, 0x3d, 0x37, 0x74, 0xae, 0x1a, 0xf4, 0xeb,
|
||
|
0x59, 0x78, 0xe9, 0x94, 0xf4, 0x17, 0x95, 0x28, 0x48, 0x7c, 0x9b, 0xea, 0x21, 0x26, 0xd1, 0xca,
|
||
|
0x56, 0x2d, 0x48, 0xfc, 0x54, 0x9d, 0x89, 0x85, 0x49, 0x15, 0xe4, 0xc1, 0xc5, 0x5a, 0x33, 0x72,
|
||
|
0xca, 0xf5, 0x54, 0x2a, 0x8e, 0x2e, 0x86, 0x5e, 0x85, 0x05, 0xea, 0xe3, 0x91, 0x5a, 0xfc, 0x92,
|
||
|
0x64, 0x50, 0x91, 0x02, 0xbd, 0x6a, 0x3e, 0x76, 0x1e, 0xd2, 0x80, 0xd8, 0xfc, 0x38, 0x52, 0x3a,
|
||
|
0xb3, 0x6a, 0x31, 0xb4, 0xbc, 0x77, 0x1c, 0x49, 0xcd, 0x4f, 0xa0, 0xea, 0x52, 0x76, 0x94, 0xee,
|
||
|
0x8e, 0xb2, 0xdc, 0x1d, 0x6f, 0x15, 0x44, 0x71, 0x8b, 0xb2, 0x23, 0xbd, 0x31, 0xc0, 0x1d, 0xff,
|
||
|
0x96, 0xcc, 0x99, 0x1d, 0xc5, 0x84, 0xf8, 0x11, 0xa7, 0x03, 0x8f, 0xc8, 0x94, 0xaa, 0x58, 0x75,
|
||
|
0xca, 0x0e, 0x27, 0x42, 0xe4, 0xc0, 0xb2, 0x8f, 0x03, 0x3c, 0x22, 0xae, 0x3d, 0x12, 0xc1, 0x49,
|
||
|
0x7d, 0xcf, 0x4b, 0xdf, 0xef, 0x15, 0xf8, 0xbe, 0xad, 0x4c, 0xb3, 0x55, 0x05, 0xf9, 0x27, 0x64,
|
||
|
0xa8, 0x07, 0x35, 0xec, 0x38, 0xc4, 0x13, 0x2d, 0x4a, 0x18, 0xa7, 0x47, 0xf3, 0xbb, 0x05, 0xe0,
|
||
|
0x1b, 0x13, 0x93, 0xb4, 0x28, 0x66, 0x51, 0x64, 0x5c, 0x69, 0x60, 0x3b, 0x51, 0x62, 0x47, 0x1e,
|
||
|
0xe6, 0xc3, 0x30, 0xf6, 0x65, 0x25, 0x17, 0x71, 0xa5, 0xc1, 0x66, 0x94, 0x1c, 0x6a, 0xa9, 0xf9,
|
||
|
0x5b, 0x03, 0xd0, 0x49, 0xaa, 0x22, 0xf7, 0xc7, 0x8b, 0xcb, 0x89, 0x2f, 0x40, 0xd4, 0x2a, 0xeb,
|
||
|
0xc4, 0x5a, 0x4e, 0x47, 0x7b, 0x7a, 0x50, 0xf6, 0x29, 0x37, 0xe0, 0xd5, 0xb1, 0x95, 0x0a, 0x99,
|
||
|
0x9a, 0x71, 0xae, 0xb3, 0x69, 0xd1, 0x6c, 0xc6, 0x29, 0xdf, 0xb2, 0xcd, 0x31, 0x63, 0x58, 0x3a,
|
||
|
0x31, 0x31, 0xf4, 0x2e, 0x2c, 0x67, 0xa6, 0x36, 0x49, 0x13, 0xc5, 0x03, 0x65, 0xc6, 0xd2, 0x54,
|
||
|
0xf9, 0x01, 0x2c, 0x65, 0x2d, 0xd4, 0x16, 0x9f, 0x91, 0x19, 0xdc, 0xc4, 0x59, 0xfc, 0x24, 0xe0,
|
||
|
0xe6, 0x6f, 0x0c, 0x80, 0x49, 0x9a, 0xa0, 0xef, 0x42, 0x63, 0x10, 0x86, 0xdc, 0x96, 0xb9, 0x26,
|
||
|
0x7c, 0xe9, 0x94, 0xad, 0x09, 0xa9, 0xd0, 0x13, 0x4e, 0xd0, 0x5b, 0xb0, 0x34, 0xd1, 0x62, 0xf4,
|
||
|
0x31, 0xb1, 0x47, 0x03, 0xbd, 0x47, 0x1a, 0xa9, 0x62, 0x97, 0x3e, 0x26, 0x3b, 0x03, 0x01, 0x28,
|
||
|
0xb6, 0x92, 0x17, 0x3a, 0xd8, 0xb3, 0x19, 0x73, 0x99, 0x66, 0x22, 0xf6, 0xd2, 0xbe, 0x10, 0x76,
|
||
|
0x99, 0xcb, 0xcc, 0x7f, 0x1b, 0xb0, 0x38, 0x55, 0xca, 0xd1, 0x06, 0x2c, 0x52, 0xd7, 0x23, 0xb6,
|
||
|
0x4b, 0x3c, 0xc2, 0x89, 0xcd, 0xb9, 0x27, 0x5d, 0x54, 0xd7, 0x5f, 0x49, 0x93, 0x23, 0xed, 0x3e,
|
||
|
0xdb, 0x5b, 0xba, 0x7f, 0xb5, 0xea, 0xc2, 0x62, 0x4b, 0x1a, 0xf4, 0xb8, 0x87, 0x6e, 0x41, 0x13,
|
||
|
0x27, 0x3c, 0x1c, 0x43, 0x50, 0xbd, 0x08, 0xd5, 0xf5, 0xd5, 0x13, 0x18, 0xbd, 0xb4, 0x83, 0xdd,
|
||
|
0xbd, 0x64, 0x35, 0x84, 0x95, 0x86, 0xa1, 0x3e, 0x41, 0x9b, 0xb0, 0x98, 0xc3, 0xe1, 0x9e, 0x6e,
|
||
|
0x44, 0x9f, 0x4d, 0x65, 0xf7, 0x92, 0x55, 0xcf, 0xa0, 0x70, 0xef, 0x66, 0x19, 0x4a, 0x9c, 0x7b,
|
||
|
0xe6, 0xef, 0x0c, 0x68, 0x3d, 0xeb, 0x50, 0x11, 0xb5, 0x99, 0x7c, 0x4e, 0x9c, 0x84, 0xe3, 0x81,
|
||
|
0x47, 0xec, 0x21, 0xf5, 0xd2, 0x7c, 0x6b, 0x4c, 0xc4, 0xb7, 0xa8, 0x47, 0xd0, 0x2d, 0x58, 0x52,
|
||
|
0x12, 0x71, 0xa2, 0x89, 0x79, 0x85, 0x09, 0xd7, 0x53, 0x7b, 0x4e, 0x78, 0x9a, 0x63, 0x9b, 0x9e,
|
||
|
0x32, 0x31, 0xbf, 0x2c, 0x8d, 0xbb, 0x30, 0xd5, 0x4d, 0xa2, 0x5d, 0x28, 0x8b, 0x7e, 0x52, 0x39,
|
||
|
0x6e, 0x9c, 0xb5, 0x49, 0x56, 0xc6, 0x6d, 0xf1, 0x87, 0x58, 0x0a, 0x00, 0xad, 0xc0, 0x9c, 0x4b,
|
||
|
0x38, 0xa6, 0x9e, 0x4e, 0x7c, 0xfd, 0x85, 0xb6, 0xa0, 0x29, 0x15, 0x6c, 0xc6, 0x71, 0xcc, 0xd5,
|
||
|
0xaa, 0x94, 0x8a, 0x56, 0xc5, 0x92, 0x9d, 0x32, 0xe9, 0x0a, 0x13, 0xb9, 0x26, 0x3f, 0x83, 0x0a,
|
||
|
0x4b, 0x06, 0x8a, 0xea, 0xac, 0xa4, 0xfa, 0xfe, 0xf9, 0xa8, 0x6a, 0x63, 0x6b, 0x0c, 0x63, 0xde,
|
||
|
0x85, 0xb2, 0x9c, 0x00, 0xaa, 0xc2, 0x7c, 0xff, 0xe0, 0xa7, 0x07, 0x77, 0xee, 0x1d, 0x34, 0x2f,
|
||
|
0xa1, 0x1a, 0x54, 0x36, 0xad, 0xed, 0x8d, 0xde, 0xde, 0xc1, 0x4e, 0xd3, 0x10, 0x43, 0x56, 0xff,
|
||
|
0xe0, 0x40, 0x7c, 0xcc, 0xa0, 0x05, 0x28, 0x6f, 0x5b, 0xd6, 0x1d, 0xab, 0x59, 0x12, 0x5a, 0x5b,
|
||
|
0xdb, 0xfb, 0xdb, 0x52, 0x6b, 0x56, 0x7c, 0xf5, 0x0f, 0xb7, 0x94, 0x4d, 0xd9, 0xfc, 0x09, 0x54,
|
||
|
0x52, 0x6f, 0x68, 0x11, 0xaa, 0xfd, 0x83, 0xee, 0xe1, 0xf6, 0xe6, 0xde, 0xad, 0xbd, 0xed, 0xad,
|
||
|
0xe6, 0x25, 0x54, 0x87, 0x85, 0xfe, 0xc1, 0xee, 0xf6, 0xc6, 0x7e, 0x6f, 0xf7, 0x7e, 0xd3, 0x40,
|
||
|
0x4d, 0xa8, 0x75, 0x7b, 0x1b, 0xfb, 0xdb, 0x76, 0xb7, 0xb7, 0xd1, 0xeb, 0x77, 0x9b, 0x33, 0xe6,
|
||
|
0xd7, 0x06, 0x34, 0xf2, 0xfd, 0x93, 0x38, 0x9f, 0xd4, 0x99, 0xf2, 0x88, 0xc4, 0x8c, 0x86, 0x41,
|
||
|
0xda, 0x29, 0x4b, 0xe1, 0x5d, 0x25, 0x43, 0x0f, 0xe4, 0xb5, 0x2b, 0x22, 0x31, 0xa7, 0xfa, 0x6c,
|
||
|
0x2a, 0x3e, 0x76, 0xf3, 0x7e, 0xda, 0x87, 0x63, 0x7b, 0x75, 0xec, 0x66, 0x00, 0x57, 0x6f, 0xc0,
|
||
|
0xe2, 0xd4, 0xf0, 0xb9, 0x8e, 0xde, 0x7f, 0xce, 0x40, 0x23, 0x7f, 0xab, 0x40, 0x18, 0x6a, 0x0f,
|
||
|
0xdd, 0x21, 0xb3, 0xd3, 0xab, 0x89, 0x21, 0x29, 0x7f, 0x74, 0xae, 0xab, 0x49, 0x7b, 0xd7, 0x1d,
|
||
|
0x32, 0xfd, 0x5b, 0x71, 0xae, 0x3e, 0x9c, 0x48, 0x84, 0x8b, 0x63, 0x1c, 0x07, 0x63, 0x17, 0x33,
|
||
|
0x17, 0x71, 0x71, 0x1f, 0xc7, 0x41, 0xde, 0xc5, 0xf1, 0x44, 0xb2, 0xfa, 0x11, 0x34, 0xa7, 0x39,
|
||
|
0x14, 0x05, 0xa6, 0x94, 0x09, 0x8c, 0xb0, 0x9f, 0x76, 0x70, 0x1e, 0x7b, 0xf3, 0x2b, 0x03, 0x96,
|
||
|
0x37, 0x63, 0x82, 0x79, 0xda, 0x40, 0x59, 0xe4, 0x57, 0x09, 0x61, 0xbc, 0xe8, 0x1a, 0xbe, 0x02,
|
||
|
0x73, 0x31, 0x19, 0x89, 0x64, 0x52, 0x15, 0x5f, 0x7f, 0xa1, 0x8f, 0x61, 0x5e, 0x5f, 0xb6, 0x74,
|
||
|
0x7d, 0x79, 0xf3, 0x6c, 0xd1, 0xb2, 0x52, 0x33, 0xe1, 0x38, 0x56, 0x1c, 0x84, 0x63, 0xd5, 0xde,
|
||
|
0x2c, 0x68, 0xc9, 0x9e, 0x6b, 0xfe, 0x67, 0x06, 0x96, 0xfb, 0x91, 0xfb, 0x7f, 0x10, 0x2e, 0xe7,
|
||
|
0x08, 0x9f, 0xe1, 0x3d, 0x21, 0x33, 0xa7, 0xd2, 0xc5, 0xe6, 0xf4, 0x00, 0x2e, 0x8f, 0x62, 0xec,
|
||
|
0x90, 0x61, 0xe2, 0xd9, 0x2e, 0x71, 0x42, 0xdf, 0xa7, 0x8c, 0x65, 0x6b, 0xf1, 0x5c, 0x51, 0x2d,
|
||
|
0x7e, 0x35, 0xb5, 0xdf, 0xca, 0x98, 0xeb, 0xb2, 0x8c, 0xae, 0x43, 0x35, 0x91, 0x21, 0x91, 0x8f,
|
||
|
0x2e, 0xfa, 0x8d, 0xe1, 0x64, 0x75, 0xbc, 0x45, 0x89, 0xe7, 0xde, 0xc6, 0xec, 0xc8, 0x02, 0xa5,
|
||
|
0x2e, 0x7e, 0x4f, 0xc5, 0x7b, 0x7e, 0x3a, 0xde, 0x7f, 0x32, 0x60, 0x59, 0x9d, 0x4a, 0x2f, 0x26,
|
||
|
0x41, 0xce, 0x10, 0xef, 0xe9, 0x27, 0x89, 0xd9, 0x93, 0x4f, 0x12, 0x79, 0xd2, 0xe5, 0x69, 0xd2,
|
||
|
0x3e, 0x2c, 0xed, 0x10, 0xfe, 0x4d, 0x11, 0x36, 0xbf, 0x34, 0xe0, 0xa5, 0x7d, 0xca, 0x52, 0x87,
|
||
|
0xec, 0xdc, 0x1e, 0x67, 0x73, 0x1e, 0x57, 0x60, 0x6e, 0x48, 0x3d, 0x91, 0x6e, 0x3a, 0x55, 0xd5,
|
||
|
0x97, 0xb8, 0x1b, 0x44, 0xa2, 0x8c, 0x8b, 0x16, 0x4a, 0xf7, 0x45, 0x15, 0x21, 0x10, 0xbd, 0x93,
|
||
|
0xf4, 0x25, 0x06, 0x79, 0x78, 0x44, 0xd2, 0x29, 0x48, 0xf5, 0x9e, 0x10, 0x98, 0x5f, 0x18, 0xb0,
|
||
|
0x9c, 0xa7, 0xc8, 0xa2, 0x30, 0x60, 0x04, 0xdd, 0x84, 0x4a, 0xfa, 0xfa, 0xa8, 0x4b, 0xe8, 0x59,
|
||
|
0xb3, 0x7b, 0x6c, 0x87, 0xde, 0x84, 0xc5, 0x80, 0x7c, 0xce, 0xed, 0x0c, 0x01, 0x15, 0xa5, 0xba,
|
||
|
0x10, 0x1f, 0x8e, 0x49, 0xc4, 0xb0, 0xb2, 0x45, 0xf1, 0x28, 0x08, 0xd9, 0x37, 0x96, 0x4c, 0xe6,
|
||
|
0x07, 0xa7, 0xf8, 0x64, 0x89, 0xc7, 0x99, 0xf0, 0x19, 0x26, 0x3c, 0x4a, 0x78, 0xa6, 0x41, 0x5e,
|
||
|
0x50, 0x92, 0x7e, 0x4c, 0xd7, 0xff, 0x5b, 0x81, 0xa5, 0xc9, 0xa5, 0x92, 0xc7, 0xa1, 0xe7, 0x91,
|
||
|
0x18, 0x7d, 0x65, 0x40, 0x3d, 0x57, 0x2f, 0xd1, 0xd5, 0xa2, 0x70, 0x9d, 0x52, 0x5d, 0x57, 0x2f,
|
||
|
0xa7, 0x46, 0x99, 0x97, 0xd7, 0xf6, 0x9d, 0xf4, 0xe5, 0xd5, 0xdc, 0xfb, 0xe2, 0xeb, 0x7f, 0xfd,
|
||
|
0x7e, 0x66, 0xd3, 0xfc, 0x70, 0xfc, 0x6c, 0xab, 0x63, 0xc1, 0x3a, 0x4f, 0x26, 0x71, 0x7a, 0xda,
|
||
|
0x51, 0x61, 0x60, 0x9d, 0x27, 0xea, 0xc7, 0xd3, 0xf1, 0x43, 0xf2, 0xb5, 0x71, 0xe9, 0xf9, 0x9b,
|
||
|
0x01, 0xf5, 0x5c, 0xbd, 0x2c, 0x24, 0x7c, 0x5a, 0x75, 0x2d, 0x22, 0xfc, 0x73, 0x49, 0xd8, 0x5a,
|
||
|
0xdf, 0xb9, 0x28, 0xe1, 0xce, 0x93, 0xec, 0x42, 0x3e, 0x9d, 0xf0, 0xff, 0xb3, 0x01, 0xf5, 0x5c,
|
||
|
0xfd, 0x29, 0xe4, 0x7f, 0x5a, 0xb5, 0x2a, 0xe2, 0x7f, 0x47, 0xf2, 0xdf, 0x7b, 0xfb, 0x45, 0xf1,
|
||
|
0x47, 0x7f, 0x31, 0x00, 0x26, 0x25, 0x08, 0x15, 0xdd, 0x50, 0x4f, 0x54, 0xab, 0xd5, 0x33, 0xee,
|
||
|
0xc2, 0x94, 0x39, 0x7a, 0x61, 0xcc, 0xff, 0x6a, 0x40, 0x2d, 0x5b, 0x29, 0xd0, 0x7a, 0xe1, 0xa3,
|
||
|
0xda, 0x89, 0xca, 0xb7, 0x7a, 0xf5, 0x5c, 0x36, 0xaa, 0x14, 0x99, 0x1f, 0xcb, 0xa9, 0x5c, 0x43,
|
||
|
0x17, 0xce, 0x7a, 0xf4, 0x77, 0x03, 0x16, 0xa7, 0x76, 0x3b, 0x7a, 0xbf, 0xf0, 0xd5, 0xe3, 0xb4,
|
||
|
0x8a, 0x54, 0x94, 0x30, 0xbf, 0x94, 0x5c, 0xfb, 0xe6, 0xe1, 0x8b, 0x4a, 0x78, 0x57, 0xd3, 0xb8,
|
||
|
0x66, 0xbc, 0x7d, 0xf3, 0x09, 0xbc, 0xee, 0x84, 0xfe, 0xf3, 0x79, 0xdf, 0x4c, 0x2f, 0x62, 0xec,
|
||
|
0x50, 0x9c, 0xef, 0x87, 0xc6, 0x2f, 0xb6, 0xb5, 0xfe, 0x28, 0xf4, 0x70, 0x30, 0x6a, 0x87, 0xf1,
|
||
|
0xa8, 0x33, 0x22, 0x81, 0x3c, 0xfd, 0x3b, 0x6a, 0x08, 0x47, 0x94, 0x3d, 0xe3, 0xff, 0x42, 0xd7,
|
||
|
0x53, 0xc1, 0x60, 0x4e, 0x5a, 0x5c, 0xfd, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4a, 0xea, 0xcc,
|
||
|
0x54, 0xb3, 0x1a, 0x00, 0x00,
|
||
|
}
|