
Commit 5365240

committed Oct 3, 2023
feat(dataproc): update the api
#### dataproc:v1

The following keys were added:
- schemas.InstanceGroupConfig.properties.startupConfig.$ref (Total Keys: 1)
- schemas.SparkStandaloneAutoscalingConfig.properties.removeOnlyIdleWorkers.type (Total Keys: 1)
- schemas.StartupConfig (Total Keys: 4)
1 parent 119d519 commit 5365240

7 files changed: +255 −1 lines changed
 

‎docs/dyn/dataproc_v1.projects.locations.autoscalingPolicies.html

+6

@@ -124,6 +124,7 @@ <h3>Method Details</h3>
   "cooldownPeriod": "A String", # Optional. Duration between scaling events. A scaling period starts after the update operation from the previous event has completed.Bounds: 2m, 1d. Default: 2m.
   "sparkStandaloneConfig": { # Basic autoscaling configurations for Spark Standalone. # Optional. Spark Standalone autoscaling configuration
     "gracefulDecommissionTimeout": "A String", # Required. Timeout for Spark graceful decommissioning of spark workers. Specifies the duration to wait for spark worker to complete spark decommissioning tasks before forcefully removing workers. Only applicable to downscaling operations.Bounds: 0s, 1d.
+    "removeOnlyIdleWorkers": True or False, # Optional. Remove only idle workers when scaling down cluster
     "scaleDownFactor": 3.14, # Required. Fraction of required executors to remove from Spark Serverless clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job.(more aggressive scaling). A scale-down factor closer to 0 will result in a smaller magnitude of scaling donw (less aggressive scaling).Bounds: 0.0, 1.0.
     "scaleDownMinWorkerFraction": 3.14, # Optional. Minimum scale-down threshold as a fraction of total cluster size before scaling occurs. For example, in a 20-worker cluster, a threshold of 0.1 means the autoscaler must recommend at least a 2 worker scale-down for the cluster to scale. A threshold of 0 means the autoscaler will scale down on any recommended change.Bounds: 0.0, 1.0. Default: 0.0.
     "scaleUpFactor": 3.14, # Required. Fraction of required workers to add to Spark Standalone clusters. A scale-up factor of 1.0 will result in scaling up so that there are no more required workers for the Spark Job (more aggressive scaling). A scale-up factor closer to 0 will result in a smaller magnitude of scaling up (less aggressive scaling).Bounds: 0.0, 1.0.

The same "removeOnlyIdleWorkers" line is added at the corresponding point in the other five request/response samples in this file, in hunks @@ -167,6 +168,7 @@, @@ -235,6 +237,7 @@, @@ -333,6 +336,7 @@, @@ -482,6 +486,7 @@ and @@ -525,6 +530,7 @@ (all anchored at <h3>Method Details</h3>).
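For orientation only (not part of the commit): a minimal sketch of how the new flag could be set when creating a policy through this collection of the generated client. The project, location, and policy id below are placeholders, and credentials are assumed to come from Application Default Credentials.

# Sketch: create an autoscaling policy whose Spark Standalone scale-down only
# removes idle workers. All identifiers are placeholders.
from googleapiclient import discovery

dataproc = discovery.build("dataproc", "v1")

policy = {
    "id": "spark-standalone-idle-only",  # hypothetical policy id
    "workerConfig": {"maxInstances": 10},
    "basicAlgorithm": {
        "cooldownPeriod": "120s",
        "sparkStandaloneConfig": {
            "gracefulDecommissionTimeout": "300s",
            "scaleUpFactor": 0.5,
            "scaleDownFactor": 0.5,
            # New field from this revision: scale-down removes idle workers only.
            "removeOnlyIdleWorkers": True,
        },
    },
}

dataproc.projects().locations().autoscalingPolicies().create(
    parent="projects/my-project/locations/us-central1",  # placeholder parent
    body=policy,
).execute()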

‎docs/dyn/dataproc_v1.projects.locations.workflowTemplates.html

+84
Large diffs are not rendered by default.

‎docs/dyn/dataproc_v1.projects.regions.autoscalingPolicies.html

+6

This file receives the same six changes as docs/dyn/dataproc_v1.projects.locations.autoscalingPolicies.html above: the added line

+    "removeOnlyIdleWorkers": True or False, # Optional. Remove only idle workers when scaling down cluster

appears in each of the six request/response samples, in hunks @@ -124,6 +124,7 @@, @@ -167,6 +168,7 @@, @@ -235,6 +237,7 @@, @@ -333,6 +336,7 @@, @@ -482,6 +486,7 @@ and @@ -525,6 +530,7 @@ (all anchored at <h3>Method Details</h3>).
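For an existing policy in this regions collection, update() replaces the whole resource, so a read-modify-write sketch (resource names are placeholders; it assumes the policy already uses sparkStandaloneConfig rather than yarnConfig):

# Sketch only: flip the new flag on an existing Spark Standalone policy.
# update() performs a full replacement, so fetch the policy first and send it back whole.
from googleapiclient import discovery

dataproc = discovery.build("dataproc", "v1")
name = "projects/my-project/regions/us-central1/autoscalingPolicies/my-policy"  # placeholder

policies = dataproc.projects().regions().autoscalingPolicies()
policy = policies.get(name=name).execute()
policy["basicAlgorithm"]["sparkStandaloneConfig"]["removeOnlyIdleWorkers"] = True
policies.update(name=name, body=policy).execute()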

‎docs/dyn/dataproc_v1.projects.regions.clusters.html

+48
Large diffs are not rendered by default.

‎docs/dyn/dataproc_v1.projects.regions.clusters.nodeGroups.html

+6

@@ -158,6 +158,9 @@ <h3>Method Details</h3>
   "minNumInstances": 42, # Optional. The minimum number of primary worker instances to create. If min_num_instances is set, cluster creation will succeed if the number of primary workers created is at least equal to the min_num_instances number.Example: Cluster creation request with num_instances = 5 and min_num_instances = 3: If 4 VMs are created and 1 instance fails, the failed VM is deleted. The cluster is resized to 4 instances and placed in a RUNNING state. If 2 instances are created and 3 instances fail, the cluster in placed in an ERROR state. The failed VMs are not deleted.
   "numInstances": 42, # Optional. The number of VM instances in the instance group. For HA cluster master_config groups, must be set to 3. For standard cluster master_config groups, must be set to 1.
   "preemptibility": "A String", # Optional. Specifies the preemptibility of the instance group.The default value for master and worker groups is NON_PREEMPTIBLE. This default cannot be changed.The default value for secondary instances is PREEMPTIBLE.
+  "startupConfig": { # Configuration to handle the startup of instances during cluster create and update process. # Optional. Configuration to handle the startup of instances during cluster create and update process.
+    "requiredRegistrationFraction": 3.14, # Optional. The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled).
+  },
   },
   "roles": [ # Required. Node group roles.
     "A String",

The same three-line "startupConfig" block is also added in the second sample in this file (hunk @@ -266,6 +269,9 @@).
‎docs/dyn/dataproc_v1.projects.regions.workflowTemplates.html

+84
Large diffs are not rendered by default.

‎googleapiclient/discovery_cache/documents/dataproc.v1.json

+21 −1

@@ -3001,7 +3001,7 @@
       }
     }
   },
-  "revision": "20230919",
+  "revision": "20230926",
   "rootUrl": "https://dataproc.googleapis.com/",
   "schemas": {
     "AcceleratorConfig": {
@@ -4496,6 +4496,10 @@
           "Instances are Spot VMs (https://cloud.google.com/compute/docs/instances/spot).This option is allowed only for secondary worker (https://cloud.google.com/dataproc/docs/concepts/compute/secondary-vms) groups. Spot VMs are the latest version of preemptible VMs (https://cloud.google.com/compute/docs/instances/preemptible), and provide additional features."
         ],
         "type": "string"
+      },
+      "startupConfig": {
+        "$ref": "StartupConfig",
+        "description": "Optional. Configuration to handle the startup of instances during cluster create and update process."
       }
     },
     "type": "object"
@@ -6640,6 +6644,10 @@
         "format": "google-duration",
         "type": "string"
       },
+      "removeOnlyIdleWorkers": {
+        "description": "Optional. Remove only idle workers when scaling down cluster",
+        "type": "boolean"
+      },
       "scaleDownFactor": {
         "description": "Required. Fraction of required executors to remove from Spark Serverless clusters. A scale-down factor of 1.0 will result in scaling down so that there are no more executors for the Spark Job.(more aggressive scaling). A scale-down factor closer to 0 will result in a smaller magnitude of scaling donw (less aggressive scaling).Bounds: 0.0, 1.0.",
         "format": "double",
@@ -6678,6 +6686,18 @@
       },
       "type": "object"
     },
+    "StartupConfig": {
+      "description": "Configuration to handle the startup of instances during cluster create and update process.",
+      "id": "StartupConfig",
+      "properties": {
+        "requiredRegistrationFraction": {
+          "description": "Optional. The config setting to enable cluster creation/ updation to be successful only after required_registration_fraction of instances are up and running. This configuration is applicable to only secondary workers for now. The cluster will fail if required_registration_fraction of instances are not available. This will include instance creation, agent registration, and service registration (if enabled).",
+          "format": "double",
+          "type": "number"
+        }
+      },
+      "type": "object"
+    },
     "StateHistory": {
       "description": "Historical state information.",
       "id": "StateHistory",

Comments (0)