Skip to content

Commit

Permalink
feat(dataflow): update the api
Browse files Browse the repository at this point in the history
#### dataflow:v1b3

The following keys were added:
- schemas.Base2Exponent (Total Keys: 6)
- schemas.BucketOptions (Total Keys: 4)
- schemas.DataflowHistogramValue (Total Keys: 9)
- schemas.Linear (Total Keys: 8)
- schemas.MetricValue (Total Keys: 8)
- schemas.OutlierStats (Total Keys: 10)
- schemas.PerStepNamespaceMetrics (Total Keys: 6)
- schemas.PerWorkerMetrics (Total Keys: 4)
- schemas.WorkerMessage.properties.perWorkerMetrics.$ref (Total Keys: 1)
  • Loading branch information
yoshi-automation committed Jan 16, 2024
1 parent e54edf0 commit 06bf8b4
Show file tree
Hide file tree
Showing 3 changed files with 257 additions and 1 deletion.
40 changes: 40 additions & 0 deletions docs/dyn/dataflow_v1b3.projects.html
Original file line number Diff line number Diff line change
Expand Up @@ -154,6 +154,46 @@ <h3>Method Details</h3>
&quot;labels&quot;: { # Labels are used to group WorkerMessages. For example, a worker_message about a particular container might have the labels: { &quot;JOB_ID&quot;: &quot;2015-04-22&quot;, &quot;WORKER_ID&quot;: &quot;wordcount-vm-2015…&quot; &quot;CONTAINER_TYPE&quot;: &quot;worker&quot;, &quot;CONTAINER_ID&quot;: &quot;ac1234def&quot;} Label tags typically correspond to Label enum values. However, for ease of development other strings can be used as tags. LABEL_UNSPECIFIED should not be used here.
&quot;a_key&quot;: &quot;A String&quot;,
},
&quot;perWorkerMetrics&quot;: { # Per worker metrics. # System defined metrics for this worker.
&quot;perStepNamespaceMetrics&quot;: [ # Optional. Metrics for a particular unfused step and namespace.
{ # Metrics for a particular unfused step and namespace. A metric is uniquely identified by the `metrics_namespace`, `original_step`, `metric name` and `metric_labels`.
&quot;metricValues&quot;: [ # Optional. Metrics that are recorded for this namespace and unfused step.
{ # The value of a metric along with its name and labels.
&quot;metric&quot;: &quot;A String&quot;, # Base name for this metric.
&quot;metricLabels&quot;: { # Optional. Set of metric labels for this metric.
&quot;a_key&quot;: &quot;A String&quot;,
},
              &quot;valueHistogram&quot;: { # Summary statistics for a population of values. HistogramValue contains a sequence of buckets and gives a count of values that fall into each bucket. Bucket boundaries are defined by a formula and bucket widths are either fixed or exponentially increasing. # Histogram value of this metric.
&quot;bucketCounts&quot;: [ # Optional. The number of values in each bucket of the histogram, as described in `bucket_options`. `bucket_counts` should contain N values, where N is the number of buckets specified in `bucket_options`. If `bucket_counts` has fewer than N values, the remaining values are assumed to be 0.
&quot;A String&quot;,
],
&quot;bucketOptions&quot;: { # `BucketOptions` describes the bucket boundaries used in the histogram. # Describes the bucket boundaries used in the histogram.
&quot;exponential&quot;: { # Exponential buckets where the growth factor between buckets is `2**(2**-scale)`. e.g. for `scale=1` growth factor is `2**(2**(-1))=sqrt(2)`. `n` buckets will have the following boundaries. - 0th: [0, gf) - i in [1, n-1]: [gf^(i), gf^(i+1)) # Bucket boundaries grow exponentially.
&quot;numberOfBuckets&quot;: 42, # Must be greater than 0.
&quot;scale&quot;: 42, # Must be between -3 and 3. This forces the growth factor of the bucket boundaries to be between `2^(1/8)` and `256`.
},
&quot;linear&quot;: { # Linear buckets with the following boundaries for indices in 0 to n-1. - i in [0, n-1]: [start + (i)*width, start + (i+1)*width) # Bucket boundaries grow linearly.
&quot;numberOfBuckets&quot;: 42, # Must be greater than 0.
&quot;start&quot;: 3.14, # Lower bound of the first bucket.
&quot;width&quot;: 3.14, # Distance between bucket boundaries. Must be greater than 0.
},
},
&quot;count&quot;: &quot;A String&quot;, # Number of values recorded in this histogram.
&quot;outlierStats&quot;: { # Statistics for the underflow and overflow bucket. # Statistics on the values recorded in the histogram that fall out of the bucket boundaries.
&quot;overflowCount&quot;: &quot;A String&quot;, # Number of values that are larger than the upper bound of the largest bucket.
&quot;overflowMean&quot;: 3.14, # Mean of values in the overflow bucket.
&quot;underflowCount&quot;: &quot;A String&quot;, # Number of values that are smaller than the lower bound of the smallest bucket.
                  &quot;underflowMean&quot;: 3.14, # Mean of values in the underflow bucket.
},
},
&quot;valueInt64&quot;: &quot;A String&quot;, # Integer value of this metric.
},
],
&quot;metricsNamespace&quot;: &quot;A String&quot;, # The namespace of these metrics on the worker.
&quot;originalStep&quot;: &quot;A String&quot;, # The original system name of the unfused step that these metrics are reported from.
},
],
},
&quot;streamingScalingReport&quot;: { # Contains per-user worker telemetry used in streaming autoscaling. # Contains per-user worker telemetry used in streaming autoscaling.
        &quot;activeBundleCount&quot;: 42, # Current active bundle count.
        &quot;activeThreadCount&quot;: 42, # Current active thread count.
Expand Down
40 changes: 40 additions & 0 deletions docs/dyn/dataflow_v1b3.projects.locations.html
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,46 @@ <h3>Method Details</h3>
&quot;labels&quot;: { # Labels are used to group WorkerMessages. For example, a worker_message about a particular container might have the labels: { &quot;JOB_ID&quot;: &quot;2015-04-22&quot;, &quot;WORKER_ID&quot;: &quot;wordcount-vm-2015…&quot; &quot;CONTAINER_TYPE&quot;: &quot;worker&quot;, &quot;CONTAINER_ID&quot;: &quot;ac1234def&quot;} Label tags typically correspond to Label enum values. However, for ease of development other strings can be used as tags. LABEL_UNSPECIFIED should not be used here.
&quot;a_key&quot;: &quot;A String&quot;,
},
&quot;perWorkerMetrics&quot;: { # Per worker metrics. # System defined metrics for this worker.
&quot;perStepNamespaceMetrics&quot;: [ # Optional. Metrics for a particular unfused step and namespace.
{ # Metrics for a particular unfused step and namespace. A metric is uniquely identified by the `metrics_namespace`, `original_step`, `metric name` and `metric_labels`.
&quot;metricValues&quot;: [ # Optional. Metrics that are recorded for this namespace and unfused step.
{ # The value of a metric along with its name and labels.
&quot;metric&quot;: &quot;A String&quot;, # Base name for this metric.
&quot;metricLabels&quot;: { # Optional. Set of metric labels for this metric.
&quot;a_key&quot;: &quot;A String&quot;,
},
              &quot;valueHistogram&quot;: { # Summary statistics for a population of values. HistogramValue contains a sequence of buckets and gives a count of values that fall into each bucket. Bucket boundaries are defined by a formula and bucket widths are either fixed or exponentially increasing. # Histogram value of this metric.
&quot;bucketCounts&quot;: [ # Optional. The number of values in each bucket of the histogram, as described in `bucket_options`. `bucket_counts` should contain N values, where N is the number of buckets specified in `bucket_options`. If `bucket_counts` has fewer than N values, the remaining values are assumed to be 0.
&quot;A String&quot;,
],
&quot;bucketOptions&quot;: { # `BucketOptions` describes the bucket boundaries used in the histogram. # Describes the bucket boundaries used in the histogram.
&quot;exponential&quot;: { # Exponential buckets where the growth factor between buckets is `2**(2**-scale)`. e.g. for `scale=1` growth factor is `2**(2**(-1))=sqrt(2)`. `n` buckets will have the following boundaries. - 0th: [0, gf) - i in [1, n-1]: [gf^(i), gf^(i+1)) # Bucket boundaries grow exponentially.
&quot;numberOfBuckets&quot;: 42, # Must be greater than 0.
&quot;scale&quot;: 42, # Must be between -3 and 3. This forces the growth factor of the bucket boundaries to be between `2^(1/8)` and `256`.
},
&quot;linear&quot;: { # Linear buckets with the following boundaries for indices in 0 to n-1. - i in [0, n-1]: [start + (i)*width, start + (i+1)*width) # Bucket boundaries grow linearly.
&quot;numberOfBuckets&quot;: 42, # Must be greater than 0.
&quot;start&quot;: 3.14, # Lower bound of the first bucket.
&quot;width&quot;: 3.14, # Distance between bucket boundaries. Must be greater than 0.
},
},
&quot;count&quot;: &quot;A String&quot;, # Number of values recorded in this histogram.
&quot;outlierStats&quot;: { # Statistics for the underflow and overflow bucket. # Statistics on the values recorded in the histogram that fall out of the bucket boundaries.
&quot;overflowCount&quot;: &quot;A String&quot;, # Number of values that are larger than the upper bound of the largest bucket.
&quot;overflowMean&quot;: 3.14, # Mean of values in the overflow bucket.
&quot;underflowCount&quot;: &quot;A String&quot;, # Number of values that are smaller than the lower bound of the smallest bucket.
                  &quot;underflowMean&quot;: 3.14, # Mean of values in the underflow bucket.
},
},
&quot;valueInt64&quot;: &quot;A String&quot;, # Integer value of this metric.
},
],
&quot;metricsNamespace&quot;: &quot;A String&quot;, # The namespace of these metrics on the worker.
&quot;originalStep&quot;: &quot;A String&quot;, # The original system name of the unfused step that these metrics are reported from.
},
],
},
&quot;streamingScalingReport&quot;: { # Contains per-user worker telemetry used in streaming autoscaling. # Contains per-user worker telemetry used in streaming autoscaling.
        &quot;activeBundleCount&quot;: 42, # Current active bundle count.
        &quot;activeThreadCount&quot;: 42, # Current active thread count.
Expand Down
178 changes: 177 additions & 1 deletion googleapiclient/discovery_cache/documents/dataflow.v1b3.json
Original file line number Diff line number Diff line change
Expand Up @@ -2221,7 +2221,7 @@
}
}
},
"revision": "20231217",
"revision": "20240107",
"rootUrl": "https://dataflow.googleapis.com/",
"schemas": {
"ApproximateProgress": {
Expand Down Expand Up @@ -2367,6 +2367,23 @@
},
"type": "object"
},
"Base2Exponent": {
"description": "Exponential buckets where the growth factor between buckets is `2**(2**-scale)`. e.g. for `scale=1` growth factor is `2**(2**(-1))=sqrt(2)`. `n` buckets will have the following boundaries. - 0th: [0, gf) - i in [1, n-1]: [gf^(i), gf^(i+1))",
"id": "Base2Exponent",
"properties": {
"numberOfBuckets": {
"description": "Must be greater than 0.",
"format": "int32",
"type": "integer"
},
"scale": {
"description": "Must be between -3 and 3. This forces the growth factor of the bucket boundaries to be between `2^(1/8)` and `256`.",
"format": "int32",
"type": "integer"
}
},
"type": "object"
},
"BigQueryIODetails": {
"description": "Metadata for a BigQuery connector used by the job.",
"id": "BigQueryIODetails",
Expand Down Expand Up @@ -2409,6 +2426,21 @@
},
"type": "object"
},
"BucketOptions": {
"description": "`BucketOptions` describes the bucket boundaries used in the histogram.",
"id": "BucketOptions",
"properties": {
"exponential": {
"$ref": "Base2Exponent",
"description": "Bucket boundaries grow exponentially."
},
"linear": {
"$ref": "Linear",
"description": "Bucket boundaries grow linearly."
}
},
"type": "object"
},
"CPUTime": {
"description": "Modeled after information exposed by /proc/stat.",
"id": "CPUTime",
Expand Down Expand Up @@ -2907,6 +2939,34 @@
},
"type": "object"
},
"DataflowHistogramValue": {
      "description": "Summary statistics for a population of values. HistogramValue contains a sequence of buckets and gives a count of values that fall into each bucket. Bucket boundaries are defined by a formula and bucket widths are either fixed or exponentially increasing.",
"id": "DataflowHistogramValue",
"properties": {
"bucketCounts": {
"description": "Optional. The number of values in each bucket of the histogram, as described in `bucket_options`. `bucket_counts` should contain N values, where N is the number of buckets specified in `bucket_options`. If `bucket_counts` has fewer than N values, the remaining values are assumed to be 0.",
"items": {
"format": "int64",
"type": "string"
},
"type": "array"
},
"bucketOptions": {
"$ref": "BucketOptions",
"description": "Describes the bucket boundaries used in the histogram."
},
"count": {
"description": "Number of values recorded in this histogram.",
"format": "int64",
"type": "string"
},
"outlierStats": {
"$ref": "OutlierStats",
"description": "Statistics on the values recorded in the histogram that fall out of the bucket boundaries."
}
},
"type": "object"
},
"DatastoreIODetails": {
"description": "Metadata for a Datastore connector used by the job.",
"id": "DatastoreIODetails",
Expand Down Expand Up @@ -4408,6 +4468,28 @@
},
"type": "object"
},
"Linear": {
"description": "Linear buckets with the following boundaries for indices in 0 to n-1. - i in [0, n-1]: [start + (i)*width, start + (i+1)*width)",
"id": "Linear",
"properties": {
"numberOfBuckets": {
"description": "Must be greater than 0.",
"format": "int32",
"type": "integer"
},
"start": {
"description": "Lower bound of the first bucket.",
"format": "double",
"type": "number"
},
"width": {
"description": "Distance between bucket boundaries. Must be greater than 0.",
"format": "double",
"type": "number"
}
},
"type": "object"
},
"ListJobMessagesResponse": {
"description": "Response to a request to list job messages.",
"id": "ListJobMessagesResponse",
Expand Down Expand Up @@ -4621,6 +4703,33 @@
},
"type": "object"
},
"MetricValue": {
"description": "The value of a metric along with its name and labels.",
"id": "MetricValue",
"properties": {
"metric": {
"description": "Base name for this metric.",
"type": "string"
},
"metricLabels": {
"additionalProperties": {
"type": "string"
},
"description": "Optional. Set of metric labels for this metric.",
"type": "object"
},
"valueHistogram": {
"$ref": "DataflowHistogramValue",
"description": "Histogram value of this metric."
},
"valueInt64": {
"description": "Integer value of this metric.",
"format": "int64",
"type": "string"
}
},
"type": "object"
},
"MountedDataDisk": {
"description": "Describes mounted data disk.",
"id": "MountedDataDisk",
Expand Down Expand Up @@ -4682,6 +4791,33 @@
},
"type": "object"
},
"OutlierStats": {
"description": "Statistics for the underflow and overflow bucket.",
"id": "OutlierStats",
"properties": {
"overflowCount": {
"description": "Number of values that are larger than the upper bound of the largest bucket.",
"format": "int64",
"type": "string"
},
"overflowMean": {
"description": "Mean of values in the overflow bucket.",
"format": "double",
"type": "number"
},
"underflowCount": {
"description": "Number of values that are smaller than the lower bound of the smallest bucket.",
"format": "int64",
"type": "string"
},
"underflowMean": {
          "description": "Mean of values in the underflow bucket.",
"format": "double",
"type": "number"
}
},
"type": "object"
},
"Package": {
"description": "The packages that must be installed in order for a worker to run the steps of the Cloud Dataflow job that will be assigned to its worker pool. This is the mechanism by which the Cloud Dataflow SDK causes code to be loaded onto the workers. For example, the Cloud Dataflow Java SDK might use this to install jars containing the user's code and all of the various dependencies (libraries, data files, etc.) required in order for that code to run.",
"id": "Package",
Expand Down Expand Up @@ -4968,6 +5104,42 @@
},
"type": "object"
},
"PerStepNamespaceMetrics": {
"description": "Metrics for a particular unfused step and namespace. A metric is uniquely identified by the `metrics_namespace`, `original_step`, `metric name` and `metric_labels`.",
"id": "PerStepNamespaceMetrics",
"properties": {
"metricValues": {
"description": "Optional. Metrics that are recorded for this namespace and unfused step.",
"items": {
"$ref": "MetricValue"
},
"type": "array"
},
"metricsNamespace": {
"description": "The namespace of these metrics on the worker.",
"type": "string"
},
"originalStep": {
"description": "The original system name of the unfused step that these metrics are reported from.",
"type": "string"
}
},
"type": "object"
},
"PerWorkerMetrics": {
"description": "Per worker metrics.",
"id": "PerWorkerMetrics",
"properties": {
"perStepNamespaceMetrics": {
"description": "Optional. Metrics for a particular unfused step and namespace.",
"items": {
"$ref": "PerStepNamespaceMetrics"
},
"type": "array"
}
},
"type": "object"
},
"PipelineDescription": {
"description": "A descriptive representation of submitted pipeline as well as the executed form. This data is provided by the Dataflow service for ease of visualizing the pipeline and interpreting Dataflow provided metrics.",
"id": "PipelineDescription",
Expand Down Expand Up @@ -7287,6 +7459,10 @@
"description": "Labels are used to group WorkerMessages. For example, a worker_message about a particular container might have the labels: { \"JOB_ID\": \"2015-04-22\", \"WORKER_ID\": \"wordcount-vm-2015\u2026\" \"CONTAINER_TYPE\": \"worker\", \"CONTAINER_ID\": \"ac1234def\"} Label tags typically correspond to Label enum values. However, for ease of development other strings can be used as tags. LABEL_UNSPECIFIED should not be used here.",
"type": "object"
},
"perWorkerMetrics": {
"$ref": "PerWorkerMetrics",
"description": "System defined metrics for this worker."
},
"streamingScalingReport": {
"$ref": "StreamingScalingReport",
"description": "Contains per-user worker telemetry used in streaming autoscaling."
Expand Down

0 comments on commit 06bf8b4

Please sign in to comment.