From fe4dbaf8494bdfb947536a9c5a4c64e4fe6409b0 Mon Sep 17 00:00:00 2001 From: The Magician Date: Wed, 30 Nov 2022 17:06:25 -0800 Subject: [PATCH] Cloudrun v2 job (#6852) (#13154) resolve https://github.com/hashicorp/terraform-provider-google/issues/11743 Signed-off-by: Modular Magician Signed-off-by: Modular Magician --- .changelog/6852.txt | 3 + google/cloud_run_v2_operation.go | 75 + google/config.go | 4 + google/config_test_utils.go | 1 + google/provider.go | 14 +- google/resource_cloud_run_v2_job.go | 3141 +++++++++++++++++ ...esource_cloud_run_v2_job_generated_test.go | 408 +++ .../resource_cloud_run_v2_job_sweeper_test.go | 124 + google/resource_cloud_run_v2_job_test.go | 414 +++ website/docs/r/cloud_run_v2_job.html.markdown | 822 +++++ 10 files changed, 5004 insertions(+), 2 deletions(-) create mode 100644 .changelog/6852.txt create mode 100644 google/cloud_run_v2_operation.go create mode 100644 google/resource_cloud_run_v2_job.go create mode 100644 google/resource_cloud_run_v2_job_generated_test.go create mode 100644 google/resource_cloud_run_v2_job_sweeper_test.go create mode 100644 google/resource_cloud_run_v2_job_test.go create mode 100644 website/docs/r/cloud_run_v2_job.html.markdown diff --git a/.changelog/6852.txt b/.changelog/6852.txt new file mode 100644 index 00000000000..5f29e8b06db --- /dev/null +++ b/.changelog/6852.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +`google_cloud_run_v2_job` +``` diff --git a/google/cloud_run_v2_operation.go b/google/cloud_run_v2_operation.go new file mode 100644 index 00000000000..0faf2c28bdd --- /dev/null +++ b/google/cloud_run_v2_operation.go @@ -0,0 +1,75 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "encoding/json" + "fmt" + "time" +) + +type CloudRunV2OperationWaiter struct { + Config *Config + UserAgent string + Project string + CommonOperationWaiter +} + +func (w *CloudRunV2OperationWaiter) QueryOp() (interface{}, error) { + if w == nil { + return nil, fmt.Errorf("Cannot query operation, it's unset or nil.") + } + // Returns the proper get. + url := fmt.Sprintf("%s%s", w.Config.CloudRunV2BasePath, w.CommonOperationWaiter.Op.Name) + + return sendRequest(w.Config, "GET", w.Project, url, w.UserAgent, nil) +} + +func createCloudRunV2Waiter(config *Config, op map[string]interface{}, project, activity, userAgent string) (*CloudRunV2OperationWaiter, error) { + w := &CloudRunV2OperationWaiter{ + Config: config, + UserAgent: userAgent, + Project: project, + } + if err := w.CommonOperationWaiter.SetOp(op); err != nil { + return nil, err + } + return w, nil +} + +// nolint: deadcode,unused +func cloudRunV2OperationWaitTimeWithResponse(config *Config, op map[string]interface{}, response *map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + w, err := createCloudRunV2Waiter(config, op, project, activity, userAgent) + if err != nil { + return err + } + if err := OperationWait(w, activity, timeout, config.PollInterval); err != nil { + return err + } + return json.Unmarshal([]byte(w.CommonOperationWaiter.Op.Response), response) +} + +func cloudRunV2OperationWaitTime(config *Config, op map[string]interface{}, project, activity, userAgent string, timeout time.Duration) error { + if val, ok := op["name"]; !ok || val == "" { + // This was a synchronous call - there is no operation to wait for. 
+ return nil + } + w, err := createCloudRunV2Waiter(config, op, project, activity, userAgent) + if err != nil { + // If w is nil, the op was synchronous. + return err + } + return OperationWait(w, activity, timeout, config.PollInterval) +} diff --git a/google/config.go b/google/config.go index ff462dee40b..f5f4fb338bb 100644 --- a/google/config.go +++ b/google/config.go @@ -195,6 +195,7 @@ type Config struct { CloudIdsBasePath string CloudIotBasePath string CloudRunBasePath string + CloudRunV2BasePath string CloudSchedulerBasePath string CloudTasksBasePath string ComputeBasePath string @@ -293,6 +294,7 @@ const CloudIdentityBasePathKey = "CloudIdentity" const CloudIdsBasePathKey = "CloudIds" const CloudIotBasePathKey = "CloudIot" const CloudRunBasePathKey = "CloudRun" +const CloudRunV2BasePathKey = "CloudRunV2" const CloudSchedulerBasePathKey = "CloudScheduler" const CloudTasksBasePathKey = "CloudTasks" const ComputeBasePathKey = "Compute" @@ -385,6 +387,7 @@ var DefaultBasePaths = map[string]string{ CloudIdsBasePathKey: "https://ids.googleapis.com/v1/", CloudIotBasePathKey: "https://cloudiot.googleapis.com/v1/", CloudRunBasePathKey: "https://{{location}}-run.googleapis.com/", + CloudRunV2BasePathKey: "https://run.googleapis.com/v2/", CloudSchedulerBasePathKey: "https://cloudscheduler.googleapis.com/v1/", CloudTasksBasePathKey: "https://cloudtasks.googleapis.com/v2/", ComputeBasePathKey: "https://compute.googleapis.com/compute/v1/", @@ -1239,6 +1242,7 @@ func ConfigureBasePaths(c *Config) { c.CloudIdsBasePath = DefaultBasePaths[CloudIdsBasePathKey] c.CloudIotBasePath = DefaultBasePaths[CloudIotBasePathKey] c.CloudRunBasePath = DefaultBasePaths[CloudRunBasePathKey] + c.CloudRunV2BasePath = DefaultBasePaths[CloudRunV2BasePathKey] c.CloudSchedulerBasePath = DefaultBasePaths[CloudSchedulerBasePathKey] c.CloudTasksBasePath = DefaultBasePaths[CloudTasksBasePathKey] c.ComputeBasePath = DefaultBasePaths[ComputeBasePathKey] diff --git a/google/config_test_utils.go 
b/google/config_test_utils.go index b91052f4af6..a48c0609a62 100644 --- a/google/config_test_utils.go +++ b/google/config_test_utils.go @@ -42,6 +42,7 @@ func configureTestBasePaths(c *Config, url string) { c.CloudIdsBasePath = url c.CloudIotBasePath = url c.CloudRunBasePath = url + c.CloudRunV2BasePath = url c.CloudSchedulerBasePath = url c.CloudTasksBasePath = url c.ComputeBasePath = url diff --git a/google/provider.go b/google/provider.go index 3b52859bc8c..f0b4161b655 100644 --- a/google/provider.go +++ b/google/provider.go @@ -341,6 +341,14 @@ func Provider() *schema.Provider { "GOOGLE_CLOUD_RUN_CUSTOM_ENDPOINT", }, DefaultBasePaths[CloudRunBasePathKey]), }, + "cloud_run_v2_custom_endpoint": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateCustomEndpoint, + DefaultFunc: schema.MultiEnvDefaultFunc([]string{ + "GOOGLE_CLOUD_RUN_V2_CUSTOM_ENDPOINT", + }, DefaultBasePaths[CloudRunV2BasePathKey]), + }, "cloud_scheduler_custom_endpoint": { Type: schema.TypeString, Optional: true, @@ -926,9 +934,9 @@ func Provider() *schema.Provider { return provider } -// Generated resources: 248 +// Generated resources: 249 // Generated IAM resources: 156 -// Total generated resources: 404 +// Total generated resources: 405 func ResourceMap() map[string]*schema.Resource { resourceMap, _ := ResourceMapWithErrors() return resourceMap @@ -1033,6 +1041,7 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_cloud_run_service_iam_binding": ResourceIamBinding(CloudRunServiceIamSchema, CloudRunServiceIamUpdaterProducer, CloudRunServiceIdParseFunc), "google_cloud_run_service_iam_member": ResourceIamMember(CloudRunServiceIamSchema, CloudRunServiceIamUpdaterProducer, CloudRunServiceIdParseFunc), "google_cloud_run_service_iam_policy": ResourceIamPolicy(CloudRunServiceIamSchema, CloudRunServiceIamUpdaterProducer, CloudRunServiceIdParseFunc), + "google_cloud_run_v2_job": resourceCloudRunV2Job(), "google_cloud_scheduler_job": 
resourceCloudSchedulerJob(), "google_cloud_tasks_queue": resourceCloudTasksQueue(), "google_cloud_tasks_queue_iam_binding": ResourceIamBinding(CloudTasksQueueIamSchema, CloudTasksQueueIamUpdaterProducer, CloudTasksQueueIdParseFunc), @@ -1575,6 +1584,7 @@ func providerConfigure(ctx context.Context, d *schema.ResourceData, p *schema.Pr config.CloudIdsBasePath = d.Get("cloud_ids_custom_endpoint").(string) config.CloudIotBasePath = d.Get("cloud_iot_custom_endpoint").(string) config.CloudRunBasePath = d.Get("cloud_run_custom_endpoint").(string) + config.CloudRunV2BasePath = d.Get("cloud_run_v2_custom_endpoint").(string) config.CloudSchedulerBasePath = d.Get("cloud_scheduler_custom_endpoint").(string) config.CloudTasksBasePath = d.Get("cloud_tasks_custom_endpoint").(string) config.ComputeBasePath = d.Get("compute_custom_endpoint").(string) diff --git a/google/resource_cloud_run_v2_job.go b/google/resource_cloud_run_v2_job.go new file mode 100644 index 00000000000..4d9c2634b02 --- /dev/null +++ b/google/resource_cloud_run_v2_job.go @@ -0,0 +1,3141 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceCloudRunV2Job() *schema.Resource { + return &schema.Resource{ + Create: resourceCloudRunV2JobCreate, + Read: resourceCloudRunV2JobRead, + Update: resourceCloudRunV2JobUpdate, + Delete: resourceCloudRunV2JobDelete, + + Importer: &schema.ResourceImporter{ + State: resourceCloudRunV2JobImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + DiffSuppressFunc: compareSelfLinkOrResourceName, + Description: `Name of the Job.`, + }, + "template": { + Type: schema.TypeList, + Required: true, + Description: `The template used to create executions for this Job.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "template": { + Type: schema.TypeList, + Required: true, + Description: `Describes the task(s) that will be created when executing an execution`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "containers": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Holds the single container that defines the unit of execution for this task.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "image": { + Type: schema.TypeString, + Required: true, + Description: `URL of the Container image in Google Container Registry or Google Artifact Registry. More info: https://kubernetes.io/docs/concepts/containers/images`, + }, + "args": { + Type: schema.TypeList, + Optional: true, + Description: `Arguments to the entrypoint. The docker image's CMD is used if this is not provided. 
Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "command": { + Type: schema.TypeList, + Optional: true, + Description: `Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + "env": { + Type: schema.TypeList, + Optional: true, + Description: `List of environment variables to set in the container.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Name of the environment variable. Must be a C_IDENTIFIER, and mnay not exceed 32768 characters.`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). 
Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "", and the maximum length is 32768 bytes`, + }, + "value_source": { + Type: schema.TypeList, + Optional: true, + Description: `Source for the environment variable's value.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret_key_ref": { + Type: schema.TypeList, + Optional: true, + Description: `Selects a secret and a specific version from Cloud Secret Manager.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret": { + Type: schema.TypeString, + Required: true, + Description: `The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project.`, + }, + "version": { + Type: schema.TypeString, + Required: true, + Description: `The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version.`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "liveness_probe": { + Type: schema.TypeList, + Optional: true, + Description: `Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "failure_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.`, + Default: 3, + }, + "http_get": { + Type: schema.TypeList, + Optional: true, + Description: `HTTPGet specifies the http request to perform. 
Exactly one of HTTPGet or TCPSocket must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Custom headers to set in the request. HTTP allows repeated headers.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The header field name`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `The header field value`, + Default: "", + }, + }, + }, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path to access on the HTTP server. Defaults to '/'.`, + Default: "/", + }, + }, + }, + }, + "initial_delay_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, + Default: 0, + }, + "period_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds`, + Default: 10, + }, + "tcp_socket": { + Type: schema.TypeList, + Optional: true, + Description: `TCPSocket specifies an action involving a TCP port. Exactly one of HTTPGet or TCPSocket must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": { + Type: schema.TypeInt, + Optional: true, + Description: `Port number to access on the container. Must be in the range 1 to 65535. 
If not specified, defaults to 8080.`, + }, + }, + }, + }, + "timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, + Default: 1, + }, + }, + }, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: `Name of the container specified as a DNS_LABEL.`, + }, + "ports": { + Type: schema.TypeList, + Optional: true, + Description: `List of ports to expose from the container. Only a single port can be specified. The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. + +If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "container_port": { + Type: schema.TypeInt, + Optional: true, + Description: `Port number the container listens on. This must be a valid TCP port number, 0 < containerPort < 65536.`, + }, + "name": { + Type: schema.TypeString, + Optional: true, + Description: `If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c".`, + }, + }, + }, + }, + "resources": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Compute Resource requirements by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "limits": { + Type: schema.TypeMap, + Computed: true, + Optional: true, + Description: `Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. 
The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + }, + }, + }, + "startup_probe": { + Type: schema.TypeList, + Computed: true, + Optional: true, + Description: `Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "failure_threshold": { + Type: schema.TypeInt, + Optional: true, + Description: `Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.`, + Default: 3, + }, + "http_get": { + Type: schema.TypeList, + Optional: true, + Description: `HTTPGet specifies the http request to perform. Exactly one of HTTPGet or TCPSocket must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "http_headers": { + Type: schema.TypeList, + Optional: true, + Description: `Custom headers to set in the request. HTTP allows repeated headers.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `The header field name`, + }, + "value": { + Type: schema.TypeString, + Optional: true, + Description: `The header field value`, + Default: "", + }, + }, + }, + }, + "path": { + Type: schema.TypeString, + Optional: true, + Description: `Path to access on the HTTP server. Defaults to '/'.`, + Default: "/", + }, + }, + }, + }, + "initial_delay_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after the container has started before the probe is initiated. 
Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, + Default: 0, + }, + "period_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater or equal than timeoutSeconds`, + Default: 10, + }, + "tcp_socket": { + Type: schema.TypeList, + Optional: true, + Description: `TCPSocket specifies an action involving a TCP port. Exactly one of HTTPGet or TCPSocket must be specified.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "port": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to 8080.`, + }, + }, + }, + }, + "timeout_seconds": { + Type: schema.TypeInt, + Optional: true, + Description: `Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes`, + Default: 1, + }, + }, + }, + }, + "volume_mounts": { + Type: schema.TypeList, + Optional: true, + Description: `Volume to mount into the container's filesystem.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mount_path": { + Type: schema.TypeString, + Required: true, + Description: `Path within the container at which the volume should be mounted. Must not contain ':'. For Cloud SQL volumes, it can be left empty, or must otherwise be /cloudsql. All instances defined in the Volume will be available as /cloudsql/[instance]. 
For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run`, + }, + "name": { + Type: schema.TypeString, + Required: true, + Description: `This must match the Name of a Volume.`, + }, + }, + }, + }, + "working_dir": { + Type: schema.TypeString, + Optional: true, + Description: `Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image.`, + }, + }, + }, + }, + "encryption_key": { + Type: schema.TypeString, + Optional: true, + Description: `A reference to a customer managed encryption key (CMEK) to use to encrypt this container image. For more information, go to https://cloud.google.com/run/docs/securing/using-cmek`, + }, + "execution_environment": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validateEnum([]string{"EXECUTION_ENVIRONMENT_GEN1", "EXECUTION_ENVIRONMENT_GEN2", ""}), + Description: `The execution environment being used to host this Task. Possible values: ["EXECUTION_ENVIRONMENT_GEN1", "EXECUTION_ENVIRONMENT_GEN2"]`, + }, + "max_retries": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Number of retries allowed per Task, before marking this Task failed.`, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Email address of the IAM service account associated with the Task of a Job. The service account represents the identity of the running task, and determines what permissions the task has. If not provided, the task will use the project's default service account.`, + }, + "timeout": { + Type: schema.TypeString, + Computed: true, + Optional: true, + Description: `Max allowed time duration the Task may be active before the system will actively try to mark it failed and kill associated containers. This applies per attempt of a task, meaning each retry can run for the full timeout. 
+ +A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s".`, + }, + "volumes": { + Type: schema.TypeList, + Optional: true, + Description: `A list of Volumes to make available to containers.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "name": { + Type: schema.TypeString, + Required: true, + Description: `Volume's name.`, + }, + "cloud_sql_instance": { + Type: schema.TypeList, + Optional: true, + Description: `For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "instances": { + Type: schema.TypeList, + Optional: true, + Description: `The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance}`, + Elem: &schema.Schema{ + Type: schema.TypeString, + }, + }, + }, + }, + }, + "secret": { + Type: schema.TypeList, + Optional: true, + Description: `Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "secret": { + Type: schema.TypeString, + Required: true, + Description: `The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project.`, + }, + "default_mode": { + Type: schema.TypeInt, + Optional: true, + Description: `Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. 
Directories within the path are not affected by this setting.`, + }, + "items": { + Type: schema.TypeList, + Optional: true, + Description: `If unspecified, the volume will expose a file whose name is the secret, relative to VolumeMount.mount_path. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a path and a version.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "mode": { + Type: schema.TypeInt, + Required: true, + Description: `Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used.`, + }, + "path": { + Type: schema.TypeString, + Required: true, + Description: `The relative path of the secret in the container.`, + }, + "version": { + Type: schema.TypeString, + Required: true, + Description: `The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version`, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + "vpc_access": { + Type: schema.TypeList, + Optional: true, + Description: `VPC Access configuration to use for this Task. For more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "connector": { + Type: schema.TypeString, + Optional: true, + Description: `VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number.`, + }, + "egress": { + Type: schema.TypeString, + Optional: true, + ValidateFunc: validateEnum([]string{"ALL_TRAFFIC", "PRIVATE_RANGES_ONLY", ""}), + Description: `Traffic VPC egress settings. 
Possible values: ["ALL_TRAFFIC", "PRIVATE_RANGES_ONLY"]`, + }, + }, + }, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `KRM-style labels for the resource.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "parallelism": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Specifies the maximum desired number of tasks the execution should run at given time. Must be <= taskCount. When the job is run, if this field is 0 or unset, the maximum possible value will be used for that execution. The actual number of tasks running in steady state will be less than this number when there are fewer tasks waiting to be completed remaining, i.e. when the work left to do is less than max parallelism.`, + }, + "task_count": { + Type: schema.TypeInt, + Computed: true, + Optional: true, + Description: `Specifies the desired number of tasks the execution should run. Setting to 1 means that parallelism is limited to 1 and the success of that task signals the success of the execution. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/`, + }, + }, + }, + }, + "binary_authorization": { + Type: schema.TypeList, + Optional: true, + Description: `Settings for the Binary Authorization feature.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "breakglass_justification": { + Type: schema.TypeString, + Optional: true, + Description: `If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass`, + }, + "use_default": { + Type: schema.TypeBool, + Optional: true, + Description: `If True, indicates to use the default project's binary authorization policy. 
If False, binary authorization will be disabled.`, + }, + }, + }, + }, + "client": { + Type: schema.TypeString, + Optional: true, + Description: `Arbitrary identifier for the API client.`, + }, + "client_version": { + Type: schema.TypeString, + Optional: true, + Description: `Arbitrary version identifier for the API client.`, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `KRM-style labels for the resource. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels Cloud Run will populate some labels with 'run.googleapis.com' or 'serving.knative.dev' namespaces. Those labels are read-only, and user changes will not be preserved.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "launch_stage": { + Type: schema.TypeString, + Computed: true, + Optional: true, + ValidateFunc: validateEnum([]string{"UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED", ""}), + Description: `The launch stage as defined by Google Cloud Platform Launch Stages. Cloud Run supports ALPHA, BETA, and GA. If no value is specified, GA is assumed. Possible values: ["UNIMPLEMENTED", "PRELAUNCH", "EARLY_ACCESS", "ALPHA", "BETA", "GA", "DEPRECATED"]`, + }, + "location": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The location of the cloud run job`, + }, + "conditions": { + Type: schema.TypeList, + Computed: true, + Description: `The Conditions of all other associated sub-resources. They contain additional diagnostics information in case the Job does not reach its desired state. 
See comments in reconciling for additional information on 'reconciliation' process in Cloud Run.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "execution_reason": { + Type: schema.TypeString, + Computed: true, + Description: `A reason for the execution condition.`, + }, + "last_transition_time": { + Type: schema.TypeString, + Computed: true, + Description: `Last time the condition transitioned from one status to another. + +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "message": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable message indicating details about the current status.`, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `A common (service-level) reason for this condition.`, + }, + "revision_reason": { + Type: schema.TypeString, + Computed: true, + Description: `A reason for the revision condition.`, + }, + "severity": { + Type: schema.TypeString, + Computed: true, + Description: `How to interpret failures of this condition, one of Error, Warning, Info`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the condition.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready.`, + }, + }, + }, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `A system-generated fingerprint for this version of the resource. 
May be used to detect modification conflict during updates.`, + }, + "execution_count": { + Type: schema.TypeInt, + Computed: true, + Description: `Number of executions created for this job.`, + }, + "generation": { + Type: schema.TypeString, + Computed: true, + Description: `A number that monotonically increases every time the user modifies the desired state.`, + }, + "latest_created_execution": { + Type: schema.TypeList, + Computed: true, + Description: `Name of the last created execution.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "completion_time": { + Type: schema.TypeString, + Computed: true, + Description: `Completion timestamp of the execution. + +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Creation timestamp of the execution. + +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "name": { + Type: schema.TypeString, + Computed: true, + Description: `Name of the execution.`, + }, + }, + }, + }, + "observed_generation": { + Type: schema.TypeString, + Computed: true, + Description: `The generation of this Job. See comments in reconciling for additional information on reconciliation process in Cloud Run.`, + }, + "reconciling": { + Type: schema.TypeBool, + Computed: true, + Description: `Returns true if the Job is currently being acted upon by the system to bring it into the desired state. + +When a new Job is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Job to the desired state. This process is called reconciliation. 
While reconciliation is in process, observedGeneration and latest_succeeded_execution, will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the state matches the Job, or there was an error, and reconciliation failed. This state can be found in terminalCondition.state. + +If reconciliation succeeded, the following fields will match: observedGeneration and generation, latest_succeeded_execution and latestCreatedExecution. + +If reconciliation failed, observedGeneration and latest_succeeded_execution will have the state of the last succeeded execution or empty for newly created Job. Additional information on the failure can be found in terminalCondition and conditions`, + }, + "terminal_condition": { + Type: schema.TypeList, + Computed: true, + Description: `The Condition of this Job, containing its readiness status, and detailed error information in case it did not reach the desired state`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "execution_reason": { + Type: schema.TypeString, + Computed: true, + Description: `A reason for the execution condition.`, + }, + "last_transition_time": { + Type: schema.TypeString, + Computed: true, + Description: `Last time the condition transitioned from one status to another. + +A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. 
Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z".`, + }, + "message": { + Type: schema.TypeString, + Computed: true, + Description: `Human readable message indicating details about the current status.`, + }, + "reason": { + Type: schema.TypeString, + Computed: true, + Description: `A common (service-level) reason for this condition.`, + }, + "revision_reason": { + Type: schema.TypeString, + Computed: true, + Description: `A reason for the revision condition.`, + }, + "severity": { + Type: schema.TypeString, + Computed: true, + Description: `How to interpret failures of this condition, one of Error, Warning, Info`, + }, + "state": { + Type: schema.TypeString, + Computed: true, + Description: `State of the condition.`, + }, + "type": { + Type: schema.TypeString, + Computed: true, + Description: `type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready.`, + }, + }, + }, + }, + "uid": { + Type: schema.TypeString, + Computed: true, + Description: `Server assigned unique identifier for the Execution. 
The value is a UUID4 string and guaranteed to remain unchanged until the resource is deleted.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceCloudRunV2JobCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + labelsProp, err := expandCloudRunV2JobLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + clientProp, err := expandCloudRunV2JobClient(d.Get("client"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client"); !isEmptyValue(reflect.ValueOf(clientProp)) && (ok || !reflect.DeepEqual(v, clientProp)) { + obj["client"] = clientProp + } + clientVersionProp, err := expandCloudRunV2JobClientVersion(d.Get("client_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_version"); !isEmptyValue(reflect.ValueOf(clientVersionProp)) && (ok || !reflect.DeepEqual(v, clientVersionProp)) { + obj["clientVersion"] = clientVersionProp + } + launchStageProp, err := expandCloudRunV2JobLaunchStage(d.Get("launch_stage"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("launch_stage"); !isEmptyValue(reflect.ValueOf(launchStageProp)) && (ok || !reflect.DeepEqual(v, launchStageProp)) { + obj["launchStage"] = launchStageProp + } + binaryAuthorizationProp, err := expandCloudRunV2JobBinaryAuthorization(d.Get("binary_authorization"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("binary_authorization"); !isEmptyValue(reflect.ValueOf(binaryAuthorizationProp)) && (ok || !reflect.DeepEqual(v, 
binaryAuthorizationProp)) { + obj["binaryAuthorization"] = binaryAuthorizationProp + } + templateProp, err := expandCloudRunV2JobTemplate(d.Get("template"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("template"); !isEmptyValue(reflect.ValueOf(templateProp)) && (ok || !reflect.DeepEqual(v, templateProp)) { + obj["template"] = templateProp + } + + url, err := replaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/jobs?jobId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Job: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Job: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating Job: %s", err) + } + + // Store the ID now + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/jobs/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = cloudRunV2OperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Job", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Job: %s", err) + } + + // This may have caused the ID to update - update it if so. 
+ id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/jobs/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Job %q: %#v", d.Id(), res) + + return resourceCloudRunV2JobRead(d, meta) +} + +func resourceCloudRunV2JobRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/jobs/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Job: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("CloudRunV2Job %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + + if err := d.Set("uid", flattenCloudRunV2JobUid(res["uid"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("generation", flattenCloudRunV2JobGeneration(res["generation"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("labels", flattenCloudRunV2JobLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("client", flattenCloudRunV2JobClient(res["client"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("client_version", flattenCloudRunV2JobClientVersion(res["clientVersion"], d, 
config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("launch_stage", flattenCloudRunV2JobLaunchStage(res["launchStage"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("binary_authorization", flattenCloudRunV2JobBinaryAuthorization(res["binaryAuthorization"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("template", flattenCloudRunV2JobTemplate(res["template"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("observed_generation", flattenCloudRunV2JobObservedGeneration(res["observedGeneration"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("terminal_condition", flattenCloudRunV2JobTerminalCondition(res["terminalCondition"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("conditions", flattenCloudRunV2JobConditions(res["conditions"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("execution_count", flattenCloudRunV2JobExecutionCount(res["executionCount"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("latest_created_execution", flattenCloudRunV2JobLatestCreatedExecution(res["latestCreatedExecution"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("reconciling", flattenCloudRunV2JobReconciling(res["reconciling"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + if err := d.Set("etag", flattenCloudRunV2JobEtag(res["etag"], d, config)); err != nil { + return fmt.Errorf("Error reading Job: %s", err) + } + + return nil +} + +func resourceCloudRunV2JobUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, 
config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Job: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + labelsProp, err := expandCloudRunV2JobLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + clientProp, err := expandCloudRunV2JobClient(d.Get("client"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientProp)) { + obj["client"] = clientProp + } + clientVersionProp, err := expandCloudRunV2JobClientVersion(d.Get("client_version"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("client_version"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, clientVersionProp)) { + obj["clientVersion"] = clientVersionProp + } + launchStageProp, err := expandCloudRunV2JobLaunchStage(d.Get("launch_stage"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("launch_stage"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, launchStageProp)) { + obj["launchStage"] = launchStageProp + } + binaryAuthorizationProp, err := expandCloudRunV2JobBinaryAuthorization(d.Get("binary_authorization"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("binary_authorization"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, binaryAuthorizationProp)) { + obj["binaryAuthorization"] = binaryAuthorizationProp + } + templateProp, err := expandCloudRunV2JobTemplate(d.Get("template"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("template"); !isEmptyValue(reflect.ValueOf(v)) && (ok || 
!reflect.DeepEqual(v, templateProp)) { + obj["template"] = templateProp + } + + url, err := replaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/jobs/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Job %q: %#v", d.Id(), obj) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating Job %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Job %q: %#v", d.Id(), res) + } + + err = cloudRunV2OperationWaitTime( + config, res, project, "Updating Job", userAgent, + d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return err + } + + return resourceCloudRunV2JobRead(d, meta) +} + +func resourceCloudRunV2JobDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Job: %s", err) + } + billingProject = project + + url, err := replaceVars(d, config, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/jobs/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Job %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutDelete)) + if err != nil { + return handleNotFoundError(err, d, "Job") + } + + err = cloudRunV2OperationWaitTime( + config, res, project, "Deleting 
Job", userAgent,
+		d.Timeout(schema.TimeoutDelete))
+
+	if err != nil {
+		return err
+	}
+
+	log.Printf("[DEBUG] Finished deleting Job %q: %#v", d.Id(), res)
+	return nil
+}
+
+func resourceCloudRunV2JobImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) {
+	config := meta.(*Config)
+	// Named capture groups (project/location/name) are required so parseImportId
+	// can populate the corresponding schema fields from the import ID.
+	if err := parseImportId([]string{
+		"projects/(?P<project>[^/]+)/locations/(?P<location>[^/]+)/jobs/(?P<name>[^/]+)",
+		"(?P<project>[^/]+)/(?P<location>[^/]+)/(?P<name>[^/]+)",
+		"(?P<location>[^/]+)/(?P<name>[^/]+)",
+	}, d, config); err != nil {
+		return nil, err
+	}
+
+	// Replace import id for the resource id
+	id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/jobs/{{name}}")
+	if err != nil {
+		return nil, fmt.Errorf("Error constructing id: %s", err)
+	}
+	d.SetId(id)
+
+	return []*schema.ResourceData{d}, nil
+}
+
+func flattenCloudRunV2JobUid(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+	return v
+}
+
+func flattenCloudRunV2JobGeneration(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+	return v
+}
+
+func flattenCloudRunV2JobLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+	return v
+}
+
+func flattenCloudRunV2JobClient(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+	return v
+}
+
+func flattenCloudRunV2JobClientVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+	return v
+}
+
+func flattenCloudRunV2JobLaunchStage(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+	return v
+}
+
+func flattenCloudRunV2JobBinaryAuthorization(v interface{}, d *schema.ResourceData, config *Config) interface{} {
+	if v == nil {
+		return nil
+	}
+	original := v.(map[string]interface{})
+	if len(original) == 0 {
+		return nil
+	}
+	transformed := make(map[string]interface{})
+	transformed["breakglass_justification"] =
+		flattenCloudRunV2JobBinaryAuthorizationBreakglassJustification(original["breakglassJustification"], d, config)
+	transformed["use_default"] =
+		
flattenCloudRunV2JobBinaryAuthorizationUseDefault(original["useDefault"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobBinaryAuthorizationBreakglassJustification(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobBinaryAuthorizationUseDefault(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["labels"] = + flattenCloudRunV2JobTemplateLabels(original["labels"], d, config) + transformed["parallelism"] = + flattenCloudRunV2JobTemplateParallelism(original["parallelism"], d, config) + transformed["task_count"] = + flattenCloudRunV2JobTemplateTaskCount(original["taskCount"], d, config) + transformed["template"] = + flattenCloudRunV2JobTemplateTemplate(original["template"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateParallelism(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTaskCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // 
number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplate(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["containers"] = + flattenCloudRunV2JobTemplateTemplateContainers(original["containers"], d, config) + transformed["volumes"] = + flattenCloudRunV2JobTemplateTemplateVolumes(original["volumes"], d, config) + transformed["timeout"] = + flattenCloudRunV2JobTemplateTemplateTimeout(original["timeout"], d, config) + transformed["service_account"] = + flattenCloudRunV2JobTemplateTemplateServiceAccount(original["serviceAccount"], d, config) + transformed["execution_environment"] = + flattenCloudRunV2JobTemplateTemplateExecutionEnvironment(original["executionEnvironment"], d, config) + transformed["encryption_key"] = + flattenCloudRunV2JobTemplateTemplateEncryptionKey(original["encryptionKey"], d, config) + transformed["vpc_access"] = + flattenCloudRunV2JobTemplateTemplateVPCAccess(original["vpcAccess"], d, config) + transformed["max_retries"] = + flattenCloudRunV2JobTemplateTemplateMaxRetries(original["maxRetries"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateContainers(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunV2JobTemplateTemplateContainersName(original["name"], d, config), + 
"image": flattenCloudRunV2JobTemplateTemplateContainersImage(original["image"], d, config), + "command": flattenCloudRunV2JobTemplateTemplateContainersCommand(original["command"], d, config), + "args": flattenCloudRunV2JobTemplateTemplateContainersArgs(original["args"], d, config), + "env": flattenCloudRunV2JobTemplateTemplateContainersEnv(original["env"], d, config), + "resources": flattenCloudRunV2JobTemplateTemplateContainersResources(original["resources"], d, config), + "ports": flattenCloudRunV2JobTemplateTemplateContainersPorts(original["ports"], d, config), + "volume_mounts": flattenCloudRunV2JobTemplateTemplateContainersVolumeMounts(original["volumeMounts"], d, config), + "working_dir": flattenCloudRunV2JobTemplateTemplateContainersWorkingDir(original["workingDir"], d, config), + "liveness_probe": flattenCloudRunV2JobTemplateTemplateContainersLivenessProbe(original["livenessProbe"], d, config), + "startup_probe": flattenCloudRunV2JobTemplateTemplateContainersStartupProbe(original["startupProbe"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2JobTemplateTemplateContainersName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersImage(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersCommand(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersArgs(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersEnv(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects 
coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunV2JobTemplateTemplateContainersEnvName(original["name"], d, config), + "value": flattenCloudRunV2JobTemplateTemplateContainersEnvValue(original["value"], d, config), + "value_source": flattenCloudRunV2JobTemplateTemplateContainersEnvValueSource(original["valueSource"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2JobTemplateTemplateContainersEnvName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersEnvValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSource(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["secret_key_ref"] = + flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(original["secretKeyRef"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["secret"] = + flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefSecret(original["secret"], d, config) + transformed["version"] = + flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefVersion(original["version"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { + 
return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["limits"] = + flattenCloudRunV2JobTemplateTemplateContainersResourcesLimits(original["limits"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateContainersResourcesLimits(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersPorts(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunV2JobTemplateTemplateContainersPortsName(original["name"], d, config), + "container_port": flattenCloudRunV2JobTemplateTemplateContainersPortsContainerPort(original["containerPort"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2JobTemplateTemplateContainersPortsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersPortsContainerPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as 
float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplateContainersVolumeMounts(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunV2JobTemplateTemplateContainersVolumeMountsName(original["name"], d, config), + "mount_path": flattenCloudRunV2JobTemplateTemplateContainersVolumeMountsMountPath(original["mountPath"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2JobTemplateTemplateContainersVolumeMountsName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersVolumeMountsMountPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersWorkingDir(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbe(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["initial_delay_seconds"] = + flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeInitialDelaySeconds(original["initialDelaySeconds"], d, config) + transformed["timeout_seconds"] = + flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTimeoutSeconds(original["timeoutSeconds"], d, config) + transformed["period_seconds"] = + 
flattenCloudRunV2JobTemplateTemplateContainersLivenessProbePeriodSeconds(original["periodSeconds"], d, config) + transformed["failure_threshold"] = + flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeFailureThreshold(original["failureThreshold"], d, config) + transformed["http_get"] = + flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGet(original["httpGet"], d, config) + transformed["tcp_socket"] = + flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocket(original["tcpSocket"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeInitialDelaySeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbePeriodSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return 
intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeFailureThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGet(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["path"] = + flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetPath(original["path"], d, config) + transformed["http_headers"] = + flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeaders(original["httpHeaders"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersName(original["name"], d, config), + "value": 
flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersValue(original["value"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocket(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["port"] = + flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocketPort(original["port"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocketPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbe(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["initial_delay_seconds"] = + flattenCloudRunV2JobTemplateTemplateContainersStartupProbeInitialDelaySeconds(original["initialDelaySeconds"], d, config) + transformed["timeout_seconds"] = + 
flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTimeoutSeconds(original["timeoutSeconds"], d, config) + transformed["period_seconds"] = + flattenCloudRunV2JobTemplateTemplateContainersStartupProbePeriodSeconds(original["periodSeconds"], d, config) + transformed["failure_threshold"] = + flattenCloudRunV2JobTemplateTemplateContainersStartupProbeFailureThreshold(original["failureThreshold"], d, config) + transformed["http_get"] = + flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGet(original["httpGet"], d, config) + transformed["tcp_socket"] = + flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocket(original["tcpSocket"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeInitialDelaySeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTimeoutSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbePeriodSeconds(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + 
return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeFailureThreshold(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGet(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["path"] = + flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetPath(original["path"], d, config) + transformed["http_headers"] = + flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeaders(original["httpHeaders"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeaders(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "name": 
flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersName(original["name"], d, config), + "value": flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersValue(original["value"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersValue(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocket(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + transformed := make(map[string]interface{}) + transformed["port"] = + flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocketPort(original["port"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocketPort(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplateVolumes(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + 
"name": flattenCloudRunV2JobTemplateTemplateVolumesName(original["name"], d, config), + "secret": flattenCloudRunV2JobTemplateTemplateVolumesSecret(original["secret"], d, config), + "cloud_sql_instance": flattenCloudRunV2JobTemplateTemplateVolumesCloudSqlInstance(original["cloudSqlInstance"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2JobTemplateTemplateVolumesName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateVolumesSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["secret"] = + flattenCloudRunV2JobTemplateTemplateVolumesSecretSecret(original["secret"], d, config) + transformed["default_mode"] = + flattenCloudRunV2JobTemplateTemplateVolumesSecretDefaultMode(original["defaultMode"], d, config) + transformed["items"] = + flattenCloudRunV2JobTemplateTemplateVolumesSecretItems(original["items"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateVolumesSecretSecret(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateVolumesSecretDefaultMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplateVolumesSecretItems(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := 
make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "path": flattenCloudRunV2JobTemplateTemplateVolumesSecretItemsPath(original["path"], d, config), + "version": flattenCloudRunV2JobTemplateTemplateVolumesSecretItemsVersion(original["version"], d, config), + "mode": flattenCloudRunV2JobTemplateTemplateVolumesSecretItemsMode(original["mode"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2JobTemplateTemplateVolumesSecretItemsPath(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateVolumesSecretItemsVersion(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateVolumesSecretItemsMode(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobTemplateTemplateVolumesCloudSqlInstance(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["instances"] = + flattenCloudRunV2JobTemplateTemplateVolumesCloudSqlInstanceInstances(original["instances"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateVolumesCloudSqlInstanceInstances(v interface{}, d *schema.ResourceData, config *Config) 
interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateTimeout(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateExecutionEnvironment(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateEncryptionKey(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateVPCAccess(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["connector"] = + flattenCloudRunV2JobTemplateTemplateVPCAccessConnector(original["connector"], d, config) + transformed["egress"] = + flattenCloudRunV2JobTemplateTemplateVPCAccessEgress(original["egress"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTemplateTemplateVPCAccessConnector(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateVPCAccessEgress(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTemplateTemplateMaxRetries(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobObservedGeneration(v interface{}, d *schema.ResourceData, 
config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTerminalCondition(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["type"] = + flattenCloudRunV2JobTerminalConditionType(original["type"], d, config) + transformed["state"] = + flattenCloudRunV2JobTerminalConditionState(original["state"], d, config) + transformed["message"] = + flattenCloudRunV2JobTerminalConditionMessage(original["message"], d, config) + transformed["last_transition_time"] = + flattenCloudRunV2JobTerminalConditionLastTransitionTime(original["lastTransitionTime"], d, config) + transformed["severity"] = + flattenCloudRunV2JobTerminalConditionSeverity(original["severity"], d, config) + transformed["reason"] = + flattenCloudRunV2JobTerminalConditionReason(original["reason"], d, config) + transformed["revision_reason"] = + flattenCloudRunV2JobTerminalConditionRevisionReason(original["revisionReason"], d, config) + transformed["execution_reason"] = + flattenCloudRunV2JobTerminalConditionExecutionReason(original["executionReason"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobTerminalConditionType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTerminalConditionState(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTerminalConditionMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTerminalConditionLastTransitionTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTerminalConditionSeverity(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenCloudRunV2JobTerminalConditionReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTerminalConditionRevisionReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobTerminalConditionExecutionReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobConditions(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "type": flattenCloudRunV2JobConditionsType(original["type"], d, config), + "state": flattenCloudRunV2JobConditionsState(original["state"], d, config), + "message": flattenCloudRunV2JobConditionsMessage(original["message"], d, config), + "last_transition_time": flattenCloudRunV2JobConditionsLastTransitionTime(original["lastTransitionTime"], d, config), + "severity": flattenCloudRunV2JobConditionsSeverity(original["severity"], d, config), + "reason": flattenCloudRunV2JobConditionsReason(original["reason"], d, config), + "revision_reason": flattenCloudRunV2JobConditionsRevisionReason(original["revisionReason"], d, config), + "execution_reason": flattenCloudRunV2JobConditionsExecutionReason(original["executionReason"], d, config), + }) + } + return transformed +} +func flattenCloudRunV2JobConditionsType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobConditionsState(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobConditionsMessage(v interface{}, d *schema.ResourceData, config *Config) interface{} { + 
return v +} + +func flattenCloudRunV2JobConditionsLastTransitionTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobConditionsSeverity(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobConditionsReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobConditionsRevisionReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobConditionsExecutionReason(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobExecutionCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenCloudRunV2JobLatestCreatedExecution(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["name"] = + flattenCloudRunV2JobLatestCreatedExecutionName(original["name"], d, config) + transformed["create_time"] = + flattenCloudRunV2JobLatestCreatedExecutionCreateTime(original["createTime"], d, config) + transformed["completion_time"] = + flattenCloudRunV2JobLatestCreatedExecutionCompletionTime(original["completionTime"], d, config) + return []interface{}{transformed} +} +func flattenCloudRunV2JobLatestCreatedExecutionName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenCloudRunV2JobLatestCreatedExecutionCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobLatestCreatedExecutionCompletionTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobReconciling(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenCloudRunV2JobEtag(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func expandCloudRunV2JobLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunV2JobClient(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobClientVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobLaunchStage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobBinaryAuthorization(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedBreakglassJustification, err := expandCloudRunV2JobBinaryAuthorizationBreakglassJustification(original["breakglass_justification"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedBreakglassJustification); val.IsValid() && !isEmptyValue(val) { + transformed["breakglassJustification"] = transformedBreakglassJustification + } + + transformedUseDefault, err := 
expandCloudRunV2JobBinaryAuthorizationUseDefault(original["use_default"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedUseDefault); val.IsValid() && !isEmptyValue(val) { + transformed["useDefault"] = transformedUseDefault + } + + return transformed, nil +} + +func expandCloudRunV2JobBinaryAuthorizationBreakglassJustification(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobBinaryAuthorizationUseDefault(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedLabels, err := expandCloudRunV2JobTemplateLabels(original["labels"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLabels); val.IsValid() && !isEmptyValue(val) { + transformed["labels"] = transformedLabels + } + + transformedParallelism, err := expandCloudRunV2JobTemplateParallelism(original["parallelism"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedParallelism); val.IsValid() && !isEmptyValue(val) { + transformed["parallelism"] = transformedParallelism + } + + transformedTaskCount, err := expandCloudRunV2JobTemplateTaskCount(original["task_count"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTaskCount); val.IsValid() && !isEmptyValue(val) { + transformed["taskCount"] = transformedTaskCount + } + + transformedTemplate, err := expandCloudRunV2JobTemplateTemplate(original["template"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTemplate); val.IsValid() 
&& !isEmptyValue(val) { + transformed["template"] = transformedTemplate + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateLabels(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunV2JobTemplateParallelism(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTaskCount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplate(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedContainers, err := expandCloudRunV2JobTemplateTemplateContainers(original["containers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContainers); val.IsValid() && !isEmptyValue(val) { + transformed["containers"] = transformedContainers + } + + transformedVolumes, err := expandCloudRunV2JobTemplateTemplateVolumes(original["volumes"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVolumes); val.IsValid() && !isEmptyValue(val) { + transformed["volumes"] = transformedVolumes + } + + transformedTimeout, err := expandCloudRunV2JobTemplateTemplateTimeout(original["timeout"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeout); val.IsValid() && !isEmptyValue(val) { + transformed["timeout"] = transformedTimeout + } + + transformedServiceAccount, err := 
expandCloudRunV2JobTemplateTemplateServiceAccount(original["service_account"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedServiceAccount); val.IsValid() && !isEmptyValue(val) { + transformed["serviceAccount"] = transformedServiceAccount + } + + transformedExecutionEnvironment, err := expandCloudRunV2JobTemplateTemplateExecutionEnvironment(original["execution_environment"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedExecutionEnvironment); val.IsValid() && !isEmptyValue(val) { + transformed["executionEnvironment"] = transformedExecutionEnvironment + } + + transformedEncryptionKey, err := expandCloudRunV2JobTemplateTemplateEncryptionKey(original["encryption_key"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEncryptionKey); val.IsValid() && !isEmptyValue(val) { + transformed["encryptionKey"] = transformedEncryptionKey + } + + transformedVPCAccess, err := expandCloudRunV2JobTemplateTemplateVPCAccess(original["vpc_access"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVPCAccess); val.IsValid() && !isEmptyValue(val) { + transformed["vpcAccess"] = transformedVPCAccess + } + + transformedMaxRetries, err := expandCloudRunV2JobTemplateTemplateMaxRetries(original["max_retries"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMaxRetries); val.IsValid() && !isEmptyValue(val) { + transformed["maxRetries"] = transformedMaxRetries + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateContainers(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := 
expandCloudRunV2JobTemplateTemplateContainersName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedImage, err := expandCloudRunV2JobTemplateTemplateContainersImage(original["image"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedImage); val.IsValid() && !isEmptyValue(val) { + transformed["image"] = transformedImage + } + + transformedCommand, err := expandCloudRunV2JobTemplateTemplateContainersCommand(original["command"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCommand); val.IsValid() && !isEmptyValue(val) { + transformed["command"] = transformedCommand + } + + transformedArgs, err := expandCloudRunV2JobTemplateTemplateContainersArgs(original["args"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedArgs); val.IsValid() && !isEmptyValue(val) { + transformed["args"] = transformedArgs + } + + transformedEnv, err := expandCloudRunV2JobTemplateTemplateContainersEnv(original["env"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEnv); val.IsValid() && !isEmptyValue(val) { + transformed["env"] = transformedEnv + } + + transformedResources, err := expandCloudRunV2JobTemplateTemplateContainersResources(original["resources"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedResources); val.IsValid() && !isEmptyValue(val) { + transformed["resources"] = transformedResources + } + + transformedPorts, err := expandCloudRunV2JobTemplateTemplateContainersPorts(original["ports"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPorts); val.IsValid() && !isEmptyValue(val) { + transformed["ports"] = transformedPorts + } + + 
transformedVolumeMounts, err := expandCloudRunV2JobTemplateTemplateContainersVolumeMounts(original["volume_mounts"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVolumeMounts); val.IsValid() && !isEmptyValue(val) { + transformed["volumeMounts"] = transformedVolumeMounts + } + + transformedWorkingDir, err := expandCloudRunV2JobTemplateTemplateContainersWorkingDir(original["working_dir"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedWorkingDir); val.IsValid() && !isEmptyValue(val) { + transformed["workingDir"] = transformedWorkingDir + } + + transformedLivenessProbe, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbe(original["liveness_probe"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLivenessProbe); val.IsValid() && !isEmptyValue(val) { + transformed["livenessProbe"] = transformedLivenessProbe + } + + transformedStartupProbe, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbe(original["startup_probe"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedStartupProbe); val.IsValid() && !isEmptyValue(val) { + transformed["startupProbe"] = transformedStartupProbe + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersImage(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersCommand(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersArgs(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func 
expandCloudRunV2JobTemplateTemplateContainersEnv(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunV2JobTemplateTemplateContainersEnvName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandCloudRunV2JobTemplateTemplateContainersEnvValue(original["value"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValue); val.IsValid() && !isEmptyValue(val) { + transformed["value"] = transformedValue + } + + transformedValueSource, err := expandCloudRunV2JobTemplateTemplateContainersEnvValueSource(original["value_source"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedValueSource); val.IsValid() && !isEmptyValue(val) { + transformed["valueSource"] = transformedValueSource + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersEnvName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersEnvValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersEnvValueSource(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSecretKeyRef, err := 
expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(original["secret_key_ref"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecretKeyRef); val.IsValid() && !isEmptyValue(val) { + transformed["secretKeyRef"] = transformedSecretKeyRef + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRef(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSecret, err := expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefSecret(original["secret"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { + transformed["secret"] = transformedSecret + } + + transformedVersion, err := expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { + transformed["version"] = transformedVersion + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersEnvValueSourceSecretKeyRefVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersResources(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + 
transformed := make(map[string]interface{}) + + transformedLimits, err := expandCloudRunV2JobTemplateTemplateContainersResourcesLimits(original["limits"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedLimits); val.IsValid() && !isEmptyValue(val) { + transformed["limits"] = transformedLimits + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersResourcesLimits(v interface{}, d TerraformResourceData, config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersPorts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunV2JobTemplateTemplateContainersPortsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedContainerPort, err := expandCloudRunV2JobTemplateTemplateContainersPortsContainerPort(original["container_port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedContainerPort); val.IsValid() && !isEmptyValue(val) { + transformed["containerPort"] = transformedContainerPort + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersPortsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersPortsContainerPort(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersVolumeMounts(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunV2JobTemplateTemplateContainersVolumeMountsName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedMountPath, err := expandCloudRunV2JobTemplateTemplateContainersVolumeMountsMountPath(original["mount_path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMountPath); val.IsValid() && !isEmptyValue(val) { + transformed["mountPath"] = transformedMountPath + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersVolumeMountsName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersVolumeMountsMountPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersWorkingDir(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbe(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInitialDelaySeconds, err := 
expandCloudRunV2JobTemplateTemplateContainersLivenessProbeInitialDelaySeconds(original["initial_delay_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInitialDelaySeconds); val.IsValid() && !isEmptyValue(val) { + transformed["initialDelaySeconds"] = transformedInitialDelaySeconds + } + + transformedTimeoutSeconds, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTimeoutSeconds(original["timeout_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !isEmptyValue(val) { + transformed["timeoutSeconds"] = transformedTimeoutSeconds + } + + transformedPeriodSeconds, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbePeriodSeconds(original["period_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPeriodSeconds); val.IsValid() && !isEmptyValue(val) { + transformed["periodSeconds"] = transformedPeriodSeconds + } + + transformedFailureThreshold, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeFailureThreshold(original["failure_threshold"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !isEmptyValue(val) { + transformed["failureThreshold"] = transformedFailureThreshold + } + + transformedHttpGet, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGet(original["http_get"], d, config) + if err != nil { + return nil, err + } else { + transformed["httpGet"] = transformedHttpGet + } + + transformedTcpSocket, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocket(original["tcp_socket"], d, config) + if err != nil { + return nil, err + } else { + transformed["tcpSocket"] = transformedTcpSocket + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeInitialDelaySeconds(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbePeriodSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeFailureThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedHttpHeaders, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeaders(original["http_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !isEmptyValue(val) { + transformed["httpHeaders"] = transformedHttpHeaders + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeaders(v interface{}, d 
TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersValue(original["value"], d, config) + if err != nil { + return nil, err + } else { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeHttpGetHttpHeadersValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocket(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPort, err := expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocketPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + transformed["port"] = transformedPort + } + + return transformed, 
nil +} + +func expandCloudRunV2JobTemplateTemplateContainersLivenessProbeTcpSocketPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersStartupProbe(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedInitialDelaySeconds, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeInitialDelaySeconds(original["initial_delay_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInitialDelaySeconds); val.IsValid() && !isEmptyValue(val) { + transformed["initialDelaySeconds"] = transformedInitialDelaySeconds + } + + transformedTimeoutSeconds, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeTimeoutSeconds(original["timeout_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedTimeoutSeconds); val.IsValid() && !isEmptyValue(val) { + transformed["timeoutSeconds"] = transformedTimeoutSeconds + } + + transformedPeriodSeconds, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbePeriodSeconds(original["period_seconds"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPeriodSeconds); val.IsValid() && !isEmptyValue(val) { + transformed["periodSeconds"] = transformedPeriodSeconds + } + + transformedFailureThreshold, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeFailureThreshold(original["failure_threshold"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedFailureThreshold); val.IsValid() && !isEmptyValue(val) { + transformed["failureThreshold"] = transformedFailureThreshold + } + + transformedHttpGet, err := 
expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGet(original["http_get"], d, config) + if err != nil { + return nil, err + } else { + transformed["httpGet"] = transformedHttpGet + } + + transformedTcpSocket, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocket(original["tcp_socket"], d, config) + if err != nil { + return nil, err + } else { + transformed["tcpSocket"] = transformedTcpSocket + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeInitialDelaySeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeTimeoutSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersStartupProbePeriodSeconds(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeFailureThreshold(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGet(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedHttpHeaders, err := 
expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeaders(original["http_headers"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedHttpHeaders); val.IsValid() && !isEmptyValue(val) { + transformed["httpHeaders"] = transformedHttpHeaders + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeaders(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedValue, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersValue(original["value"], d, config) + if err != nil { + return nil, err + } else { + transformed["value"] = transformedValue + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeHttpGetHttpHeadersValue(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocket(v interface{}, d TerraformResourceData, config *Config) 
(interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 { + return nil, nil + } + + if l[0] == nil { + transformed := make(map[string]interface{}) + return transformed, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPort, err := expandCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocketPort(original["port"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPort); val.IsValid() && !isEmptyValue(val) { + transformed["port"] = transformedPort + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateContainersStartupProbeTcpSocketPort(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumes(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedName, err := expandCloudRunV2JobTemplateTemplateVolumesName(original["name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedName); val.IsValid() && !isEmptyValue(val) { + transformed["name"] = transformedName + } + + transformedSecret, err := expandCloudRunV2JobTemplateTemplateVolumesSecret(original["secret"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { + transformed["secret"] = transformedSecret + } + + transformedCloudSqlInstance, err := expandCloudRunV2JobTemplateTemplateVolumesCloudSqlInstance(original["cloud_sql_instance"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedCloudSqlInstance); val.IsValid() && !isEmptyValue(val) { + 
transformed["cloudSqlInstance"] = transformedCloudSqlInstance + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedSecret, err := expandCloudRunV2JobTemplateTemplateVolumesSecretSecret(original["secret"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedSecret); val.IsValid() && !isEmptyValue(val) { + transformed["secret"] = transformedSecret + } + + transformedDefaultMode, err := expandCloudRunV2JobTemplateTemplateVolumesSecretDefaultMode(original["default_mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedDefaultMode); val.IsValid() && !isEmptyValue(val) { + transformed["defaultMode"] = transformedDefaultMode + } + + transformedItems, err := expandCloudRunV2JobTemplateTemplateVolumesSecretItems(original["items"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedItems); val.IsValid() && !isEmptyValue(val) { + transformed["items"] = transformedItems + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesSecretSecret(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesSecretDefaultMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesSecretItems(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) 
{ + l := v.([]interface{}) + req := make([]interface{}, 0, len(l)) + for _, raw := range l { + if raw == nil { + continue + } + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedPath, err := expandCloudRunV2JobTemplateTemplateVolumesSecretItemsPath(original["path"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedPath); val.IsValid() && !isEmptyValue(val) { + transformed["path"] = transformedPath + } + + transformedVersion, err := expandCloudRunV2JobTemplateTemplateVolumesSecretItemsVersion(original["version"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedVersion); val.IsValid() && !isEmptyValue(val) { + transformed["version"] = transformedVersion + } + + transformedMode, err := expandCloudRunV2JobTemplateTemplateVolumesSecretItemsMode(original["mode"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedMode); val.IsValid() && !isEmptyValue(val) { + transformed["mode"] = transformedMode + } + + req = append(req, transformed) + } + return req, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesSecretItemsPath(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesSecretItemsVersion(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesSecretItemsMode(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesCloudSqlInstance(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + 
transformedInstances, err := expandCloudRunV2JobTemplateTemplateVolumesCloudSqlInstanceInstances(original["instances"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedInstances); val.IsValid() && !isEmptyValue(val) { + transformed["instances"] = transformedInstances + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateVolumesCloudSqlInstanceInstances(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateTimeout(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateServiceAccount(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateExecutionEnvironment(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateEncryptionKey(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateVPCAccess(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedConnector, err := expandCloudRunV2JobTemplateTemplateVPCAccessConnector(original["connector"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedConnector); val.IsValid() && !isEmptyValue(val) { + transformed["connector"] = transformedConnector + } + + transformedEgress, err := expandCloudRunV2JobTemplateTemplateVPCAccessEgress(original["egress"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedEgress); val.IsValid() && 
!isEmptyValue(val) { + transformed["egress"] = transformedEgress + } + + return transformed, nil +} + +func expandCloudRunV2JobTemplateTemplateVPCAccessConnector(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateVPCAccessEgress(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandCloudRunV2JobTemplateTemplateMaxRetries(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} diff --git a/google/resource_cloud_run_v2_job_generated_test.go b/google/resource_cloud_run_v2_job_generated_test.go new file mode 100644 index 00000000000..b45e2540660 --- /dev/null +++ b/google/resource_cloud_run_v2_job_generated_test.go @@ -0,0 +1,408 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccCloudRunV2Job_cloudrunv2JobBasicExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobBasicExample(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobBasicExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_v2_job" "default" { + name = "tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + launch_stage = "BETA" + + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + } +} +`, context) +} + +func TestAccCloudRunV2Job_cloudrunv2JobSqlExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "deletion_protection": false, + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobSqlExample(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobSqlExample(context 
map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_v2_job" "default" { + name = "tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + launch_stage = "BETA" + + template { + template{ + volumes { + name = "cloudsql" + cloud_sql_instance { + instances = [google_sql_database_instance.instance.connection_name] + } + } + + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + + env { + name = "FOO" + value = "bar" + } + env { + name = "latestdclsecret" + value_source { + secret_key_ref { + secret = google_secret_manager_secret.secret.secret_id + version = "1" + } + } + } + volume_mounts { + name = "cloudsql" + mount_path = "/cloudsql" + } + } + } + } +} + +data "google_project" "project" { +} + +resource "google_secret_manager_secret" "secret" { + secret_id = "secret%{random_suffix}" + replication { + automatic = true + } +} + +resource "google_secret_manager_secret_version" "secret-version-data" { + secret = google_secret_manager_secret.secret.name + secret_data = "secret-data" +} + +resource "google_secret_manager_secret_iam_member" "secret-access" { + secret_id = google_secret_manager_secret.secret.id + role = "roles/secretmanager.secretAccessor" + member = "serviceAccount:${data.google_project.project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret.secret] +} + +resource "google_sql_database_instance" "instance" { + name = "tf-test-cloudrun-sql%{random_suffix}" + region = "us-central1" + database_version = "MYSQL_5_7" + settings { + tier = "db-f1-micro" + } + + deletion_protection = "%{deletion_protection}" +} +`, context) +} + +func TestAccCloudRunV2Job_cloudrunv2JobVpcaccessExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + 
Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobVpcaccessExample(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobVpcaccessExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_v2_job" "default" { + name = "tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + launch_stage = "BETA" + + template { + template{ + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + vpc_access{ + connector = google_vpc_access_connector.connector.id + egress = "ALL_TRAFFIC" + } + } + } +} + +resource "google_vpc_access_connector" "connector" { + name = "tf-test-run-vpc%{random_suffix}" + subnet { + name = google_compute_subnetwork.custom_test.name + } + machine_type = "e2-standard-4" + min_instances = 2 + max_instances = 3 + region = "us-central1" +} +resource "google_compute_subnetwork" "custom_test" { + name = "tf-test-run-subnetwork%{random_suffix}" + ip_cidr_range = "10.2.0.0/28" + region = "us-central1" + network = google_compute_network.custom_test.id +} +resource "google_compute_network" "custom_test" { + name = "tf-test-run-network%{random_suffix}" + auto_create_subnetworks = false +} +`, context) +} + +func TestAccCloudRunV2Job_cloudrunv2JobProbesExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobProbesExample(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + 
}, + }, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobProbesExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_v2_job" "default" { + name = "tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + launch_stage = "BETA" + + template { + template{ + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + startup_probe { + initial_delay_seconds = 0 + timeout_seconds = 1 + period_seconds = 3 + failure_threshold = 1 + tcp_socket { + port = 8080 + } + } + liveness_probe { + http_get { + path = "/" + } + } + } + } + } +} +`, context) +} + +func TestAccCloudRunV2Job_cloudrunv2JobSecretExample(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobSecretExample(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobSecretExample(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_v2_job" "default" { + name = "tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + launch_stage = "BETA" + + template { + template { + volumes { + name = "a-volume" + secret { + secret = google_secret_manager_secret.secret.secret_id + default_mode = 292 # 0444 + items { + version = "1" + path = "my-secret" + mode = 256 # 0400 + } + } + } + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + volume_mounts { + name = "a-volume" + mount_path = "/secrets" + } + } + } + } +} + +data "google_project" "project" { +} + +resource "google_secret_manager_secret" "secret" { + secret_id = 
"secret%{random_suffix}" + replication { + automatic = true + } +} + +resource "google_secret_manager_secret_version" "secret-version-data" { + secret = google_secret_manager_secret.secret.name + secret_data = "secret-data" +} + +resource "google_secret_manager_secret_iam_member" "secret-access" { + secret_id = google_secret_manager_secret.secret.id + role = "roles/secretmanager.secretAccessor" + member = "serviceAccount:${data.google_project.project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret.secret] +} +`, context) +} + +func testAccCheckCloudRunV2JobDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_cloud_run_v2_job" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + url, err := replaceVarsForTest(config, rs, "{{CloudRunV2BasePath}}projects/{{project}}/locations/{{location}}/jobs/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = sendRequest(config, "GET", billingProject, url, config.userAgent, nil) + if err == nil { + return fmt.Errorf("CloudRunV2Job still exists at %s", url) + } + } + + return nil + } +} diff --git a/google/resource_cloud_run_v2_job_sweeper_test.go b/google/resource_cloud_run_v2_job_sweeper_test.go new file mode 100644 index 00000000000..758abc76b65 --- /dev/null +++ b/google/resource_cloud_run_v2_job_sweeper_test.go @@ -0,0 +1,124 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. 
+// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("CloudRunV2Job", &resource.Sweeper{ + Name: "CloudRunV2Job", + F: testSweepCloudRunV2Job, + }) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepCloudRunV2Job(region string) error { + resourceName := "CloudRunV2Job" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://run.googleapis.com/v2/projects/{{project}}/locations/{{location}}/jobs", "?")[0] + listUrl, err := replaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := sendRequest(config, "GET", config.Project, listUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["jobs"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + 
+ log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. + nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !isSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://run.googleapis.com/v2/projects/{{project}}/locations/{{location}}/jobs/{{name}}" + deleteUrl, err := replaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = sendRequest(config, "DELETE", config.Project, deleteUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/resource_cloud_run_v2_job_test.go b/google/resource_cloud_run_v2_job_test.go new file mode 100644 index 00000000000..3728abdfb7c --- /dev/null +++ b/google/resource_cloud_run_v2_job_test.go @@ -0,0 +1,414 @@ +package google + +import ( + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func TestAccCloudRunV2Job_cloudrunv2JobFullUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobFull(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location"}, + }, + { + Config: testAccCloudRunV2Job_cloudrunv2JobFullUpdate(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"location"}, + }, + }, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobFull(context map[string]interface{}) string { + return Nprintf(` + resource "google_cloud_run_v2_job" "default" { + name = "tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + launch_stage = "BETA" + labels = { + label-1 = "value-1" + } + client = "client-1" + client_version = "client-version-1" + + template { + labels = { + label-1 = "value-1" + } + parallelism = 4 + task_count = 4 + template { + timeout = "300s" + service_account = google_service_account.service_account.email + execution_environment = "EXECUTION_ENVIRONMENT_GEN2" + containers { + name = "container-1" + image = "us-docker.pkg.dev/cloudrun/container/hello" + args = ["https://cloud.google.com/run", "www.google.com"] + command = ["/bin/echo"] + env { + name = "SOURCE" + value = "remote" + } + env { + name = "TARGET" + value = "home" + } + ports { + name = "h2c" + container_port = 8080 + } + resources { + limits = { + cpu = "4" + memory = "2Gi" + } + } + } + max_retries = 5 + } + } + } + resource "google_service_account" "service_account" { + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Test Service Account" + } +`, context) +} + +func testAccCloudRunV2Job_cloudrunv2JobFullUpdate(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_v2_job" "default" { + name = "tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + launch_stage = "BETA" + 
binary_authorization { + use_default = true + breakglass_justification = "Some justification" + } + labels = { + label-1 = "value-update" + } + client = "client-update" + client_version = "client-version-update" + + template { + labels = { + label-1 = "value-update" + } + parallelism = 2 + task_count = 8 + template { + timeout = "500s" + service_account = google_service_account.service_account.email + execution_environment = "EXECUTION_ENVIRONMENT_GEN1" + containers { + name = "container-update" + image = "us-docker.pkg.dev/cloudrun/container/hello" + args = ["https://cloud.google.com/run"] + command = ["printenv"] + env { + name = "SOURCE_UPDATE" + value = "remote-update" + } + env { + name = "TARGET_UPDATE" + value = "home-update" + } + ports { + name = "h2c" + container_port = 8080 + } + resources { + limits = { + cpu = "2" + memory = "8Gi" + } + } + } + vpc_access{ + connector = google_vpc_access_connector.connector.id + egress = "ALL_TRAFFIC" + } + max_retries = 2 + } + } +} +resource "google_service_account" "service_account" { + account_id = "tf-test-my-account%{random_suffix}" + display_name = "Test Service Account" +} +resource "google_vpc_access_connector" "connector" { + name = "tf-test-run-vpc%{random_suffix}" + subnet { + name = google_compute_subnetwork.custom_test.name + } + machine_type = "e2-standard-4" + min_instances = 2 + max_instances = 3 + region = "us-central1" +} +resource "google_compute_subnetwork" "custom_test" { + name = "tf-test-run-subnetwork%{random_suffix}" + ip_cidr_range = "10.2.0.0/28" + region = "us-central1" + network = google_compute_network.custom_test.id +} +resource "google_compute_network" "custom_test" { + name = "tf-test-run-network%{random_suffix}" + auto_create_subnetworks = false +} +`, context) +} + +func TestAccCloudRunV2Job_cloudrunv2JobProbesUpdate(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { 
testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckCloudRunV2JobDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccCloudRunV2Job_cloudrunv2JobWithEmptyTCPStartupProbeAndHTTPLivenessProbe(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + { + Config: testAccCloudRunV2Job_cloudrunv2JobUpdateWithTCPStartupProbeAndHTTPLivenessProbe(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + { + Config: testAccCloudRunV2Job_cloudrunv2JobUpdateWithHTTPStartupProbeAndTCPLivenessProbe(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + { + Config: testAccCloudRunV2Job_cloudrunv2JobUpdateWithEmptyHTTPStartupProbe(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + { + Config: testAccCloudRunV2Job_cloudrunv2JobUpdateWithHTTPStartupProbe(context), + }, + { + ResourceName: "google_cloud_run_v2_job.default", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"name", "location"}, + }, + }, + }) +} + +func testAccCloudRunV2Job_cloudrunv2JobWithEmptyTCPStartupProbeAndHTTPLivenessProbe(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_v2_job" "default" { + name = "tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + launch_stage = "BETA" + + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + startup_probe { + tcp_socket {} + } + liveness_probe { + http_get {} + } + 
} + } + } +} +`, context) +} + +func testAccCloudRunV2Job_cloudrunv2JobUpdateWithTCPStartupProbeAndHTTPLivenessProbe(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_v2_job" "default" { + name = "tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + launch_stage = "BETA" + + template{ + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + startup_probe { + initial_delay_seconds = 2 + period_seconds = 1 + timeout_seconds = 5 + failure_threshold = 2 + tcp_socket { + port = 8080 + } + } + liveness_probe { + initial_delay_seconds = 2 + period_seconds = 1 + timeout_seconds = 5 + failure_threshold = 2 + http_get { + path = "/some-path" + http_headers { + name = "User-Agent" + value = "magic-modules" + } + http_headers { + name = "Some-Name" + } + } + } + } + } + } +} +`, context) +} + +func testAccCloudRunV2Job_cloudrunv2JobUpdateWithHTTPStartupProbeAndTCPLivenessProbe(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_v2_job" "default" { + name = "tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + launch_stage = "BETA" + + template{ + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + ports { + container_port = 8080 + } + startup_probe { + initial_delay_seconds = 2 + period_seconds = 1 + timeout_seconds = 5 + failure_threshold = 2 + http_get { + path = "/some-path" + http_headers { + name = "User-Agent" + value = "magic-modules" + } + http_headers { + name = "Some-Name" + } + } + } + liveness_probe { + initial_delay_seconds = 2 + period_seconds = 1 + timeout_seconds = 5 + failure_threshold = 2 + tcp_socket { + port = 8080 + } + } + } + } + } +} +`, context) +} + +func testAccCloudRunV2Job_cloudrunv2JobUpdateWithEmptyHTTPStartupProbe(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_v2_job" "default" { + name = 
"tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + launch_stage = "BETA" + + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + startup_probe { + http_get {} + } + } + } + } +} +`, context) +} + +func testAccCloudRunV2Job_cloudrunv2JobUpdateWithHTTPStartupProbe(context map[string]interface{}) string { + return Nprintf(` +resource "google_cloud_run_v2_job" "default" { + name = "tf-test-cloudrun-job%{random_suffix}" + location = "us-central1" + launch_stage = "BETA" + + template{ + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + startup_probe { + http_get { + path = "/some-path" + http_headers { + name = "User-Agent" + value = "magic-modules" + } + http_headers { + name = "Some-Name" + } + } + } + } + } + } +} +`, context) +} diff --git a/website/docs/r/cloud_run_v2_job.html.markdown b/website/docs/r/cloud_run_v2_job.html.markdown new file mode 100644 index 00000000000..9a346dd8e71 --- /dev/null +++ b/website/docs/r/cloud_run_v2_job.html.markdown @@ -0,0 +1,822 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Cloud Run (2nd gen)" +page_title: "Google: google_cloud_run_v2_job" +description: |- + A Cloud Run Job resource that references a container image which is run to completion. +--- + +# google\_cloud\_run\_v2\_job + +A Cloud Run Job resource that references a container image which is run to completion. 
+ + +To get more information about Job, see: + +* [API documentation](https://cloud.google.com/run/docs/reference/rest/v2/projects.locations.jobs) +* How-to Guides + * [Official Documentation](https://cloud.google.com/run/docs/) + + +## Example Usage - Cloudrunv2 Job Basic + + +```hcl +resource "google_cloud_run_v2_job" "default" { + name = "cloudrun-job" + location = "us-central1" + launch_stage = "BETA" + + template { + template { + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + } + } +} +``` + +## Example Usage - Cloudrunv2 Job Sql + + +```hcl +resource "google_cloud_run_v2_job" "default" { + name = "cloudrun-job" + location = "us-central1" + launch_stage = "BETA" + + template { + template{ + volumes { + name = "cloudsql" + cloud_sql_instance { + instances = [google_sql_database_instance.instance.connection_name] + } + } + + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + + env { + name = "FOO" + value = "bar" + } + env { + name = "latestdclsecret" + value_source { + secret_key_ref { + secret = google_secret_manager_secret.secret.secret_id + version = "1" + } + } + } + volume_mounts { + name = "cloudsql" + mount_path = "/cloudsql" + } + } + } + } +} + +data "google_project" "project" { +} + +resource "google_secret_manager_secret" "secret" { + secret_id = "secret" + replication { + automatic = true + } +} + +resource "google_secret_manager_secret_version" "secret-version-data" { + secret = google_secret_manager_secret.secret.name + secret_data = "secret-data" +} + +resource "google_secret_manager_secret_iam_member" "secret-access" { + secret_id = google_secret_manager_secret.secret.id + role = "roles/secretmanager.secretAccessor" + member = "serviceAccount:${data.google_project.project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret.secret] +} + +resource "google_sql_database_instance" "instance" { + name = "cloudrun-sql" + region = "us-central1" + database_version = 
"MYSQL_5_7" + settings { + tier = "db-f1-micro" + } + + deletion_protection = "true" +} +``` + +## Example Usage - Cloudrunv2 Job Vpcaccess + + +```hcl +resource "google_cloud_run_v2_job" "default" { + name = "cloudrun-job" + location = "us-central1" + launch_stage = "BETA" + + template { + template{ + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + } + vpc_access{ + connector = google_vpc_access_connector.connector.id + egress = "ALL_TRAFFIC" + } + } + } +} + +resource "google_vpc_access_connector" "connector" { + name = "run-vpc" + subnet { + name = google_compute_subnetwork.custom_test.name + } + machine_type = "e2-standard-4" + min_instances = 2 + max_instances = 3 + region = "us-central1" +} +resource "google_compute_subnetwork" "custom_test" { + name = "run-subnetwork" + ip_cidr_range = "10.2.0.0/28" + region = "us-central1" + network = google_compute_network.custom_test.id +} +resource "google_compute_network" "custom_test" { + name = "run-network" + auto_create_subnetworks = false +} +``` + +## Example Usage - Cloudrunv2 Job Probes + + +```hcl +resource "google_cloud_run_v2_job" "default" { + name = "cloudrun-job" + location = "us-central1" + launch_stage = "BETA" + + template { + template{ + containers { + image = "us-docker.pkg.dev/cloudrun/container/hello" + startup_probe { + initial_delay_seconds = 0 + timeout_seconds = 1 + period_seconds = 3 + failure_threshold = 1 + tcp_socket { + port = 8080 + } + } + liveness_probe { + http_get { + path = "/" + } + } + } + } + } +} +``` + +## Example Usage - Cloudrunv2 Job Secret + + +```hcl +resource "google_cloud_run_v2_job" "default" { + name = "cloudrun-job" + location = "us-central1" + launch_stage = "BETA" + + template { + template { + volumes { + name = "a-volume" + secret { + secret = google_secret_manager_secret.secret.secret_id + default_mode = 292 # 0444 + items { + version = "1" + path = "my-secret" + mode = 256 # 0400 + } + } + } + containers { + image = 
"us-docker.pkg.dev/cloudrun/container/hello" + volume_mounts { + name = "a-volume" + mount_path = "/secrets" + } + } + } + } +} + +data "google_project" "project" { +} + +resource "google_secret_manager_secret" "secret" { + secret_id = "secret" + replication { + automatic = true + } +} + +resource "google_secret_manager_secret_version" "secret-version-data" { + secret = google_secret_manager_secret.secret.name + secret_data = "secret-data" +} + +resource "google_secret_manager_secret_iam_member" "secret-access" { + secret_id = google_secret_manager_secret.secret.id + role = "roles/secretmanager.secretAccessor" + member = "serviceAccount:${data.google_project.project.number}-compute@developer.gserviceaccount.com" + depends_on = [google_secret_manager_secret.secret] +} +``` + +## Argument Reference + +The following arguments are supported: + + +* `name` - + (Required) + Name of the Job. + +* `template` - + (Required) + The template used to create executions for this Job. + Structure is [documented below](#nested_template). + + +The `template` block supports: + +* `labels` - + (Optional) + KRM-style labels for the resource. + +* `parallelism` - + (Optional) + Specifies the maximum desired number of tasks the execution should run at given time. Must be <= taskCount. When the job is run, if this field is 0 or unset, the maximum possible value will be used for that execution. The actual number of tasks running in steady state will be less than this number when there are fewer tasks waiting to be completed remaining, i.e. when the work left to do is less than max parallelism. + +* `task_count` - + (Optional) + Specifies the desired number of tasks the execution should run. Setting to 1 means that parallelism is limited to 1 and the success of that task signals the success of the execution. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + +* `template` - + (Required) + Describes the task(s) that will be created when executing an execution + Structure is [documented below](#nested_template). + + +The `template` block supports: + +* `containers` - + (Optional) + Holds the single container that defines the unit of execution for this task. + Structure is [documented below](#nested_containers). + +* `volumes` - + (Optional) + A list of Volumes to make available to containers. + Structure is [documented below](#nested_volumes). + +* `timeout` - + (Optional) + Max allowed time duration the Task may be active before the system will actively try to mark it failed and kill associated containers. This applies per attempt of a task, meaning each retry can run for the full timeout. + A duration in seconds with up to nine fractional digits, ending with 's'. Example: "3.5s". + +* `service_account` - + (Optional) + Email address of the IAM service account associated with the Task of a Job. The service account represents the identity of the running task, and determines what permissions the task has. If not provided, the task will use the project's default service account. + +* `execution_environment` - + (Optional) + The execution environment being used to host this Task. + Possible values are `EXECUTION_ENVIRONMENT_GEN1` and `EXECUTION_ENVIRONMENT_GEN2`. + +* `encryption_key` - + (Optional) + A reference to a customer managed encryption key (CMEK) to use to encrypt this container image. For more information, go to https://cloud.google.com/run/docs/securing/using-cmek + +* `vpc_access` - + (Optional) + VPC Access configuration to use for this Task. For more information, visit https://cloud.google.com/run/docs/configuring/connecting-vpc. + Structure is [documented below](#nested_vpc_access). + +* `max_retries` - + (Optional) + Number of retries allowed per Task, before marking this Task failed. 
+ + +The `containers` block supports: + +* `name` - + (Optional) + Name of the container specified as a DNS_LABEL. + +* `image` - + (Required) + URL of the Container image in Google Container Registry or Google Artifact Registry. More info: https://kubernetes.io/docs/concepts/containers/images + +* `command` - + (Optional) + Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + +* `args` - + (Optional) + Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + +* `env` - + (Optional) + List of environment variables to set in the container. + Structure is [documented below](#nested_env). + +* `resources` - + (Optional) + Compute Resource requirements by this container. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + Structure is [documented below](#nested_resources). + +* `ports` - + (Optional) + List of ports to expose from the container. Only a single port can be specified. 
The specified ports must be listening on all interfaces (0.0.0.0) within the container to be accessible. + If omitted, a port number will be chosen and passed to the container through the PORT environment variable for the container to listen on + Structure is [documented below](#nested_ports). + +* `volume_mounts` - + (Optional) + Volume to mount into the container's filesystem. + Structure is [documented below](#nested_volume_mounts). + +* `working_dir` - + (Optional) + Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. + +* `liveness_probe` - + (Optional) + Periodic probe of container liveness. Container will be restarted if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + Structure is [documented below](#nested_liveness_probe). + +* `startup_probe` - + (Optional) + Startup probe of application within the container. All other probes are disabled if a startup probe is provided, until it succeeds. Container will not be added to service endpoints if the probe fails. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + Structure is [documented below](#nested_startup_probe). + + +The `env` block supports: + +* `name` - + (Required) + Name of the environment variable. Must be a C_IDENTIFIER, and mnay not exceed 32768 characters. + +* `value` - + (Optional) + Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any route environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. 
Defaults to "", and the maximum length is 32768 bytes + +* `value_source` - + (Optional) + Source for the environment variable's value. + Structure is [documented below](#nested_value_source). + + +The `value_source` block supports: + +* `secret_key_ref` - + (Optional) + Selects a secret and a specific version from Cloud Secret Manager. + Structure is [documented below](#nested_secret_key_ref). + + +The `secret_key_ref` block supports: + +* `secret` - + (Required) + The name of the secret in Cloud Secret Manager. Format: {secretName} if the secret is in the same project. projects/{project}/secrets/{secretName} if the secret is in a different project. + +* `version` - + (Required) + The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version. + +The `resources` block supports: + +* `limits` - + (Optional) + Only memory and CPU are supported. Note: The only supported values for CPU are '1', '2', '4', and '8'. Setting 4 CPU requires at least 2Gi of memory. The values of the map is string form of the 'quantity' k8s type: https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go + +The `ports` block supports: + +* `name` - + (Optional) + If specified, used to specify which protocol to use. Allowed values are "http1" and "h2c". + +* `container_port` - + (Optional) + Port number the container listens on. This must be a valid TCP port number, 0 < containerPort < 65536. + +The `volume_mounts` block supports: + +* `name` - + (Required) + This must match the Name of a Volume. + +* `mount_path` - + (Required) + Path within the container at which the volume should be mounted. Must not contain ':'. For Cloud SQL volumes, it can be left empty, or must otherwise be /cloudsql. All instances defined in the Volume will be available as /cloudsql/[instance]. 
For more information on Cloud SQL volumes, visit https://cloud.google.com/sql/docs/mysql/connect-run
+
+The `liveness_probe` block supports:
+
+* `initial_delay_seconds` -
+  (Optional)
+  Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+
+* `timeout_seconds` -
+  (Optional)
+  Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+
+* `period_seconds` -
+  (Optional)
+  How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater than or equal to timeoutSeconds
+
+* `failure_threshold` -
+  (Optional)
+  Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+
+* `http_get` -
+  (Optional)
+  HTTPGet specifies the http request to perform. Exactly one of HTTPGet or TCPSocket must be specified.
+  Structure is [documented below](#nested_http_get).
+
+* `tcp_socket` -
+  (Optional)
+  TCPSocket specifies an action involving a TCP port. Exactly one of HTTPGet or TCPSocket must be specified.
+  Structure is [documented below](#nested_tcp_socket).
+
+
+The `http_get` block supports:
+
+* `path` -
+  (Optional)
+  Path to access on the HTTP server. Defaults to '/'.
+
+* `http_headers` -
+  (Optional)
+  Custom headers to set in the request. HTTP allows repeated headers.
+  Structure is [documented below](#nested_http_headers). 
+
+
+The `http_headers` block supports:
+
+* `name` -
+  (Required)
+  The header field name
+
+* `value` -
+  (Optional)
+  The header field value
+
+The `tcp_socket` block supports:
+
+* `port` -
+  (Optional)
+  Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to 8080.
+
+The `startup_probe` block supports:
+
+* `initial_delay_seconds` -
+  (Optional)
+  Number of seconds after the container has started before the probe is initiated. Defaults to 0 seconds. Minimum value is 0. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+
+* `timeout_seconds` -
+  (Optional)
+  Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. Maximum value is 3600. Must be smaller than periodSeconds. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
+
+* `period_seconds` -
+  (Optional)
+  How often (in seconds) to perform the probe. Defaults to 10 seconds. Minimum value is 1. Maximum value for liveness probe is 3600. Maximum value for startup probe is 240. Must be greater than or equal to timeoutSeconds
+
+* `failure_threshold` -
+  (Optional)
+  Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.
+
+* `http_get` -
+  (Optional)
+  HTTPGet specifies the http request to perform. Exactly one of HTTPGet or TCPSocket must be specified.
+  Structure is [documented below](#nested_http_get).
+
+* `tcp_socket` -
+  (Optional)
+  TCPSocket specifies an action involving a TCP port. Exactly one of HTTPGet or TCPSocket must be specified.
+  Structure is [documented below](#nested_tcp_socket).
+
+
+The `http_get` block supports:
+
+* `path` -
+  (Optional)
+  Path to access on the HTTP server. Defaults to '/'.
+
+* `http_headers` -
+  (Optional)
+  Custom headers to set in the request. 
HTTP allows repeated headers. + Structure is [documented below](#nested_http_headers). + + +The `http_headers` block supports: + +* `name` - + (Required) + The header field name + +* `value` - + (Optional) + The header field value + +The `tcp_socket` block supports: + +* `port` - + (Optional) + Port number to access on the container. Must be in the range 1 to 65535. If not specified, defaults to 8080. + +The `volumes` block supports: + +* `name` - + (Required) + Volume's name. + +* `secret` - + (Optional) + Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + Structure is [documented below](#nested_secret). + +* `cloud_sql_instance` - + (Optional) + For Cloud SQL volumes, contains the specific instances that should be mounted. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. + Structure is [documented below](#nested_cloud_sql_instance). + + +The `secret` block supports: + +* `secret` - + (Required) + The name of the secret in Cloud Secret Manager. Format: {secret} if the secret is in the same project. projects/{project}/secrets/{secret} if the secret is in a different project. + +* `default_mode` - + (Optional) + Integer representation of mode bits to use on created files by default. Must be a value between 0000 and 0777 (octal), defaulting to 0444. Directories within the path are not affected by this setting. + +* `items` - + (Optional) + If unspecified, the volume will expose a file whose name is the secret, relative to VolumeMount.mount_path. If specified, the key will be used as the version to fetch from Cloud Secret Manager and the path will be the name of the file exposed in the volume. When items are defined, they must specify a path and a version. + Structure is [documented below](#nested_items). + + +The `items` block supports: + +* `path` - + (Required) + The relative path of the secret in the container. 
+ +* `version` - + (Required) + The Cloud Secret Manager secret version. Can be 'latest' for the latest value or an integer for a specific version + +* `mode` - + (Required) + Integer octal mode bits to use on this file, must be a value between 01 and 0777 (octal). If 0 or not set, the Volume's default mode will be used. + +The `cloud_sql_instance` block supports: + +* `instances` - + (Optional) + The Cloud SQL instance connection names, as can be found in https://console.cloud.google.com/sql/instances. Visit https://cloud.google.com/sql/docs/mysql/connect-run for more information on how to connect Cloud SQL and Cloud Run. Format: {project}:{location}:{instance} + +The `vpc_access` block supports: + +* `connector` - + (Optional) + VPC Access connector name. Format: projects/{project}/locations/{location}/connectors/{connector}, where {project} can be project id or number. + +* `egress` - + (Optional) + Traffic VPC egress settings. + Possible values are `ALL_TRAFFIC` and `PRIVATE_RANGES_ONLY`. + +- - - + + +* `labels` - + (Optional) + KRM-style labels for the resource. User-provided labels are shared with Google's billing system, so they can be used to filter, or break down billing charges by team, component, environment, state, etc. For more information, visit https://cloud.google.com/resource-manager/docs/creating-managing-labels or https://cloud.google.com/run/docs/configuring/labels Cloud Run will populate some labels with 'run.googleapis.com' or 'serving.knative.dev' namespaces. Those labels are read-only, and user changes will not be preserved. + +* `client` - + (Optional) + Arbitrary identifier for the API client. + +* `client_version` - + (Optional) + Arbitrary version identifier for the API client. + +* `launch_stage` - + (Optional) + The launch stage as defined by Google Cloud Platform Launch Stages. Cloud Run supports ALPHA, BETA, and GA. If no value is specified, GA is assumed. 
+ Possible values are `UNIMPLEMENTED`, `PRELAUNCH`, `EARLY_ACCESS`, `ALPHA`, `BETA`, `GA`, and `DEPRECATED`. + +* `binary_authorization` - + (Optional) + Settings for the Binary Authorization feature. + Structure is [documented below](#nested_binary_authorization). + +* `location` - + (Optional) + The location of the cloud run job + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used. + + +The `binary_authorization` block supports: + +* `breakglass_justification` - + (Optional) + If present, indicates to use Breakglass using this justification. If useDefault is False, then it must be empty. For more information on breakglass, see https://cloud.google.com/binary-authorization/docs/using-breakglass + +* `use_default` - + (Optional) + If True, indicates to use the default project's binary authorization policy. If False, binary authorization will be disabled. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/jobs/{{name}}` + +* `uid` - + Server assigned unique identifier for the Execution. The value is a UUID4 string and guaranteed to remain unchanged until the resource is deleted. + +* `generation` - + A number that monotonically increases every time the user modifies the desired state. + +* `observed_generation` - + The generation of this Job. See comments in reconciling for additional information on reconciliation process in Cloud Run. + +* `terminal_condition` - + The Condition of this Job, containing its readiness status, and detailed error information in case it did not reach the desired state + Structure is [documented below](#nested_terminal_condition). + +* `conditions` - + The Conditions of all other associated sub-resources. 
They contain additional diagnostics information in case the Job does not reach its desired state. See comments in reconciling for additional information on `reconciliation` process in Cloud Run. + Structure is [documented below](#nested_conditions). + +* `execution_count` - + Number of executions created for this job. + +* `latest_created_execution` - + Name of the last created execution. + Structure is [documented below](#nested_latest_created_execution). + +* `reconciling` - + Returns true if the Job is currently being acted upon by the system to bring it into the desired state. + When a new Job is created, or an existing one is updated, Cloud Run will asynchronously perform all necessary steps to bring the Job to the desired state. This process is called reconciliation. While reconciliation is in process, observedGeneration and latest_succeeded_execution, will have transient values that might mismatch the intended state: Once reconciliation is over (and this field is false), there are two possible outcomes: reconciliation succeeded and the state matches the Job, or there was an error, and reconciliation failed. This state can be found in terminalCondition.state. + If reconciliation succeeded, the following fields will match: observedGeneration and generation, latest_succeeded_execution and latestCreatedExecution. + If reconciliation failed, observedGeneration and latest_succeeded_execution will have the state of the last succeeded execution or empty for newly created Job. Additional information on the failure can be found in terminalCondition and conditions + +* `etag` - + A system-generated fingerprint for this version of the resource. May be used to detect modification conflict during updates. + + +The `terminal_condition` block contains: + +* `type` - + type is used to communicate the status of the reconciliation process. 
See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. + +* `state` - + State of the condition. + +* `message` - + Human readable message indicating details about the current status. + +* `last_transition_time` - + Last time the condition transitioned from one status to another. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + +* `severity` - + How to interpret failures of this condition, one of Error, Warning, Info + +* `reason` - + A common (service-level) reason for this condition. + +* `revision_reason` - + A reason for the revision condition. + +* `execution_reason` - + A reason for the execution condition. + +The `conditions` block contains: + +* `type` - + type is used to communicate the status of the reconciliation process. See also: https://github.com/knative/serving/blob/main/docs/spec/errors.md#error-conditions-and-reporting Types common to all resources include: * "Ready": True when the Resource is ready. + +* `state` - + State of the condition. + +* `message` - + Human readable message indicating details about the current status. + +* `last_transition_time` - + Last time the condition transitioned from one status to another. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + +* `severity` - + How to interpret failures of this condition, one of Error, Warning, Info + +* `reason` - + A common (service-level) reason for this condition. + +* `revision_reason` - + A reason for the revision condition. + +* `execution_reason` - + A reason for the execution condition. + +The `latest_created_execution` block contains: + +* `name` - + Name of the execution. 
+ +* `create_time` - + Creation timestamp of the execution. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + +* `completion_time` - + Completion timestamp of the execution. + A timestamp in RFC3339 UTC "Zulu" format, with nanosecond resolution and up to nine fractional digits. Examples: "2014-10-02T15:01:23Z" and "2014-10-02T15:01:23.045123456Z". + +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + + +Job can be imported using any of these accepted formats: + +``` +$ terraform import google_cloud_run_v2_job.default projects/{{project}}/locations/{{location}}/jobs/{{name}} +$ terraform import google_cloud_run_v2_job.default {{project}}/{{location}}/{{name}} +$ terraform import google_cloud_run_v2_job.default {{location}}/{{name}} +``` + +## User Project Overrides + +This resource supports [User Project Overrides](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference#user_project_override).