From 836832fb2f9e5940c3254a1d5817715e8ac6034f Mon Sep 17 00:00:00 2001 From: The Magician Date: Fri, 21 Oct 2022 15:12:44 -0700 Subject: [PATCH] Add the vertex endpoint resource. (#6661) (#12858) * Add the vertex endpoint resource. * Add actions block excluding update for vertex resources that do not use operations for update. * Fix declared but not used compile error * Remove pre_update from vertex entity type and feature. * Add a handwritten test that includes an update. * Make endpoint name user-specified. * Skip the vertex endpoint test. * Make vertex endpoint name field required, url param only, and immutable. * Unskip test, add link to cloud console to deployedModels description, and describe format of name field. Signed-off-by: Modular Magician Signed-off-by: Modular Magician --- .changelog/6661.txt | 3 + google/provider.go | 5 +- google/resource_vertex_ai_dataset.go | 8 - google/resource_vertex_ai_endpoint.go | 966 ++++++++++++++++++ ...esource_vertex_ai_endpoint_sweeper_test.go | 124 +++ google/resource_vertex_ai_endpoint_test.go | 188 ++++ ...ource_vertex_ai_featurestore_entitytype.go | 18 - ...rtex_ai_featurestore_entitytype_feature.go | 18 - .../docs/r/vertex_ai_endpoint.html.markdown | 273 +++++ 9 files changed, 1557 insertions(+), 46 deletions(-) create mode 100644 .changelog/6661.txt create mode 100644 google/resource_vertex_ai_endpoint.go create mode 100644 google/resource_vertex_ai_endpoint_sweeper_test.go create mode 100644 google/resource_vertex_ai_endpoint_test.go create mode 100644 website/docs/r/vertex_ai_endpoint.html.markdown diff --git a/.changelog/6661.txt b/.changelog/6661.txt new file mode 100644 index 00000000000..0f8affe2bf4 --- /dev/null +++ b/.changelog/6661.txt @@ -0,0 +1,3 @@ +```release-note:new-resource +google_vertex_ai_endpoint +``` diff --git a/google/provider.go b/google/provider.go index 8475d2330be..d25717d2c89 100644 --- a/google/provider.go +++ b/google/provider.go @@ -910,9 +910,9 @@ func Provider() *schema.Provider { return provider } -// Generated resources: 242 +// Generated resources: 243 // Generated IAM resources: 147 -// Total generated resources: 389 +// Total generated resources: 390 func ResourceMap() map[string]*schema.Resource { resourceMap, _ := ResourceMapWithErrors() return resourceMap @@ -1296,6 +1296,7 @@ func ResourceMapWithErrors() (map[string]*schema.Resource, error) { "google_tags_tag_binding": resourceTagsTagBinding(), "google_tpu_node": resourceTPUNode(), "google_vertex_ai_dataset": resourceVertexAIDataset(), + "google_vertex_ai_endpoint": resourceVertexAIEndpoint(), "google_vertex_ai_featurestore": resourceVertexAIFeaturestore(), "google_vertex_ai_featurestore_entitytype": resourceVertexAIFeaturestoreEntitytype(), "google_vertex_ai_featurestore_entitytype_feature": resourceVertexAIFeaturestoreEntitytypeFeature(), diff --git a/google/resource_vertex_ai_dataset.go b/google/resource_vertex_ai_dataset.go index eb27f7c19ea..9480389f997 100644 --- a/google/resource_vertex_ai_dataset.go +++ b/google/resource_vertex_ai_dataset.go @@ -323,14 +323,6 @@ func resourceVertexAIDatasetUpdate(d *schema.ResourceData, meta interface{}) err log.Printf("[DEBUG] Finished updating Dataset %q: %#v", d.Id(), res) } - err = vertexAIOperationWaitTime( - config, res, project, "Updating Dataset", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - return resourceVertexAIDatasetRead(d, meta) } diff --git a/google/resource_vertex_ai_endpoint.go b/google/resource_vertex_ai_endpoint.go new file mode 100644 index 
00000000000..62fd6064b99 --- /dev/null +++ b/google/resource_vertex_ai_endpoint.go @@ -0,0 +1,966 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. +// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "log" + "reflect" + "strings" + "time" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema" +) + +func resourceVertexAIEndpoint() *schema.Resource { + return &schema.Resource{ + Create: resourceVertexAIEndpointCreate, + Read: resourceVertexAIEndpointRead, + Update: resourceVertexAIEndpointUpdate, + Delete: resourceVertexAIEndpointDelete, + + Importer: &schema.ResourceImporter{ + State: resourceVertexAIEndpointImport, + }, + + Timeouts: &schema.ResourceTimeout{ + Create: schema.DefaultTimeout(20 * time.Minute), + Update: schema.DefaultTimeout(20 * time.Minute), + Delete: schema.DefaultTimeout(20 * time.Minute), + }, + + Schema: map[string]*schema.Schema{ + "display_name": { + Type: schema.TypeString, + Required: true, + Description: `Required. The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters.`, + }, + "location": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The location for the resource`, + }, + "name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `The resource name of the Endpoint. The name must be numeric with no leading zeros and can be at most 10 digits.`, + }, + "description": { + Type: schema.TypeString, + Optional: true, + Description: `The description of the Endpoint.`, + }, + "encryption_spec": { + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Description: `Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key.`, + MaxItems: 1, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "kms_key_name": { + Type: schema.TypeString, + Required: true, + ForceNew: true, + Description: `Required. The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: 'projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key'. The key needs to be in the same region as where the compute resource is created.`, + }, + }, + }, + }, + "labels": { + Type: schema.TypeMap, + Optional: true, + Description: `The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels.`, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "network": { + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Description: `The full name of the Google Compute Engine [network](https://cloud.google.com//compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. 
If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): 'projects/{project}/global/networks/{network}'. Where '{project}' is a project number, as in '12345', and '{network}' is a network name.`, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Timestamp when this Endpoint was created.`, + }, + "deployed_models": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. The models deployed in this Endpoint. To add or remove DeployedModels, use EndpointService.DeployModel and EndpointService.UndeployModel respectively. Models can also be deployed and undeployed using the [Cloud Console](https://console.cloud.google.com/vertex-ai/).`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "automatic_resources": { + Type: schema.TypeList, + Computed: true, + Description: `A description of resources that to a large degree are decided by Vertex AI, and require only a modest additional configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "max_replica_count": { + Type: schema.TypeInt, + Computed: true, + Description: `The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, no upper bound for scaling under heavy traffic is assumed, though Vertex AI may be unable to scale beyond a certain replica number.`, + }, + "min_replica_count": { + Type: schema.TypeInt, + Computed: true, + Description: `The minimum number of replicas this DeployedModel will always be deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error.`, + }, + }, + }, + }, + "create_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Timestamp when the DeployedModel was created.`, + }, + "dedicated_resources": { + Type: schema.TypeList, + Computed: true, + Description: `A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "autoscaling_metric_specs": { + Type: schema.TypeList, + Computed: true, + Description: `The metric specifications that override the target value of a resource utilization metric (CPU utilization, accelerator's duty cycle, and so on); the target defaults to 60 if not set. At most one entry is allowed per metric. If machine_spec.accelerator_count is above 0, the autoscaling will be based on both the CPU utilization and the accelerator's duty cycle metrics, scaling up when either metric exceeds its target value and scaling down when both metrics are under their target values. The default target value is 60 for both metrics. If machine_spec.accelerator_count is 0, the autoscaling will be based on the CPU utilization metric only, with a default target value of 60 if not explicitly set.
For example, in the case of Online Prediction, if you want to override target CPU utilization to 80, you should set autoscaling_metric_specs.metric_name to 'aiplatform.googleapis.com/prediction/online/cpu/utilization' and autoscaling_metric_specs.target to '80'.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "metric_name": { + Type: schema.TypeString, + Computed: true, + Description: `The resource metric name. Supported metrics: * For Online Prediction: * 'aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle' * 'aiplatform.googleapis.com/prediction/online/cpu/utilization'`, + }, + "target": { + Type: schema.TypeInt, + Computed: true, + Description: `The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided.`, + }, + }, + }, + }, + "machine_spec": { + Type: schema.TypeList, + Computed: true, + Description: `The specification of a single machine used by the prediction.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "accelerator_count": { + Type: schema.TypeInt, + Computed: true, + Description: `The number of accelerators to attach to the machine.`, + }, + "accelerator_type": { + Type: schema.TypeString, + Computed: true, + Description: `The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType).`, + }, + "machine_type": { + Type: schema.TypeString, + Computed: true, + Description: `The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) and the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is 'n1-standard-2'. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. TODO(rsurowka): Try to better unify the required vs optional.`, + }, + }, + }, + }, + "max_replica_count": { + Type: schema.TypeInt, + Computed: true, + Description: `The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, min_replica_count is used as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type).`, + }, + "min_replica_count": { + Type: schema.TypeInt, + Computed: true, + Description: `The minimum number of machine replicas this DeployedModel will always be deployed on. This value must be greater than or equal to 1.
If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed.`, + }, + }, + }, + }, + "display_name": { + Type: schema.TypeString, + Computed: true, + Description: `The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used.`, + }, + "enable_access_logging": { + Type: schema.TypeBool, + Computed: true, + Description: `These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that Stackdriver logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option.`, + }, + "enable_container_logging": { + Type: schema.TypeBool, + Computed: true, + Description: `If true, the container of the DeployedModel instances will send 'stderr' and 'stdout' streams to Stackdriver Logging. Only supported for custom-trained Models and AutoML Tabular Models.`, + }, + "id": { + Type: schema.TypeString, + Computed: true, + Description: `The ID of the DeployedModel. If not provided upon deployment, Vertex AI will generate a value for this ID. This value should be 1-10 characters, and valid characters are /[0-9]/.`, + }, + "model": { + Type: schema.TypeString, + Computed: true, + Description: `The name of the Model that this is the deployment of. Note that the Model may be in a different location than the DeployedModel's Endpoint.`, + }, + "model_version_id": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The version ID of the model that is deployed.`, + }, + "private_endpoints": { + Type: schema.TypeList, + Computed: true, + Description: `Output only. Provide paths for users to send predict/explain/health requests directly to the deployed model services running on Cloud via private services access. This field is populated if network is configured.`, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "explain_http_uri": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Http(s) path to send explain requests.`, + }, + "health_http_uri": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Http(s) path to send health check requests.`, + }, + "predict_http_uri": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Http(s) path to send prediction requests.`, + }, + "service_attachment": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. The name of the service attachment resource. Populated if private service connect is enabled.`, + }, + }, + }, + }, + "service_account": { + Type: schema.TypeString, + Computed: true, + Description: `The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the 'iam.serviceAccounts.actAs' permission on this service account.`, + }, + "shared_resources": { + Type: schema.TypeString, + Computed: true, + Description: `The resource name of the shared DeploymentResourcePool to deploy on. 
Format: projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}`, + }, + }, + }, + }, + "etag": { + Type: schema.TypeString, + Computed: true, + Description: `Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens.`, + }, + "model_deployment_monitoring_job": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by CreateModelDeploymentMonitoringJob. Format: 'projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}'`, + }, + "update_time": { + Type: schema.TypeString, + Computed: true, + Description: `Output only. Timestamp when this Endpoint was last updated.`, + }, + "project": { + Type: schema.TypeString, + Optional: true, + Computed: true, + ForceNew: true, + }, + }, + UseJSONNumber: true, + } +} + +func resourceVertexAIEndpointCreate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + obj := make(map[string]interface{}) + displayNameProp, err := expandVertexAIEndpointDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(displayNameProp)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandVertexAIEndpointDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(descriptionProp)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandVertexAIEndpointLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(labelsProp)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + encryptionSpecProp, err := expandVertexAIEndpointEncryptionSpec(d.Get("encryption_spec"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("encryption_spec"); !isEmptyValue(reflect.ValueOf(encryptionSpecProp)) && (ok || !reflect.DeepEqual(v, encryptionSpecProp)) { + obj["encryptionSpec"] = encryptionSpecProp + } + networkProp, err := expandVertexAIEndpointNetwork(d.Get("network"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("network"); !isEmptyValue(reflect.ValueOf(networkProp)) && (ok || !reflect.DeepEqual(v, networkProp)) { + obj["network"] = networkProp + } + + url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{location}}/endpoints?endpointId={{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Creating new Endpoint: %#v", obj) + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Endpoint: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "POST", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutCreate)) + if err != nil { + return fmt.Errorf("Error creating Endpoint: %s", err) + } + + // Store the ID now + 
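// Note: the create operation's response below can change the identity fields used in the ID template, which is why the ID is set again after the wait completes. +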
id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + // Use the resource in the operation response to populate + // identity fields and d.Id() before read + var opRes map[string]interface{} + err = vertexAIOperationWaitTimeWithResponse( + config, res, &opRes, project, "Creating Endpoint", userAgent, + d.Timeout(schema.TimeoutCreate)) + if err != nil { + // The resource didn't actually create + d.SetId("") + + return fmt.Errorf("Error waiting to create Endpoint: %s", err) + } + + // This may have caused the ID to update - update it if so. + id, err = replaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + if err != nil { + return fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + log.Printf("[DEBUG] Finished creating Endpoint %q: %#v", d.Id(), res) + + return resourceVertexAIEndpointRead(d, meta) +} + +func resourceVertexAIEndpointRead(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Endpoint: %s", err) + } + billingProject = project + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequest(config, "GET", billingProject, url, userAgent, nil) + if err != nil { + return handleNotFoundError(err, d, fmt.Sprintf("VertexAIEndpoint %q", d.Id())) + } + + if err := d.Set("project", project); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + + if err := d.Set("display_name", flattenVertexAIEndpointDisplayName(res["displayName"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("description", flattenVertexAIEndpointDescription(res["description"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("deployed_models", flattenVertexAIEndpointDeployedModels(res["deployedModels"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("labels", flattenVertexAIEndpointLabels(res["labels"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("create_time", flattenVertexAIEndpointCreateTime(res["createTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("update_time", flattenVertexAIEndpointUpdateTime(res["updateTime"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("encryption_spec", flattenVertexAIEndpointEncryptionSpec(res["encryptionSpec"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("network", flattenVertexAIEndpointNetwork(res["network"], d, config)); err != nil { + return fmt.Errorf("Error reading Endpoint: %s", err) + } + if err := d.Set("model_deployment_monitoring_job", flattenVertexAIEndpointModelDeploymentMonitoringJob(res["modelDeploymentMonitoringJob"], d, config)); err != nil { 
+ return fmt.Errorf("Error reading Endpoint: %s", err) + } + + return nil +} + +func resourceVertexAIEndpointUpdate(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Endpoint: %s", err) + } + billingProject = project + + obj := make(map[string]interface{}) + displayNameProp, err := expandVertexAIEndpointDisplayName(d.Get("display_name"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("display_name"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, displayNameProp)) { + obj["displayName"] = displayNameProp + } + descriptionProp, err := expandVertexAIEndpointDescription(d.Get("description"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("description"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, descriptionProp)) { + obj["description"] = descriptionProp + } + labelsProp, err := expandVertexAIEndpointLabels(d.Get("labels"), d, config) + if err != nil { + return err + } else if v, ok := d.GetOkExists("labels"); !isEmptyValue(reflect.ValueOf(v)) && (ok || !reflect.DeepEqual(v, labelsProp)) { + obj["labels"] = labelsProp + } + + url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + if err != nil { + return err + } + + log.Printf("[DEBUG] Updating Endpoint %q: %#v", d.Id(), obj) + updateMask := []string{} + + if d.HasChange("display_name") { + updateMask = append(updateMask, "displayName") + } + + if d.HasChange("description") { + updateMask = append(updateMask, "description") + } + + if d.HasChange("labels") { + updateMask = append(updateMask, "labels") + } + // updateMask is a URL parameter but not present in the schema, so replaceVars + // won't set it + url, err = addQueryParams(url, map[string]string{"updateMask": strings.Join(updateMask, ",")}) + if err != nil { + return err + } + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "PATCH", billingProject, url, userAgent, obj, d.Timeout(schema.TimeoutUpdate)) + + if err != nil { + return fmt.Errorf("Error updating Endpoint %q: %s", d.Id(), err) + } else { + log.Printf("[DEBUG] Finished updating Endpoint %q: %#v", d.Id(), res) + } + + return resourceVertexAIEndpointRead(d, meta) +} + +func resourceVertexAIEndpointDelete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + userAgent, err := generateUserAgentString(d, config.userAgent) + if err != nil { + return err + } + + billingProject := "" + + project, err := getProject(d, config) + if err != nil { + return fmt.Errorf("Error fetching project for Endpoint: %s", err) + } + billingProject = project + + url, err := replaceVars(d, config, "{{VertexAIBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + if err != nil { + return err + } + + var obj map[string]interface{} + log.Printf("[DEBUG] Deleting Endpoint %q", d.Id()) + + // err == nil indicates that the billing_project value was found + if bp, err := getBillingProject(d, config); err == nil { + billingProject = bp + } + + res, err := sendRequestWithTimeout(config, "DELETE", billingProject, url, userAgent, obj, 
d.Timeout(schema.TimeoutDelete)) + if err != nil { + return handleNotFoundError(err, d, "Endpoint") + } + + err = vertexAIOperationWaitTime( + config, res, project, "Deleting Endpoint", userAgent, + d.Timeout(schema.TimeoutDelete)) + + if err != nil { + return err + } + + log.Printf("[DEBUG] Finished deleting Endpoint %q: %#v", d.Id(), res) + return nil +} + +func resourceVertexAIEndpointImport(d *schema.ResourceData, meta interface{}) ([]*schema.ResourceData, error) { + config := meta.(*Config) + if err := parseImportId([]string{ + "projects/(?P[^/]+)/locations/(?P[^/]+)/endpoints/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)/(?P[^/]+)", + "(?P[^/]+)/(?P[^/]+)", + }, d, config); err != nil { + return nil, err + } + + // Replace import id for the resource id + id, err := replaceVars(d, config, "projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + if err != nil { + return nil, fmt.Errorf("Error constructing id: %s", err) + } + d.SetId(id) + + return []*schema.ResourceData{d}, nil +} + +func flattenVertexAIEndpointDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDescription(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue + } + transformed = append(transformed, map[string]interface{}{ + "dedicated_resources": flattenVertexAIEndpointDeployedModelsDedicatedResources(original["dedicatedResources"], d, config), + "automatic_resources": flattenVertexAIEndpointDeployedModelsAutomaticResources(original["automaticResources"], d, config), + "id": flattenVertexAIEndpointDeployedModelsId(original["id"], d, config), + "model": flattenVertexAIEndpointDeployedModelsModel(original["model"], d, config), + "model_version_id": flattenVertexAIEndpointDeployedModelsModelVersionId(original["modelVersionId"], d, config), + "display_name": flattenVertexAIEndpointDeployedModelsDisplayName(original["displayName"], d, config), + "create_time": flattenVertexAIEndpointDeployedModelsCreateTime(original["createTime"], d, config), + "service_account": flattenVertexAIEndpointDeployedModelsServiceAccount(original["serviceAccount"], d, config), + "enable_access_logging": flattenVertexAIEndpointDeployedModelsEnableAccessLogging(original["enableAccessLogging"], d, config), + "private_endpoints": flattenVertexAIEndpointDeployedModelsPrivateEndpoints(original["privateEndpoints"], d, config), + "shared_resources": flattenVertexAIEndpointDeployedModelsSharedResources(original["sharedResources"], d, config), + "enable_container_logging": flattenVertexAIEndpointDeployedModelsEnableContainerLogging(original["enableContainerLogging"], d, config), + }) + } + return transformed +} +func flattenVertexAIEndpointDeployedModelsDedicatedResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["machine_spec"] = + flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpec(original["machineSpec"], d, config) + transformed["min_replica_count"] = + 
flattenVertexAIEndpointDeployedModelsDedicatedResourcesMinReplicaCount(original["minReplicaCount"], d, config) + transformed["max_replica_count"] = + flattenVertexAIEndpointDeployedModelsDedicatedResourcesMaxReplicaCount(original["maxReplicaCount"], d, config) + transformed["autoscaling_metric_specs"] = + flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs(original["autoscalingMetricSpecs"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["machine_type"] = + flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecMachineType(original["machineType"], d, config) + transformed["accelerator_type"] = + flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorType(original["acceleratorType"], d, config) + transformed["accelerator_count"] = + flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorCount(original["acceleratorCount"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecMachineType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorType(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMachineSpecAcceleratorCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMinReplicaCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesMaxReplicaCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecs(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return v + } + l := v.([]interface{}) + transformed := make([]interface{}, 0, len(l)) + for _, raw := range l { + original := raw.(map[string]interface{}) + if len(original) < 1 { + // Do not include empty json objects coming back from the api + continue 
+ } + transformed = append(transformed, map[string]interface{}{ + "metric_name": flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecsMetricName(original["metricName"], d, config), + "target": flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecsTarget(original["target"], d, config), + }) + } + return transformed +} +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecsMetricName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsDedicatedResourcesAutoscalingMetricSpecsTarget(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIEndpointDeployedModelsAutomaticResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["min_replica_count"] = + flattenVertexAIEndpointDeployedModelsAutomaticResourcesMinReplicaCount(original["minReplicaCount"], d, config) + transformed["max_replica_count"] = + flattenVertexAIEndpointDeployedModelsAutomaticResourcesMaxReplicaCount(original["maxReplicaCount"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIEndpointDeployedModelsAutomaticResourcesMinReplicaCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIEndpointDeployedModelsAutomaticResourcesMaxReplicaCount(v interface{}, d *schema.ResourceData, config *Config) interface{} { + // Handles the string fixed64 format + if strVal, ok := v.(string); ok { + if intVal, err := stringToFixed64(strVal); err == nil { + return intVal + } + } + + // number values are represented as float64 + if floatVal, ok := v.(float64); ok { + intVal := int(floatVal) + return intVal + } + + return v // let terraform core handle it otherwise +} + +func flattenVertexAIEndpointDeployedModelsId(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsModel(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsModelVersionId(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsDisplayName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsServiceAccount(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func 
flattenVertexAIEndpointDeployedModelsEnableAccessLogging(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsPrivateEndpoints(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["predict_http_uri"] = + flattenVertexAIEndpointDeployedModelsPrivateEndpointsPredictHttpUri(original["predictHttpUri"], d, config) + transformed["explain_http_uri"] = + flattenVertexAIEndpointDeployedModelsPrivateEndpointsExplainHttpUri(original["explainHttpUri"], d, config) + transformed["health_http_uri"] = + flattenVertexAIEndpointDeployedModelsPrivateEndpointsHealthHttpUri(original["healthHttpUri"], d, config) + transformed["service_attachment"] = + flattenVertexAIEndpointDeployedModelsPrivateEndpointsServiceAttachment(original["serviceAttachment"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIEndpointDeployedModelsPrivateEndpointsPredictHttpUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsPrivateEndpointsExplainHttpUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsPrivateEndpointsHealthHttpUri(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsPrivateEndpointsServiceAttachment(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsSharedResources(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointDeployedModelsEnableContainerLogging(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointLabels(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointCreateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointUpdateTime(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointEncryptionSpec(v interface{}, d *schema.ResourceData, config *Config) interface{} { + if v == nil { + return nil + } + original := v.(map[string]interface{}) + if len(original) == 0 { + return nil + } + transformed := make(map[string]interface{}) + transformed["kms_key_name"] = + flattenVertexAIEndpointEncryptionSpecKmsKeyName(original["kmsKeyName"], d, config) + return []interface{}{transformed} +} +func flattenVertexAIEndpointEncryptionSpecKmsKeyName(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointNetwork(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func flattenVertexAIEndpointModelDeploymentMonitoringJob(v interface{}, d *schema.ResourceData, config *Config) interface{} { + return v +} + +func expandVertexAIEndpointDisplayName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIEndpointDescription(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIEndpointLabels(v interface{}, d TerraformResourceData, 
config *Config) (map[string]string, error) { + if v == nil { + return map[string]string{}, nil + } + m := make(map[string]string) + for k, val := range v.(map[string]interface{}) { + m[k] = val.(string) + } + return m, nil +} + +func expandVertexAIEndpointEncryptionSpec(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + l := v.([]interface{}) + if len(l) == 0 || l[0] == nil { + return nil, nil + } + raw := l[0] + original := raw.(map[string]interface{}) + transformed := make(map[string]interface{}) + + transformedKmsKeyName, err := expandVertexAIEndpointEncryptionSpecKmsKeyName(original["kms_key_name"], d, config) + if err != nil { + return nil, err + } else if val := reflect.ValueOf(transformedKmsKeyName); val.IsValid() && !isEmptyValue(val) { + transformed["kmsKeyName"] = transformedKmsKeyName + } + + return transformed, nil +} + +func expandVertexAIEndpointEncryptionSpecKmsKeyName(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} + +func expandVertexAIEndpointNetwork(v interface{}, d TerraformResourceData, config *Config) (interface{}, error) { + return v, nil +} diff --git a/google/resource_vertex_ai_endpoint_sweeper_test.go b/google/resource_vertex_ai_endpoint_sweeper_test.go new file mode 100644 index 00000000000..60172d4d57a --- /dev/null +++ b/google/resource_vertex_ai_endpoint_sweeper_test.go @@ -0,0 +1,124 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "context" + "log" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" +) + +func init() { + resource.AddTestSweepers("VertexAIEndpoint", &resource.Sweeper{ + Name: "VertexAIEndpoint", + F: testSweepVertexAIEndpoint, + }) +} + +// At the time of writing, the CI only passes us-central1 as the region +func testSweepVertexAIEndpoint(region string) error { + resourceName := "VertexAIEndpoint" + log.Printf("[INFO][SWEEPER_LOG] Starting sweeper for %s", resourceName) + + config, err := sharedConfigForRegion(region) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error getting shared config for region: %s", err) + return err + } + + err = config.LoadAndValidate(context.Background()) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error loading: %s", err) + return err + } + + t := &testing.T{} + billingId := getTestBillingAccountFromEnv(t) + + // Setup variables to replace in list template + d := &ResourceDataMock{ + FieldsInSchema: map[string]interface{}{ + "project": config.Project, + "region": region, + "location": region, + "zone": "-", + "billing_account": billingId, + }, + } + + listTemplate := strings.Split("https://{{region}}-aiplatform.googleapis.com/v1/projects/{{project}}/locations/{{location}}/endpoints", "?")[0] + listUrl, err := replaceVars(d, config, listTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing sweeper list url: %s", err) + return nil + } + + res, err := sendRequest(config, "GET", config.Project, listUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error in response from request %s: %s", listUrl, err) + return nil + } + + resourceList, ok := res["endpoints"] + if !ok { + log.Printf("[INFO][SWEEPER_LOG] Nothing found in response.") + return nil + } + + rl := resourceList.([]interface{}) + + log.Printf("[INFO][SWEEPER_LOG] Found %d items in %s list response.", len(rl), resourceName) + // Keep count of items that aren't sweepable for logging. 
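+ // Only endpoints whose names pass isSweepableTestResource (the shared test-name prefix check) are deleted; everything else is tallied and skipped.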
+ nonPrefixCount := 0 + for _, ri := range rl { + obj := ri.(map[string]interface{}) + if obj["name"] == nil { + log.Printf("[INFO][SWEEPER_LOG] %s resource name was nil", resourceName) + return nil + } + + name := GetResourceNameFromSelfLink(obj["name"].(string)) + // Skip resources that shouldn't be sweeped + if !isSweepableTestResource(name) { + nonPrefixCount++ + continue + } + + deleteTemplate := "https://{{region}}-aiplatform.googleapis.com/v1/projects/{{project}}/locations/{{location}}/endpoints/{{name}}" + deleteUrl, err := replaceVars(d, config, deleteTemplate) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] error preparing delete url: %s", err) + return nil + } + deleteUrl = deleteUrl + name + + // Don't wait on operations as we may have a lot to delete + _, err = sendRequest(config, "DELETE", config.Project, deleteUrl, config.userAgent, nil) + if err != nil { + log.Printf("[INFO][SWEEPER_LOG] Error deleting for url %s : %s", deleteUrl, err) + } else { + log.Printf("[INFO][SWEEPER_LOG] Sent delete request for %s resource: %s", resourceName, name) + } + } + + if nonPrefixCount > 0 { + log.Printf("[INFO][SWEEPER_LOG] %d items were non-sweepable and skipped.", nonPrefixCount) + } + + return nil +} diff --git a/google/resource_vertex_ai_endpoint_test.go b/google/resource_vertex_ai_endpoint_test.go new file mode 100644 index 00000000000..eda5b4d3efa --- /dev/null +++ b/google/resource_vertex_ai_endpoint_test.go @@ -0,0 +1,188 @@ +// ---------------------------------------------------------------------------- +// +// *** AUTO GENERATED CODE *** Type: MMv1 *** +// +// ---------------------------------------------------------------------------- +// +// This file is automatically generated by Magic Modules and manual +// changes will be clobbered when the file is regenerated. +// +// Please read more about how to change this file in +// .github/CONTRIBUTING.md. 
+// +// ---------------------------------------------------------------------------- + +package google + +import ( + "fmt" + "strings" + "testing" + + "github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource" + "github.com/hashicorp/terraform-plugin-sdk/v2/terraform" +) + +func TestAccVertexAIEndpoint_vertexAiEndpointNetwork(t *testing.T) { + t.Parallel() + + context := map[string]interface{}{ + "endpoint_name": fmt.Sprint(randInt(t) % 9999999999), + "kms_key_name": BootstrapKMSKeyInLocation(t, "us-central1").CryptoKey.Name, + "network_name": BootstrapSharedTestNetwork(t, "vertex"), + "random_suffix": randString(t, 10), + } + + vcrTest(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckVertexAIEndpointDestroyProducer(t), + Steps: []resource.TestStep{ + { + Config: testAccVertexAIEndpoint_vertexAiEndpointNetwork(context), + }, + { + ResourceName: "google_vertex_ai_endpoint.endpoint", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "location"}, + }, + { + Config: testAccVertexAIEndpoint_vertexAiEndpointNetworkUpdate(context), + }, + { + ResourceName: "google_vertex_ai_endpoint.endpoint", + ImportState: true, + ImportStateVerify: true, + ImportStateVerifyIgnore: []string{"etag", "location"}, + }, + }, + }) +} + +func testAccVertexAIEndpoint_vertexAiEndpointNetwork(context map[string]interface{}) string { + return Nprintf(` +resource "google_vertex_ai_endpoint" "endpoint" { + name = "%{endpoint_name}" + display_name = "sample-endpoint" + description = "A sample vertex endpoint" + location = "us-central1" + labels = { + label-one = "value-one" + } + network = "projects/${data.google_project.project.number}/global/networks/${data.google_compute_network.vertex_network.name}" + encryption_spec { + kms_key_name = "%{kms_key_name}" + } + depends_on = [ + google_service_networking_connection.vertex_vpc_connection + ] +} + +resource "google_service_networking_connection" "vertex_vpc_connection" { + network = data.google_compute_network.vertex_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.vertex_range.name] +} + +resource "google_compute_global_address" "vertex_range" { + name = "tf-test-address-name%{random_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 24 + network = data.google_compute_network.vertex_network.id +} + +data "google_compute_network" "vertex_network" { + name = "%{network_name}" +} + +resource "google_kms_crypto_key_iam_member" "crypto_key" { + crypto_key_id = "%{kms_key_name}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-aiplatform.iam.gserviceaccount.com" +} + +data "google_project" "project" {} +`, context) +} + +func testAccVertexAIEndpoint_vertexAiEndpointNetworkUpdate(context map[string]interface{}) string { + return Nprintf(` +resource "google_vertex_ai_endpoint" "endpoint" { + name = "%{endpoint_name}" + display_name = "new-sample-endpoint" + description = "An updated sample vertex endpoint" + location = "us-central1" + labels = { + label-two = "value-two" + } + network = "projects/${data.google_project.project.number}/global/networks/${data.google_compute_network.vertex_network.name}" + encryption_spec { + kms_key_name = "%{kms_key_name}" + } + depends_on = [ + google_service_networking_connection.vertex_vpc_connection + ] +} + +resource 
"google_service_networking_connection" "vertex_vpc_connection" { + network = data.google_compute_network.vertex_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.vertex_range.name] +} + +resource "google_compute_global_address" "vertex_range" { + name = "tf-test-address-name%{random_suffix}" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 24 + network = data.google_compute_network.vertex_network.id +} + +data "google_compute_network" "vertex_network" { + name = "%{network_name}" +} + +resource "google_kms_crypto_key_iam_member" "crypto_key" { + crypto_key_id = "%{kms_key_name}" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-aiplatform.iam.gserviceaccount.com" +} + +data "google_project" "project" {} +`, context) +} + +func testAccCheckVertexAIEndpointDestroyProducer(t *testing.T) func(s *terraform.State) error { + return func(s *terraform.State) error { + for name, rs := range s.RootModule().Resources { + if rs.Type != "google_vertex_ai_endpoint" { + continue + } + if strings.HasPrefix(name, "data.") { + continue + } + + config := googleProviderConfig(t) + + url, err := replaceVarsForTest(config, rs, "{{VertexAIBasePath}}projects/{{project}}/locations/{{location}}/endpoints/{{name}}") + if err != nil { + return err + } + + billingProject := "" + + if config.BillingProject != "" { + billingProject = config.BillingProject + } + + _, err = sendRequest(config, "GET", billingProject, url, config.userAgent, nil) + if err == nil { + return fmt.Errorf("VertexAIEndpoint still exists at %s", url) + } + } + + return nil + } +} diff --git a/google/resource_vertex_ai_featurestore_entitytype.go b/google/resource_vertex_ai_featurestore_entitytype.go index af05a43bac5..b2dd2489cd1 100644 --- a/google/resource_vertex_ai_featurestore_entitytype.go +++ b/google/resource_vertex_ai_featurestore_entitytype.go @@ -231,7 +231,6 @@ func resourceVertexAIFeaturestoreEntitytypeRead(d *schema.ResourceData, meta int } func resourceVertexAIFeaturestoreEntitytypeUpdate(d *schema.ResourceData, meta interface{}) error { - var project string config := meta.(*Config) userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { @@ -275,15 +274,6 @@ func resourceVertexAIFeaturestoreEntitytypeUpdate(d *schema.ResourceData, meta i if err != nil { return err } - if v, ok := d.GetOk("featurestore"); ok { - re := regexp.MustCompile("projects/([a-zA-Z0-9-]*)/(?:locations|regions)/([a-zA-Z0-9-]*)") - switch { - case re.MatchString(v.(string)): - if res := re.FindStringSubmatch(v.(string)); len(res) == 3 && res[1] != "" { - project = res[1] - } - } - } // err == nil indicates that the billing_project value was found if bp, err := getBillingProject(d, config); err == nil { @@ -298,14 +288,6 @@ func resourceVertexAIFeaturestoreEntitytypeUpdate(d *schema.ResourceData, meta i log.Printf("[DEBUG] Finished updating FeaturestoreEntitytype %q: %#v", d.Id(), res) } - err = vertexAIOperationWaitTime( - config, res, project, "Updating FeaturestoreEntitytype", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - return resourceVertexAIFeaturestoreEntitytypeRead(d, meta) } diff --git a/google/resource_vertex_ai_featurestore_entitytype_feature.go b/google/resource_vertex_ai_featurestore_entitytype_feature.go index edc02b27b18..7fd60f04999 100644 --- a/google/resource_vertex_ai_featurestore_entitytype_feature.go +++ 
b/google/resource_vertex_ai_featurestore_entitytype_feature.go @@ -222,7 +222,6 @@ func resourceVertexAIFeaturestoreEntitytypeFeatureRead(d *schema.ResourceData, m } func resourceVertexAIFeaturestoreEntitytypeFeatureUpdate(d *schema.ResourceData, meta interface{}) error { - var project string config := meta.(*Config) userAgent, err := generateUserAgentString(d, config.userAgent) if err != nil { @@ -276,15 +275,6 @@ func resourceVertexAIFeaturestoreEntitytypeFeatureUpdate(d *schema.ResourceData, if err != nil { return err } - if v, ok := d.GetOk("entitytype"); ok { - re := regexp.MustCompile("projects/([a-zA-Z0-9-]*)/(?:locations|regions)/([a-zA-Z0-9-]*)") - switch { - case re.MatchString(v.(string)): - if res := re.FindStringSubmatch(v.(string)); len(res) == 3 && res[1] != "" { - project = res[1] - } - } - } // err == nil indicates that the billing_project value was found if bp, err := getBillingProject(d, config); err == nil { @@ -299,14 +289,6 @@ func resourceVertexAIFeaturestoreEntitytypeFeatureUpdate(d *schema.ResourceData, log.Printf("[DEBUG] Finished updating FeaturestoreEntitytypeFeature %q: %#v", d.Id(), res) } - err = vertexAIOperationWaitTime( - config, res, project, "Updating FeaturestoreEntitytypeFeature", userAgent, - d.Timeout(schema.TimeoutUpdate)) - - if err != nil { - return err - } - return resourceVertexAIFeaturestoreEntitytypeFeatureRead(d, meta) } diff --git a/website/docs/r/vertex_ai_endpoint.html.markdown b/website/docs/r/vertex_ai_endpoint.html.markdown new file mode 100644 index 00000000000..98a47fd3d68 --- /dev/null +++ b/website/docs/r/vertex_ai_endpoint.html.markdown @@ -0,0 +1,273 @@ +--- +# ---------------------------------------------------------------------------- +# +# *** AUTO GENERATED CODE *** Type: MMv1 *** +# +# ---------------------------------------------------------------------------- +# +# This file is automatically generated by Magic Modules and manual +# changes will be clobbered when the file is regenerated. +# +# Please read more about how to change this file in +# .github/CONTRIBUTING.md. +# +# ---------------------------------------------------------------------------- +subcategory: "Vertex AI" +page_title: "Google: google_vertex_ai_endpoint" +description: |- + Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. +--- + +# google\_vertex\_ai\_endpoint + +Models are deployed into it, and afterwards Endpoint is called to obtain predictions and explanations. 
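+
+A minimal configuration (a sketch; the name and location values below are illustrative) needs only the required arguments:
+
+```hcl
+resource "google_vertex_ai_endpoint" "example" {
+  # The name must be numeric with no leading zeros and at most 10 digits.
+  name         = "1234567890"
+  display_name = "example-endpoint"
+  location     = "us-central1"
+}
+```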
+ + +To get more information about Endpoint, see: + +* [API documentation](https://cloud.google.com/vertex-ai/docs/reference/rest/v1beta1/projects.locations.endpoints) +* How-to Guides + * [Official Documentation](https://cloud.google.com/vertex-ai/docs) + +## Example Usage - Vertex Ai Endpoint Network + + +```hcl +resource "google_vertex_ai_endpoint" "endpoint" { + name = "1234567890" + display_name = "sample-endpoint" + description = "A sample vertex endpoint" + location = "us-central1" + labels = { + label-one = "value-one" + } + network = "projects/${data.google_project.project.number}/global/networks/${data.google_compute_network.vertex_network.name}" + encryption_spec { + kms_key_name = "kms-name" + } + depends_on = [ + google_service_networking_connection.vertex_vpc_connection + ] +} + +resource "google_service_networking_connection" "vertex_vpc_connection" { + network = data.google_compute_network.vertex_network.id + service = "servicenetworking.googleapis.com" + reserved_peering_ranges = [google_compute_global_address.vertex_range.name] +} + +resource "google_compute_global_address" "vertex_range" { + name = "address-name" + purpose = "VPC_PEERING" + address_type = "INTERNAL" + prefix_length = 24 + network = data.google_compute_network.vertex_network.id +} + +data "google_compute_network" "vertex_network" { + name = "network-name" +} + +resource "google_kms_crypto_key_iam_member" "crypto_key" { + crypto_key_id = "kms-name" + role = "roles/cloudkms.cryptoKeyEncrypterDecrypter" + member = "serviceAccount:service-${data.google_project.project.number}@gcp-sa-aiplatform.iam.gserviceaccount.com" +} + +data "google_project" "project" {} +``` + +## Argument Reference + +The following arguments are supported: + + +* `name` - + (Required) + The resource name of the Endpoint. The name must be numeric with no leading zeros and can be at most 10 digits. + +* `display_name` - + (Required) + The display name of the Endpoint. The name can be up to 128 characters long and can consist of any UTF-8 characters. + +* `location` - + (Required) + The location for the resource. + + +- - - + + +* `description` - + (Optional) + The description of the Endpoint. + +* `labels` - + (Optional) + The labels with user-defined metadata to organize your Endpoints. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. See https://goo.gl/xmQnxf for more information and examples of labels. + +* `encryption_spec` - + (Optional) + Customer-managed encryption key spec for an Endpoint. If set, this Endpoint and all sub-resources of this Endpoint will be secured by this key. + Structure is [documented below](#nested_encryption_spec). + +* `network` - + (Optional) + The full name of the Google Compute Engine [network](https://cloud.google.com/compute/docs/networks-and-firewalls#networks) to which the Endpoint should be peered. Private services access must already be configured for the network. If left unspecified, the Endpoint is not peered with any network. Only one of the fields, network or enable_private_service_connect, can be set. [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert): `projects/{project}/global/networks/{network}`, where `{project}` is a project number, as in `12345`, and `{network}` is a network name. A sketch of constructing this value follows the argument list. + +* `project` - (Optional) The ID of the project in which the resource belongs. + If it is not provided, the provider project is used.
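One detail of the `network` argument deserves emphasis: the format takes a project *number* (such as `12345`), not a project ID. A hedged sketch of deriving that value, mirroring the data sources in the example above (the network name `my-vpc` is illustrative):

```hcl
data "google_project" "current" {}

locals {
  # network expects "projects/{project number}/global/networks/{network name}".
  endpoint_network = "projects/${data.google_project.current.number}/global/networks/my-vpc"
}
```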
+ + +The `encryption_spec` block supports: + +* `kms_key_name` - + (Required) + The Cloud KMS resource identifier of the customer managed encryption key used to protect a resource. Has the form: `projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key`. The key needs to be in the same region as where the compute resource is created. + +## Attributes Reference + +In addition to the arguments listed above, the following computed attributes are exported: + +* `id` - an identifier for the resource with format `projects/{{project}}/locations/{{location}}/endpoints/{{name}}` + +* `deployed_models` - + Output only. The models deployed in this Endpoint. To add or remove DeployedModels use EndpointService.DeployModel and EndpointService.UndeployModel respectively. Models can also be deployed and undeployed using the [Cloud Console](https://console.cloud.google.com/vertex-ai/). + Structure is [documented below](#nested_deployed_models). + +* `etag` - + Used to perform consistent read-modify-write updates. If not set, a blind "overwrite" update happens. + +* `create_time` - + Output only. Timestamp when this Endpoint was created. + +* `update_time` - + Output only. Timestamp when this Endpoint was last updated. + +* `model_deployment_monitoring_job` - + Output only. Resource name of the Model Monitoring job associated with this Endpoint if monitoring is enabled by CreateModelDeploymentMonitoringJob. Format: `projects/{project}/locations/{location}/modelDeploymentMonitoringJobs/{model_deployment_monitoring_job}` + + +The `deployed_models` block contains: + +* `dedicated_resources` - + A description of resources that are dedicated to the DeployedModel, and that need a higher degree of manual configuration. + Structure is [documented below](#nested_dedicated_resources). + +* `automatic_resources` - + A description of resources that are decided to a large degree by Vertex AI, and require only a modest additional configuration. + Structure is [documented below](#nested_automatic_resources). + +* `id` - + The ID of the DeployedModel. If not provided upon deployment, Vertex AI will generate a value for this ID. This value should be 1-10 characters, and valid characters are /[0-9]/. + +* `model` - + The name of the Model that this is the deployment of. Note that the Model may be in a different location than the DeployedModel's Endpoint. + +* `model_version_id` - + Output only. The version ID of the model that is deployed. + +* `display_name` - + The display name of the DeployedModel. If not provided upon creation, the Model's display_name is used. + +* `create_time` - + Output only. Timestamp when the DeployedModel was created. + +* `service_account` - + The service account that the DeployedModel's container runs as. Specify the email address of the service account. If this service account is not specified, the container runs as a service account that doesn't have access to the resource project. Users deploying the Model must have the `iam.serviceAccounts.actAs` permission on this service account. + +* `enable_access_logging` - + If true, online prediction access logs are sent to Stackdriver Logging. These logs are like standard server access logs, containing information like timestamp and latency for each prediction request. Note that Stackdriver logs may incur a cost, especially if your project receives prediction requests at a high queries per second rate (QPS). Estimate your costs before enabling this option. + +* `private_endpoints` - + Output only.
Provides paths for users to send predict/explain/health requests directly to the deployed model services running on Cloud via private services access. This field is populated if network is configured. + Structure is [documented below](#nested_private_endpoints). + +* `shared_resources` - + The resource name of the shared DeploymentResourcePool to deploy on. Format: `projects/{project}/locations/{location}/deploymentResourcePools/{deployment_resource_pool}` + +* `enable_container_logging` - + If true, the container of the DeployedModel instances will send `stderr` and `stdout` streams to Stackdriver Logging. Only supported for custom-trained Models and AutoML Tabular Models. + + +The `dedicated_resources` block contains: + +* `machine_spec` - + The specification of a single machine used by the prediction. + Structure is [documented below](#nested_machine_spec). + +* `min_replica_count` - + The minimum number of machine replicas this DeployedModel will always be deployed on. This value must be greater than or equal to 1. If traffic against the DeployedModel increases, it may dynamically be deployed onto more replicas, and as traffic decreases, some of these extra replicas may be freed. + +* `max_replica_count` - + The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, min_replica_count is used as the default value. The value of this field impacts the charge against Vertex CPU and GPU quotas. Specifically, you will be charged for (max_replica_count * number of cores in the selected machine type) and (max_replica_count * number of GPUs per replica in the selected machine type). + +* `autoscaling_metric_specs` - + The metric specifications that override the target value of a resource utilization metric (CPU utilization, accelerator's duty cycle, and so on); the target defaults to 60 if not set. At most one entry is allowed per metric. If machine_spec.accelerator_count is above 0, the autoscaling will be based on both CPU utilization and accelerator's duty cycle metrics, scaling up when either metric exceeds its target value and scaling down when both metrics are under their target values. The default target value is 60 for both metrics. If machine_spec.accelerator_count is 0, the autoscaling will be based on the CPU utilization metric only, with a default target value of 60 if not explicitly set. For example, in the case of Online Prediction, if you want to override target CPU utilization to 80, you should set autoscaling_metric_specs.metric_name to `aiplatform.googleapis.com/prediction/online/cpu/utilization` and autoscaling_metric_specs.target to `80`. + Structure is [documented below](#nested_autoscaling_metric_specs).
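Everything under `deployed_models` is output only, so in practice these nested blocks are surfaced through outputs rather than configured. A hedged sketch, assuming at least one model has been deployed to the endpoint out of band (the output name is illustrative):

```hcl
output "first_deployed_machine_type" {
  # Nested computed blocks are exposed as lists, hence the [0] indexing;
  # try() falls back to null when no model is deployed yet.
  value = try(
    google_vertex_ai_endpoint.endpoint.deployed_models[0].dedicated_resources[0].machine_spec[0].machine_type,
    null
  )
}
```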
The `machine_spec` block contains: + +* `machine_type` - + The type of the machine. See the [list of machine types supported for prediction](https://cloud.google.com/vertex-ai/docs/predictions/configure-compute#machine-types) and the [list of machine types supported for custom training](https://cloud.google.com/vertex-ai/docs/training/configure-compute#machine-types). For DeployedModel this field is optional, and the default value is `n1-standard-2`. For BatchPredictionJob or as part of WorkerPoolSpec this field is required. + +* `accelerator_type` - + The type of accelerator(s) that may be attached to the machine as per accelerator_count. See possible values [here](https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec#AcceleratorType). + +* `accelerator_count` - + The number of accelerators to attach to the machine. + +The `autoscaling_metric_specs` block contains: + +* `metric_name` - + The resource metric name. Supported metrics for Online Prediction: `aiplatform.googleapis.com/prediction/online/accelerator/duty_cycle` and `aiplatform.googleapis.com/prediction/online/cpu/utilization`. + +* `target` - + The target resource utilization in percentage (1% - 100%) for the given metric; once the real usage deviates from the target by a certain percentage, the machine replicas change. The default value is 60 (representing 60%) if not provided. + +The `automatic_resources` block contains: + +* `min_replica_count` - + The minimum number of replicas this DeployedModel will always be deployed on. If traffic against it increases, it may dynamically be deployed onto more replicas up to max_replica_count, and as traffic decreases, some of these extra replicas may be freed. If the requested value is too large, the deployment will error. + +* `max_replica_count` - + The maximum number of replicas this DeployedModel may be deployed on when the traffic against it increases. If the requested value is too large, the deployment will error, but if deployment succeeds then the ability to scale the model to that many replicas is guaranteed (barring service outages). If traffic against the DeployedModel increases beyond what its replicas at maximum may handle, a portion of the traffic will be dropped. If this value is not provided, no upper bound for scaling under heavy traffic is assumed, though Vertex AI may be unable to scale beyond a certain replica count. + +The `private_endpoints` block contains: + +* `predict_http_uri` - + Output only. HTTP(S) path to send prediction requests. + +* `explain_http_uri` - + Output only. HTTP(S) path to send explain requests. + +* `health_http_uri` - + Output only. HTTP(S) path to send health check requests. + +* `service_attachment` - + Output only. The name of the service attachment resource. Populated if private service connect is enabled. + +## Timeouts + +This resource provides the following +[Timeouts](/docs/configuration/resources.html#timeouts) configuration options: + +- `create` - Default is 20 minutes. +- `update` - Default is 20 minutes. +- `delete` - Default is 20 minutes. + +## Import + + +Endpoint can be imported using any of these accepted formats: + +``` +$ terraform import google_vertex_ai_endpoint.default projects/{{project}}/locations/{{location}}/endpoints/{{name}} +$ terraform import google_vertex_ai_endpoint.default {{project}}/{{location}}/{{name}} +$ terraform import google_vertex_ai_endpoint.default {{location}}/{{name}} +``` + +## User Project Overrides + +This resource supports [User Project Overrides](https://www.terraform.io/docs/providers/google/guides/provider_reference.html#user_project_override).
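As a hedged illustration of enabling the override (the billing project ID below is a placeholder):

```hcl
provider "google" {
  # Attribute quota and billing for API calls to an explicit project.
  user_project_override = true
  billing_project       = "my-billing-project" # placeholder project ID
}
```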