diff --git a/Documentation/ceph-object-bucket-claim.md b/Documentation/ceph-object-bucket-claim.md index c9419917118a..6b5c527d7237 100644 --- a/Documentation/ceph-object-bucket-claim.md +++ b/Documentation/ceph-object-bucket-claim.md @@ -102,18 +102,79 @@ metadata: labels: aws-s3/object [1] provisioner: rook-ceph.ceph.rook.io/bucket [2] -parameters: [3] +parameters: objectStoreName: my-store objectStoreNamespace: rook-ceph - region: us-west-1 + region: us-west-1 [3] bucketName: ceph-bucket [4] reclaimPolicy: Delete [5] ``` 1. `label`(optional) here associates this `StorageClass` to a specific provisioner. 1. `provisioner` responsible for handling `OBCs` referencing this `StorageClass`. -1. **all** `parameter` required. +1. `region`(optional) defines the region in which the bucket should be created. For RGW, this maps to the [zonegroup](https://docs.ceph.com/en/latest/radosgw/multisite/#zone-groups) of the server. If the user does not define `region`, Rook will fill in this value accordingly. Please check the example at the end of this doc. 1. `bucketName` is required for access to existing buckets but is omitted when provisioning new buckets. Unlike greenfield provisioning, the brownfield bucket name appears in the `StorageClass`, not the `OBC`. 1. rook-ceph provisioner decides how to treat the `reclaimPolicy` when an `OBC` is deleted for the bucket. See explanation as [specified in Kubernetes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#retain) + _Delete_ = physically delete the bucket. + _Retain_ = do not physically delete the bucket. + +##### Example: Custom Region +If the user provides a `region` value other than `us-east-1`, then additional CRDs for the zonegroup/zone/realm need to be created. For example, here the user provides the `region` value `us-west-1`, so make sure the following [multisite-related CRDs](ceph-object-multisite-crd.md) are defined as well. The Rook Operator will create the required `zonegroup` for the RGW server.
+```yaml +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: rook-ceph-delete-bucket +provisioner: rook-ceph.ceph.rook.io/bucket +reclaimPolicy: Delete +parameters: + objectStoreName: my-store + objectStoreNamespace: rook-ceph + region: us-west-1 +--- +apiVersion: ceph.rook.io/v1 +kind: CephObjectRealm +metadata: + name: us-west-realm + namespace: rook-ceph +--- +apiVersion: ceph.rook.io/v1 +kind: CephObjectZoneGroup +metadata: + name: us-west-1 + namespace: rook-ceph +spec: + realm: us-west-realm +--- +apiVersion: ceph.rook.io/v1 +kind: CephObjectZone +metadata: + name: us-west-1a + namespace: rook-ceph +spec: + zoneGroup: us-west-1 + metadataPool: + failureDomain: host + replicated: + size: 1 + requireSafeReplicaSize: false + dataPool: + failureDomain: host + replicated: + size: 1 + requireSafeReplicaSize: false + parameters: + compression_mode: none +--- +apiVersion: ceph.rook.io/v1 +kind: CephObjectStore +metadata: + name: my-store + namespace: rook-ceph +spec: + gateway: + port: 80 + instances: 1 + zone: + name: us-west-1a +``` diff --git a/deploy/examples/storageclass-bucket-delete.yaml b/deploy/examples/storageclass-bucket-delete.yaml index 63b25fabdadb..f9507b754ba7 100644 --- a/deploy/examples/storageclass-bucket-delete.yaml +++ b/deploy/examples/storageclass-bucket-delete.yaml @@ -9,7 +9,7 @@ reclaimPolicy: Delete parameters: objectStoreName: my-store objectStoreNamespace: rook-ceph # namespace:cluster - region: us-east-1 + # region: us-east-1 # To accommodate brownfield cases reference the existing bucket name here instead # of in the ObjectBucketClaim (OBC). 
In this case the provisioner will grant # access to the bucket by creating a new user, attaching it to the bucket, and diff --git a/deploy/examples/storageclass-bucket-retain.yaml b/deploy/examples/storageclass-bucket-retain.yaml index 37361a8467b4..03a5094f5c05 100644 --- a/deploy/examples/storageclass-bucket-retain.yaml +++ b/deploy/examples/storageclass-bucket-retain.yaml @@ -8,7 +8,7 @@ reclaimPolicy: Retain parameters: objectStoreName: my-store # port 80 assumed objectStoreNamespace: rook-ceph # namespace:cluster - region: us-east-1 + # region: us-east-1 # To accommodate brownfield cases reference the existing bucket name here instead # of in the ObjectBucketClaim (OBC). In this case the provisioner will grant # access to the bucket by creating a new user, attaching it to the bucket, and diff --git a/pkg/operator/ceph/object/bucket/provisioner.go b/pkg/operator/ceph/object/bucket/provisioner.go index 388eff634364..3ece8af97622 100644 --- a/pkg/operator/ceph/object/bucket/provisioner.go +++ b/pkg/operator/ceph/object/bucket/provisioner.go @@ -337,13 +337,13 @@ func (p *Provisioner) initializeCreateOrGrant(options *apibkt.BucketOptions) err } p.setObjectStoreName(sc) - p.setRegion(sc) p.setAdditionalConfigData(obc.Spec.AdditionalConfig) p.setEndpoint(sc) err = p.setObjectContext() if err != nil { return err } + p.setRegion(sc) // If an endpoint is declared let's use it err = p.populateDomainAndPort(sc) @@ -513,7 +513,13 @@ func (p *Provisioner) setEndpoint(sc *storagev1.StorageClass) { func (p *Provisioner) setRegion(sc *storagev1.StorageClass) { const key = "region" - p.region = sc.Parameters[key] + if len(sc.Parameters[key]) > 0 { + p.region = sc.Parameters[key] + } else { + // If user does not define, then set region to Zonegroup, since RGW internally maps + // aws region to ZoneGroup + p.region = p.objectContext.ZoneGroup + } } func (p Provisioner) getObjectStoreEndpoint() string { diff --git a/pkg/operator/ceph/object/health.go 
b/pkg/operator/ceph/object/health.go index 996686d24c85..d3e9496a69d6 100644 --- a/pkg/operator/ceph/object/health.go +++ b/pkg/operator/ceph/object/health.go @@ -165,7 +165,7 @@ func (c *bucketChecker) checkObjectStoreHealth() error { // Initiate s3 agent logger.Debugf("initializing s3 connection for object store %q", c.namespacedName.Name) - s3client, err := NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, "", false) + s3client, err := NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, c.objContext.ZoneGroup, false) if err != nil { return errors.Wrap(err, "failed to initialize s3 connection") } diff --git a/pkg/operator/ceph/object/s3-handlers.go b/pkg/operator/ceph/object/s3-handlers.go index 479aaf09bddb..392951d15b18 100644 --- a/pkg/operator/ceph/object/s3-handlers.go +++ b/pkg/operator/ceph/object/s3-handlers.go @@ -45,11 +45,6 @@ func NewInsecureS3Agent(accessKey, secretKey, endpoint, region string, debug boo } func newS3Agent(accessKey, secretKey, endpoint, region string, debug bool, tlsCert []byte, insecure bool) (*S3Agent, error) { - var cephRegion = "us-east-1" - if region != "" { - cephRegion = region - } - logLevel := aws.LogOff if debug { logLevel = aws.LogDebug @@ -64,7 +59,7 @@ func newS3Agent(accessKey, secretKey, endpoint, region string, debug bool, tlsCe } session, err := awssession.NewSession( aws.NewConfig(). - WithRegion(cephRegion). + WithRegion(region). WithCredentials(credentials.NewStaticCredentials(accessKey, secretKey, "")). WithEndpoint(endpoint). WithS3ForcePathStyle(true). 
diff --git a/tests/framework/clients/bucket.go b/tests/framework/clients/bucket.go index 037bcdf19397..6f3842430273 100644 --- a/tests/framework/clients/bucket.go +++ b/tests/framework/clients/bucket.go @@ -157,9 +157,9 @@ func (b *BucketOperation) CheckBucketNotificationSetonRGW(namespace, storeName, s3AccessKey, _ := helper.BucketClient.GetAccessKey(obcName) s3SecretKey, _ := helper.BucketClient.GetSecretKey(obcName) if tlsEnabled { - s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, "", true) + s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, storeName, true) } else { - s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, "", true, nil) + s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, storeName, true, nil) } if err != nil { logger.Infof("failed to s3client due to %v", err) diff --git a/tests/framework/clients/object_user.go b/tests/framework/clients/object_user.go index 667a8bef5d7d..2453c06e7d11 100644 --- a/tests/framework/clients/object_user.go +++ b/tests/framework/clients/object_user.go @@ -18,6 +18,7 @@ package clients import ( "context" + b64 "encoding/base64" "fmt" "strings" @@ -34,6 +35,11 @@ type ObjectUserOperation struct { manifests installer.CephManifests } +var ( + // #nosec G101 since this is not leaking any hardcoded credentials, it's just prefix for the secret name + objectStoreUserSecretPrefix = "rook-ceph-object-user-" +) + // CreateObjectUserOperation creates new rook object user client func CreateObjectUserOperation(k8sh *utils.K8sHelper, manifests installer.CephManifests) *ObjectUserOperation { return &ObjectUserOperation{k8sh, manifests} @@ -91,3 +97,26 @@ func (o *ObjectUserOperation) Delete(namespace string, userid string) error { } return nil } + +// Fetch SecretKey, AccessKey for s3 client. 
+func (o *ObjectUserOperation) GetAccessKey(namespace, store, userid string) (string, error) { + SecretName := objectStoreUserSecretPrefix + store + "-" + userid + args := []string{"-n", namespace, "get", "secret", SecretName, "-o", "jsonpath={@.data.AccessKey}"} + AccessKey, err := o.k8sh.Kubectl(args...) + if err != nil { + return "", fmt.Errorf("Unable to find access key -- %s", err) + } + decode, _ := b64.StdEncoding.DecodeString(AccessKey) + return string(decode), nil +} + +func (o *ObjectUserOperation) GetSecretKey(namespace, store, userid string) (string, error) { + SecretName := objectStoreUserSecretPrefix + store + "-" + userid + args := []string{"-n", namespace, "get", "secret", SecretName, "-o", "jsonpath={@.data.SecretKey}"} + SecretKey, err := o.k8sh.Kubectl(args...) + if err != nil { + return "", fmt.Errorf("Unable to find secret key-- %s", err) + } + decode, _ := b64.StdEncoding.DecodeString(SecretKey) + return string(decode), nil +} diff --git a/tests/framework/installer/ceph_helm_installer.go b/tests/framework/installer/ceph_helm_installer.go index fd07352d48a0..03a1e580affd 100644 --- a/tests/framework/installer/ceph_helm_installer.go +++ b/tests/framework/installer/ceph_helm_installer.go @@ -276,7 +276,7 @@ func (h *CephInstaller) CreateObjectStoreConfiguration(values map[string]interfa return err } - storageClassBytes := []byte(h.Manifests.GetBucketStorageClass(name, scName, "Delete", "us-east-1")) + storageClassBytes := []byte(h.Manifests.GetBucketStorageClass(name, scName, "Delete", name)) var testObjectStoreSC map[string]interface{} if err := yaml.Unmarshal(storageClassBytes, &testObjectStoreSC); err != nil { return err diff --git a/tests/integration/ceph_base_object_test.go b/tests/integration/ceph_base_object_test.go index 7e650e21a0fb..7178ef8f8402 100644 --- a/tests/integration/ceph_base_object_test.go +++ b/tests/integration/ceph_base_object_test.go @@ -53,13 +53,13 @@ var ( ObjectKey4 = "rookObj4" contentType = "plain/text" obcName = 
"smoke-delete-bucket" - region = "us-east-1" maxObject = "2" newMaxObject = "3" bucketStorageClassName = "rook-smoke-delete-bucket" maxBucket = 1 maxSize = "100000" userCap = "read" + userBucket = "user-bkt" ) // Test Object StoreCreation on Rook that was installed via helm diff --git a/tests/integration/ceph_bucket_notification_test.go b/tests/integration/ceph_bucket_notification_test.go index d0a67a8c840e..c55fb4a1415c 100644 --- a/tests/integration/ceph_bucket_notification_test.go +++ b/tests/integration/ceph_bucket_notification_test.go @@ -90,7 +90,7 @@ func testBucketNotifications(s suite.Suite, helper *clients.TestClient, k8sh *ut t.Run("create ObjectBucketClaim", func(t *testing.T) { logger.Infof("create OBC %q with storageclass %q and notification %q", obcName, bucketStorageClassName, notificationName) - cobErr := helper.BucketClient.CreateBucketStorageClass(namespace, storeName, bucketStorageClassName, "Delete", region) + cobErr := helper.BucketClient.CreateBucketStorageClass(namespace, storeName, bucketStorageClassName, "Delete", storeName) assert.Nil(t, cobErr) cobcErr := helper.BucketClient.CreateObcNotification(obcName, bucketStorageClassName, bucketname, notificationName, true) assert.Nil(t, cobcErr) @@ -124,9 +124,9 @@ func testBucketNotifications(s suite.Suite, helper *clients.TestClient, k8sh *ut s3AccessKey, _ := helper.BucketClient.GetAccessKey(obcName) s3SecretKey, _ := helper.BucketClient.GetSecretKey(obcName) if objectStore.Spec.IsTLSEnabled() { - s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, region, true) + s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, storeName, true) } else { - s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, region, true, nil) + s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, storeName, true, nil) } assert.Nil(t, err) @@ -228,9 +228,9 @@ func testBucketNotifications(s suite.Suite, helper *clients.TestClient, k8sh *ut 
s3AccessKey, _ := helper.BucketClient.GetAccessKey(obcName) s3SecretKey, _ := helper.BucketClient.GetSecretKey(obcName) if objectStore.Spec.IsTLSEnabled() { - s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, region, true) + s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, storeName, true) } else { - s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, region, true, nil) + s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, storeName, true, nil) } assert.Nil(t, err) @@ -427,7 +427,7 @@ func testBucketNotifications(s suite.Suite, helper *clients.TestClient, k8sh *ut assert.NotEqual(t, 4, i) assert.Equal(t, rgwErr, rgw.RGWErrorNotFound) - dobErr := helper.BucketClient.DeleteBucketStorageClass(namespace, storeName, bucketStorageClassName, "Delete", region) + dobErr := helper.BucketClient.DeleteBucketStorageClass(namespace, storeName, bucketStorageClassName, "Delete", storeName) assert.Nil(t, dobErr) }) diff --git a/tests/integration/ceph_object_test.go b/tests/integration/ceph_object_test.go index 268dd2729dd3..21c95995e5cb 100644 --- a/tests/integration/ceph_object_test.go +++ b/tests/integration/ceph_object_test.go @@ -163,8 +163,13 @@ func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * ctx := context.TODO() clusterInfo := client.AdminTestClusterInfo(namespace) t := s.T() + context := k8sh.MakeContext() + objectStore, err := k8sh.RookClientset.CephV1().CephObjectStores(namespace).Get(ctx, storeName, metav1.GetOptions{}) + assert.Nil(t, err) + rgwcontext, err := rgw.NewMultisiteContext(context, clusterInfo, objectStore) + assert.Nil(t, err) + logger.Infof("Testing Object Store Operations on %s", storeName) - logger.Infof("Testing Object Operations on %s", storeName) t.Run("create CephObjectStoreUser", func(t *testing.T) { createCephObjectUser(s, helper, k8sh, namespace, storeName, userid, true, true) i := 0 @@ -178,14 +183,44 @@ func 
testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * assert.NotEqual(t, 4, i) }) - context := k8sh.MakeContext() - objectStore, err := k8sh.RookClientset.CephV1().CephObjectStores(namespace).Get(ctx, storeName, metav1.GetOptions{}) - assert.Nil(t, err) - rgwcontext, err := rgw.NewMultisiteContext(context, clusterInfo, objectStore) - assert.Nil(t, err) + t.Run("S3 access for Ceph Object Store User", func(t *testing.T) { + var s3client *rgw.S3Agent + s3endpoint, _ := helper.ObjectClient.GetEndPointUrl(namespace, storeName) + s3AccessKey, _ := helper.ObjectUserClient.GetAccessKey(namespace, storeName, userid) + s3SecretKey, _ := helper.ObjectUserClient.GetSecretKey(namespace, storeName, userid) + if objectStore.Spec.IsTLSEnabled() { + s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, storeName, true) + } else { + s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, storeName, true, nil) + } + + assert.Nil(t, err) + logger.Infof("endpoint (%s) Accesskey (%s) secret (%s)", s3endpoint, s3AccessKey, s3SecretKey) + + t.Run("create bucket", func(t *testing.T) { + err = s3client.CreateBucket(userBucket) + assert.Nil(t, err) + }) + + t.Run("delete bucket", func(t *testing.T) { + _, err = s3client.DeleteBucket(userBucket) + assert.Nil(t, err) + }) + + t.Run("create bucket with invalid region", func(t *testing.T) { + if objectStore.Spec.IsTLSEnabled() { + s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, "invalid-region", true) + } else { + s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, "invalid-region", true, nil) + } + err = s3client.CreateBucket(userBucket) + assert.Error(t, err) + }) + }) + t.Run("create ObjectBucketClaim", func(t *testing.T) { logger.Infof("create OBC %q with storageclass %q - using reclaim policy 'delete' so buckets don't block deletion", obcName, bucketStorageClassName) - cobErr := helper.BucketClient.CreateBucketStorageClass(namespace, 
storeName, bucketStorageClassName, "Delete", region) + cobErr := helper.BucketClient.CreateBucketStorageClass(namespace, storeName, bucketStorageClassName, "Delete", storeName) assert.Nil(t, cobErr) cobcErr := helper.BucketClient.CreateObc(obcName, bucketStorageClassName, bucketname, maxObject, true) assert.Nil(t, cobcErr) @@ -219,9 +254,9 @@ func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * s3AccessKey, _ := helper.BucketClient.GetAccessKey(obcName) s3SecretKey, _ := helper.BucketClient.GetSecretKey(obcName) if objectStore.Spec.IsTLSEnabled() { - s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, region, true) + s3client, err = rgw.NewInsecureS3Agent(s3AccessKey, s3SecretKey, s3endpoint, storeName, true) } else { - s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, region, true, nil) + s3client, err = rgw.NewS3Agent(s3AccessKey, s3SecretKey, s3endpoint, storeName, true, nil) } assert.Nil(t, err) @@ -269,6 +304,7 @@ func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * assert.Nil(t, delobjErr) logger.Info("Objects deleted on bucket successfully") }) + }) t.Run("Regression check: OBC does not revert to Pending phase", func(t *testing.T) { @@ -342,7 +378,7 @@ func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh * assert.NotEqual(t, 4, i) assert.Equal(t, rgwErr, rgw.RGWErrorNotFound) - dobErr := helper.BucketClient.DeleteBucketStorageClass(namespace, storeName, bucketStorageClassName, "Delete", region) + dobErr := helper.BucketClient.DeleteBucketStorageClass(namespace, storeName, bucketStorageClassName, "Delete", storeName) assert.Nil(t, dobErr) }) diff --git a/tests/integration/ceph_upgrade_test.go b/tests/integration/ceph_upgrade_test.go index e4f74d90c62c..55c969bb5047 100644 --- a/tests/integration/ceph_upgrade_test.go +++ b/tests/integration/ceph_upgrade_test.go @@ -118,7 +118,7 @@ func (s *UpgradeSuite) testUpgrade(useHelm bool, 
initialCephVersion v1.CephVersi cleanupFilesystem(s.helper, s.k8sh, s.Suite, s.namespace, installer.FilesystemName) _ = s.helper.ObjectUserClient.Delete(s.namespace, objectUserID) _ = s.helper.BucketClient.DeleteObc(obcName, installer.ObjectStoreSCName, bucketPrefix, maxObject, false) - _ = s.helper.BucketClient.DeleteBucketStorageClass(s.namespace, installer.ObjectStoreName, installer.ObjectStoreSCName, "Delete", region) + _ = s.helper.BucketClient.DeleteBucketStorageClass(s.namespace, installer.ObjectStoreName, installer.ObjectStoreSCName, "Delete", installer.ObjectStoreName) objectStoreCleanUp(s.Suite, s.helper, s.k8sh, s.settings.Namespace, installer.ObjectStoreName) }() @@ -182,7 +182,7 @@ func (s *UpgradeSuite) TestUpgradeCephToOctopusDevel() { cleanupFilesystem(s.helper, s.k8sh, s.Suite, s.namespace, installer.FilesystemName) _ = s.helper.ObjectUserClient.Delete(s.namespace, objectUserID) _ = s.helper.BucketClient.DeleteObc(obcName, installer.ObjectStoreSCName, bucketPrefix, maxObject, false) - _ = s.helper.BucketClient.DeleteBucketStorageClass(s.namespace, installer.ObjectStoreName, installer.ObjectStoreName, "Delete", region) + _ = s.helper.BucketClient.DeleteBucketStorageClass(s.namespace, installer.ObjectStoreName, installer.ObjectStoreName, "Delete", installer.ObjectStoreName) objectStoreCleanUp(s.Suite, s.helper, s.k8sh, s.settings.Namespace, installer.ObjectStoreName) }() @@ -215,7 +215,7 @@ func (s *UpgradeSuite) TestUpgradeCephToPacificDevel() { cleanupFilesystem(s.helper, s.k8sh, s.Suite, s.namespace, installer.FilesystemName) _ = s.helper.ObjectUserClient.Delete(s.namespace, objectUserID) _ = s.helper.BucketClient.DeleteObc(obcName, installer.ObjectStoreSCName, bucketPrefix, maxObject, false) - _ = s.helper.BucketClient.DeleteBucketStorageClass(s.namespace, installer.ObjectStoreName, installer.ObjectStoreSCName, "Delete", region) + _ = s.helper.BucketClient.DeleteBucketStorageClass(s.namespace, installer.ObjectStoreName, 
installer.ObjectStoreSCName, "Delete", installer.ObjectStoreName) objectStoreCleanUp(s.Suite, s.helper, s.k8sh, s.settings.Namespace, installer.ObjectStoreName) }() @@ -270,7 +270,7 @@ func (s *UpgradeSuite) deployClusterforUpgrade(objectUserID, preFilename string) createCephObjectUser(s.Suite, s.helper, s.k8sh, s.namespace, installer.ObjectStoreName, objectUserID, false, false) logger.Info("Initializing object bucket claim before the upgrade") - cobErr := s.helper.BucketClient.CreateBucketStorageClass(s.namespace, installer.ObjectStoreName, installer.ObjectStoreName, "Delete", region) + cobErr := s.helper.BucketClient.CreateBucketStorageClass(s.namespace, installer.ObjectStoreName, installer.ObjectStoreName, "Delete", installer.ObjectStoreName) require.Nil(s.T(), cobErr) cobcErr := s.helper.BucketClient.CreateObc(obcName, installer.ObjectStoreName, bucketPrefix, maxObject, false) require.Nil(s.T(), cobcErr)