From aa12d2883b6c96107b5110e59c3449e6e12be7ed Mon Sep 17 00:00:00 2001 From: Denis Egorenko Date: Thu, 2 Dec 2021 16:34:54 +0400 Subject: [PATCH] file: allow creating CephFS data pools with predefined names Add the ability to create data pools for CephFS with predefined names. Related-Issue: rook#9295 Signed-off-by: Denis Egorenko --- Documentation/ceph-filesystem-crd.md | 14 +-- Documentation/ceph-filesystem.md | 5 +- .../charts/rook-ceph/templates/resources.yaml | 7 +- deploy/examples/crds.yaml | 7 +- .../examples/csi/cephfs/storageclass-ec.yaml | 3 +- deploy/examples/csi/cephfs/storageclass.yaml | 2 +- deploy/examples/filesystem-ec.yaml | 3 +- deploy/examples/filesystem-test.yaml | 3 +- deploy/examples/filesystem.yaml | 3 +- pkg/apis/ceph.rook.io/v1/types.go | 12 ++- .../ceph.rook.io/v1/zz_generated.deepcopy.go | 19 ++++- .../disruption/clusterdisruption/pools.go | 4 +- pkg/operator/ceph/file/filesystem.go | 16 ++-- pkg/operator/ceph/file/filesystem_test.go | 85 +++++++++++++++---- 14 files changed, 140 insertions(+), 43 deletions(-) diff --git a/Documentation/ceph-filesystem-crd.md b/Documentation/ceph-filesystem-crd.md index de34aff5f457e..25664dc1d7b92 100644 --- a/Documentation/ceph-filesystem-crd.md +++ b/Documentation/ceph-filesystem-crd.md @@ -31,7 +31,8 @@ spec: replicated: size: 3 dataPools: - - failureDomain: host + - name: replicated + failureDomain: host replicated: size: 3 preserveFilesystemOnDelete: true @@ -86,9 +87,11 @@ spec: replicated: size: 3 dataPools: - - replicated: + - name: default + replicated: size: 3 - - erasureCoded: + - name: erasurecoded + erasureCoded: dataChunks: 2 codingChunks: 1 metadataServer: @@ -122,7 +125,8 @@ spec: replicated: size: 3 dataPools: - - failureDomain: host + - name: replicated + failureDomain: host replicated: size: 3 preserveFilesystemOnDelete: true @@ -187,7 +191,7 @@ See the official cephfs mirror documentation on [how to add a bootstrap peer](ht The pools allow all of the settings defined in the Pool CRD spec. For more details, see the [Pool CRD](ceph-pool-crd.md) settings. In the example above, there must be at least three hosts (size 3) and at least eight devices (6 data + 2 coding chunks) in the cluster. * `metadataPool`: The settings used to create the filesystem metadata pool. Must use replication. -* `dataPools`: The settings to create the filesystem data pools. If multiple pools are specified, Rook will add the pools to the filesystem. Assigning users or files to a pool is left as an exercise for the reader with the [CephFS documentation](http://docs.ceph.com/docs/master/cephfs/file-layouts/). The data pools can use replication or erasure coding. If erasure coding pools are specified, the cluster must be running with bluestore enabled on the OSDs. +* `dataPools`: The settings to create the filesystem data pools. Optionally, a pool name can be specified with the `name` field to override the default generated name; the final pool name will then consist of the filesystem name and the pool name, e.g. `<filesystem name>-<pool name>`. Specifying `name` is highly recommended, as it prevents issues that can arise from modifying the spec in a way that causes Rook to lose the original pool ordering. If multiple pools are specified, Rook will add the pools to the filesystem. Assigning users or files to a pool is left as an exercise for the reader with the [CephFS documentation](http://docs.ceph.com/docs/master/cephfs/file-layouts/). The data pools can use replication or erasure coding.
If erasure coding pools are specified, the cluster must be running with bluestore enabled on the OSDs. * `preserveFilesystemOnDelete`: If it is set to 'true' the filesystem will remain when the CephFilesystem resource is deleted. This is a security measure to avoid loss of data if the CephFilesystem resource is deleted accidentally. The default value is 'false'. This option diff --git a/Documentation/ceph-filesystem.md b/Documentation/ceph-filesystem.md index 59af1a91c1fcd..d345eec2daeff 100644 --- a/Documentation/ceph-filesystem.md +++ b/Documentation/ceph-filesystem.md @@ -36,7 +36,8 @@ spec: replicated: size: 3 dataPools: - - replicated: + - name: replicated + replicated: size: 3 preserveFilesystemOnDelete: true metadataServer: @@ -98,7 +99,7 @@ parameters: # Ceph pool into which the volume shall be created # Required for provisionVolume: "true" - pool: myfs-data0 + pool: myfs-replicated # The secrets contain Ceph admin credentials. These are generated automatically by the operator # in the same namespace as the cluster. diff --git a/deploy/charts/rook-ceph/templates/resources.yaml b/deploy/charts/rook-ceph/templates/resources.yaml index 5ea659a804ac4..0b4e8824e802a 100644 --- a/deploy/charts/rook-ceph/templates/resources.yaml +++ b/deploy/charts/rook-ceph/templates/resources.yaml @@ -4808,9 +4808,9 @@ spec: description: FilesystemSpec represents the spec of a file system properties: dataPools: - description: The data pool settings + description: The data pool settings, with optional predefined pool name. items: - description: PoolSpec represents the spec of ceph pool + description: NamedPoolSpec represents the named ceph pool spec properties: compressionMode: description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' @@ -4890,6 +4890,9 @@ spec: type: object type: array type: object + name: + description: Name of the pool + type: string parameters: additionalProperties: type: string diff --git a/deploy/examples/crds.yaml b/deploy/examples/crds.yaml index cd4c25eb49095..bb04e3731ea76 100644 --- a/deploy/examples/crds.yaml +++ b/deploy/examples/crds.yaml @@ -4805,9 +4805,9 @@ spec: description: FilesystemSpec represents the spec of a file system properties: dataPools: - description: The data pool settings + description: The data pool settings, with optional predefined pool name. 
items: - description: PoolSpec represents the spec of ceph pool + description: NamedPoolSpec represents the named ceph pool spec properties: compressionMode: description: 'DEPRECATED: use Parameters instead, e.g., Parameters["compression_mode"] = "force" The inline compression mode in Bluestore OSD to set to (options are: none, passive, aggressive, force) Do NOT set a default value for kubebuilder as this will override the Parameters' @@ -4887,6 +4887,9 @@ spec: type: object type: array type: object + name: + description: Name of the pool + type: string parameters: additionalProperties: type: string diff --git a/deploy/examples/csi/cephfs/storageclass-ec.yaml b/deploy/examples/csi/cephfs/storageclass-ec.yaml index d3975cec844f7..1a3559a227264 100644 --- a/deploy/examples/csi/cephfs/storageclass-ec.yaml +++ b/deploy/examples/csi/cephfs/storageclass-ec.yaml @@ -14,10 +14,9 @@ parameters: # Ceph pool into which the volume shall be created # Required for provisionVolume: "true" - # For erasure coded pools, we have to create a replicated pool as the default data pool and an erasure-coded # pool as a secondary pool. - pool: myfs-ec-data1 + pool: myfs-ec-erasurecoded # The secrets contain Ceph admin credentials. These are generated automatically by the operator # in the same namespace as the cluster. diff --git a/deploy/examples/csi/cephfs/storageclass.yaml b/deploy/examples/csi/cephfs/storageclass.yaml index 9b7c0ac7e62f2..c9f599a8366bf 100644 --- a/deploy/examples/csi/cephfs/storageclass.yaml +++ b/deploy/examples/csi/cephfs/storageclass.yaml @@ -14,7 +14,7 @@ parameters: # Ceph pool into which the volume shall be created # Required for provisionVolume: "true" - pool: myfs-data0 + pool: myfs-replicated # The secrets contain Ceph admin credentials. These are generated automatically by the operator # in the same namespace as the cluster. diff --git a/deploy/examples/filesystem-ec.yaml b/deploy/examples/filesystem-ec.yaml index fcf15dcc17f14..b83b3767e709e 100644 --- a/deploy/examples/filesystem-ec.yaml +++ b/deploy/examples/filesystem-ec.yaml @@ -22,7 +22,8 @@ spec: - replicated: size: 3 # You need at least three `bluestore` OSDs on different nodes for this config to work - - erasureCoded: + - name: erasurecoded + erasureCoded: dataChunks: 2 codingChunks: 1 # Inline compression mode for the data pool diff --git a/deploy/examples/filesystem-test.yaml b/deploy/examples/filesystem-test.yaml index c001f75312816..7f960027a7298 100644 --- a/deploy/examples/filesystem-test.yaml +++ b/deploy/examples/filesystem-test.yaml @@ -14,7 +14,8 @@ spec: size: 1 requireSafeReplicaSize: false dataPools: - - failureDomain: osd + - name: replicated + failureDomain: osd replicated: size: 1 requireSafeReplicaSize: false diff --git a/deploy/examples/filesystem.yaml b/deploy/examples/filesystem.yaml index eedd7181d8d93..a0fb62eb253ec 100644 --- a/deploy/examples/filesystem.yaml +++ b/deploy/examples/filesystem.yaml @@ -25,7 +25,8 @@ spec: #target_size_ratio: ".5" # The list of data pool specs. Can use replication or erasure coding. dataPools: - - failureDomain: host + - name: replicated + failureDomain: host replicated: size: 3 # Disallow setting pool with replica 1, this could lead to data loss without recovery. 
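For reference, the pool naming rule that the example manifests above and the storage class `pool` parameters rely on can be sketched as follows. This is a minimal, self-contained Go sketch that mirrors the behaviour of generateDataPoolNames in pkg/operator/ceph/file/filesystem.go (see the hunk further below); the `namedPool` type and `dataPoolNames` helper here are illustrative stand-ins, not part of the Rook code base.

package main

import "fmt"

// namedPool is a stand-in for cephv1.NamedPoolSpec; only the name matters here.
type namedPool struct {
    Name string
}

// dataPoolNames mirrors generateDataPoolNames: pools without a name keep the
// legacy positional names "<filesystem>-data<index>", named pools become
// "<filesystem>-<name>".
func dataPoolNames(fsName string, pools []namedPool) []string {
    names := make([]string, 0, len(pools))
    for i, p := range pools {
        if p.Name == "" {
            names = append(names, fmt.Sprintf("%s-data%d", fsName, i))
        } else {
            names = append(names, fmt.Sprintf("%s-%s", fsName, p.Name))
        }
    }
    return names
}

func main() {
    // filesystem.yaml / storageclass.yaml: one named pool on filesystem "myfs".
    fmt.Println(dataPoolNames("myfs", []namedPool{{Name: "replicated"}}))
    // [myfs-replicated]

    // filesystem-ec.yaml / storageclass-ec.yaml: an unnamed replicated pool
    // followed by a named erasure-coded pool on filesystem "myfs-ec".
    fmt.Println(dataPoolNames("myfs-ec", []namedPool{{}, {Name: "erasurecoded"}}))
    // [myfs-ec-data0 myfs-ec-erasurecoded]
}

This is why storageclass.yaml switches from `pool: myfs-data0` to `pool: myfs-replicated`, and storageclass-ec.yaml from `pool: myfs-ec-data1` to `pool: myfs-ec-erasurecoded`.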
diff --git a/pkg/apis/ceph.rook.io/v1/types.go b/pkg/apis/ceph.rook.io/v1/types.go index 2080df09fef8b..3558ef8facee5 100755 --- a/pkg/apis/ceph.rook.io/v1/types.go +++ b/pkg/apis/ceph.rook.io/v1/types.go @@ -639,6 +639,14 @@ type PoolSpec struct { Quotas QuotaSpec `json:"quotas,omitempty"` } +// NamedPoolSpec represents the named ceph pool spec +type NamedPoolSpec struct { + // Name of the pool + Name string `json:"name,omitempty"` + // PoolSpec represents the spec of ceph pool + PoolSpec `json:",inline"` +} + // MirrorHealthCheckSpec represents the health specification of a Ceph Storage Pool mirror type MirrorHealthCheckSpec struct { // +optional @@ -964,9 +972,9 @@ type FilesystemSpec struct { // +nullable MetadataPool PoolSpec `json:"metadataPool"` - // The data pool settings + // The data pool settings, with optional predefined pool name. // +nullable - DataPools []PoolSpec `json:"dataPools"` + DataPools []NamedPoolSpec `json:"dataPools"` // Preserve pools on filesystem deletion // +optional diff --git a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go index 424d4df9246e9..5d3e5e8c6cfc1 100644 --- a/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go +++ b/pkg/apis/ceph.rook.io/v1/zz_generated.deepcopy.go @@ -1935,7 +1935,7 @@ func (in *FilesystemSpec) DeepCopyInto(out *FilesystemSpec) { in.MetadataPool.DeepCopyInto(&out.MetadataPool) if in.DataPools != nil { in, out := &in.DataPools, &out.DataPools - *out = make([]PoolSpec, len(*in)) + *out = make([]NamedPoolSpec, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -2475,6 +2475,23 @@ func (in *NFSGaneshaSpec) DeepCopy() *NFSGaneshaSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamedPoolSpec) DeepCopyInto(out *NamedPoolSpec) { + *out = *in + in.PoolSpec.DeepCopyInto(&out.PoolSpec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamedPoolSpec. +func (in *NamedPoolSpec) DeepCopy() *NamedPoolSpec { + if in == nil { + return nil + } + out := new(NamedPoolSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NetworkSpec) DeepCopyInto(out *NetworkSpec) { *out = *in diff --git a/pkg/operator/ceph/disruption/clusterdisruption/pools.go b/pkg/operator/ceph/disruption/clusterdisruption/pools.go index 41f1d6988ee6c..bf825ceeec722 100644 --- a/pkg/operator/ceph/disruption/clusterdisruption/pools.go +++ b/pkg/operator/ceph/disruption/clusterdisruption/pools.go @@ -57,7 +57,9 @@ func (r *ReconcileClusterDisruption) processPools(request reconcile.Request) (*c poolCount += len(cephFilesystemList.Items) for _, cephFilesystem := range cephFilesystemList.Items { poolSpecs = append(poolSpecs, cephFilesystem.Spec.MetadataPool) - poolSpecs = append(poolSpecs, cephFilesystem.Spec.DataPools...) 
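The variadic append in the line removed above no longer compiles once `DataPools` becomes `[]NamedPoolSpec`, which is why the hunk continues with an explicit loop below. A minimal sketch of the type mismatch and the fix, using local stand-in types rather than the real cephv1 package:

package main

import "fmt"

// Local stand-ins for cephv1.PoolSpec and cephv1.NamedPoolSpec.
type PoolSpec struct{ FailureDomain string }

type NamedPoolSpec struct {
    Name     string
    PoolSpec // embedded, as in the NamedPoolSpec added in types.go above
}

func main() {
    dataPools := []NamedPoolSpec{{Name: "replicated", PoolSpec: PoolSpec{FailureDomain: "host"}}}

    var poolSpecs []PoolSpec
    // poolSpecs = append(poolSpecs, dataPools...) // compile error: cannot use []NamedPoolSpec as []PoolSpec
    for _, pool := range dataPools {
        poolSpecs = append(poolSpecs, pool.PoolSpec) // append the embedded PoolSpec instead
    }
    fmt.Println(poolSpecs) // [{host}]
}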
+ for _, pool := range cephFilesystem.Spec.DataPools { + poolSpecs = append(poolSpecs, pool.PoolSpec) + } } diff --git a/pkg/operator/ceph/file/filesystem.go b/pkg/operator/ceph/file/filesystem.go index 2430c9b08f323..f90145abbd6aa 100644 --- a/pkg/operator/ceph/file/filesystem.go +++ b/pkg/operator/ceph/file/filesystem.go @@ -134,7 +134,7 @@ func validateFilesystem(context *clusterd.Context, clusterInfo *cephclient.Clust return errors.Wrap(err, "invalid metadata pool") } for _, p := range f.Spec.DataPools { - localpoolSpec := p + localpoolSpec := p.PoolSpec if err := pool.ValidatePoolSpec(context, clusterInfo, clusterSpec, &localpoolSpec); err != nil { return errors.Wrap(err, "Invalid data pool") } @@ -163,7 +163,7 @@ func SetPoolSize(f *Filesystem, context *clusterd.Context, clusterInfo *cephclie dataPoolNames := generateDataPoolNames(f, spec) for i, pool := range spec.DataPools { poolName := dataPoolNames[i] - err := cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool, "") + err := cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool.PoolSpec, "") if err != nil { return errors.Wrapf(err, "failed to update datapool %q", poolName) } @@ -243,7 +243,7 @@ func (f *Filesystem) doFilesystemCreate(context *clusterd.Context, clusterInfo * for i, pool := range spec.DataPools { poolName := dataPoolNames[i] if _, poolFound := reversedPoolMap[poolName]; !poolFound { - err = cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool, "") + err = cephclient.CreatePoolWithProfile(context, clusterInfo, clusterSpec, poolName, pool.PoolSpec, "") if err != nil { return errors.Wrapf(err, "failed to create data pool %q", poolName) } @@ -278,10 +278,16 @@ func downFilesystem(context *clusterd.Context, clusterInfo *cephclient.ClusterIn } // generateDataPoolName generates DataPool name by prefixing the filesystem name to the constant DataPoolSuffix +// or get predefined name from spec func generateDataPoolNames(f *Filesystem, spec cephv1.FilesystemSpec) []string { var dataPoolNames []string - for i := range spec.DataPools { - poolName := fmt.Sprintf("%s-%s%d", f.Name, dataPoolSuffix, i) + for i, pool := range spec.DataPools { + poolName := "" + if pool.Name == "" { + poolName = fmt.Sprintf("%s-%s%d", f.Name, dataPoolSuffix, i) + } else { + poolName = fmt.Sprintf("%s-%s", f.Name, pool.Name) + } dataPoolNames = append(dataPoolNames, poolName) } return dataPoolNames diff --git a/pkg/operator/ceph/file/filesystem_test.go b/pkg/operator/ceph/file/filesystem_test.go index b3a48845532f7..63b6a39716779 100644 --- a/pkg/operator/ceph/file/filesystem_test.go +++ b/pkg/operator/ceph/file/filesystem_test.go @@ -62,7 +62,7 @@ func TestValidateSpec(t *testing.T) { // missing data pools assert.NotNil(t, validateFilesystem(context, clusterInfo, clusterSpec, fs)) p := cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}} - fs.Spec.DataPools = append(fs.Spec.DataPools, p) + fs.Spec.DataPools = append(fs.Spec.DataPools, cephv1.NamedPoolSpec{PoolSpec: p}) // missing metadata pool assert.NotNil(t, validateFilesystem(context, clusterInfo, clusterSpec, fs)) @@ -76,6 +76,25 @@ func TestValidateSpec(t *testing.T) { assert.Nil(t, validateFilesystem(context, clusterInfo, clusterSpec, fs)) } +func TestGenerateDataPoolNames(t *testing.T) { + fs := &Filesystem{Name: "fake", Namespace: "fake"} + fsSpec := cephv1.FilesystemSpec{ + DataPools: []cephv1.NamedPoolSpec{ + { + PoolSpec: cephv1.PoolSpec{Replicated: 
cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}, + }, + { + Name: "somename", + PoolSpec: cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}, + }, + }, + } + + expectedNames := []string{"fake-data0", "fake-somename"} + names := generateDataPoolNames(fs, fsSpec) + assert.Equal(t, expectedNames, names) +} + func isBasePoolOperation(fsName, command string, args []string) bool { if reflect.DeepEqual(args[0:7], []string{"osd", "pool", "create", fsName + "-metadata", "0", "replicated", fsName + "-metadata"}) { return true @@ -95,7 +114,7 @@ func isBasePoolOperation(fsName, command string, args []string) bool { return false } -func fsExecutor(t *testing.T, fsName, configDir string, multiFS bool, createDataOnePoolCount, addDataOnePoolCount *int) *exectest.MockExecutor { +func fsExecutor(t *testing.T, fsName, configDir string, multiFS bool, createDataPoolCount, addDataPoolCount *int) *exectest.MockExecutor { mdsmap := cephclient.CephFilesystemDetails{ ID: 0, MDSMap: cephclient.MDSMap{ @@ -163,12 +182,22 @@ func fsExecutor(t *testing.T, fsName, configDir string, multiFS bool, createData } else if reflect.DeepEqual(args[0:5], []string{"osd", "crush", "rule", "create-replicated", fsName + "-data1"}) { return "", nil } else if reflect.DeepEqual(args[0:4], []string{"osd", "pool", "create", fsName + "-data1"}) { - *createDataOnePoolCount++ + *createDataPoolCount++ return "", nil } else if reflect.DeepEqual(args[0:6], []string{"osd", "pool", "set", fsName + "-data1", "size", "1"}) { return "", nil } else if reflect.DeepEqual(args[0:4], []string{"fs", "add_data_pool", fsName, fsName + "-data1"}) { - *addDataOnePoolCount++ + *addDataPoolCount++ + return "", nil + } else if reflect.DeepEqual(args[0:5], []string{"osd", "crush", "rule", "create-replicated", fsName + "-named-pool"}) { + return "", nil + } else if reflect.DeepEqual(args[0:4], []string{"osd", "pool", "create", fsName + "-named-pool"}) { + *createDataPoolCount++ + return "", nil + } else if reflect.DeepEqual(args[0:6], []string{"osd", "pool", "set", fsName + "-named-pool", "size", "1"}) { + return "", nil + } else if reflect.DeepEqual(args[0:4], []string{"fs", "add_data_pool", fsName, fsName + "-named-pool"}) { + *addDataPoolCount++ return "", nil } else if contains(args, "versions") { versionStr, _ := json.Marshal( @@ -226,12 +255,22 @@ func fsExecutor(t *testing.T, fsName, configDir string, multiFS bool, createData } else if reflect.DeepEqual(args[0:5], []string{"osd", "crush", "rule", "create-replicated", fsName + "-data1"}) { return "", nil } else if reflect.DeepEqual(args[0:4], []string{"osd", "pool", "create", fsName + "-data1"}) { - *createDataOnePoolCount++ + *createDataPoolCount++ return "", nil } else if reflect.DeepEqual(args[0:6], []string{"osd", "pool", "set", fsName + "-data1", "size", "1"}) { return "", nil } else if reflect.DeepEqual(args[0:4], []string{"fs", "add_data_pool", fsName, fsName + "-data1"}) { - *addDataOnePoolCount++ + *addDataPoolCount++ + return "", nil + } else if reflect.DeepEqual(args[0:5], []string{"osd", "crush", "rule", "create-replicated", fsName + "-named-pool"}) { + return "", nil + } else if reflect.DeepEqual(args[0:4], []string{"osd", "pool", "create", fsName + "-named-pool"}) { + *createDataPoolCount++ + return "", nil + } else if reflect.DeepEqual(args[0:6], []string{"osd", "pool", "set", fsName + "-named-pool", "size", "1"}) { + return "", nil + } else if reflect.DeepEqual(args[0:4], []string{"fs", "add_data_pool", fsName, fsName + "-named-pool"}) { 
+ *addDataPoolCount++ return "", nil } else if contains(args, "versions") { versionStr, _ := json.Marshal( @@ -256,7 +295,11 @@ func fsTest(fsName string) cephv1.CephFilesystem { ObjectMeta: metav1.ObjectMeta{Name: fsName, Namespace: "ns"}, Spec: cephv1.FilesystemSpec{ MetadataPool: cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}, - DataPools: []cephv1.PoolSpec{{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}}, + DataPools: []cephv1.NamedPoolSpec{ + { + PoolSpec: cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}, + }, + }, MetadataServer: cephv1.MetadataServerSpec{ ActiveCount: 1, Resources: v1.ResourceRequirements{ @@ -278,9 +321,9 @@ func TestCreateFilesystem(t *testing.T) { mds.UpdateDeploymentAndWait, deploymentsUpdated = testopk8s.UpdateDeploymentAndWaitStub() configDir, _ := ioutil.TempDir("", "") fsName := "myfs" - addDataOnePoolCount := 0 - createDataOnePoolCount := 0 - executor := fsExecutor(t, fsName, configDir, false, &createDataOnePoolCount, &addDataOnePoolCount) + addDataPoolCount := 0 + createDataPoolCount := 0 + executor := fsExecutor(t, fsName, configDir, false, &createDataPoolCount, &addDataPoolCount) defer os.RemoveAll(configDir) clientset := testop.New(t, 1) context := &clusterd.Context{ @@ -313,19 +356,27 @@ func TestCreateFilesystem(t *testing.T) { Executor: executor, ConfigDir: configDir, Clientset: clientset} - fs.Spec.DataPools = append(fs.Spec.DataPools, cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}) + // add not named pool, with default naming + fs.Spec.DataPools = append(fs.Spec.DataPools, cephv1.NamedPoolSpec{ + PoolSpec: cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}, + }) + // add named pool + fs.Spec.DataPools = append(fs.Spec.DataPools, cephv1.NamedPoolSpec{ + Name: "named-pool", + PoolSpec: cephv1.PoolSpec{Replicated: cephv1.ReplicatedSpec{Size: 1, RequireSafeReplicaSize: false}}, + }) err := createFilesystem(context, clusterInfo, fs, &cephv1.ClusterSpec{}, ownerInfo, "/var/lib/rook/") assert.Nil(t, err) validateStart(ctx, t, context, fs) assert.ElementsMatch(t, []string{fmt.Sprintf("rook-ceph-mds-%s-a", fsName), fmt.Sprintf("rook-ceph-mds-%s-b", fsName)}, testopk8s.DeploymentNamesUpdated(deploymentsUpdated)) - assert.Equal(t, 1, createDataOnePoolCount) - assert.Equal(t, 1, addDataOnePoolCount) + assert.Equal(t, 2, createDataPoolCount) + assert.Equal(t, 2, addDataPoolCount) testopk8s.ClearDeploymentsUpdated(deploymentsUpdated) }) t.Run("multiple filesystem creation", func(t *testing.T) { context = &clusterd.Context{ - Executor: fsExecutor(t, fsName, configDir, true, &createDataOnePoolCount, &addDataOnePoolCount), + Executor: fsExecutor(t, fsName, configDir, true, &createDataPoolCount, &addDataPoolCount), ConfigDir: configDir, Clientset: clientset, } @@ -350,9 +401,9 @@ func TestUpgradeFilesystem(t *testing.T) { configDir, _ := ioutil.TempDir("", "") fsName := "myfs" - addDataOnePoolCount := 0 - createDataOnePoolCount := 0 - executor := fsExecutor(t, fsName, configDir, false, &createDataOnePoolCount, &addDataOnePoolCount) + addDataPoolCount := 0 + createDataPoolCount := 0 + executor := fsExecutor(t, fsName, configDir, false, &createDataPoolCount, &addDataPoolCount) defer os.RemoveAll(configDir) clientset := testop.New(t, 1) context := &clusterd.Context{
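As an illustration of the NamedPoolSpec type introduced in types.go above: because PoolSpec is embedded and inlined, the `name` field sits next to the ordinary pool fields in the serialized form, which is exactly the shape of the dataPools entries in the example manifests. A minimal, self-contained sketch with local stand-in structs (mirroring the json tags, not importing the real cephv1 package):

package main

import (
    "encoding/json"
    "fmt"
)

// Stand-ins for the cephv1 types touched in types.go; the field names and
// json tags mirror the real structs, but this is not the Rook package itself.
type ReplicatedSpec struct {
    Size uint `json:"size,omitempty"`
}

type PoolSpec struct {
    FailureDomain string         `json:"failureDomain,omitempty"`
    Replicated    ReplicatedSpec `json:"replicated,omitempty"`
}

type NamedPoolSpec struct {
    // Name of the pool
    Name string `json:"name,omitempty"`
    // The embedded PoolSpec's exported fields are promoted, so they marshal
    // at the same level as "name".
    PoolSpec `json:",inline"`
}

func main() {
    p := NamedPoolSpec{
        Name: "replicated",
        PoolSpec: PoolSpec{
            FailureDomain: "host",
            Replicated:    ReplicatedSpec{Size: 3},
        },
    }
    out, _ := json.Marshal(p)
    // Prints: {"name":"replicated","failureDomain":"host","replicated":{"size":3}}
    // which matches a dataPools entry in filesystem.yaml above.
    fmt.Println(string(out))
}

The zz_generated.deepcopy.go and CRD schema changes earlier in the patch are the corresponding generated artifacts; only the new `name` property is added to the schema.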