From 1afd322650bd99158364cf97e86fdfa0deb7c3b5 Mon Sep 17 00:00:00 2001
From: Travis Nielsen
Date: Thu, 18 Nov 2021 15:22:56 -0700
Subject: [PATCH] core: ensure cluster name is available on cluster info

The cluster info is important context for the cluster controller when
creating the cluster, so all of its fields must be set properly. A test
cluster name was being set temporarily, which caused the mons to pick up
the wrong cluster CR name. No issue is known to have resulted from the
temporary value; it was simply exposed when
https://github.com/rook/rook/pull/8678 started setting the value as a
label. The functions are now named more clearly so that only unit and
integration tests use the test value for the cluster name, where the name
does not matter.

Signed-off-by: Travis Nielsen
---
 cmd/rook/ceph/cleanup.go | 2 +-
 pkg/daemon/ceph/client/command_test.go | 12 ++++----
 pkg/daemon/ceph/client/crash_test.go | 2 +-
 pkg/daemon/ceph/client/crush_rule_test.go | 4 +--
 pkg/daemon/ceph/client/crush_test.go | 4 +--
 pkg/daemon/ceph/client/deviceclass_test.go | 4 +--
 .../ceph/client/erasure-code-profile_test.go | 2 +-
 .../ceph/client/filesystem_mirror_test.go | 12 ++++----
 pkg/daemon/ceph/client/filesystem_test.go | 26 ++++++++---------
 pkg/daemon/ceph/client/image_test.go | 8 +++---
 pkg/daemon/ceph/client/info.go | 14 ++++++----
 pkg/daemon/ceph/client/mgr_test.go | 8 +++---
 pkg/daemon/ceph/client/mirror_test.go | 28 +++++++++----------
 pkg/daemon/ceph/client/mon_test.go | 8 +++---
 pkg/daemon/ceph/client/osd_test.go | 12 ++++----
 pkg/daemon/ceph/client/pool_test.go | 16 +++++------
 pkg/daemon/ceph/client/upgrade_test.go | 18 ++++++------
 pkg/daemon/ceph/osd/device_test.go | 2 +-
 pkg/operator/ceph/cluster/cephstatus_test.go | 4 +--
 pkg/operator/ceph/cluster/cluster.go | 2 +-
 pkg/operator/ceph/cluster/cluster_test.go | 14 +++++-----
 pkg/operator/ceph/cluster/mgr/mgr_test.go | 6 ++--
 pkg/operator/ceph/cluster/mon/mon_test.go | 2 +-
 pkg/operator/ceph/cluster/mon/service_test.go | 2 +-
 .../ceph/cluster/osd/deviceset_test.go | 6 ++--
 pkg/operator/ceph/cluster/osd/health_test.go | 8 +++---
 pkg/operator/ceph/cluster/osd/osd_test.go | 2 +-
 pkg/operator/ceph/cluster/osd/spec_test.go | 2 +-
 pkg/operator/ceph/cluster/watcher.go | 2 +-
 .../ceph/config/keyring/store_test.go | 2 +-
 pkg/operator/ceph/config/monstore_test.go | 10 +++---
 .../ceph/controller/mirror_peer_test.go | 2 +-
 pkg/operator/ceph/controller/spec_test.go | 2 +-
 pkg/operator/ceph/csi/peermap/config_test.go | 4 +--
 .../disruption/machinedisruption/reconcile.go | 2 +-
 pkg/operator/ceph/object/admin_test.go | 2 +-
 .../ceph/object/bucket/provisioner_test.go | 2 +-
 pkg/operator/ceph/object/dependents_test.go | 2 +-
 pkg/operator/ceph/object/objectstore_test.go | 2 +-
 .../ceph/object/realm/controller_test.go | 2 +-
 pkg/operator/ceph/object/rgw_test.go | 2 +-
 .../ceph/object/zone/controller_test.go | 2 +-
 .../ceph/object/zonegroup/controller_test.go | 2 +-
 pkg/operator/ceph/pool/controller_test.go | 6 ++--
 pkg/operator/ceph/pool/validate_test.go | 4 +--
 tests/framework/clients/filesystem.go | 2 +-
 tests/framework/clients/object_user.go | 2 +-
 tests/framework/clients/pool.go | 2 +-
 tests/framework/clients/test_client.go | 4 +--
 tests/framework/installer/ceph_installer.go | 4 +--
 tests/integration/ceph_base_block_test.go | 4 +--
 .../ceph_bucket_notification_test.go | 2 +-
 tests/integration/ceph_multi_cluster_test.go | 2 +-
 tests/integration/ceph_object_test.go | 2 +-
 tests/integration/ceph_smoke_test.go | 4 +--
 tests/integration/ceph_upgrade_test.go | 10 +++---
 56 files changed,
161 insertions(+), 157 deletions(-) diff --git a/cmd/rook/ceph/cleanup.go b/cmd/rook/ceph/cleanup.go index fe16107cd155..27fb80c6e5a4 100644 --- a/cmd/rook/ceph/cleanup.go +++ b/cmd/rook/ceph/cleanup.go @@ -67,7 +67,7 @@ func startCleanUp(cmd *cobra.Command, args []string) error { } namespace := os.Getenv(k8sutil.PodNamespaceEnvVar) - clusterInfo := client.AdminClusterInfo(namespace) + clusterInfo := client.AdminClusterInfo(namespace, "") clusterInfo.FSID = clusterFSID // Build Sanitizer diff --git a/pkg/daemon/ceph/client/command_test.go b/pkg/daemon/ceph/client/command_test.go index d887ff46574b..ca22c8a3c5ac 100644 --- a/pkg/daemon/ceph/client/command_test.go +++ b/pkg/daemon/ceph/client/command_test.go @@ -44,7 +44,7 @@ func TestFinalizeCephCommandArgs(t *testing.T) { "--keyring=/var/lib/rook/rook-ceph/rook/client.admin.keyring", } - clusterInfo := AdminClusterInfo("rook") + clusterInfo := AdminTestClusterInfo("rook") cmd, args := FinalizeCephCommandArgs(expectedCommand, clusterInfo, args, configDir) assert.Exactly(t, expectedCommand, cmd) assert.Exactly(t, expectedArgs, args) @@ -74,7 +74,7 @@ func TestFinalizeRadosGWAdminCommandArgs(t *testing.T) { "--keyring=/var/lib/rook/rook-ceph/rook/client.admin.keyring", } - clusterInfo := AdminClusterInfo("rook") + clusterInfo := AdminTestClusterInfo("rook") cmd, args := FinalizeCephCommandArgs(expectedCommand, clusterInfo, args, configDir) assert.Exactly(t, expectedCommand, cmd) assert.Exactly(t, expectedArgs, args) @@ -99,7 +99,7 @@ func TestFinalizeCephCommandArgsToolBox(t *testing.T) { "--connect-timeout=15", } - clusterInfo := AdminClusterInfo("rook") + clusterInfo := AdminTestClusterInfo("rook") exec.CephCommandsTimeout = 15 * time.Second cmd, args := FinalizeCephCommandArgs(expectedCommand, clusterInfo, args, configDir) assert.Exactly(t, "kubectl", cmd) @@ -111,7 +111,7 @@ func TestNewRBDCommand(t *testing.T) { args := []string{"create", "--size", "1G", "myvol"} t.Run("rbd command with no multus", func(t *testing.T) { - clusterInfo := AdminClusterInfo("rook") + clusterInfo := AdminTestClusterInfo("rook") executor := &exectest.MockExecutor{} executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { switch { @@ -130,7 +130,7 @@ func TestNewRBDCommand(t *testing.T) { }) t.Run("rbd command with multus", func(t *testing.T) { - clusterInfo := AdminClusterInfo("rook") + clusterInfo := AdminTestClusterInfo("rook") clusterInfo.NetworkSpec.Provider = "multus" executor := &exectest.MockExecutor{} context := &clusterd.Context{Executor: executor, RemoteExecutor: exec.RemotePodCommandExecutor{ClientSet: test.New(t, 3)}} @@ -144,7 +144,7 @@ func TestNewRBDCommand(t *testing.T) { }) t.Run("context canceled nothing to run", func(t *testing.T) { - clusterInfo := AdminClusterInfo("rook") + clusterInfo := AdminTestClusterInfo("rook") ctx, cancel := context.WithCancel(context.TODO()) clusterInfo.Context = ctx cancel() diff --git a/pkg/daemon/ceph/client/crash_test.go b/pkg/daemon/ceph/client/crash_test.go index ccd40634a64d..e294197c56b6 100644 --- a/pkg/daemon/ceph/client/crash_test.go +++ b/pkg/daemon/ceph/client/crash_test.go @@ -46,7 +46,7 @@ func TestCephCrash(t *testing.T) { } return "", errors.Errorf("unexpected ceph command %q", args) } - crash, err := GetCrashList(context, AdminClusterInfo("mycluster")) + crash, err := GetCrashList(context, AdminTestClusterInfo("mycluster")) assert.NoError(t, err) assert.Equal(t, 1, len(crash)) } diff --git a/pkg/daemon/ceph/client/crush_rule_test.go 
b/pkg/daemon/ceph/client/crush_rule_test.go index a3fbaa2ffefb..77f19a60141b 100644 --- a/pkg/daemon/ceph/client/crush_rule_test.go +++ b/pkg/daemon/ceph/client/crush_rule_test.go @@ -97,7 +97,7 @@ func TestInjectCRUSHMapMap(t *testing.T) { return "", errors.Errorf("unexpected ceph command '%v'", args) } - err := injectCRUSHMap(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "/tmp/063990228.compiled") + err := injectCRUSHMap(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster"), "/tmp/063990228.compiled") assert.Nil(t, err) } @@ -111,7 +111,7 @@ func TestSetCRUSHMapMap(t *testing.T) { return "", errors.Errorf("unexpected ceph command '%v'", args) } - err := setCRUSHMap(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "/tmp/063990228.compiled") + err := setCRUSHMap(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster"), "/tmp/063990228.compiled") assert.Nil(t, err) } diff --git a/pkg/daemon/ceph/client/crush_test.go b/pkg/daemon/ceph/client/crush_test.go index db7d358249af..57c1d4989df2 100644 --- a/pkg/daemon/ceph/client/crush_test.go +++ b/pkg/daemon/ceph/client/crush_test.go @@ -272,7 +272,7 @@ func TestGetCrushMap(t *testing.T) { } return "", errors.Errorf("unexpected ceph command '%v'", args) } - crush, err := GetCrushMap(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster")) + crush, err := GetCrushMap(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster")) assert.Nil(t, err) assert.Equal(t, 11, len(crush.Types)) @@ -291,7 +291,7 @@ func TestGetOSDOnHost(t *testing.T) { return "", errors.Errorf("unexpected ceph command '%v'", args) } - _, err := GetOSDOnHost(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "my-host") + _, err := GetOSDOnHost(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster"), "my-host") assert.Nil(t, err) } diff --git a/pkg/daemon/ceph/client/deviceclass_test.go b/pkg/daemon/ceph/client/deviceclass_test.go index 5470801c9ee4..b4a4ad0721f8 100644 --- a/pkg/daemon/ceph/client/deviceclass_test.go +++ b/pkg/daemon/ceph/client/deviceclass_test.go @@ -38,11 +38,11 @@ func TestGetDeviceClassOSDs(t *testing.T) { } return "", errors.Errorf("unexpected ceph command '%v'", args) } - osds, err := GetDeviceClassOSDs(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "ssd") + osds, err := GetDeviceClassOSDs(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster"), "ssd") assert.Nil(t, err) assert.Equal(t, []int{0, 1, 2}, osds) - osds, err = GetDeviceClassOSDs(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "hdd") + osds, err = GetDeviceClassOSDs(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster"), "hdd") assert.Nil(t, err) assert.Equal(t, []int{}, osds) } diff --git a/pkg/daemon/ceph/client/erasure-code-profile_test.go b/pkg/daemon/ceph/client/erasure-code-profile_test.go index df42accb904c..26603113b3ba 100644 --- a/pkg/daemon/ceph/client/erasure-code-profile_test.go +++ b/pkg/daemon/ceph/client/erasure-code-profile_test.go @@ -84,6 +84,6 @@ func testCreateProfile(t *testing.T, failureDomain, crushRoot, deviceClass strin return "", errors.Errorf("unexpected ceph command %q", args) } - err := CreateErasureCodeProfile(context, AdminClusterInfo("mycluster"), "myapp", spec) + err := CreateErasureCodeProfile(context, AdminTestClusterInfo("mycluster"), "myapp", spec) assert.Nil(t, err) } diff --git a/pkg/daemon/ceph/client/filesystem_mirror_test.go 
b/pkg/daemon/ceph/client/filesystem_mirror_test.go index 30d83924f523..8b575541cb8f 100644 --- a/pkg/daemon/ceph/client/filesystem_mirror_test.go +++ b/pkg/daemon/ceph/client/filesystem_mirror_test.go @@ -51,7 +51,7 @@ func TestEnableFilesystemSnapshotMirror(t *testing.T) { } context := &clusterd.Context{Executor: executor} - err := EnableFilesystemSnapshotMirror(context, AdminClusterInfo("mycluster"), fs) + err := EnableFilesystemSnapshotMirror(context, AdminTestClusterInfo("mycluster"), fs) assert.NoError(t, err) } @@ -70,7 +70,7 @@ func TestDisableFilesystemSnapshotMirror(t *testing.T) { } context := &clusterd.Context{Executor: executor} - err := DisableFilesystemSnapshotMirror(context, AdminClusterInfo("mycluster"), fs) + err := DisableFilesystemSnapshotMirror(context, AdminTestClusterInfo("mycluster"), fs) assert.NoError(t, err) } @@ -92,7 +92,7 @@ func TestImportFilesystemMirrorPeer(t *testing.T) { } context := &clusterd.Context{Executor: executor} - err := ImportFSMirrorBootstrapPeer(context, AdminClusterInfo("mycluster"), fs, token) + err := ImportFSMirrorBootstrapPeer(context, AdminTestClusterInfo("mycluster"), fs, token) assert.NoError(t, err) } @@ -112,7 +112,7 @@ func TestCreateFSMirrorBootstrapPeer(t *testing.T) { } context := &clusterd.Context{Executor: executor} - token, err := CreateFSMirrorBootstrapPeer(context, AdminClusterInfo("mycluster"), fs) + token, err := CreateFSMirrorBootstrapPeer(context, AdminTestClusterInfo("mycluster"), fs) assert.NoError(t, err) _, err = base64.StdEncoding.DecodeString(string(token)) assert.NoError(t, err) @@ -135,7 +135,7 @@ func TestRemoveFilesystemMirrorPeer(t *testing.T) { } context := &clusterd.Context{Executor: executor} - err := RemoveFilesystemMirrorPeer(context, AdminClusterInfo("mycluster"), peerUUID) + err := RemoveFilesystemMirrorPeer(context, AdminTestClusterInfo("mycluster"), peerUUID) assert.NoError(t, err) } @@ -155,7 +155,7 @@ func TestFSMirrorDaemonStatus(t *testing.T) { } context := &clusterd.Context{Executor: executor} - s, err := GetFSMirrorDaemonStatus(context, AdminClusterInfo("mycluster"), fs) + s, err := GetFSMirrorDaemonStatus(context, AdminTestClusterInfo("mycluster"), fs) assert.NoError(t, err) assert.Equal(t, "myfs", s[0].Filesystems[0].Name) } diff --git a/pkg/daemon/ceph/client/filesystem_test.go b/pkg/daemon/ceph/client/filesystem_test.go index 59df88c4414c..5280b98db86a 100644 --- a/pkg/daemon/ceph/client/filesystem_test.go +++ b/pkg/daemon/ceph/client/filesystem_test.go @@ -158,7 +158,7 @@ func TestFilesystemRemove(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - err := RemoveFilesystem(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, false) + err := RemoveFilesystem(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, false) assert.Nil(t, err) assert.True(t, metadataDeleted) assert.True(t, dataDeleted) @@ -213,7 +213,7 @@ func TestFailAllStandbyReplayMDS(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - err := FailAllStandbyReplayMDS(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName) + err := FailAllStandbyReplayMDS(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName) assert.NoError(t, err) assert.ElementsMatch(t, failedGids, []string{"124"}) @@ -259,7 +259,7 @@ func TestFailAllStandbyReplayMDS(t *testing.T) { } return "", errors.Errorf("unexpected ceph command %q", args) } - err = FailAllStandbyReplayMDS(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName) + err = 
FailAllStandbyReplayMDS(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName) assert.NoError(t, err) fs = CephFilesystemDetails{ @@ -304,7 +304,7 @@ func TestFailAllStandbyReplayMDS(t *testing.T) { } return "", errors.Errorf("unexpected ceph command %q", args) } - err = FailAllStandbyReplayMDS(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName) + err = FailAllStandbyReplayMDS(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName) assert.Error(t, err) assert.Contains(t, err.Error(), "expected execution of mds fail") } @@ -355,7 +355,7 @@ func TestGetMdsIdByRank(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - name, err := GetMdsIdByRank(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0) + name, err := GetMdsIdByRank(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0) assert.Equal(t, name, "myfs1-a") assert.NoError(t, err) @@ -378,7 +378,7 @@ func TestGetMdsIdByRank(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - name, err = GetMdsIdByRank(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0) + name, err = GetMdsIdByRank(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0) assert.Equal(t, "", name) assert.Error(t, err) assert.Contains(t, err.Error(), "test ceph fs get error") @@ -427,7 +427,7 @@ func TestGetMdsIdByRank(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - name, err = GetMdsIdByRank(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0) + name, err = GetMdsIdByRank(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0) assert.Equal(t, "", name) assert.Error(t, err) assert.Contains(t, err.Error(), "failed to get mds gid from rank 0") @@ -475,7 +475,7 @@ func TestGetMdsIdByRank(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - name, err = GetMdsIdByRank(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0) + name, err = GetMdsIdByRank(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0) assert.Equal(t, "", name) assert.Error(t, err) assert.Contains(t, err.Error(), "failed to get mds info for rank 0") @@ -503,7 +503,7 @@ func TestGetMDSDump(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - mdsDump, err := GetMDSDump(context, AdminClusterInfo("mycluster")) + mdsDump, err := GetMDSDump(context, AdminTestClusterInfo("mycluster")) assert.NoError(t, err) assert.ElementsMatch(t, mdsDump.Standbys, []MDSStandBy{{Name: "rook-ceph-filesystem-b", Rank: -1}}) @@ -517,7 +517,7 @@ func TestGetMDSDump(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - _, err = GetMDSDump(context, AdminClusterInfo("mycluster")) + _, err = GetMDSDump(context, AdminTestClusterInfo("mycluster")) assert.Error(t, err) } @@ -543,7 +543,7 @@ func TestWaitForNoStandbys(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - err := WaitForNoStandbys(context, AdminClusterInfo("mycluster"), 6*time.Second) + err := WaitForNoStandbys(context, AdminTestClusterInfo("mycluster"), 6*time.Second) assert.Error(t, err) executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) { @@ -556,7 +556,7 @@ func TestWaitForNoStandbys(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - err = WaitForNoStandbys(context, AdminClusterInfo("mycluster"), 6*time.Second) + err = 
WaitForNoStandbys(context, AdminTestClusterInfo("mycluster"), 6*time.Second) assert.Error(t, err) firstCall := true @@ -583,7 +583,7 @@ func TestWaitForNoStandbys(t *testing.T) { } return "", errors.Errorf("unexpected ceph command %q", args) } - err = WaitForNoStandbys(context, AdminClusterInfo("mycluster"), 6*time.Second) + err = WaitForNoStandbys(context, AdminTestClusterInfo("mycluster"), 6*time.Second) assert.NoError(t, err) } diff --git a/pkg/daemon/ceph/client/image_test.go b/pkg/daemon/ceph/client/image_test.go index 3a51acc5aaee..335829d95dea 100644 --- a/pkg/daemon/ceph/client/image_test.go +++ b/pkg/daemon/ceph/client/image_test.go @@ -45,7 +45,7 @@ func TestCreateImage(t *testing.T) { } return "", errors.Errorf("unexpected ceph command %q", args) } - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") _, err := CreateImage(context, clusterInfo, "image1", "pool1", "", uint64(sizeMB)) // 1MB assert.NotNil(t, err) assert.True(t, strings.Contains(err.Error(), "mocked detailed ceph error output stream")) @@ -156,7 +156,7 @@ func TestExpandImage(t *testing.T) { } return "", errors.Errorf("unexpected ceph command %q", args) } - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") err := ExpandImage(context, clusterInfo, "error-name", "kube", "mon1,mon2,mon3", "/tmp/keyring", 1000000) assert.Error(t, err) @@ -186,7 +186,7 @@ func TestListImageLogLevelInfo(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") images, err = ListImages(context, clusterInfo, "pool1") assert.Nil(t, err) assert.NotNil(t, images) @@ -250,7 +250,7 @@ func TestListImageLogLevelDebug(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") images, err = ListImages(context, clusterInfo, "pool1") assert.Nil(t, err) assert.NotNil(t, images) diff --git a/pkg/daemon/ceph/client/info.go b/pkg/daemon/ceph/client/info.go index 217e99d2f665..e0c28890d086 100644 --- a/pkg/daemon/ceph/client/info.go +++ b/pkg/daemon/ceph/client/info.go @@ -87,22 +87,26 @@ func (c *ClusterInfo) NamespacedName() types.NamespacedName { } // AdminClusterInfo() creates a ClusterInfo with the basic info to access the cluster -// as an admin. Only a few fields are set in the struct, -// so this clusterInfo cannot be used to generate the mon config or request the -// namespacedName. A full cluster info must be populated for those operations. -func AdminClusterInfo(namespace string) *ClusterInfo { +// as an admin. +func AdminClusterInfo(namespace, name string) *ClusterInfo { ownerInfo := k8sutil.NewOwnerInfoWithOwnerRef(&metav1.OwnerReference{}, "") return &ClusterInfo{ Namespace: namespace, CephCred: CephCred{ Username: AdminUsername, }, - name: "testing", + name: name, OwnerInfo: ownerInfo, Context: context.TODO(), } } +// AdminTestClusterInfo() creates a ClusterInfo with the basic info to access the cluster +// as an admin. This cluster info should only be used by unit or integration tests. +func AdminTestClusterInfo(namespace string) *ClusterInfo { + return AdminClusterInfo(namespace, "testing") +} + // IsInitialized returns true if the critical information in the ClusterInfo struct has been filled // in. 
This method exists less out of necessity than the desire to be explicit about the lifecycle // of the ClusterInfo struct during startup, specifically that it is expected to exist after the diff --git a/pkg/daemon/ceph/client/mgr_test.go b/pkg/daemon/ceph/client/mgr_test.go index 95fe6ff17749..bbd08b2b1d5a 100644 --- a/pkg/daemon/ceph/client/mgr_test.go +++ b/pkg/daemon/ceph/client/mgr_test.go @@ -52,7 +52,7 @@ func TestEnableModuleRetries(t *testing.T) { } - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") _ = MgrEnableModule(&clusterd.Context{Executor: executor}, clusterInfo, "invalidModuleName", false) assert.Equal(t, 5, moduleEnableRetries) @@ -91,7 +91,7 @@ func TestEnableModule(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") err := enableModule(&clusterd.Context{Executor: executor}, clusterInfo, "pg_autoscaler", true, "enable") assert.NoError(t, err) @@ -121,7 +121,7 @@ func TestEnableDisableBalancerModule(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") err := enableDisableBalancerModule(&clusterd.Context{Executor: executor}, clusterInfo, "on") assert.NoError(t, err) @@ -140,6 +140,6 @@ func TestSetBalancerMode(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - err := setBalancerMode(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "upmap") + err := setBalancerMode(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster"), "upmap") assert.NoError(t, err) } diff --git a/pkg/daemon/ceph/client/mirror_test.go b/pkg/daemon/ceph/client/mirror_test.go index b4220e7d7522..a25566afac96 100644 --- a/pkg/daemon/ceph/client/mirror_test.go +++ b/pkg/daemon/ceph/client/mirror_test.go @@ -50,7 +50,7 @@ func TestCreateRBDMirrorBootstrapPeer(t *testing.T) { return "", errors.New("unknown command") } context := &clusterd.Context{Executor: executor} - c := AdminClusterInfo("mycluster") + c := AdminTestClusterInfo("mycluster") c.FSID = "4fe04ebb-ec0c-46c2-ac55-9eb52ebbfb82" token, err := CreateRBDMirrorBootstrapPeer(context, c, pool) @@ -73,7 +73,7 @@ func TestEnablePoolMirroring(t *testing.T) { } context := &clusterd.Context{Executor: executor} - err := enablePoolMirroring(context, AdminClusterInfo("mycluster"), poolSpec, pool) + err := enablePoolMirroring(context, AdminTestClusterInfo("mycluster"), poolSpec, pool) assert.NoError(t, err) } @@ -91,7 +91,7 @@ func TestGetPoolMirroringStatus(t *testing.T) { } context := &clusterd.Context{Executor: executor} - poolMirrorStatus, err := GetPoolMirroringStatus(context, AdminClusterInfo("mycluster"), pool) + poolMirrorStatus, err := GetPoolMirroringStatus(context, AdminTestClusterInfo("mycluster"), pool) assert.NoError(t, err) assert.Equal(t, "WARNING", poolMirrorStatus.Summary.Health) assert.Equal(t, "OK", poolMirrorStatus.Summary.DaemonHealth) @@ -114,7 +114,7 @@ func TestImportRBDMirrorBootstrapPeer(t *testing.T) { } context := &clusterd.Context{Executor: executor} - err := ImportRBDMirrorBootstrapPeer(context, AdminClusterInfo("mycluster"), pool, "", []byte(bootstrapPeerToken)) + err := ImportRBDMirrorBootstrapPeer(context, AdminTestClusterInfo("mycluster"), pool, "", []byte(bootstrapPeerToken)) assert.NoError(t, err) executor.MockExecuteCommandWithOutput = func(command string, args 
...string) (string, error) { @@ -132,7 +132,7 @@ func TestImportRBDMirrorBootstrapPeer(t *testing.T) { return "", errors.New("unknown command") } context = &clusterd.Context{Executor: executor} - err = ImportRBDMirrorBootstrapPeer(context, AdminClusterInfo("mycluster"), pool, "rx-tx", []byte(bootstrapPeerToken)) + err = ImportRBDMirrorBootstrapPeer(context, AdminTestClusterInfo("mycluster"), pool, "rx-tx", []byte(bootstrapPeerToken)) assert.NoError(t, err) } @@ -150,7 +150,7 @@ func TestGetPoolMirroringInfo(t *testing.T) { } context := &clusterd.Context{Executor: executor} - poolMirrorInfo, err := GetPoolMirroringInfo(context, AdminClusterInfo("mycluster"), pool) + poolMirrorInfo, err := GetPoolMirroringInfo(context, AdminTestClusterInfo("mycluster"), pool) assert.NoError(t, err) assert.Equal(t, "image", poolMirrorInfo.Mode) assert.Equal(t, 1, len(poolMirrorInfo.Peers)) @@ -180,7 +180,7 @@ func TestEnableSnapshotSchedule(t *testing.T) { context := &clusterd.Context{Executor: executor} poolSpec := &cephv1.PoolSpec{Mirroring: cephv1.MirroringSpec{SnapshotSchedules: []cephv1.SnapshotScheduleSpec{{Interval: interval}}}} - err := enableSnapshotSchedule(context, AdminClusterInfo("mycluster"), poolSpec.Mirroring.SnapshotSchedules[0], pool) + err := enableSnapshotSchedule(context, AdminTestClusterInfo("mycluster"), poolSpec.Mirroring.SnapshotSchedules[0], pool) assert.NoError(t, err) } @@ -206,7 +206,7 @@ func TestEnableSnapshotSchedule(t *testing.T) { context := &clusterd.Context{Executor: executor} poolSpec := &cephv1.PoolSpec{Mirroring: cephv1.MirroringSpec{SnapshotSchedules: []cephv1.SnapshotScheduleSpec{{Interval: interval, StartTime: startTime}}}} - err := enableSnapshotSchedule(context, AdminClusterInfo("mycluster"), poolSpec.Mirroring.SnapshotSchedules[0], pool) + err := enableSnapshotSchedule(context, AdminTestClusterInfo("mycluster"), poolSpec.Mirroring.SnapshotSchedules[0], pool) assert.NoError(t, err) } } @@ -228,7 +228,7 @@ func TestListSnapshotSchedules(t *testing.T) { } context := &clusterd.Context{Executor: executor} - snapshotScheduleStatus, err := listSnapshotSchedules(context, AdminClusterInfo("mycluster"), pool) + snapshotScheduleStatus, err := listSnapshotSchedules(context, AdminTestClusterInfo("mycluster"), pool) assert.NoError(t, err) assert.Equal(t, 2, len(snapshotScheduleStatus)) } @@ -251,7 +251,7 @@ func TestListSnapshotSchedulesRecursively(t *testing.T) { } context := &clusterd.Context{Executor: executor} - snapshotScheduleStatus, err := ListSnapshotSchedulesRecursively(context, AdminClusterInfo("mycluster"), pool) + snapshotScheduleStatus, err := ListSnapshotSchedulesRecursively(context, AdminTestClusterInfo("mycluster"), pool) assert.NoError(t, err) assert.Equal(t, 2, len(snapshotScheduleStatus)) } @@ -274,7 +274,7 @@ func TestRemoveSnapshotSchedule(t *testing.T) { context := &clusterd.Context{Executor: executor} snapScheduleResponse := cephv1.SnapshotSchedule{StartTime: "14:00:00-05:00", Interval: "1d"} - err := removeSnapshotSchedule(context, AdminClusterInfo("mycluster"), snapScheduleResponse, pool) + err := removeSnapshotSchedule(context, AdminTestClusterInfo("mycluster"), snapScheduleResponse, pool) assert.NoError(t, err) } @@ -298,7 +298,7 @@ func TestRemoveSnapshotSchedules(t *testing.T) { context := &clusterd.Context{Executor: executor} poolSpec := &cephv1.PoolSpec{Mirroring: cephv1.MirroringSpec{SnapshotSchedules: []cephv1.SnapshotScheduleSpec{{Interval: interval, StartTime: startTime}}}} - err := removeSnapshotSchedules(context, 
AdminClusterInfo("mycluster"), *poolSpec, pool) + err := removeSnapshotSchedules(context, AdminTestClusterInfo("mycluster"), *poolSpec, pool) assert.NoError(t, err) } @@ -316,7 +316,7 @@ func TestDisableMirroring(t *testing.T) { } context := &clusterd.Context{Executor: executor} - err := disablePoolMirroring(context, AdminClusterInfo("mycluster"), pool) + err := disablePoolMirroring(context, AdminTestClusterInfo("mycluster"), pool) assert.NoError(t, err) } @@ -337,6 +337,6 @@ func TestRemoveClusterPeer(t *testing.T) { } context := &clusterd.Context{Executor: executor} - err := removeClusterPeer(context, AdminClusterInfo("mycluster"), pool, peerUUID) + err := removeClusterPeer(context, AdminTestClusterInfo("mycluster"), pool, peerUUID) assert.NoError(t, err) } diff --git a/pkg/daemon/ceph/client/mon_test.go b/pkg/daemon/ceph/client/mon_test.go index 9c3a8c4414a7..dbd40974d8f6 100644 --- a/pkg/daemon/ceph/client/mon_test.go +++ b/pkg/daemon/ceph/client/mon_test.go @@ -30,7 +30,7 @@ import ( func TestCephArgs(t *testing.T) { // cluster a under /etc args := []string{} - clusterInfo := AdminClusterInfo("a") + clusterInfo := AdminTestClusterInfo("a") exec.CephCommandsTimeout = 15 * time.Second command, args := FinalizeCephCommandArgs(CephTool, clusterInfo, args, "/etc") assert.Equal(t, CephTool, command) @@ -79,7 +79,7 @@ func TestStretchElectionStrategy(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } context := &clusterd.Context{Executor: executor} - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") err := EnableStretchElectionStrategy(context, clusterInfo) assert.NoError(t, err) @@ -108,7 +108,7 @@ func TestStretchClusterMonTiebreaker(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } context := &clusterd.Context{Executor: executor} - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") err := SetMonStretchTiebreaker(context, clusterInfo, monName, failureDomain) assert.NoError(t, err) @@ -139,7 +139,7 @@ func TestMonDump(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } context := &clusterd.Context{Executor: executor} - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") dump, err := GetMonDump(context, clusterInfo) assert.NoError(t, err) diff --git a/pkg/daemon/ceph/client/osd_test.go b/pkg/daemon/ceph/client/osd_test.go index 2e9a8ff4fb92..c733ce4da8b5 100644 --- a/pkg/daemon/ceph/client/osd_test.go +++ b/pkg/daemon/ceph/client/osd_test.go @@ -76,14 +76,14 @@ func TestHostTree(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - tree, err := HostTree(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster")) + tree, err := HostTree(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster")) assert.NoError(t, err) assert.Equal(t, 2, len(tree.Nodes)) assert.Equal(t, "minikube", tree.Nodes[0].Name) assert.Equal(t, 3, len(tree.Nodes[0].Children)) emptyTreeResult = true - tree, err = HostTree(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster")) + tree, err = HostTree(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster")) assert.Error(t, err) assert.Equal(t, 0, len(tree.Nodes)) @@ -104,12 +104,12 @@ func TestOsdListNum(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - list, err := OsdListNum(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster")) + 
list, err := OsdListNum(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster")) assert.NoError(t, err) assert.Equal(t, 3, len(list)) emptyOsdListNumResult = true - list, err = OsdListNum(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster")) + list, err = OsdListNum(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster")) assert.Error(t, err) assert.Equal(t, 0, len(list)) } @@ -127,7 +127,7 @@ func TestOSDDeviceClasses(t *testing.T) { } context := &clusterd.Context{Executor: executor} - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") t.Run("device classes returned", func(t *testing.T) { deviceClasses, err := OSDDeviceClasses(context, clusterInfo, []string{"0"}) @@ -161,7 +161,7 @@ func TestOSDOkToStop(t *testing.T) { } context := &clusterd.Context{Executor: executor} - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") doSetup := func() { seenArgs = []string{} diff --git a/pkg/daemon/ceph/client/pool_test.go b/pkg/daemon/ceph/client/pool_test.go index 0ed868830a0e..afffdc410966 100644 --- a/pkg/daemon/ceph/client/pool_test.go +++ b/pkg/daemon/ceph/client/pool_test.go @@ -85,7 +85,7 @@ func testCreateECPool(t *testing.T, overwrite bool, compressionMode string) { return "", errors.Errorf("unexpected ceph command %q", args) } - err := CreateECPoolForApp(context, AdminClusterInfo("mycluster"), poolName, "mypoolprofile", p, DefaultPGCount, "myapp", overwrite) + err := CreateECPoolForApp(context, AdminTestClusterInfo("mycluster"), poolName, "mypoolprofile", p, DefaultPGCount, "myapp", overwrite) assert.Nil(t, err) if compressionMode != "" { assert.True(t, compressionModeCreated) @@ -173,7 +173,7 @@ func testCreateReplicaPool(t *testing.T, failureDomain, crushRoot, deviceClass, p.CompressionMode = compressionMode } clusterSpec := &cephv1.ClusterSpec{Storage: cephv1.StorageScopeSpec{Config: map[string]string{CrushRootConfigKey: "cluster-crush-root"}}} - err := CreateReplicatedPoolForApp(context, AdminClusterInfo("mycluster"), clusterSpec, "mypool", p, DefaultPGCount, "myapp") + err := CreateReplicatedPoolForApp(context, AdminTestClusterInfo("mycluster"), clusterSpec, "mypool", p, DefaultPGCount, "myapp") assert.Nil(t, err) assert.True(t, crushRuleCreated) if compressionMode != "" { @@ -218,7 +218,7 @@ func TestGetPoolStatistics(t *testing.T) { return "", errors.Errorf("unexpected rbd command %q", args) } - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") stats, err := GetPoolStatistics(context, clusterInfo, "replicapool") assert.Nil(t, err) assert.True(t, reflect.DeepEqual(stats, &p)) @@ -245,7 +245,7 @@ func TestSetPoolReplicatedSizeProperty(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - err := SetPoolReplicatedSizeProperty(context, AdminClusterInfo("mycluster"), poolName, "3") + err := SetPoolReplicatedSizeProperty(context, AdminTestClusterInfo("mycluster"), poolName, "3") assert.NoError(t, err) // TEST POOL SIZE 1 AND RequireSafeReplicaSize True @@ -263,7 +263,7 @@ func TestSetPoolReplicatedSizeProperty(t *testing.T) { return "", errors.Errorf("unexpected ceph command %q", args) } - err = SetPoolReplicatedSizeProperty(context, AdminClusterInfo("mycluster"), poolName, "1") + err = SetPoolReplicatedSizeProperty(context, AdminTestClusterInfo("mycluster"), poolName, "1") assert.NoError(t, err) } @@ -302,7 +302,7 @@ func testCreateStretchCrushRule(t *testing.T, alreadyExists 
bool) { } return "", errors.Errorf("unexpected ceph command %q", args) } - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") clusterSpec := &cephv1.ClusterSpec{} poolSpec := cephv1.PoolSpec{FailureDomain: "rack"} ruleName := "testrule" @@ -384,7 +384,7 @@ func testCreatePoolWithReplicasPerFailureDomain(t *testing.T, failureDomain, cru } context := &clusterd.Context{Executor: executor} clusterSpec := &cephv1.ClusterSpec{Storage: cephv1.StorageScopeSpec{Config: map[string]string{CrushRootConfigKey: "cluster-crush-root"}}} - err := CreateReplicatedPoolForApp(context, AdminClusterInfo("mycluster"), clusterSpec, poolName, poolSpec, DefaultPGCount, "myapp") + err := CreateReplicatedPoolForApp(context, AdminTestClusterInfo("mycluster"), clusterSpec, poolName, poolSpec, DefaultPGCount, "myapp") assert.Nil(t, err) assert.True(t, poolRuleCreated) assert.True(t, poolRuleSet) @@ -426,7 +426,7 @@ func testCreateHybridCrushRule(t *testing.T, alreadyExists bool) { } return "", errors.Errorf("unexpected ceph command %q", args) } - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") clusterSpec := &cephv1.ClusterSpec{} poolSpec := cephv1.PoolSpec{ FailureDomain: "rack", diff --git a/pkg/daemon/ceph/client/upgrade_test.go b/pkg/daemon/ceph/client/upgrade_test.go index 023cf49bb37d..3d784790cbde 100644 --- a/pkg/daemon/ceph/client/upgrade_test.go +++ b/pkg/daemon/ceph/client/upgrade_test.go @@ -37,7 +37,7 @@ func TestGetCephMonVersionString(t *testing.T) { } context := &clusterd.Context{Executor: executor} - _, err := getCephMonVersionString(context, AdminClusterInfo("mycluster")) + _, err := getCephMonVersionString(context, AdminTestClusterInfo("mycluster")) assert.NoError(t, err) } @@ -49,7 +49,7 @@ func TestGetCephMonVersionsString(t *testing.T) { } context := &clusterd.Context{Executor: executor} - _, err := getAllCephDaemonVersionsString(context, AdminClusterInfo("mycluster")) + _, err := getAllCephDaemonVersionsString(context, AdminTestClusterInfo("mycluster")) assert.Nil(t, err) } @@ -62,7 +62,7 @@ func TestEnableMessenger2(t *testing.T) { } context := &clusterd.Context{Executor: executor} - err := EnableMessenger2(context, AdminClusterInfo("mycluster")) + err := EnableMessenger2(context, AdminTestClusterInfo("mycluster")) assert.NoError(t, err) } @@ -75,7 +75,7 @@ func TestEnableReleaseOSDFunctionality(t *testing.T) { } context := &clusterd.Context{Executor: executor} - err := EnableReleaseOSDFunctionality(context, AdminClusterInfo("mycluster"), "octopus") + err := EnableReleaseOSDFunctionality(context, AdminTestClusterInfo("mycluster"), "octopus") assert.NoError(t, err) } @@ -92,7 +92,7 @@ func TestOkToStopDaemon(t *testing.T) { context := &clusterd.Context{Executor: executor} deployment := "rook-ceph-mon-a" - err := okToStopDaemon(context, AdminClusterInfo("mycluster"), deployment, "mon", "a") + err := okToStopDaemon(context, AdminTestClusterInfo("mycluster"), deployment, "mon", "a") assert.NoError(t, err) // Second test @@ -105,7 +105,7 @@ func TestOkToStopDaemon(t *testing.T) { context = &clusterd.Context{Executor: executor} deployment = "rook-ceph-mgr-a" - err = okToStopDaemon(context, AdminClusterInfo("mycluster"), deployment, "mgr", "a") + err = okToStopDaemon(context, AdminTestClusterInfo("mycluster"), deployment, "mgr", "a") assert.NoError(t, err) // Third test @@ -118,7 +118,7 @@ func TestOkToStopDaemon(t *testing.T) { context = &clusterd.Context{Executor: executor} deployment = "rook-ceph-dummy-a" - 
err = okToStopDaemon(context, AdminClusterInfo("mycluster"), deployment, "dummy", "a") + err = okToStopDaemon(context, AdminTestClusterInfo("mycluster"), deployment, "dummy", "a") assert.NoError(t, err) } @@ -126,7 +126,7 @@ func TestOkToContinue(t *testing.T) { executor := &exectest.MockExecutor{} context := &clusterd.Context{Executor: executor} - err := OkToContinue(context, AdminClusterInfo("mycluster"), "rook-ceph-mon-a", "mon", "a") // mon is not checked on ok-to-continue so nil is expected + err := OkToContinue(context, AdminTestClusterInfo("mycluster"), "rook-ceph-mon-a", "mon", "a") // mon is not checked on ok-to-continue so nil is expected assert.NoError(t, err) } @@ -300,7 +300,7 @@ func TestGetRetryConfig(t *testing.T) { } func TestOSDUpdateShouldCheckOkToStop(t *testing.T) { - clusterInfo := AdminClusterInfo("mycluster") + clusterInfo := AdminTestClusterInfo("mycluster") lsOutput := "" treeOutput := "" context := &clusterd.Context{ diff --git a/pkg/daemon/ceph/osd/device_test.go b/pkg/daemon/ceph/osd/device_test.go index 899e541167f2..55c3f6a6ade1 100644 --- a/pkg/daemon/ceph/osd/device_test.go +++ b/pkg/daemon/ceph/osd/device_test.go @@ -40,7 +40,7 @@ func TestOSDBootstrap(t *testing.T) { context := &clusterd.Context{Executor: executor, ConfigDir: configDir} defer os.RemoveAll(context.ConfigDir) - err := createOSDBootstrapKeyring(context, client.AdminClusterInfo("mycluster"), configDir) + err := createOSDBootstrapKeyring(context, client.AdminTestClusterInfo("mycluster"), configDir) assert.Nil(t, err) targetPath := path.Join(configDir, bootstrapOsdKeyring) diff --git a/pkg/operator/ceph/cluster/cephstatus_test.go b/pkg/operator/ceph/cluster/cephstatus_test.go index 77bbb3a371b8..e09166609aa2 100644 --- a/pkg/operator/ceph/cluster/cephstatus_test.go +++ b/pkg/operator/ceph/cluster/cephstatus_test.go @@ -128,7 +128,7 @@ func TestCephStatus(t *testing.T) { } func TestNewCephStatusChecker(t *testing.T) { - clusterInfo := cephclient.AdminClusterInfo("ns") + clusterInfo := cephclient.AdminTestClusterInfo("ns") c := &clusterd.Context{} time10s, err := time.ParseDuration("10s") assert.NoError(t, err) @@ -159,7 +159,7 @@ func TestNewCephStatusChecker(t *testing.T) { func TestConfigureHealthSettings(t *testing.T) { c := &cephStatusChecker{ context: &clusterd.Context{}, - clusterInfo: cephclient.AdminClusterInfo("ns"), + clusterInfo: cephclient.AdminTestClusterInfo("ns"), } setGlobalIDReclaim := false c.context.Executor = &exectest.MockExecutor{ diff --git a/pkg/operator/ceph/cluster/cluster.go b/pkg/operator/ceph/cluster/cluster.go index 2177a7955161..9e775b336f3c 100755 --- a/pkg/operator/ceph/cluster/cluster.go +++ b/pkg/operator/ceph/cluster/cluster.go @@ -72,7 +72,7 @@ func newCluster(c *cephv1.CephCluster, context *clusterd.Context, csiMutex *sync // at this phase of the cluster creation process, the identity components of the cluster are // not yet established. we reserve this struct which is filled in as soon as the cluster's // identity can be established. 
- ClusterInfo: client.AdminClusterInfo(c.Namespace), + ClusterInfo: client.AdminClusterInfo(c.Namespace, c.Name), Namespace: c.Namespace, Spec: &c.Spec, context: context, diff --git a/pkg/operator/ceph/cluster/cluster_test.go b/pkg/operator/ceph/cluster/cluster_test.go index 41be7dd51784..c60caab5f777 100644 --- a/pkg/operator/ceph/cluster/cluster_test.go +++ b/pkg/operator/ceph/cluster/cluster_test.go @@ -35,27 +35,27 @@ func TestPreClusterStartValidation(t *testing.T) { args args wantErr bool }{ - {"no settings", args{&cluster{ClusterInfo: client.AdminClusterInfo("rook-ceph"), Spec: &cephv1.ClusterSpec{}, context: &clusterd.Context{Clientset: testop.New(t, 3)}}}, false}, - {"even mons", args{&cluster{ClusterInfo: client.AdminClusterInfo("rook-ceph"), context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 2}}}}, false}, - {"missing stretch zones", args{&cluster{ClusterInfo: client.AdminClusterInfo("rook-ceph"), context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ + {"no settings", args{&cluster{ClusterInfo: client.AdminTestClusterInfo("rook-ceph"), Spec: &cephv1.ClusterSpec{}, context: &clusterd.Context{Clientset: testop.New(t, 3)}}}, false}, + {"even mons", args{&cluster{ClusterInfo: client.AdminTestClusterInfo("rook-ceph"), context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 2}}}}, false}, + {"missing stretch zones", args{&cluster{ClusterInfo: client.AdminTestClusterInfo("rook-ceph"), context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ {Name: "a"}, }}}}}}, true}, - {"missing arbiter", args{&cluster{ClusterInfo: client.AdminClusterInfo("rook-ceph"), context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ + {"missing arbiter", args{&cluster{ClusterInfo: client.AdminTestClusterInfo("rook-ceph"), context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ {Name: "a"}, {Name: "b"}, {Name: "c"}, }}}}}}, true}, - {"missing zone name", args{&cluster{ClusterInfo: client.AdminClusterInfo("rook-ceph"), context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ + {"missing zone name", args{&cluster{ClusterInfo: client.AdminTestClusterInfo("rook-ceph"), context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ {Arbiter: true}, {Name: "b"}, {Name: "c"}, }}}}}}, true}, - {"valid stretch cluster", args{&cluster{ClusterInfo: client.AdminClusterInfo("rook-ceph"), context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 3, StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ + {"valid stretch cluster", args{&cluster{ClusterInfo: client.AdminTestClusterInfo("rook-ceph"), context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: 
&cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 3, StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ {Name: "a", Arbiter: true}, {Name: "b"}, {Name: "c"}, }}}}}}, false}, - {"not enough stretch nodes", args{&cluster{ClusterInfo: client.AdminClusterInfo("rook-ceph"), context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 5, StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ + {"not enough stretch nodes", args{&cluster{ClusterInfo: client.AdminTestClusterInfo("rook-ceph"), context: &clusterd.Context{Clientset: testop.New(t, 3)}, Spec: &cephv1.ClusterSpec{Mon: cephv1.MonSpec{Count: 5, StretchCluster: &cephv1.StretchClusterSpec{Zones: []cephv1.StretchClusterZoneSpec{ {Name: "a", Arbiter: true}, {Name: "b"}, {Name: "c"}, diff --git a/pkg/operator/ceph/cluster/mgr/mgr_test.go b/pkg/operator/ceph/cluster/mgr/mgr_test.go index 28fe2f152c14..4af73015a2e0 100644 --- a/pkg/operator/ceph/cluster/mgr/mgr_test.go +++ b/pkg/operator/ceph/cluster/mgr/mgr_test.go @@ -206,7 +206,7 @@ func TestMgrSidecarReconcile(t *testing.T) { ConfigDir: configDir, Clientset: clientset, } - clusterInfo := cephclient.AdminClusterInfo("mycluster") + clusterInfo := cephclient.AdminTestClusterInfo("mycluster") clusterInfo.SetName("test") c := &Cluster{spec: spec, context: ctx, clusterInfo: clusterInfo} @@ -281,7 +281,7 @@ func TestConfigureModules(t *testing.T) { clientset := testop.New(t, 3) context := &clusterd.Context{Executor: executor, Clientset: clientset} - clusterInfo := cephclient.AdminClusterInfo("mycluster") + clusterInfo := cephclient.AdminTestClusterInfo("mycluster") c := &Cluster{ context: context, clusterInfo: clusterInfo, @@ -382,7 +382,7 @@ func TestApplyMonitoringLabels(t *testing.T) { func TestCluster_enableBalancerModule(t *testing.T) { c := &Cluster{ context: &clusterd.Context{Executor: &exectest.MockExecutor{}, Clientset: testop.New(t, 3)}, - clusterInfo: cephclient.AdminClusterInfo("mycluster"), + clusterInfo: cephclient.AdminTestClusterInfo("mycluster"), } t.Run("on octopus we configure the balancer AND enable the upmap mode", func(t *testing.T) { diff --git a/pkg/operator/ceph/cluster/mon/mon_test.go b/pkg/operator/ceph/cluster/mon/mon_test.go index 012e1bd2686a..2fe512969b28 100644 --- a/pkg/operator/ceph/cluster/mon/mon_test.go +++ b/pkg/operator/ceph/cluster/mon/mon_test.go @@ -435,7 +435,7 @@ func TestWaitForQuorum(t *testing.T) { assert.NoError(t, err) requireAllInQuorum := false expectedMons := []string{"a"} - clusterInfo := cephclient.AdminClusterInfo("mycluster") + clusterInfo := cephclient.AdminTestClusterInfo("mycluster") err = waitForQuorumWithMons(context, clusterInfo, expectedMons, 0, requireAllInQuorum) assert.NoError(t, err) } diff --git a/pkg/operator/ceph/cluster/mon/service_test.go b/pkg/operator/ceph/cluster/mon/service_test.go index 4b4ebf861542..62b5dc574393 100644 --- a/pkg/operator/ceph/cluster/mon/service_test.go +++ b/pkg/operator/ceph/cluster/mon/service_test.go @@ -34,7 +34,7 @@ func TestCreateService(t *testing.T) { ctx := context.TODO() clientset := test.New(t, 1) c := New(&clusterd.Context{Clientset: clientset}, "ns", cephv1.ClusterSpec{}, &k8sutil.OwnerInfo{}, &sync.Mutex{}) - c.ClusterInfo = client.AdminClusterInfo("rook-ceph") + c.ClusterInfo = client.AdminTestClusterInfo("rook-ceph") m := &monConfig{ResourceName: "rook-ceph-mon-b", DaemonName: "b"} clusterIP, err := c.createService(m) assert.NoError(t, err) diff --git 
a/pkg/operator/ceph/cluster/osd/deviceset_test.go b/pkg/operator/ceph/cluster/osd/deviceset_test.go index 6bf7dd658426..db9cb180f2a5 100644 --- a/pkg/operator/ceph/cluster/osd/deviceset_test.go +++ b/pkg/operator/ceph/cluster/osd/deviceset_test.go @@ -59,7 +59,7 @@ func testPrepareDeviceSets(t *testing.T, setTemplateName bool) { } cluster := &Cluster{ context: context, - clusterInfo: client.AdminClusterInfo("testns"), + clusterInfo: client.AdminTestClusterInfo("testns"), spec: spec, } @@ -105,7 +105,7 @@ func TestPrepareDeviceSetWithHolesInPVCs(t *testing.T) { ns := "testns" cluster := &Cluster{ context: context, - clusterInfo: client.AdminClusterInfo(ns), + clusterInfo: client.AdminTestClusterInfo(ns), spec: spec, } @@ -255,7 +255,7 @@ func TestPrepareDeviceSetsWithCrushParams(t *testing.T) { } cluster := &Cluster{ context: context, - clusterInfo: client.AdminClusterInfo("testns"), + clusterInfo: client.AdminTestClusterInfo("testns"), spec: spec, } diff --git a/pkg/operator/ceph/cluster/osd/health_test.go b/pkg/operator/ceph/cluster/osd/health_test.go index ab68ce69d14f..f96ac9b7c65d 100644 --- a/pkg/operator/ceph/cluster/osd/health_test.go +++ b/pkg/operator/ceph/cluster/osd/health_test.go @@ -39,7 +39,7 @@ import ( func TestOSDHealthCheck(t *testing.T) { ctx := context.TODO() clientset := testexec.New(t, 2) - clusterInfo := client.AdminClusterInfo("fake") + clusterInfo := client.AdminTestClusterInfo("fake") var execCount = 0 executor := &exectest.MockExecutor{ @@ -102,14 +102,14 @@ func TestOSDHealthCheck(t *testing.T) { func TestMonitorStart(t *testing.T) { context, cancel := context.WithCancel(context.TODO()) - osdMon := NewOSDHealthMonitor(&clusterd.Context{}, client.AdminClusterInfo("ns"), true, cephv1.CephClusterHealthCheckSpec{}) + osdMon := NewOSDHealthMonitor(&clusterd.Context{}, client.AdminTestClusterInfo("ns"), true, cephv1.CephClusterHealthCheckSpec{}) logger.Infof("starting osd monitor") go osdMon.Start(context) cancel() } func TestNewOSDHealthMonitor(t *testing.T) { - clusterInfo := client.AdminClusterInfo("test") + clusterInfo := client.AdminTestClusterInfo("test") c := &clusterd.Context{} time10s, _ := time.ParseDuration("10s") type args struct { @@ -135,7 +135,7 @@ func TestNewOSDHealthMonitor(t *testing.T) { } func TestDeviceClasses(t *testing.T) { - clusterInfo := client.AdminClusterInfo("fake") + clusterInfo := client.AdminTestClusterInfo("fake") clusterInfo.SetName("rook-ceph") var execCount = 0 diff --git a/pkg/operator/ceph/cluster/osd/osd_test.go b/pkg/operator/ceph/cluster/osd/osd_test.go index 5b67c764d971..1cddf4f354d1 100644 --- a/pkg/operator/ceph/cluster/osd/osd_test.go +++ b/pkg/operator/ceph/cluster/osd/osd_test.go @@ -297,7 +297,7 @@ func TestAddRemoveNode(t *testing.T) { assert.NoError(t, err) removeIfOutAndSafeToRemove := true - healthMon := NewOSDHealthMonitor(context, cephclient.AdminClusterInfo(namespace), removeIfOutAndSafeToRemove, cephv1.CephClusterHealthCheckSpec{}) + healthMon := NewOSDHealthMonitor(context, cephclient.AdminTestClusterInfo(namespace), removeIfOutAndSafeToRemove, cephv1.CephClusterHealthCheckSpec{}) healthMon.checkOSDHealth() _, err = clientset.AppsV1().Deployments(namespace).Get(ctx, deploymentName(1), metav1.GetOptions{}) assert.True(t, k8serrors.IsNotFound(err)) diff --git a/pkg/operator/ceph/cluster/osd/spec_test.go b/pkg/operator/ceph/cluster/osd/spec_test.go index dabcfff7bd2d..8d8df2862649 100644 --- a/pkg/operator/ceph/cluster/osd/spec_test.go +++ b/pkg/operator/ceph/cluster/osd/spec_test.go @@ -38,7 +38,7 @@ import ( ) 
 func TestPodContainer(t *testing.T) {
-	cluster := &Cluster{rookVersion: "23", clusterInfo: cephclient.AdminClusterInfo("myosd")}
+	cluster := &Cluster{rookVersion: "23", clusterInfo: cephclient.AdminTestClusterInfo("myosd")}
 	cluster.clusterInfo.OwnerInfo = cephclient.NewMinimumOwnerInfo(t)
 	osdProps := osdProperties{
 		crushHostname: "node",
diff --git a/pkg/operator/ceph/cluster/watcher.go b/pkg/operator/ceph/cluster/watcher.go
index 9e3bf676c37d..2f430149b08d 100644
--- a/pkg/operator/ceph/cluster/watcher.go
+++ b/pkg/operator/ceph/cluster/watcher.go
@@ -106,7 +106,7 @@ func (c *clientCluster) onK8sNode(object runtime.Object) bool {
 	// Is the node in the CRUSH map already?
 	// If so we don't need to reconcile, this is done to avoid double reconcile on operator restart
 	// Assume the admin key since we are watching for node status to create OSDs
-	clusterInfo := cephclient.AdminClusterInfo(cluster.Namespace)
+	clusterInfo := cephclient.AdminClusterInfo(cluster.Namespace, cluster.Name)
 	osds, err := cephclient.GetOSDOnHost(c.context, clusterInfo, nodeName)
 	if err != nil {
 		if strings.Contains(err.Error(), opcontroller.UninitializedCephConfigError) {
diff --git a/pkg/operator/ceph/config/keyring/store_test.go b/pkg/operator/ceph/config/keyring/store_test.go
index a9fe17de44ac..6166ee727170 100644
--- a/pkg/operator/ceph/config/keyring/store_test.go
+++ b/pkg/operator/ceph/config/keyring/store_test.go
@@ -50,7 +50,7 @@ func TestGenerateKey(t *testing.T) {
 	}
 	ns := "rook-ceph"
 	ownerInfo := k8sutil.OwnerInfo{}
-	s := GetSecretStore(ctx, cephclient.AdminClusterInfo(ns), &ownerInfo)
+	s := GetSecretStore(ctx, cephclient.AdminTestClusterInfo(ns), &ownerInfo)
 	generateKey = "generatedsecretkey"
 	failGenerateKey = false
diff --git a/pkg/operator/ceph/config/monstore_test.go b/pkg/operator/ceph/config/monstore_test.go
index 369d4744ab3e..285b7503dafe 100644
--- a/pkg/operator/ceph/config/monstore_test.go
+++ b/pkg/operator/ceph/config/monstore_test.go
@@ -51,7 +51,7 @@ func TestMonStore_Set(t *testing.T) {
 		return "", nil
 	}
-	monStore := GetMonStore(ctx, client.AdminClusterInfo("mycluster"))
+	monStore := GetMonStore(ctx, client.AdminTestClusterInfo("mycluster"))
 	// setting with spaces converts to underscores
 	e := monStore.Set("global", "debug ms", "10")
@@ -96,7 +96,7 @@ func TestMonStore_Delete(t *testing.T) {
 		return "", nil
 	}
-	monStore := GetMonStore(ctx, client.AdminClusterInfo("mycluster"))
+	monStore := GetMonStore(ctx, client.AdminTestClusterInfo("mycluster"))
 	// ceph config rm called as expected
 	e := monStore.Delete("global", "debug ms")
@@ -135,7 +135,7 @@ func TestMonStore_GetDaemon(t *testing.T) {
 		return execReturn, nil
 	}
-	monStore := GetMonStore(ctx, client.AdminClusterInfo("mycluster"))
+	monStore := GetMonStore(ctx, client.AdminTestClusterInfo("mycluster"))
 	// ceph config get called as expected
 	options, e := monStore.GetDaemon("client.rgw.test.a")
@@ -178,7 +178,7 @@ func TestMonStore_DeleteDaemon(t *testing.T) {
 		return execReturn, nil
 	}
-	monStore := GetMonStore(ctx, client.AdminClusterInfo("mycluster"))
+	monStore := GetMonStore(ctx, client.AdminTestClusterInfo("mycluster"))
 	// ceph config rm rgw_enable_usage_log called as expected
 	e := monStore.DeleteDaemon("client.rgw.test.a")
@@ -209,7 +209,7 @@ func TestMonStore_SetAll(t *testing.T) {
 		return "", nil
 	}
-	monStore := GetMonStore(ctx, client.AdminClusterInfo("mycluster"))
+	monStore := GetMonStore(ctx, client.AdminTestClusterInfo("mycluster"))
 	cfgOverrides := []Option{
 		configOverride("global", "debug ms", "10"), // setting w/ spaces converts to underscores
diff --git a/pkg/operator/ceph/controller/mirror_peer_test.go b/pkg/operator/ceph/controller/mirror_peer_test.go
index 236b966dde8d..54442f611bc2 100644
--- a/pkg/operator/ceph/controller/mirror_peer_test.go
+++ b/pkg/operator/ceph/controller/mirror_peer_test.go
@@ -98,7 +98,7 @@ func TestExpandBootstrapPeerToken(t *testing.T) {
 		Executor: executor,
 	}
-	newToken, err := expandBootstrapPeerToken(c, cephclient.AdminClusterInfo("mu-cluster"), []byte(`eyJmc2lkIjoiYzZiMDg3ZjItNzgyOS00ZGJiLWJjZmMtNTNkYzM0ZTBiMzVkIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBV1lsWmZVQ1Q2RGhBQVBtVnAwbGtubDA5YVZWS3lyRVV1NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMTExLjEwOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTA6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjEyOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTI6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjExOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTE6Njc4OV0ifQ==`))
+	newToken, err := expandBootstrapPeerToken(c, cephclient.AdminTestClusterInfo("mu-cluster"), []byte(`eyJmc2lkIjoiYzZiMDg3ZjItNzgyOS00ZGJiLWJjZmMtNTNkYzM0ZTBiMzVkIiwiY2xpZW50X2lkIjoicmJkLW1pcnJvci1wZWVyIiwia2V5IjoiQVFBV1lsWmZVQ1Q2RGhBQVBtVnAwbGtubDA5YVZWS3lyRVV1NEE9PSIsIm1vbl9ob3N0IjoiW3YyOjE5Mi4xNjguMTExLjEwOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTA6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjEyOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTI6Njc4OV0sW3YyOjE5Mi4xNjguMTExLjExOjMzMDAsdjE6MTkyLjE2OC4xMTEuMTE6Njc4OV0ifQ==`))
 	assert.NoError(t, err)
 	newTokenDecoded, err := base64.StdEncoding.DecodeString(string(newToken))
 	assert.NoError(t, err)
diff --git a/pkg/operator/ceph/controller/spec_test.go b/pkg/operator/ceph/controller/spec_test.go
index e9661e994550..6efc0eeeeefa 100644
--- a/pkg/operator/ceph/controller/spec_test.go
+++ b/pkg/operator/ceph/controller/spec_test.go
@@ -249,7 +249,7 @@ func TestExtractMgrIP(t *testing.T) {
 }
 func TestConfigureExternalMetricsEndpoint(t *testing.T) {
-	clusterInfo := cephclient.AdminClusterInfo("rook-ceph")
+	clusterInfo := cephclient.AdminTestClusterInfo("rook-ceph")
 	t.Run("spec and current active mgr endpoint identical with no existing endpoint object", func(t *testing.T) {
 		monitoringSpec := cephv1.MonitoringSpec{
 			Enabled: true,
diff --git a/pkg/operator/ceph/csi/peermap/config_test.go b/pkg/operator/ceph/csi/peermap/config_test.go
index 98f7a36d828d..24fea9686745 100644
--- a/pkg/operator/ceph/csi/peermap/config_test.go
+++ b/pkg/operator/ceph/csi/peermap/config_test.go
@@ -275,7 +275,7 @@ var mockExecutor = &exectest.MockExecutor{
 }
 func TestSinglePeerMappings(t *testing.T) {
-	clusterInfo := cephclient.AdminClusterInfo(ns)
+	clusterInfo := cephclient.AdminTestClusterInfo(ns)
 	fakeContext := &clusterd.Context{
 		Executor:  mockExecutor,
 		Clientset: test.New(t, 3),
@@ -299,7 +299,7 @@ func TestSinglePeerMappings(t *testing.T) {
 }
 func TestMultiPeerMappings(t *testing.T) {
-	clusterInfo := cephclient.AdminClusterInfo(ns)
+	clusterInfo := cephclient.AdminTestClusterInfo(ns)
 	fakeContext := &clusterd.Context{
 		Executor:  mockExecutor,
 		Clientset: test.New(t, 3),
diff --git a/pkg/operator/ceph/disruption/machinedisruption/reconcile.go b/pkg/operator/ceph/disruption/machinedisruption/reconcile.go
index d9ea88157552..cd22d320eaf4 100644
--- a/pkg/operator/ceph/disruption/machinedisruption/reconcile.go
+++ b/pkg/operator/ceph/disruption/machinedisruption/reconcile.go
@@ -134,7 +134,7 @@ func (r *MachineDisruptionReconciler) reconcile(request reconcile.Request) (reco
 		mdb.Spec.MaxUnavailable = &maxUnavailable
 	}
 	// Check if the cluster is clean or not
-	clusterInfo := cephClient.AdminClusterInfo(request.NamespacedName.Namespace)
+	clusterInfo := cephClient.AdminClusterInfo(request.NamespacedName.Namespace, request.NamespacedName.Name)
 	_, isClean, err := cephClient.IsClusterClean(r.context.ClusterdContext, clusterInfo)
 	if err != nil {
 		maxUnavailable := int32(0)
diff --git a/pkg/operator/ceph/object/admin_test.go b/pkg/operator/ceph/object/admin_test.go
index 30b84e23ee51..77c006f5465b 100644
--- a/pkg/operator/ceph/object/admin_test.go
+++ b/pkg/operator/ceph/object/admin_test.go
@@ -127,7 +127,7 @@ this line can't be parsed as json
 func TestRunAdminCommandNoMultisite(t *testing.T) {
 	objContext := &Context{
 		Context:     &clusterd.Context{RemoteExecutor: exec.RemotePodCommandExecutor{ClientSet: test.New(t, 3)}},
-		clusterInfo: client.AdminClusterInfo("mycluster"),
+		clusterInfo: client.AdminTestClusterInfo("mycluster"),
 	}
 	t.Run("no network provider - we run the radosgw-admin command from the operator", func(t *testing.T) {
diff --git a/pkg/operator/ceph/object/bucket/provisioner_test.go b/pkg/operator/ceph/object/bucket/provisioner_test.go
index ff18b3f3f727..2e4d7272d078 100644
--- a/pkg/operator/ceph/object/bucket/provisioner_test.go
+++ b/pkg/operator/ceph/object/bucket/provisioner_test.go
@@ -37,7 +37,7 @@ func TestPopulateDomainAndPort(t *testing.T) {
 	ctx := context.TODO()
 	store := "test-store"
 	namespace := "ns"
-	clusterInfo := client.AdminClusterInfo(namespace)
+	clusterInfo := client.AdminTestClusterInfo(namespace)
 	p := NewProvisioner(&clusterd.Context{RookClientset: rookclient.NewSimpleClientset(), Clientset: test.New(t, 1)}, clusterInfo)
 	p.objectContext = object.NewContext(p.context, clusterInfo, store)
 	sc := &storagev1.StorageClass{
diff --git a/pkg/operator/ceph/object/dependents_test.go b/pkg/operator/ceph/object/dependents_test.go
index fe106b2f9adc..58fde43b2dac 100644
--- a/pkg/operator/ceph/object/dependents_test.go
+++ b/pkg/operator/ceph/object/dependents_test.go
@@ -91,7 +91,7 @@ func TestCephObjectStoreDependents(t *testing.T) {
 		}
 	}
-	clusterInfo := client.AdminClusterInfo(ns)
+	clusterInfo := client.AdminTestClusterInfo(ns)
 	// Create objectmeta with the given name in our test namespace
 	meta := func(name string) v1.ObjectMeta {
 		return v1.ObjectMeta{
diff --git a/pkg/operator/ceph/object/objectstore_test.go b/pkg/operator/ceph/object/objectstore_test.go
index 1875e5910d53..5df22d594f22 100644
--- a/pkg/operator/ceph/object/objectstore_test.go
+++ b/pkg/operator/ceph/object/objectstore_test.go
@@ -188,7 +188,7 @@ func deleteStore(t *testing.T, name string, existingStores string, expectedDelet
 	executor.MockExecuteCommandWithTimeout = executorFuncWithTimeout
 	executor.MockExecuteCommandWithOutput = executorFunc
 	executor.MockExecuteCommandWithCombinedOutput = executorFunc
-	context := &Context{Context: &clusterd.Context{Executor: executor}, Name: "myobj", clusterInfo: client.AdminClusterInfo("mycluster")}
+	context := &Context{Context: &clusterd.Context{Executor: executor}, Name: "myobj", clusterInfo: client.AdminTestClusterInfo("mycluster")}
 	// Delete an object store without deleting the pools
 	spec := cephv1.ObjectStoreSpec{}
diff --git a/pkg/operator/ceph/object/realm/controller_test.go b/pkg/operator/ceph/object/realm/controller_test.go
index 73ba09ade401..41da9c89d488 100644
--- a/pkg/operator/ceph/object/realm/controller_test.go
+++ b/pkg/operator/ceph/object/realm/controller_test.go
@@ -255,7 +255,7 @@ func getObjectRealmAndReconcileObjectRealm(t *testing.T) (*ReconcileObjectRealm,
 	// Create a fake client to mock API calls.
 	cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
 	// Create a ReconcileObjectRealm object with the scheme and fake client.
-	clusterInfo := cephclient.AdminClusterInfo("rook")
+	clusterInfo := cephclient.AdminTestClusterInfo("rook")
 	r := &ReconcileObjectRealm{client: cl, scheme: s, context: c, clusterInfo: clusterInfo}
 	return r, objectRealm
diff --git a/pkg/operator/ceph/object/rgw_test.go b/pkg/operator/ceph/object/rgw_test.go
index 3d9937304997..e720bcd1ad93 100644
--- a/pkg/operator/ceph/object/rgw_test.go
+++ b/pkg/operator/ceph/object/rgw_test.go
@@ -180,7 +180,7 @@ func TestGetTlsCaCert(t *testing.T) {
 		Context: &clusterd.Context{
 			Clientset: test.New(t, 3),
 		},
-		clusterInfo: client.AdminClusterInfo("rook-ceph"),
+		clusterInfo: client.AdminTestClusterInfo("rook-ceph"),
 	}
 	objectStore := simpleStore()
diff --git a/pkg/operator/ceph/object/zone/controller_test.go b/pkg/operator/ceph/object/zone/controller_test.go
index 4d2a01991735..c812e7508d8a 100644
--- a/pkg/operator/ceph/object/zone/controller_test.go
+++ b/pkg/operator/ceph/object/zone/controller_test.go
@@ -182,7 +182,7 @@ func TestCephObjectZoneController(t *testing.T) {
 	cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
 	// Create a ReconcileObjectZone object with the scheme and fake client.
-	clusterInfo := cephclient.AdminClusterInfo("rook")
+	clusterInfo := cephclient.AdminTestClusterInfo("rook")
 	r := &ReconcileObjectZone{client: cl, scheme: s, context: c, clusterInfo: clusterInfo}
diff --git a/pkg/operator/ceph/object/zonegroup/controller_test.go b/pkg/operator/ceph/object/zonegroup/controller_test.go
index 897c78041747..1bc0f53c1963 100644
--- a/pkg/operator/ceph/object/zonegroup/controller_test.go
+++ b/pkg/operator/ceph/object/zonegroup/controller_test.go
@@ -176,7 +176,7 @@ func TestCephObjectZoneGroupController(t *testing.T) {
 	cl := fake.NewClientBuilder().WithScheme(s).WithRuntimeObjects(object...).Build()
 	// Create a ReconcileObjectZoneGroup object with the scheme and fake client.
-	clusterInfo := cephclient.AdminClusterInfo("rook")
+	clusterInfo := cephclient.AdminTestClusterInfo("rook")
 	r := &ReconcileObjectZoneGroup{client: cl, scheme: s, context: c, clusterInfo: clusterInfo}
diff --git a/pkg/operator/ceph/pool/controller_test.go b/pkg/operator/ceph/pool/controller_test.go
index 972cea9b5ea1..16fbcd43a8cf 100644
--- a/pkg/operator/ceph/pool/controller_test.go
+++ b/pkg/operator/ceph/pool/controller_test.go
@@ -45,7 +45,7 @@ import (
 )
 func TestCreatePool(t *testing.T) {
-	clusterInfo := client.AdminClusterInfo("mycluster")
+	clusterInfo := client.AdminTestClusterInfo("mycluster")
 	executor := &exectest.MockExecutor{
 		MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
 			logger.Infof("Command: %s %v", command, args)
@@ -78,7 +78,7 @@ func TestCreatePool(t *testing.T) {
 func TestDeletePool(t *testing.T) {
 	failOnDelete := false
-	clusterInfo := cephclient.AdminClusterInfo("mycluster")
+	clusterInfo := cephclient.AdminTestClusterInfo("mycluster")
 	executor := &exectest.MockExecutor{
 		MockExecuteCommandWithOutput: func(command string, args ...string) (string, error) {
 			emptyPool := "{\"images\":{\"count\":0,\"provisioned_bytes\":0,\"snap_count\":0},\"trash\":{\"count\":1,\"provisioned_bytes\":2048,\"snap_count\":0}}"
@@ -497,7 +497,7 @@ func TestConfigureRBDStats(t *testing.T) {
 	context.Executor = executor
 	context.Client = fake.NewClientBuilder().WithScheme(s).Build()
-	clusterInfo := cephclient.AdminClusterInfo(namespace)
+	clusterInfo := cephclient.AdminTestClusterInfo(namespace)
 	// Case 1: CephBlockPoolList is not registered in scheme.
 	// So, an error is expected as List() operation would fail.
diff --git a/pkg/operator/ceph/pool/validate_test.go b/pkg/operator/ceph/pool/validate_test.go
index 20c57f816103..edef5abf6c66 100644
--- a/pkg/operator/ceph/pool/validate_test.go
+++ b/pkg/operator/ceph/pool/validate_test.go
@@ -203,7 +203,7 @@ func TestValidatePool(t *testing.T) {
 func TestValidateCrushProperties(t *testing.T) {
 	executor := &exectest.MockExecutor{}
 	context := &clusterd.Context{Executor: executor}
-	clusterInfo := cephclient.AdminClusterInfo("mycluster")
+	clusterInfo := cephclient.AdminTestClusterInfo("mycluster")
 	executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
 		logger.Infof("Command: %s %v", command, args)
 		if args[1] == "crush" && args[2] == "dump" {
@@ -289,7 +289,7 @@ func TestValidateDeviceClasses(t *testing.T) {
 	for _, tc := range testcases {
 		t.Run(tc.name, func(t *testing.T) {
-			clusterInfo := cephclient.AdminClusterInfo("mycluster")
+			clusterInfo := cephclient.AdminTestClusterInfo("mycluster")
 			executor := &exectest.MockExecutor{}
 			context := &clusterd.Context{Executor: executor}
 			executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
diff --git a/tests/framework/clients/filesystem.go b/tests/framework/clients/filesystem.go
index 049e06fdc162..d66e8c747da9 100644
--- a/tests/framework/clients/filesystem.go
+++ b/tests/framework/clients/filesystem.go
@@ -150,7 +150,7 @@ func (f *FilesystemOperation) Delete(name, namespace string) error {
 // List lists filesystems in Rook
 func (f *FilesystemOperation) List(namespace string) ([]client.CephFilesystem, error) {
 	context := f.k8sh.MakeContext()
-	clusterInfo := client.AdminClusterInfo(namespace)
+	clusterInfo := client.AdminTestClusterInfo(namespace)
 	filesystems, err := client.ListFilesystems(context, clusterInfo)
 	if err != nil {
 		return nil, fmt.Errorf("failed to list pools: %+v", err)
diff --git a/tests/framework/clients/object_user.go b/tests/framework/clients/object_user.go
index 5ca3052b9ab1..667a8bef5d7d 100644
--- a/tests/framework/clients/object_user.go
+++ b/tests/framework/clients/object_user.go
@@ -42,7 +42,7 @@ func CreateObjectUserOperation(k8sh *utils.K8sHelper, manifests installer.CephMa
 // ObjectUserGet Function to get the details of an object user from radosgw
 func (o *ObjectUserOperation) GetUser(namespace string, store string, userid string) (*rgw.ObjectUser, error) {
 	ctx := o.k8sh.MakeContext()
-	clusterInfo := client.AdminClusterInfo(namespace)
+	clusterInfo := client.AdminTestClusterInfo(namespace)
 	objectStore, err := o.k8sh.RookClientset.CephV1().CephObjectStores(namespace).Get(context.TODO(), store, metav1.GetOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("failed to get objectstore info: %+v", err)
diff --git a/tests/framework/clients/pool.go b/tests/framework/clients/pool.go
index 8571a4ad4510..813fe14cbb1d 100644
--- a/tests/framework/clients/pool.go
+++ b/tests/framework/clients/pool.go
@@ -97,7 +97,7 @@ func (p *PoolOperation) PoolCRDExists(namespace, name string) (bool, error) {
 }
 func (p *PoolOperation) CephPoolExists(namespace, name string) (bool, error) {
-	clusterInfo := client.AdminClusterInfo(namespace)
+	clusterInfo := client.AdminTestClusterInfo(namespace)
 	pools, err := p.ListCephPools(clusterInfo)
 	if err != nil {
 		return false, err
diff --git a/tests/framework/clients/test_client.go b/tests/framework/clients/test_client.go
index f80baa0f1b2f..b59359539906 100644
--- a/tests/framework/clients/test_client.go
+++ b/tests/framework/clients/test_client.go
@@ -35,7 +35,7 @@ type TestClient struct {
 	BucketClient       *BucketOperation
 	UserClient         *ClientOperation
 	RBDMirrorClient    *RBDMirrorOperation
-	TopicClient        *TopicOperation
+	TopicClient        *TopicOperation
 	NotificationClient *NotificationOperation
 	k8sh               *utils.K8sHelper
 }
@@ -61,7 +61,7 @@ func CreateTestClient(k8sHelper *utils.K8sHelper, manifests installer.CephManife
 // Status returns rook status details
 func (c TestClient) Status(namespace string) (client.CephStatus, error) {
 	context := c.k8sh.MakeContext()
-	clusterInfo := client.AdminClusterInfo(namespace)
+	clusterInfo := client.AdminTestClusterInfo(namespace)
 	status, err := client.Status(context, clusterInfo)
 	if err != nil {
 		return client.CephStatus{}, fmt.Errorf("failed to get status: %+v", err)
diff --git a/tests/framework/installer/ceph_installer.go b/tests/framework/installer/ceph_installer.go
index d9015a2a2a5d..19e8e57eff5f 100644
--- a/tests/framework/installer/ceph_installer.go
+++ b/tests/framework/installer/ceph_installer.go
@@ -224,7 +224,7 @@ func (h *CephInstaller) CreateRookToolbox(manifests CephManifests) (err error) {
 // Execute a command in the ceph toolbox
 func (h *CephInstaller) Execute(command string, parameters []string, namespace string) (error, string) {
-	clusterInfo := client.AdminClusterInfo(namespace)
+	clusterInfo := client.AdminTestClusterInfo(namespace)
 	cmd, args := client.FinalizeCephCommandArgs(command, clusterInfo, parameters, h.k8shelper.MakeContext().ConfigDir)
 	result, err := h.k8shelper.MakeContext().Executor.ExecuteCommandWithOutput(cmd, args...)
 	if err != nil {
@@ -570,7 +570,7 @@ func (h *CephInstaller) InstallRook() (bool, error) {
 	const loopCount = 20
 	for i := 0; i < loopCount; i++ {
-		_, err = client.Status(h.k8shelper.MakeContext(), client.AdminClusterInfo(h.settings.Namespace))
+		_, err = client.Status(h.k8shelper.MakeContext(), client.AdminTestClusterInfo(h.settings.Namespace))
 		if err == nil {
 			logger.Infof("toolbox ready")
 			break
diff --git a/tests/integration/ceph_base_block_test.go b/tests/integration/ceph_base_block_test.go
index 86d6caa741b2..079842c4b435 100644
--- a/tests/integration/ceph_base_block_test.go
+++ b/tests/integration/ceph_base_block_test.go
@@ -260,7 +260,7 @@ func runBlockCSITest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.
 	storageClassNameRetained := "rook-ceph-block-retained"
 	blockNameRetained := "block-pv-claim-retained"
-	clusterInfo := client.AdminClusterInfo(namespace)
+	clusterInfo := client.AdminTestClusterInfo(namespace)
 	defer blockTestDataCleanUp(helper, k8sh, s, clusterInfo, poolName, storageClassName, blockName, podName, true)
 	defer blockTestDataCleanUp(helper, k8sh, s, clusterInfo, poolNameRetained, storageClassNameRetained, blockNameRetained, podNameWithPVRetained, true)
 	logger.Infof("Block Storage End to End Integration Test - create, mount, write to, read from, and unmount")
@@ -405,7 +405,7 @@ func restartOSDPods(k8sh *utils.K8sHelper, s suite.Suite, namespace string) {
 func runBlockCSITestLite(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite.Suite, settings *installer.TestCephSettings) {
 	logger.Infof("Block Storage End to End Integration Test - create storageclass,pool and pvc")
 	logger.Infof("Running on Rook Cluster %s", settings.Namespace)
-	clusterInfo := client.AdminClusterInfo(settings.Namespace)
+	clusterInfo := client.AdminTestClusterInfo(settings.Namespace)
 	poolName := "rookpool"
 	storageClassName := "rook-ceph-block-lite"
 	blockName := "test-block-claim-lite"
diff --git a/tests/integration/ceph_bucket_notification_test.go b/tests/integration/ceph_bucket_notification_test.go
index 7b85b28cae6b..2d0fdf6f69ba 100644
--- a/tests/integration/ceph_bucket_notification_test.go
+++ b/tests/integration/ceph_bucket_notification_test.go
@@ -44,7 +44,7 @@ func (s *ObjectSuite) TestBucketNotificationsInOrder() {
 	createCephObjectStore(s.T(), helper, k8sh, namespace, storeName, 3, tlsEnable)
 	ctx := context.TODO()
-	clusterInfo := client.AdminClusterInfo(namespace)
+	clusterInfo := client.AdminTestClusterInfo(namespace)
 	t := s.T()
 	t.Run("create CephObjectStoreUser", func(t *testing.T) {
diff --git a/tests/integration/ceph_multi_cluster_test.go b/tests/integration/ceph_multi_cluster_test.go
index e584e992d5d9..90e75e04ffd9 100644
--- a/tests/integration/ceph_multi_cluster_test.go
+++ b/tests/integration/ceph_multi_cluster_test.go
@@ -130,7 +130,7 @@ func (s *MultiClusterDeploySuite) createPools() {
 func (s *MultiClusterDeploySuite) deletePools() {
 	// create a test pool in each cluster so that we get some PGs
-	clusterInfo := client.AdminClusterInfo(s.settings.Namespace)
+	clusterInfo := client.AdminTestClusterInfo(s.settings.Namespace)
 	if err := s.testClient.PoolClient.DeletePool(s.testClient.BlockClient, clusterInfo, s.poolName); err != nil {
 		logger.Errorf("failed to delete pool %q. %v", s.poolName, err)
 	} else {
diff --git a/tests/integration/ceph_object_test.go b/tests/integration/ceph_object_test.go
index 358d8045af56..6cb02a5f5f83 100644
--- a/tests/integration/ceph_object_test.go
+++ b/tests/integration/ceph_object_test.go
@@ -161,7 +161,7 @@ func runObjectE2ETest(helper *clients.TestClient, k8sh *utils.K8sHelper, s suite
 func testObjectStoreOperations(s suite.Suite, helper *clients.TestClient, k8sh *utils.K8sHelper, namespace, storeName string) {
 	ctx := context.TODO()
-	clusterInfo := client.AdminClusterInfo(namespace)
+	clusterInfo := client.AdminTestClusterInfo(namespace)
 	t := s.T()
 	t.Run("create CephObjectStoreUser", func(t *testing.T) {
diff --git a/tests/integration/ceph_smoke_test.go b/tests/integration/ceph_smoke_test.go
index c69a4e0a3ce9..32c140e5b8d0 100644
--- a/tests/integration/ceph_smoke_test.go
+++ b/tests/integration/ceph_smoke_test.go
@@ -206,7 +206,7 @@ func (s *SmokeSuite) TestPoolResize() {
 	require.NoError(s.T(), err)
 	poolFound := false
-	clusterInfo := client.AdminClusterInfo(s.settings.Namespace)
+	clusterInfo := client.AdminTestClusterInfo(s.settings.Namespace)
 	// Wait for pool to appear
 	for i := 0; i < 10; i++ {
@@ -283,7 +283,7 @@ func (s *SmokeSuite) TestCreateClient() {
 		"mgr": "allow rwx",
 		"osd": "allow rwx",
 	}
-	clusterInfo := client.AdminClusterInfo(s.settings.Namespace)
+	clusterInfo := client.AdminTestClusterInfo(s.settings.Namespace)
 	err := s.helper.UserClient.Create(clientName, s.settings.Namespace, caps)
 	require.NoError(s.T(), err)
diff --git a/tests/integration/ceph_upgrade_test.go b/tests/integration/ceph_upgrade_test.go
index 734d834a0e45..9bdb94e12307 100644
--- a/tests/integration/ceph_upgrade_test.go
+++ b/tests/integration/ceph_upgrade_test.go
@@ -103,7 +103,7 @@ func (s *UpgradeSuite) TestUpgradeRookToMaster() {
 	numOSDs, filesystemName, rbdFilesToRead, cephfsFilesToRead := s.deployClusterforUpgrade(objectStoreName, objectUserID, message, preFilename)
 	s.settings.CephVersion = installer.OctopusVersion
-	clusterInfo := client.AdminClusterInfo(s.namespace)
+	clusterInfo := client.AdminTestClusterInfo(s.namespace)
 	requireBlockImagesRemoved := false
 	defer func() {
 		blockTestDataCleanUp(s.helper, s.k8sh, s.Suite, clusterInfo, poolName, storageClassName, blockName, rbdPodName, requireBlockImagesRemoved)
@@ -162,7 +162,7 @@ func (s *UpgradeSuite) TestUpgradeCephToOctopusDevel() {
 	preFilename := "pre-upgrade-file"
 	s.settings.CephVersion = installer.OctopusVersion
 	numOSDs, filesystemName, rbdFilesToRead, cephfsFilesToRead := s.deployClusterforUpgrade(objectStoreName, objectUserID, message, preFilename)
-	clusterInfo := client.AdminClusterInfo(s.namespace)
+	clusterInfo := client.AdminTestClusterInfo(s.namespace)
 	requireBlockImagesRemoved := false
 	defer func() {
 		blockTestDataCleanUp(s.helper, s.k8sh, s.Suite, clusterInfo, poolName, storageClassName, blockName, rbdPodName, requireBlockImagesRemoved)
@@ -195,7 +195,7 @@ func (s *UpgradeSuite) TestUpgradeCephToPacificDevel() {
 	preFilename := "pre-upgrade-file"
 	s.settings.CephVersion = installer.PacificVersion
 	numOSDs, filesystemName, rbdFilesToRead, cephfsFilesToRead := s.deployClusterforUpgrade(objectStoreName, objectUserID, message, preFilename)
-	clusterInfo := client.AdminClusterInfo(s.namespace)
+	clusterInfo := client.AdminTestClusterInfo(s.namespace)
 	requireBlockImagesRemoved := false
 	defer func() {
 		blockTestDataCleanUp(s.helper, s.k8sh, s.Suite, clusterInfo, poolName, storageClassName, blockName, rbdPodName, requireBlockImagesRemoved)
@@ -226,7 +226,7 @@ func (s *UpgradeSuite) deployClusterforUpgrade(objectStoreName, objectUserID, me
 	// Create block, object, and file storage before the upgrade
 	//
 	logger.Infof("Initializing block before the upgrade")
-	clusterInfo := client.AdminClusterInfo(s.namespace)
+	clusterInfo := client.AdminTestClusterInfo(s.namespace)
 	setupBlockLite(s.helper, s.k8sh, s.Suite, clusterInfo, poolName, storageClassName, blockName, rbdPodName)
 	createPodWithBlock(s.helper, s.k8sh, s.Suite, s.namespace, storageClassName, rbdPodName, blockName)
@@ -393,7 +393,7 @@ func (s *UpgradeSuite) verifyFilesAfterUpgrade(fsName, newFileToWrite, messageFo
 	if fsName != "" {
 		// wait for filesystem to be active
-		clusterInfo := client.AdminClusterInfo(s.namespace)
+		clusterInfo := client.AdminTestClusterInfo(s.namespace)
 		err := waitForFilesystemActive(s.k8sh, clusterInfo, fsName)
 		require.NoError(s.T(), err)
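
The hunks above establish two distinct call patterns: operator code (for example pkg/operator/ceph/cluster/watcher.go and pkg/operator/ceph/disruption/machinedisruption/reconcile.go) now passes both the namespace and the CephCluster CR name to AdminClusterInfo, while unit and integration tests call AdminTestClusterInfo with only a namespace. The following is a minimal, hypothetical Go sketch of how the two helpers might relate; it is not the actual Rook implementation, the struct fields are trimmed to the two shown here, and the "testing" placeholder name is an assumption made purely for illustration.

    package main

    import "fmt"

    // clusterInfo is a hypothetical stand-in for the operator's cluster info type;
    // only the two fields relevant to this sketch are included.
    type clusterInfo struct {
    	Namespace   string
    	ClusterName string
    }

    // adminClusterInfo mirrors the production call sites above, which supply both
    // the namespace and the CephCluster CR name.
    func adminClusterInfo(namespace, name string) *clusterInfo {
    	return &clusterInfo{Namespace: namespace, ClusterName: name}
    }

    // adminTestClusterInfo mirrors the test-only call sites, where the CR name is
    // not meaningful; "testing" is an assumed placeholder, not the real value.
    func adminTestClusterInfo(namespace string) *clusterInfo {
    	return adminClusterInfo(namespace, "testing")
    }

    func main() {
    	// A reconciler would derive both values from its request, e.g.
    	// request.NamespacedName.Namespace and request.NamespacedName.Name.
    	prod := adminClusterInfo("rook-ceph", "my-cluster")

    	// A unit test only needs the namespace.
    	test := adminTestClusterInfo("rook-ceph")

    	fmt.Println(prod.ClusterName, test.ClusterName)
    }

Splitting the constructors this way makes the required inputs explicit at each call site: production callers must supply the CR name, and tests do not have to invent one.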