Skip to content

Commit

Permalink
core: ensure cluster name is available on cluster info
Browse files Browse the repository at this point in the history
The cluster info is important context for the cluster controller to
create the cluster, and all the fields must be properly set.
A test cluster name was being set temporarily, resulting in
mons getting the wrong cluster CR name. There is no known
issue caused by the temporary value; it was merely exposed
when rook#8678 started setting the value as a label.

The functions are now named more clearly, so only unit and
integration tests use the test value for the cluster name,
where the name is not important.

Signed-off-by: Travis Nielsen <tnielsen@redhat.com>
  • Loading branch information
travisn authored and parth-gr committed Feb 22, 2022
1 parent 329c3da commit 33f4643
Show file tree
Hide file tree
Showing 56 changed files with 161 additions and 157 deletions.
2 changes: 1 addition & 1 deletion cmd/rook/ceph/cleanup.go
Expand Up @@ -67,7 +67,7 @@ func startCleanUp(cmd *cobra.Command, args []string) error {
}

namespace := os.Getenv(k8sutil.PodNamespaceEnvVar)
clusterInfo := client.AdminClusterInfo(namespace)
clusterInfo := client.AdminClusterInfo(namespace, "")
clusterInfo.FSID = clusterFSID

// Build Sanitizer
Expand Down
12 changes: 6 additions & 6 deletions pkg/daemon/ceph/client/command_test.go
Expand Up @@ -44,7 +44,7 @@ func TestFinalizeCephCommandArgs(t *testing.T) {
"--keyring=/var/lib/rook/rook-ceph/rook/client.admin.keyring",
}

clusterInfo := AdminClusterInfo("rook")
clusterInfo := AdminTestClusterInfo("rook")
cmd, args := FinalizeCephCommandArgs(expectedCommand, clusterInfo, args, configDir)
assert.Exactly(t, expectedCommand, cmd)
assert.Exactly(t, expectedArgs, args)
Expand Down Expand Up @@ -74,7 +74,7 @@ func TestFinalizeRadosGWAdminCommandArgs(t *testing.T) {
"--keyring=/var/lib/rook/rook-ceph/rook/client.admin.keyring",
}

clusterInfo := AdminClusterInfo("rook")
clusterInfo := AdminTestClusterInfo("rook")
cmd, args := FinalizeCephCommandArgs(expectedCommand, clusterInfo, args, configDir)
assert.Exactly(t, expectedCommand, cmd)
assert.Exactly(t, expectedArgs, args)
Expand All @@ -99,7 +99,7 @@ func TestFinalizeCephCommandArgsToolBox(t *testing.T) {
"--connect-timeout=15",
}

clusterInfo := AdminClusterInfo("rook")
clusterInfo := AdminTestClusterInfo("rook")
exec.CephCommandsTimeout = 15 * time.Second
cmd, args := FinalizeCephCommandArgs(expectedCommand, clusterInfo, args, configDir)
assert.Exactly(t, "kubectl", cmd)
Expand All @@ -111,7 +111,7 @@ func TestNewRBDCommand(t *testing.T) {
args := []string{"create", "--size", "1G", "myvol"}

t.Run("rbd command with no multus", func(t *testing.T) {
clusterInfo := AdminClusterInfo("rook")
clusterInfo := AdminTestClusterInfo("rook")
executor := &exectest.MockExecutor{}
executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
switch {
Expand All @@ -130,7 +130,7 @@ func TestNewRBDCommand(t *testing.T) {

})
t.Run("rbd command with multus", func(t *testing.T) {
clusterInfo := AdminClusterInfo("rook")
clusterInfo := AdminTestClusterInfo("rook")
clusterInfo.NetworkSpec.Provider = "multus"
executor := &exectest.MockExecutor{}
context := &clusterd.Context{Executor: executor, RemoteExecutor: exec.RemotePodCommandExecutor{ClientSet: test.New(t, 3)}}
Expand All @@ -144,7 +144,7 @@ func TestNewRBDCommand(t *testing.T) {
})

t.Run("context canceled nothing to run", func(t *testing.T) {
clusterInfo := AdminClusterInfo("rook")
clusterInfo := AdminTestClusterInfo("rook")
ctx, cancel := context.WithCancel(context.TODO())
clusterInfo.Context = ctx
cancel()
Expand Down
2 changes: 1 addition & 1 deletion pkg/daemon/ceph/client/crash_test.go
Expand Up @@ -46,7 +46,7 @@ func TestCephCrash(t *testing.T) {
}
return "", errors.Errorf("unexpected ceph command %q", args)
}
crash, err := GetCrashList(context, AdminClusterInfo("mycluster"))
crash, err := GetCrashList(context, AdminTestClusterInfo("mycluster"))
assert.NoError(t, err)
assert.Equal(t, 1, len(crash))
}
4 changes: 2 additions & 2 deletions pkg/daemon/ceph/client/crush_rule_test.go
Expand Up @@ -97,7 +97,7 @@ func TestInjectCRUSHMapMap(t *testing.T) {
return "", errors.Errorf("unexpected ceph command '%v'", args)
}

err := injectCRUSHMap(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "/tmp/063990228.compiled")
err := injectCRUSHMap(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster"), "/tmp/063990228.compiled")
assert.Nil(t, err)
}

Expand All @@ -111,7 +111,7 @@ func TestSetCRUSHMapMap(t *testing.T) {
return "", errors.Errorf("unexpected ceph command '%v'", args)
}

err := setCRUSHMap(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "/tmp/063990228.compiled")
err := setCRUSHMap(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster"), "/tmp/063990228.compiled")
assert.Nil(t, err)
}

Expand Down
4 changes: 2 additions & 2 deletions pkg/daemon/ceph/client/crush_test.go
Expand Up @@ -272,7 +272,7 @@ func TestGetCrushMap(t *testing.T) {
}
return "", errors.Errorf("unexpected ceph command '%v'", args)
}
crush, err := GetCrushMap(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"))
crush, err := GetCrushMap(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster"))

assert.Nil(t, err)
assert.Equal(t, 11, len(crush.Types))
Expand All @@ -291,7 +291,7 @@ func TestGetOSDOnHost(t *testing.T) {
return "", errors.Errorf("unexpected ceph command '%v'", args)
}

_, err := GetOSDOnHost(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "my-host")
_, err := GetOSDOnHost(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster"), "my-host")
assert.Nil(t, err)
}

Expand Down
4 changes: 2 additions & 2 deletions pkg/daemon/ceph/client/deviceclass_test.go
Expand Up @@ -38,11 +38,11 @@ func TestGetDeviceClassOSDs(t *testing.T) {
}
return "", errors.Errorf("unexpected ceph command '%v'", args)
}
osds, err := GetDeviceClassOSDs(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "ssd")
osds, err := GetDeviceClassOSDs(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster"), "ssd")
assert.Nil(t, err)
assert.Equal(t, []int{0, 1, 2}, osds)

osds, err = GetDeviceClassOSDs(&clusterd.Context{Executor: executor}, AdminClusterInfo("mycluster"), "hdd")
osds, err = GetDeviceClassOSDs(&clusterd.Context{Executor: executor}, AdminTestClusterInfo("mycluster"), "hdd")
assert.Nil(t, err)
assert.Equal(t, []int{}, osds)
}
2 changes: 1 addition & 1 deletion pkg/daemon/ceph/client/erasure-code-profile_test.go
Expand Up @@ -84,6 +84,6 @@ func testCreateProfile(t *testing.T, failureDomain, crushRoot, deviceClass strin
return "", errors.Errorf("unexpected ceph command %q", args)
}

err := CreateErasureCodeProfile(context, AdminClusterInfo("mycluster"), "myapp", spec)
err := CreateErasureCodeProfile(context, AdminTestClusterInfo("mycluster"), "myapp", spec)
assert.Nil(t, err)
}
12 changes: 6 additions & 6 deletions pkg/daemon/ceph/client/filesystem_mirror_test.go
Expand Up @@ -51,7 +51,7 @@ func TestEnableFilesystemSnapshotMirror(t *testing.T) {
}
context := &clusterd.Context{Executor: executor}

err := EnableFilesystemSnapshotMirror(context, AdminClusterInfo("mycluster"), fs)
err := EnableFilesystemSnapshotMirror(context, AdminTestClusterInfo("mycluster"), fs)
assert.NoError(t, err)
}

Expand All @@ -70,7 +70,7 @@ func TestDisableFilesystemSnapshotMirror(t *testing.T) {
}
context := &clusterd.Context{Executor: executor}

err := DisableFilesystemSnapshotMirror(context, AdminClusterInfo("mycluster"), fs)
err := DisableFilesystemSnapshotMirror(context, AdminTestClusterInfo("mycluster"), fs)
assert.NoError(t, err)
}

Expand All @@ -92,7 +92,7 @@ func TestImportFilesystemMirrorPeer(t *testing.T) {
}
context := &clusterd.Context{Executor: executor}

err := ImportFSMirrorBootstrapPeer(context, AdminClusterInfo("mycluster"), fs, token)
err := ImportFSMirrorBootstrapPeer(context, AdminTestClusterInfo("mycluster"), fs, token)
assert.NoError(t, err)
}

Expand All @@ -112,7 +112,7 @@ func TestCreateFSMirrorBootstrapPeer(t *testing.T) {
}
context := &clusterd.Context{Executor: executor}

token, err := CreateFSMirrorBootstrapPeer(context, AdminClusterInfo("mycluster"), fs)
token, err := CreateFSMirrorBootstrapPeer(context, AdminTestClusterInfo("mycluster"), fs)
assert.NoError(t, err)
_, err = base64.StdEncoding.DecodeString(string(token))
assert.NoError(t, err)
Expand All @@ -135,7 +135,7 @@ func TestRemoveFilesystemMirrorPeer(t *testing.T) {
}
context := &clusterd.Context{Executor: executor}

err := RemoveFilesystemMirrorPeer(context, AdminClusterInfo("mycluster"), peerUUID)
err := RemoveFilesystemMirrorPeer(context, AdminTestClusterInfo("mycluster"), peerUUID)
assert.NoError(t, err)
}

Expand All @@ -155,7 +155,7 @@ func TestFSMirrorDaemonStatus(t *testing.T) {
}
context := &clusterd.Context{Executor: executor}

s, err := GetFSMirrorDaemonStatus(context, AdminClusterInfo("mycluster"), fs)
s, err := GetFSMirrorDaemonStatus(context, AdminTestClusterInfo("mycluster"), fs)
assert.NoError(t, err)
assert.Equal(t, "myfs", s[0].Filesystems[0].Name)
}
26 changes: 13 additions & 13 deletions pkg/daemon/ceph/client/filesystem_test.go
Expand Up @@ -158,7 +158,7 @@ func TestFilesystemRemove(t *testing.T) {
return "", errors.Errorf("unexpected ceph command %q", args)
}

err := RemoveFilesystem(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, false)
err := RemoveFilesystem(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, false)
assert.Nil(t, err)
assert.True(t, metadataDeleted)
assert.True(t, dataDeleted)
Expand Down Expand Up @@ -213,7 +213,7 @@ func TestFailAllStandbyReplayMDS(t *testing.T) {
return "", errors.Errorf("unexpected ceph command %q", args)
}

err := FailAllStandbyReplayMDS(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName)
err := FailAllStandbyReplayMDS(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName)
assert.NoError(t, err)
assert.ElementsMatch(t, failedGids, []string{"124"})

Expand Down Expand Up @@ -259,7 +259,7 @@ func TestFailAllStandbyReplayMDS(t *testing.T) {
}
return "", errors.Errorf("unexpected ceph command %q", args)
}
err = FailAllStandbyReplayMDS(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName)
err = FailAllStandbyReplayMDS(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName)
assert.NoError(t, err)

fs = CephFilesystemDetails{
Expand Down Expand Up @@ -304,7 +304,7 @@ func TestFailAllStandbyReplayMDS(t *testing.T) {
}
return "", errors.Errorf("unexpected ceph command %q", args)
}
err = FailAllStandbyReplayMDS(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName)
err = FailAllStandbyReplayMDS(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName)
assert.Error(t, err)
assert.Contains(t, err.Error(), "expected execution of mds fail")
}
Expand Down Expand Up @@ -355,7 +355,7 @@ func TestGetMdsIdByRank(t *testing.T) {
return "", errors.Errorf("unexpected ceph command %q", args)
}

name, err := GetMdsIdByRank(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0)
name, err := GetMdsIdByRank(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0)
assert.Equal(t, name, "myfs1-a")
assert.NoError(t, err)

Expand All @@ -378,7 +378,7 @@ func TestGetMdsIdByRank(t *testing.T) {
return "", errors.Errorf("unexpected ceph command %q", args)
}

name, err = GetMdsIdByRank(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0)
name, err = GetMdsIdByRank(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0)
assert.Equal(t, "", name)
assert.Error(t, err)
assert.Contains(t, err.Error(), "test ceph fs get error")
Expand Down Expand Up @@ -427,7 +427,7 @@ func TestGetMdsIdByRank(t *testing.T) {
return "", errors.Errorf("unexpected ceph command %q", args)
}

name, err = GetMdsIdByRank(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0)
name, err = GetMdsIdByRank(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0)
assert.Equal(t, "", name)
assert.Error(t, err)
assert.Contains(t, err.Error(), "failed to get mds gid from rank 0")
Expand Down Expand Up @@ -475,7 +475,7 @@ func TestGetMdsIdByRank(t *testing.T) {
return "", errors.Errorf("unexpected ceph command %q", args)
}

name, err = GetMdsIdByRank(context, AdminClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0)
name, err = GetMdsIdByRank(context, AdminTestClusterInfo("mycluster"), fs.MDSMap.FilesystemName, 0)
assert.Equal(t, "", name)
assert.Error(t, err)
assert.Contains(t, err.Error(), "failed to get mds info for rank 0")
Expand Down Expand Up @@ -503,7 +503,7 @@ func TestGetMDSDump(t *testing.T) {
return "", errors.Errorf("unexpected ceph command %q", args)
}

mdsDump, err := GetMDSDump(context, AdminClusterInfo("mycluster"))
mdsDump, err := GetMDSDump(context, AdminTestClusterInfo("mycluster"))
assert.NoError(t, err)
assert.ElementsMatch(t, mdsDump.Standbys, []MDSStandBy{{Name: "rook-ceph-filesystem-b", Rank: -1}})

Expand All @@ -517,7 +517,7 @@ func TestGetMDSDump(t *testing.T) {
return "", errors.Errorf("unexpected ceph command %q", args)
}

_, err = GetMDSDump(context, AdminClusterInfo("mycluster"))
_, err = GetMDSDump(context, AdminTestClusterInfo("mycluster"))
assert.Error(t, err)
}

Expand All @@ -543,7 +543,7 @@ func TestWaitForNoStandbys(t *testing.T) {
return "", errors.Errorf("unexpected ceph command %q", args)
}

err := WaitForNoStandbys(context, AdminClusterInfo("mycluster"), 6*time.Second)
err := WaitForNoStandbys(context, AdminTestClusterInfo("mycluster"), 6*time.Second)
assert.Error(t, err)

executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
Expand All @@ -556,7 +556,7 @@ func TestWaitForNoStandbys(t *testing.T) {
return "", errors.Errorf("unexpected ceph command %q", args)
}

err = WaitForNoStandbys(context, AdminClusterInfo("mycluster"), 6*time.Second)
err = WaitForNoStandbys(context, AdminTestClusterInfo("mycluster"), 6*time.Second)
assert.Error(t, err)

firstCall := true
Expand All @@ -583,7 +583,7 @@ func TestWaitForNoStandbys(t *testing.T) {
}
return "", errors.Errorf("unexpected ceph command %q", args)
}
err = WaitForNoStandbys(context, AdminClusterInfo("mycluster"), 6*time.Second)
err = WaitForNoStandbys(context, AdminTestClusterInfo("mycluster"), 6*time.Second)
assert.NoError(t, err)

}
8 changes: 4 additions & 4 deletions pkg/daemon/ceph/client/image_test.go
Expand Up @@ -45,7 +45,7 @@ func TestCreateImage(t *testing.T) {
}
return "", errors.Errorf("unexpected ceph command %q", args)
}
clusterInfo := AdminClusterInfo("mycluster")
clusterInfo := AdminTestClusterInfo("mycluster")
_, err := CreateImage(context, clusterInfo, "image1", "pool1", "", uint64(sizeMB)) // 1MB
assert.NotNil(t, err)
assert.True(t, strings.Contains(err.Error(), "mocked detailed ceph error output stream"))
Expand Down Expand Up @@ -156,7 +156,7 @@ func TestExpandImage(t *testing.T) {
}
return "", errors.Errorf("unexpected ceph command %q", args)
}
clusterInfo := AdminClusterInfo("mycluster")
clusterInfo := AdminTestClusterInfo("mycluster")
err := ExpandImage(context, clusterInfo, "error-name", "kube", "mon1,mon2,mon3", "/tmp/keyring", 1000000)
assert.Error(t, err)

Expand Down Expand Up @@ -186,7 +186,7 @@ func TestListImageLogLevelInfo(t *testing.T) {
return "", errors.Errorf("unexpected ceph command %q", args)
}

clusterInfo := AdminClusterInfo("mycluster")
clusterInfo := AdminTestClusterInfo("mycluster")
images, err = ListImages(context, clusterInfo, "pool1")
assert.Nil(t, err)
assert.NotNil(t, images)
Expand Down Expand Up @@ -250,7 +250,7 @@ func TestListImageLogLevelDebug(t *testing.T) {
return "", errors.Errorf("unexpected ceph command %q", args)
}

clusterInfo := AdminClusterInfo("mycluster")
clusterInfo := AdminTestClusterInfo("mycluster")
images, err = ListImages(context, clusterInfo, "pool1")
assert.Nil(t, err)
assert.NotNil(t, images)
Expand Down
14 changes: 9 additions & 5 deletions pkg/daemon/ceph/client/info.go
Expand Up @@ -87,22 +87,26 @@ func (c *ClusterInfo) NamespacedName() types.NamespacedName {
}

// AdminClusterInfo() creates a ClusterInfo with the basic info to access the cluster
// as an admin. Only a few fields are set in the struct,
// so this clusterInfo cannot be used to generate the mon config or request the
// namespacedName. A full cluster info must be populated for those operations.
func AdminClusterInfo(namespace string) *ClusterInfo {
// as an admin.
func AdminClusterInfo(namespace, name string) *ClusterInfo {
ownerInfo := k8sutil.NewOwnerInfoWithOwnerRef(&metav1.OwnerReference{}, "")
return &ClusterInfo{
Namespace: namespace,
CephCred: CephCred{
Username: AdminUsername,
},
name: "testing",
name: name,
OwnerInfo: ownerInfo,
Context: context.TODO(),
}
}

// AdminTestClusterInfo creates a ClusterInfo with the basic info to access the cluster
// as an admin. It fills in a placeholder cluster name ("testing"), so it should only be
// used by unit or integration tests where the cluster CR name is not important.
func AdminTestClusterInfo(namespace string) *ClusterInfo {
	return AdminClusterInfo(namespace, "testing")
}

// IsInitialized returns true if the critical information in the ClusterInfo struct has been filled
// in. This method exists less out of necessity than the desire to be explicit about the lifecycle
// of the ClusterInfo struct during startup, specifically that it is expected to exist after the
Expand Down

0 comments on commit 33f4643

Please sign in to comment.