Skip to content

Commit

Permalink
multus: do not build all the args to exec commands
Browse files Browse the repository at this point in the history
When proxying commands to the cmd-proxy container we don't need to build
the command line with the same flags as the operator. The cmd-proxy
container does not use any ceph config file and just relies on the
CEPH_ARGS environment variable in the container. So passing the same
args as the operator causes the command to fail, since there is no ceph
config file in `/var/lib/rook/openshift-storage/openshift-storage.config`,
and thus the remote exec fails with:

```
global_init: unable to open config file from search list ...
```

Signed-off-by: Sébastien Han <seb@redhat.com>
  • Loading branch information
leseb committed Sep 28, 2021
1 parent 62d66b0 commit 1582e7a
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 2 deletions.
21 changes: 19 additions & 2 deletions pkg/daemon/ceph/client/command.go
Expand Up @@ -126,10 +126,27 @@ func NewRBDCommand(context *clusterd.Context, clusterInfo *ClusterInfo, args []s
}

func (c *CephToolCommand) run() ([]byte, error) {
command, args := FinalizeCephCommandArgs(c.tool, c.clusterInfo, c.args, c.context.ConfigDir)
// Return if the context has been canceled
if c.clusterInfo.Context.Err() != nil {
return nil, c.clusterInfo.Context.Err()
}

// Initialize the command and args
command := c.tool
args := c.args

// If this is a remote execution, we don't want to build the full set of args. For instance all
// these args are not needed since those paths don't exist inside the cmd-proxy container:
// --cluster=openshift-storage
// --conf=/var/lib/rook/openshift-storage/openshift-storage.config
// --name=client.admin
// --keyring=/var/lib/rook/openshift-storage/client.admin.keyring
//
// The cmd-proxy container will take care of the rest with the help of the env CEPH_ARGS
if !c.RemoteExecution {
command, args = FinalizeCephCommandArgs(c.tool, c.clusterInfo, c.args, c.context.ConfigDir)
}

if c.JsonOutput {
args = append(args, "--format", "json")
} else {
Expand All @@ -147,7 +164,7 @@ func (c *CephToolCommand) run() ([]byte, error) {
if command == RBDTool {
if c.RemoteExecution {
output, stderr, err = c.context.RemoteExecutor.ExecCommandInContainerWithFullOutputWithTimeout(ProxyAppLabel, CommandProxyInitContainerName, c.clusterInfo.Namespace, append([]string{command}, args...)...)
output = fmt.Sprintf("%s.%s", output, stderr)
output = fmt.Sprintf("%s. %s", output, stderr)
} else if c.timeout == 0 {
output, err = c.context.Executor.ExecuteCommandWithOutput(command, args...)
} else {
Expand Down
2 changes: 2 additions & 0 deletions pkg/daemon/ceph/client/command_test.go
Expand Up @@ -116,6 +116,7 @@ func TestNewRBDCommand(t *testing.T) {
executor.MockExecuteCommandWithOutput = func(command string, args ...string) (string, error) {
switch {
case command == "rbd" && args[0] == "create":
assert.Len(t, args, 8)
return "success", nil
}
return "", errors.Errorf("unexpected ceph command %q", args)
Expand All @@ -137,6 +138,7 @@ func TestNewRBDCommand(t *testing.T) {
assert.True(t, cmd.RemoteExecution)
_, err := cmd.Run()
assert.Error(t, err)
assert.Len(t, cmd.args, 4)
// This is not the best but it shows we go through the right codepath
assert.EqualError(t, err, "no pods found with selector \"rook-ceph-mgr\"")
})
Expand Down

0 comments on commit 1582e7a

Please sign in to comment.