mgr: run the watch sidecar config on /etc/ceph #9386

Merged (2 commits) on Dec 15, 2021
1 change: 1 addition & 0 deletions cmd/rook/ceph/mgr.go
@@ -66,6 +66,7 @@ func init() {
 // Start the mgr daemon sidecar
 func runMgrSidecar(cmd *cobra.Command, args []string) error {
 	rook.SetLogLevel()
+	clusterInfo.Context = cmd.Context()
 
 	context := createContext()
 	clusterInfo.Monitors = mon.ParseMonEndpoints(cfg.monEndpoints)
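The added line threads the cobra command's context into clusterInfo, so downstream Ceph calls in the sidecar can observe cancellation when the process is asked to stop. A minimal standalone sketch of that pattern (the clusterInfo type here is a stand-in, not Rook's own):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/spf13/cobra"
)

// clusterInfo stands in for Rook's cluster info struct (assumption for this sketch).
type clusterInfo struct {
	Context context.Context
}

func main() {
	info := &clusterInfo{}
	cmd := &cobra.Command{
		Use: "watch-active",
		RunE: func(cmd *cobra.Command, args []string) error {
			// Store the command's context so long-running work can be cancelled.
			info.Context = cmd.Context()
			<-info.Context.Done() // a real watch loop would poll until cancelled
			return nil
		},
	}

	// ExecuteContext supplies the context that cmd.Context() later returns.
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println(cmd.ExecuteContext(ctx)) // <nil>
}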
4 changes: 1 addition & 3 deletions pkg/operator/ceph/cluster/cleanup.go
@@ -127,9 +127,7 @@ func (c *ClusterController) cleanUpJobContainer(cluster *cephv1.CephCluster, mon
 	// See https://tracker.ceph.com/issues/53511
 	// Also, it's hard to catch the ceph version since the cluster is being deleted so not
 	// implementing a version check and simply always run this as root
-	rootUserID := int64(0)
-	securityContext := osd.PrivilegedContext()
-	securityContext.RunAsUser = &rootUserID
+	securityContext := controller.PrivilegedContext(true)
 
 	return v1.Container{
 		Name: "host-cleanup",
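For comparison, a standalone sketch (not Rook code) showing that the new helper reproduces what the deleted lines built by hand, namely Privileged set to true and RunAsUser set to 0:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// privilegedContext mirrors the helper added to pkg/operator/ceph/controller/spec.go below.
func privilegedContext(runAsRoot bool) *v1.SecurityContext {
	privileged := true
	rootUser := int64(0)
	sec := &v1.SecurityContext{Privileged: &privileged}
	if runAsRoot {
		sec.RunAsUser = &rootUser
	}
	return sec
}

func main() {
	// The deleted inline construction, reproduced for comparison.
	rootUserID := int64(0)
	privileged := true
	old := &v1.SecurityContext{Privileged: &privileged, RunAsUser: &rootUserID}

	got := privilegedContext(true)
	fmt.Println(*got.Privileged == *old.Privileged, *got.RunAsUser == *old.RunAsUser) // true true
}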
11 changes: 6 additions & 5 deletions pkg/operator/ceph/cluster/mgr/spec.go
@@ -228,11 +228,12 @@ func (c *Cluster) makeMgrSidecarContainer(mgrConfig *mgrConfig) v1.Container {
 	}
 
 	return v1.Container{
-		Args:      []string{"ceph", "mgr", "watch-active"},
-		Name:      "watch-active",
-		Image:     c.rookVersion,
-		Env:       envVars,
-		Resources: cephv1.GetMgrSidecarResources(c.spec.Resources),
+		Args:            []string{"ceph", "mgr", "watch-active"},
+		Name:            "watch-active",
+		Image:           c.rookVersion,
+		Env:             envVars,
+		Resources:       cephv1.GetMgrSidecarResources(c.spec.Resources),
+		SecurityContext: controller.PrivilegedContext(true),
 	}
 }
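With this change the watch-active sidecar runs privileged as UID 0, presumably so it can write the mgr config under /etc/ceph, per the PR title. When the pod spec is serialized, the container's securityContext comes out as sketched here (illustrative only; field values follow from controller.PrivilegedContext(true)):

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	privileged := true
	root := int64(0)
	sec := v1.SecurityContext{Privileged: &privileged, RunAsUser: &root}

	out, _ := json.MarshalIndent(&sec, "", "  ")
	fmt.Println(string(out)) // prints "privileged": true and "runAsUser": 0
}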
10 changes: 0 additions & 10 deletions pkg/operator/ceph/cluster/osd/config.go
@@ -25,22 +25,12 @@ import (
 	cephv1 "github.com/rook/rook/pkg/apis/ceph.rook.io/v1"
 	"github.com/rook/rook/pkg/operator/ceph/cluster/mgr"
 	opconfig "github.com/rook/rook/pkg/operator/ceph/config"
-	v1 "k8s.io/api/core/v1"
 )
 
 const (
 	dmCryptKeySize = 128
 )
 
-// PrivilegedContext returns a privileged Pod security context
-func PrivilegedContext() *v1.SecurityContext {
-	privileged := true
-
-	return &v1.SecurityContext{
-		Privileged: &privileged,
-	}
-}
-
 func osdOnSDNFlag(network cephv1.NetworkSpec) []string {
 	var args []string
 	// OSD fails to find the right IP to bind to when running on SDN
12 changes: 6 additions & 6 deletions pkg/operator/ceph/cluster/osd/spec.go
@@ -765,7 +765,7 @@ func (c *Cluster) getActivateOSDInitContainer(configDir, namespace, osdID string
 		Name:            "activate",
 		Image:           c.spec.CephVersion.Image,
 		VolumeMounts:    volMounts,
-		SecurityContext: PrivilegedContext(),
+		SecurityContext: controller.PrivilegedContext(true),
 		Env:             envVars,
 		Resources:       osdProps.resources,
 	}
@@ -857,7 +857,7 @@ func (c *Cluster) generateEncryptionOpenBlockContainer(resources v1.ResourceRequ
 			fmt.Sprintf(openEncryptedBlock, c.clusterInfo.FSID, pvcName, encryptionKeyPath(), encryptionBlockDestinationCopy(mountPath, blockType), encryptionDMName(pvcName, cryptBlockType), encryptionDMPath(pvcName, cryptBlockType)),
 		},
 		VolumeMounts:    []v1.VolumeMount{getPvcOSDBridgeMountActivate(mountPath, volumeMountPVCName), getDeviceMapperMount()},
-		SecurityContext: PrivilegedContext(),
+		SecurityContext: controller.PrivilegedContext(true),
 		Resources:       resources,
 	}
 }
@@ -1111,7 +1111,7 @@ func (c *Cluster) getActivatePVCInitContainer(osdProps osdProperties, osdID stri
 			},
 		},
 		VolumeMounts:    []v1.VolumeMount{getPvcOSDBridgeMountActivate(osdDataPath, osdProps.pvc.ClaimName)},
-		SecurityContext: PrivilegedContext(),
+		SecurityContext: controller.PrivilegedContext(true),
 		Resources:       osdProps.resources,
 	}
 
@@ -1139,7 +1139,7 @@ func (c *Cluster) getExpandPVCInitContainer(osdProps osdProperties, osdID string
 		},
 		Args:            []string{"bluefs-bdev-expand", "--path", osdDataPath},
 		VolumeMounts:    []v1.VolumeMount{getPvcOSDBridgeMountActivate(osdDataPath, osdProps.pvc.ClaimName)},
-		SecurityContext: PrivilegedContext(),
+		SecurityContext: controller.PrivilegedContext(true),
 		Resources:       osdProps.resources,
 	}
 }
@@ -1166,7 +1166,7 @@ func (c *Cluster) getExpandEncryptedPVCInitContainer(mountPath string, osdProps
 		},
 		Args:            []string{"--verbose", "resize", encryptionDMName(osdProps.pvc.ClaimName, DmcryptBlockType)},
 		VolumeMounts:    volMount,
-		SecurityContext: PrivilegedContext(),
+		SecurityContext: controller.PrivilegedContext(true),
 		Resources:       osdProps.resources,
 	}
 }
@@ -1196,7 +1196,7 @@ func (c *Cluster) getEncryptedStatusPVCInitContainer(mountPath string, osdProps
 		},
 		Args:            []string{"--verbose", "status", encryptionDMName(osdProps.pvc.ClaimName, DmcryptBlockType)},
 		VolumeMounts:    []v1.VolumeMount{getPvcOSDBridgeMountActivate(mountPath, osdProps.pvc.ClaimName)},
-		SecurityContext: PrivilegedContext(),
+		SecurityContext: controller.PrivilegedContext(true),
 		Resources:       osdProps.resources,
 	}
 }
16 changes: 16 additions & 0 deletions pkg/operator/ceph/controller/spec.go
@@ -620,6 +620,22 @@ func PodSecurityContext() *v1.SecurityContext {
 	}
 }
 
+// PrivilegedContext returns a privileged Pod security context
+func PrivilegedContext(runAsRoot bool) *v1.SecurityContext {
+	privileged := true
+	rootUser := int64(0)
+
+	sec := &v1.SecurityContext{
+		Privileged: &privileged,
+	}
+
+	if runAsRoot {
+		sec.RunAsUser = &rootUser
+	}
+
+	return sec
+}
+
 // LogCollectorContainer runs a cron job to rotate logs
 func LogCollectorContainer(daemonID, ns string, c cephv1.ClusterSpec) *v1.Container {
 	return &v1.Container{
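A unit test for the new helper might look like this (an assumed test for illustration; this PR does not add one):

package controller

import "testing"

func TestPrivilegedContext(t *testing.T) {
	sec := PrivilegedContext(true)
	if sec.Privileged == nil || !*sec.Privileged {
		t.Fatal("expected Privileged to be true")
	}
	if sec.RunAsUser == nil || *sec.RunAsUser != 0 {
		t.Fatal("expected RunAsUser to be 0 when runAsRoot is true")
	}
	if sec := PrivilegedContext(false); sec.RunAsUser != nil {
		t.Fatal("expected RunAsUser to be unset when runAsRoot is false")
	}
}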