security: add dry run mode for external cluster script
Add a dry run mode for the external cluster script.
This adds the CLI argument `--dry-run`, which defaults to `False`.
When `--dry-run` is passed, the script does not execute any Ceph
commands; it only prints the commands it would run, for example:

```
Execute: 'ceph fs ls'
Execute: 'ceph fsid'
Execute: 'ceph quorum_status'
Execute: 'ceph auth get-or-create client.healthchecker mon allow r, allow command quorum_status,
	allow command version mgr allow command config osd allow rwx pool=default.rgw.meta, allow r pool=
	.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x
	pool=default.rgw.buckets.index'
Execute: 'ceph mgr services'
Execute: 'ceph auth get-or-create client.csi-rbd-node mon profile rbd osd profile rbd'
Execute: 'ceph auth get-or-create client.csi-rbd-provisioner mon profile rbd mgr allow rw osd
	profile rbd'
Execute: 'ceph status'

```
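
For context, a minimal, self-contained sketch of the guard pattern the script applies to each Ceph call (the flag name, the `Execute:` output format, and the `dry_run()` helper follow the diff below; the module-level parser and the stand-alone `get_fsid()` wrapper here are only illustrative, not the script's actual class-based structure):

```
import argparse

# Illustrative only: the real script defines --dry-run inside its own
# argument-parser setup and dry_run() as a method on its runner class.
parser = argparse.ArgumentParser()
parser.add_argument("--dry-run", default=False, action="store_true",
                    help="Only print the Ceph commands that would be executed")
args = parser.parse_args()


def dry_run(msg):
    # Print the command instead of running it against the cluster.
    print("Execute: '" + msg + "'")


def get_fsid():
    cmd_json = {"prefix": "fsid", "format": "json"}
    if args.dry_run:
        # Short-circuit before anything touches the Ceph cluster.
        return dry_run("ceph " + cmd_json["prefix"])
    # ...the real implementation issues the mon command here...


if __name__ == "__main__":
    get_fsid()
```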

Signed-off-by: subhamkrai <srai@redhat.com>
subhamkrai committed Dec 14, 2021
1 parent ca7d7dd commit bc413ab
Showing 2 changed files with 44 additions and 3 deletions.
5 changes: 5 additions & 0 deletions .github/workflows/canary-integration-test.yml
@@ -64,6 +64,11 @@ jobs:
toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
timeout 10 sh -c "until kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name=ec-pool --rbd-metadata-ec-pool-name=replicapool; do echo 'waiting for script to succeed' && sleep 1; done"
- name: dry run external script create-external-cluster-resources.py
run: |
toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name=replicapool --dry-run
- name: run external script create-external-cluster-resources.py unit tests
run: |
kubectl -n rook-ceph exec $(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[0].metadata.name}') -- python3 -m unittest /etc/ceph/create-external-cluster-resources.py
42 changes: 39 additions & 3 deletions deploy/examples/create-external-cluster-resources.py
@@ -195,6 +195,8 @@ def gen_arg_parser(cls, args_to_parse=None):
help="Ceph Manager prometheus exporter port")
output_group.add_argument("--rbd-metadata-ec-pool-name", default="", required=False,
help="Provides the name of erasure coded RBD metadata pool")
output_group.add_argument("--dry-run", default=False, action='store_true',
help="Dry run the python script")

upgrade_group = argP.add_argument_group('upgrade')
upgrade_group.add_argument("--upgrade", action='store_true', default=False,
@@ -250,6 +252,10 @@ def validate_rgw_metadata_ec_pool_name(self):
"Provided rbd_data_pool name, {}, does not exist".format(rbd_pool_name))
return rbd_metadata_ec_pool_name

def dry_run(self, msg):
if self._arg_parser.dry_run:
print("Execute: " + "'" + msg+"'")

def validate_rgw_endpoint_tls_cert(self):
if self._arg_parser.rgw_tls_cert_path:
with open(self._arg_parser.rgw_tls_cert_path, encoding='utf8') as f:
@@ -352,6 +358,8 @@ def shutdown(self):
self.cluster.shutdown()

def get_fsid(self):
if self._arg_parser.dry_run:
return self.dry_run("ceph fsid")
return str(self.cluster.get_fsid())

def _common_cmd_json_gen(self, cmd_json):
@@ -370,6 +378,8 @@ def _common_cmd_json_gen(self, cmd_json):

def get_ceph_external_mon_data(self):
cmd_json = {"prefix": "quorum_status", "format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("ceph " + cmd_json['prefix'])
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -420,6 +430,8 @@ def _convert_hostname_to_ip(self, host_name):
return ip

def get_active_and_standby_mgrs(self):
if self._arg_parser.dry_run:
return "", self.dry_run("ceph status")
monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port
monitoring_endpoint_ip_list = self._arg_parser.monitoring_endpoint
standby_mgrs = []
@@ -515,6 +527,8 @@ def create_cephCSIKeyring_cephFSProvisioner(self):
"caps": ["mon", "allow r", "mgr", "allow rw",
"osd", "allow rw tag cephfs metadata=*"],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + " ".join(cmd_json['caps']))
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -550,6 +564,8 @@ def create_cephCSIKeyring_cephFSNode(self):
"osd", "allow rw tag cephfs *=*",
"mds", "allow rw"],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + " ".join(cmd_json['caps']))
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -582,6 +598,8 @@ def create_cephCSIKeyring_RBDProvisioner(self):
"mgr", "allow rw",
"osd", "profile rbd"],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + " ".join(cmd_json['caps']))
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -592,6 +610,8 @@ def create_cephCSIKeyring_RBDProvisioner(self):

def get_cephfs_data_pool_details(self):
cmd_json = {"prefix": "fs ls", "format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("ceph " + cmd_json['prefix'])
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt, report an error
if ret_val != 0:
@@ -699,6 +719,8 @@ def create_cephCSIKeyring_RBDNode(self):
"caps": ["mon", "profile rbd",
"osd", "profile rbd"],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + " ".join(cmd_json['caps']))
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -714,6 +736,8 @@ def create_checkerKey(self):
"mgr", self.MIN_USER_CAP_PERMISSIONS['mgr'],
"osd", self.MIN_USER_CAP_PERMISSIONS['osd'].format(self._arg_parser.rgw_pool_prefix)],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + " ".join(cmd_json['caps']))
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -724,6 +748,8 @@ def create_checkerKey(self):

def get_ceph_dashboard_link(self):
cmd_json = {"prefix": "mgr services", "format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("ceph " + cmd_json['prefix'])
ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -735,6 +761,8 @@ def get_ceph_dashboard_link(self):
def create_rgw_admin_ops_user(self):
cmd = ['radosgw-admin', 'user', 'create', '--uid', self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME, '--display-name',
'Rook RGW Admin Ops user', '--caps', 'buckets=*;users=*;usage=read;metadata=read;zone=read']
if self._arg_parser.dry_run:
return self.dry_run("ceph " + "".joing(cmd))
try:
output = subprocess.check_output(cmd,
stderr=subprocess.PIPE)
@@ -799,7 +827,8 @@ def _gen_output_map(self):
self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = ''
# create CephFS node and provisioner keyring only when MDS exists
if self.out_map['CEPHFS_FS_NAME'] and self.out_map['CEPHFS_POOL_NAME']:
self.out_map['CSI_CEPHFS_NODE_SECRET'] = self.create_cephCSIKeyring_cephFSNode()
self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_cephFSProvisioner()
self.out_map['RGW_ENDPOINT'] = self._arg_parser.rgw_endpoint
self.out_map['RGW_TLS_CERT'] = ''
@@ -824,6 +853,7 @@ def gen_shell_out(self):
return shOut

def gen_json_out(self):

self._gen_output_map()
json_out = [
{
@@ -967,6 +997,10 @@ def gen_json_out(self):
"cert": self.out_map['RGW_TLS_CERT'],
}
})

if self._arg_parser.dry_run:
return ""

return json.dumps(json_out)+LINESEP

def upgrade_user_permissions(self):
@@ -1096,14 +1130,16 @@ def test_method_main_output(self):

def test_method_create_cephCSIKeyring_cephFSProvisioner(self):
csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
print("cephCSIKeyring without restricting it to a metadata pool. {}".format(csiKeyring))
print("cephCSIKeyring without restricting it to a metadata pool. {}".format(
csiKeyring))
self.rjObj._arg_parser.restricted_auth_permission = True
self.rjObj._arg_parser.cephfs_filesystem_name = "myfs"
csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
print("cephCSIKeyring for a specific metadata pool. {}".format(csiKeyring))
self.rjObj._arg_parser.cluster_name = "openshift-storage"
csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
print("cephCSIKeyring for a specific metadata pool and cluster. {}".format(csiKeyring))
print("cephCSIKeyring for a specific metadata pool and cluster. {}".format(
csiKeyring))

def test_non_zero_return_and_error(self):
self.rjObj.cluster.return_val = 1