security: add dry run mode for external cluster script
Add a dry run mode to the external cluster script via a new CLI
argument, `--dry-run`. The option defaults to `False`; when the flag
is passed, the script does not execute any Ceph commands and instead
only prints what it would run, for example:

```
The script is running command ceph fs ls
The script is running ceph fsid
The script is running command ceph quorum_status
The script is running command ceph auth get-or-create client.healthchecker mon
The script is running command ceph mgr services
The script is running command ceph auth get-or-create client.csi-rbd-node mon
The script is running command ceph auth get-or-create client.csi-rbd-provisioner mon
The script is running command ceph status
None
```

Signed-off-by: subhamkrai <srai@redhat.com>
subhamkrai committed Dec 10, 2021
1 parent 3d69e10 commit e44dcd4
Showing 2 changed files with 53 additions and 11 deletions.
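For readers skimming the diff below: every command-issuing method follows the same gating pattern, namely a boolean `--dry-run` argparse flag, a small `dry_run()` helper that prints the would-be command, and an early return before the command is actually executed. The following is a minimal, self-contained sketch of that pattern, with illustrative names only (not the script's actual classes); in the script itself the role of `executor` is played by `_common_cmd_json_gen()`.

```
import argparse
import sys


def parse_args(argv):
    # Illustrative parser with just the new flag; the real script defines many more options.
    parser = argparse.ArgumentParser()
    parser.add_argument("--dry-run", default=False, action="store_true",
                        help="print the ceph commands instead of executing them")
    return parser.parse_args(argv)


def run_ceph_command(args, cmd_json, executor):
    # In dry-run mode, report the command and skip execution entirely.
    if args.dry_run:
        print("The script is running command ceph " + cmd_json["prefix"])
        return None
    return executor(cmd_json)


if __name__ == "__main__":
    args = parse_args(sys.argv[1:])
    run_ceph_command(args, {"prefix": "fs ls", "format": "json"},
                     executor=lambda cmd: print("executing", cmd))
```

Running the sketch with `--dry-run` only prints the command; without the flag, the supplied executor runs it.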
5 changes: 5 additions & 0 deletions .github/workflows/canary-integration-test.yml
@@ -59,6 +59,11 @@ jobs:
kubectl -n rook-ceph cp deploy/examples/create-external-cluster-resources.py $toolbox:/etc/ceph
timeout 10 sh -c "until kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name replicapool; do echo 'waiting for script to succeed' && sleep 1; done"
- name: dry run external script create-external-cluster-resources.py
run: |
toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name=replicapool --dry-run
- name: run external script create-external-cluster-resources.py unit tests
run: |
kubectl -n rook-ceph exec $(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[0].metadata.name}') -- python3 -m unittest /etc/ceph/create-external-cluster-resources.py
59 changes: 48 additions & 11 deletions deploy/examples/create-external-cluster-resources.py
@@ -193,6 +193,8 @@ def gen_arg_parser(cls, args_to_parse=None):
help="Ceph Manager prometheus exporter endpoints (comma separated list of <IP> entries of active and standby mgrs)")
output_group.add_argument("--monitoring-endpoint-port", default="", required=False,
help="Ceph Manager prometheus exporter port")
output_group.add_argument("--dry-run", default=False, action='store_true',
help="Dry run the python script")

upgrade_group = argP.add_argument_group('upgrade')
upgrade_group.add_argument("--upgrade", action='store_true', default=False,
@@ -205,6 +207,10 @@ def gen_arg_parser(cls, args_to_parse=None):
args_to_parse = sys.argv[1:]
return argP.parse_args(args_to_parse)

def dry_run(self, msg):
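    # When --dry-run is set, print the command that would run instead of executing it.
    # The helper always returns None, so callers can simply `return self.dry_run(...)`.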
if self._arg_parser.dry_run:
print("The script is " + msg)

def validate_rgw_endpoint_tls_cert(self):
if self._arg_parser.rgw_tls_cert_path:
with open(self._arg_parser.rgw_tls_cert_path, encoding='utf8') as f:
@@ -307,6 +313,8 @@ def shutdown(self):
self.cluster.shutdown()

def get_fsid(self):
if self._arg_parser.dry_run:
return self.dry_run("running ceph fsid")
return str(self.cluster.get_fsid())

def _common_cmd_json_gen(self, cmd_json):
@@ -325,6 +333,8 @@ def _common_cmd_json_gen(self, cmd_json):

def get_ceph_external_mon_data(self):
cmd_json = {"prefix": "quorum_status", "format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'])
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -375,6 +385,8 @@ def _convert_hostname_to_ip(self, host_name):
return ip

def get_active_and_standby_mgrs(self):
if self._arg_parser.dry_run:
return "", self.dry_run("running command ceph status")
monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port
monitoring_endpoint_ip_list = self._arg_parser.monitoring_endpoint
standby_mgrs = []
@@ -458,7 +470,7 @@ def create_cephCSIKeyring_cephFSProvisioner(self):
if self._arg_parser.restricted_auth_permission:
if metadata_pool == "":
raise ExecutionFailureException(
"'cephfs_metadata_pool_name' not found, please set the '--cephfs-metadata-pool-name' flag")
"'cephfs_metadata_pool_name' not found, please set the '--cephfs-metadata-pool-name' flag")
cmd_json = {"prefix": "auth get-or-create",
"entity": entity,
"caps": ["mon", "allow r", "mgr", "allow rw",
@@ -470,6 +482,8 @@ def create_cephCSIKeyring_cephFSProvisioner(self):
"caps": ["mon", "allow r", "mgr", "allow rw",
"osd", "allow rw tag cephfs metadata=*"],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + cmd_json['caps'][0])
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -492,9 +506,10 @@ def create_cephCSIKeyring_cephFSNode(self):
cmd_json = {"prefix": "auth get-or-create",
"entity": entity,
"caps": ["mon", "allow r",
"mgr", "allow rw",
"osd", "allow rw tag cephfs data={}".format(data_pool),
"mds", "allow rw"],
"mgr", "allow rw",
"osd", "allow rw tag cephfs data={}".format(
data_pool),
"mds", "allow rw"],
"format": "json"}
else:
cmd_json = {"prefix": "auth get-or-create",
@@ -504,6 +519,8 @@ def create_cephCSIKeyring_cephFSNode(self):
"osd", "allow rw tag cephfs *=*",
"mds", "allow rw"],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + cmd_json['caps'][0])
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -518,7 +535,7 @@ def create_cephCSIKeyring_RBDProvisioner(self):
entity = "client.csi-rbd-provisioner"
if cluster_name:
entity = "client.csi-rbd-provisioner-{}".format(cluster_name)
cmd_json = {}
if self._arg_parser.restricted_auth_permission:
if rbd_pool_name == "":
raise ExecutionFailureException(
@@ -536,6 +553,8 @@ def create_cephCSIKeyring_RBDProvisioner(self):
"mgr", "allow rw",
"osd", "profile rbd"],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + cmd_json['caps'][0])
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -546,6 +565,8 @@ def create_cephCSIKeyring_RBDProvisioner(self):

def get_cephfs_data_pool_details(self):
cmd_json = {"prefix": "fs ls", "format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'])
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt, report an error
if ret_val != 0:
@@ -597,8 +618,10 @@ def get_cephfs_data_pool_details(self):
return

if matching_json_out:
self._arg_parser.cephfs_filesystem_name = str(
matching_json_out['name'])
self._arg_parser.cephfs_metadata_pool_name = str(
matching_json_out['metadata_pool'])

if type(matching_json_out['data_pools']) == list:
# if the user has already provided data-pool-name,
@@ -635,7 +658,7 @@ def create_cephCSIKeyring_RBDNode(self):
entity = "client.csi-rbd-node"
if cluster_name:
entity = "client.csi-rbd-node-{}".format(cluster_name)
cmd_json = {}
if self._arg_parser.restricted_auth_permission:
if rbd_pool_name == "":
raise ExecutionFailureException(
@@ -651,6 +674,8 @@ def create_cephCSIKeyring_RBDNode(self):
"caps": ["mon", "profile rbd",
"osd", "profile rbd"],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + cmd_json['caps'][0])
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -666,6 +691,8 @@ def create_checkerKey(self):
"mgr", self.MIN_USER_CAP_PERMISSIONS['mgr'],
"osd", self.MIN_USER_CAP_PERMISSIONS['osd'].format(self._arg_parser.rgw_pool_prefix)],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + cmd_json['caps'][0])
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -676,6 +703,8 @@ def create_checkerKey(self):

def get_ceph_dashboard_link(self):
cmd_json = {"prefix": "mgr services", "format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'])
ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -687,6 +716,8 @@ def get_ceph_dashboard_link(self):
def create_rgw_admin_ops_user(self):
cmd = ['radosgw-admin', 'user', 'create', '--uid', self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME, '--display-name',
'Rook RGW Admin Ops user', '--caps', 'buckets=*;users=*;usage=read;metadata=read;zone=read']
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + "".joing(cmd))
try:
output = subprocess.check_output(cmd,
stderr=subprocess.PIPE)
@@ -751,7 +782,8 @@ def _gen_output_map(self):
self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = ''
# create CephFS node and provisioner keyring only when MDS exists
if self.out_map['CEPHFS_FS_NAME'] and self.out_map['CEPHFS_POOL_NAME']:
self.out_map['CSI_CEPHFS_NODE_SECRET'] = self.create_cephCSIKeyring_cephFSNode(
)
self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_cephFSProvisioner()
self.out_map['RGW_ENDPOINT'] = self._arg_parser.rgw_endpoint
self.out_map['RGW_TLS_CERT'] = ''
@@ -775,6 +807,9 @@ def gen_shell_out(self):
return shOut

def gen_json_out(self):
if self._arg_parser.dry_run:
return self._gen_output_map()

self._gen_output_map()
json_out = [
{
@@ -1035,14 +1070,16 @@ def test_method_main_output(self):

def test_method_create_cephCSIKeyring_cephFSProvisioner(self):
csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
print("cephCSIKeyring without restricting it to a metadata pool. {}".format(csiKeyring))
print("cephCSIKeyring without restricting it to a metadata pool. {}".format(
csiKeyring))
self.rjObj._arg_parser.restricted_auth_permission = True
self.rjObj._arg_parser.cephfs_metadata_pool_name = "myfs-metadata"
csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
print("cephCSIKeyring for a specific metadata pool. {}".format(csiKeyring))
self.rjObj._arg_parser.cluster_name = "openshift-storage"
csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
print("cephCSIKeyring for a specific metadata pool and cluster. {}".format(csiKeyring))
print("cephCSIKeyring for a specific metadata pool and cluster. {}".format(
csiKeyring))

def test_non_zero_return_and_error(self):
self.rjObj.cluster.return_val = 1
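The commit itself does not add a dedicated unit test for the new flag. A test in the style of the existing cases might look like the sketch below (hypothetical, reusing the `self.rjObj` dummy fixture the other tests rely on, and not part of this commit):

```
    # Hypothetical addition to the existing test class; not part of this commit.
    def test_dry_run_skips_execution(self):
        self.rjObj._arg_parser.dry_run = True
        # Each command helper should short-circuit and return None in dry-run mode.
        self.assertIsNone(self.rjObj.get_fsid())
        self.assertIsNone(self.rjObj.get_ceph_external_mon_data())
        self.assertIsNone(self.rjObj.create_checkerKey())
        self.rjObj._arg_parser.dry_run = False
```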
