security: add dry run mode for external cluster script
Add a dry run mode for the external cluster script.
This introduces the CLI argument `--dry-run`, which defaults to `False`.
When dry run is enabled, the script does not execute the Ceph commands
and instead only prints output like the following:

```
The script is checking conflicting options
The script is running command ceph fs ls to fetch cephFS data pool details
The script is getting fsid
The script is running command ceph quorum_status to fetch ceph mon external data
The script is running command ceph auth get-or-create client.healthchecker mon to fetch/create the
	key for the rook external user
The script is running command ceph mgr services to fetch mgr services
The script is running command ceph auth get-or-create client.csi-rbd-node mon to fetch/create Ceph
	CSI keyring for RBD node
The script is running command ceph auth get-or-create client.csi-rbd-provisioner mon to
	fetch/create Ceph CSI keyring for RBD provisioner
The script is running command ceph status to get the monitoring endpoint and monitoring endpoint port
None
The script is checking if connected, then shutting down
```
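
Each command-running method in the script now starts with a small guard that, in dry-run mode, prints one of the messages above and returns early instead of talking to the cluster. A minimal, self-contained sketch of that pattern (class and method names are illustrative, not the exact upstream code):

```
# Minimal sketch of the dry-run guard pattern (illustrative names; the real
# logic lives in deploy/examples/create-external-cluster-resources.py).
import argparse


class DryRunSketch:
    def __init__(self, args=None):
        parser = argparse.ArgumentParser()
        # Mirrors the new flag: any non-empty value (e.g. --dry-run=True)
        # is stored as a string and treated as truthy.
        parser.add_argument("--dry-run", default=False, required=False)
        self._args = parser.parse_args(args)

    def dry_run(self, msg):
        # Describe the step instead of executing it.
        if self._args.dry_run:
            print("The script is " + msg)

    def get_fsid(self):
        # Each command-running method returns early in dry-run mode;
        # returning None here mirrors the upstream early-return style.
        if self._args.dry_run:
            return self.dry_run("getting fsid")
        return "<would query the Ceph cluster here>"


if __name__ == "__main__":
    DryRunSketch(["--dry-run=True"]).get_fsid()  # prints: The script is getting fsid
```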

Signed-off-by: subhamkrai <srai@redhat.com>
subhamkrai committed Dec 8, 2021
1 parent 3d69e10 commit 0419bf9
Showing 2 changed files with 61 additions and 11 deletions.
3 changes: 3 additions & 0 deletions .github/workflows/canary-integration-test.yml
@@ -59,6 +59,9 @@ jobs:
kubectl -n rook-ceph cp deploy/examples/create-external-cluster-resources.py $toolbox:/etc/ceph
timeout 10 sh -c "until kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name replicapool; do echo 'waiting for script to succeed' && sleep 1; done"
- name: dry run external script create-external-cluster-resources.py
run: timeout 10 sh -c "until kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name=replicapool --dry-run=True; do echo 'waiting for script to succeed' && sleep 1; done"

- name: run external script create-external-cluster-resources.py unit tests
run: |
kubectl -n rook-ceph exec $(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[0].metadata.name}') -- python3 -m unittest /etc/ceph/create-external-cluster-resources.py
69 changes: 58 additions & 11 deletions deploy/examples/create-external-cluster-resources.py
@@ -193,6 +193,8 @@ def gen_arg_parser(cls, args_to_parse=None):
help="Ceph Manager prometheus exporter endpoints (comma separated list of <IP> entries of active and standby mgrs)")
output_group.add_argument("--monitoring-endpoint-port", default="", required=False,
help="Ceph Manager prometheus exporter port")
output_group.add_argument("--dry-run", default=False, required=False,
help="Dry run the python script")

upgrade_group = argP.add_argument_group('upgrade')
upgrade_group.add_argument("--upgrade", action='store_true', default=False,
@@ -205,13 +207,21 @@ def gen_arg_parser(cls, args_to_parse=None):
args_to_parse = sys.argv[1:]
return argP.parse_args(args_to_parse)

def dry_run(self, msg):
if self._arg_parser.dry_run:
print("The script is " + msg)

def validate_rgw_endpoint_tls_cert(self):
if self._arg_parser.dry_run:
return self.dry_run("validating rados gateWay endpoint TLS certificate")
if self._arg_parser.rgw_tls_cert_path:
with open(self._arg_parser.rgw_tls_cert_path, encoding='utf8') as f:
contents = f.read()
return contents.rstrip()

def _check_conflicting_options(self):
if self._arg_parser.dry_run:
return self.dry_run("checking conflicting options")
if not self._arg_parser.upgrade and not self._arg_parser.rbd_data_pool_name:
raise ExecutionFailureException(
"Either '--upgrade' or '--rbd-data-pool-name <pool_name>' should be specified")
@@ -224,6 +234,8 @@ def _check_conflicting_options(self):
"Please provide an existing user-name through '--run-as-user' (or '-u') flag while upgrading")

def _invalid_endpoint(self, endpoint_str):
if self._arg_parser.dry_run:
return self.dry_run("if checking if endpoint is valid or not")
try:
ipv4, port = endpoint_str.split(':')
except ValueError:
@@ -250,6 +262,8 @@ def _invalid_endpoint(self, endpoint_str):
return False

def endpoint_dial(self, endpoint_str, timeout=3, cert=None):
if self._arg_parser.dry_run:
return self.dry_run("trying to connect endpoints")
# if the 'cluster' instance is a dummy one,
# don't try to reach out to the endpoint
if isinstance(self.cluster, DummyRados):
@@ -303,10 +317,14 @@ def __init__(self, arg_list=None):
self.cluster.connect()

def shutdown(self):
if self._arg_parser.dry_run:
return self.dry_run("checks if connected then shutdown")
if self.cluster.state == "connected":
self.cluster.shutdown()

def get_fsid(self):
if self._arg_parser.dry_run:
return self.dry_run("getting fsid")
return str(self.cluster.get_fsid())

def _common_cmd_json_gen(self, cmd_json):
@@ -325,6 +343,8 @@ def _common_cmd_json_gen(self, cmd_json):

def get_ceph_external_mon_data(self):
cmd_json = {"prefix": "quorum_status", "format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'] + " to fetch ceph mon external data")
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -375,6 +395,8 @@ def _convert_hostname_to_ip(self, host_name):
return ip

def get_active_and_standby_mgrs(self):
if self._arg_parser.dry_run:
return "", self.dry_run("running command ceph status to get mon endpoint and mon endpoint port")
monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port
monitoring_endpoint_ip_list = self._arg_parser.monitoring_endpoint
standby_mgrs = []
@@ -458,7 +480,7 @@ def create_cephCSIKeyring_cephFSProvisioner(self):
if self._arg_parser.restricted_auth_permission:
if metadata_pool == "":
raise ExecutionFailureException(
"'cephfs_metadata_pool_name' not found, please set the '--cephfs-metadata-pool-name' flag")
"'cephfs_metadata_pool_name' not found, please set the '--cephfs-metadata-pool-name' flag")
cmd_json = {"prefix": "auth get-or-create",
"entity": entity,
"caps": ["mon", "allow r", "mgr", "allow rw",
@@ -470,6 +492,8 @@ def create_cephCSIKeyring_cephFSProvisioner(self):
"caps": ["mon", "allow r", "mgr", "allow rw",
"osd", "allow rw tag cephfs metadata=*"],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + cmd_json['caps'][0] + " to fetch/create Ceph CSI keyring for cephFS provisioner")
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -492,9 +516,10 @@ def create_cephCSIKeyring_cephFSNode(self):
cmd_json = {"prefix": "auth get-or-create",
"entity": entity,
"caps": ["mon", "allow r",
"mgr", "allow rw",
"osd", "allow rw tag cephfs data={}".format(data_pool),
"mds", "allow rw"],
"mgr", "allow rw",
"osd", "allow rw tag cephfs data={}".format(
data_pool),
"mds", "allow rw"],
"format": "json"}
else:
cmd_json = {"prefix": "auth get-or-create",
@@ -504,6 +529,8 @@ def create_cephCSIKeyring_cephFSNode(self):
"osd", "allow rw tag cephfs *=*",
"mds", "allow rw"],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + cmd_json['caps'][0] + " to fetch/create Ceph CSI keyring for cephFS node")
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -518,7 +545,7 @@ def create_cephCSIKeyring_RBDProvisioner(self):
entity = "client.csi-rbd-provisioner"
if cluster_name:
entity = "client.csi-rbd-provisioner-{}".format(cluster_name)
cmd_json={}
cmd_json = {}
if self._arg_parser.restricted_auth_permission:
if rbd_pool_name == "":
raise ExecutionFailureException(
@@ -536,6 +563,8 @@ def create_cephCSIKeyring_RBDProvisioner(self):
"mgr", "allow rw",
"osd", "profile rbd"],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + cmd_json['caps'][0] + " to fetch/create Ceph CSI keyring for RBD provisioner")
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -546,6 +575,8 @@ def create_cephCSIKeyring_RBDProvisioner(self):

def get_cephfs_data_pool_details(self):
cmd_json = {"prefix": "fs ls", "format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'] + " to fetch cephFS data pool details")
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt, report an error
if ret_val != 0:
@@ -597,8 +628,10 @@ def get_cephfs_data_pool_details(self):
return

if matching_json_out:
self._arg_parser.cephfs_filesystem_name = str(matching_json_out['name'])
self._arg_parser.cephfs_metadata_pool_name = str(matching_json_out['metadata_pool'])
self._arg_parser.cephfs_filesystem_name = str(
matching_json_out['name'])
self._arg_parser.cephfs_metadata_pool_name = str(
matching_json_out['metadata_pool'])

if type(matching_json_out['data_pools']) == list:
# if the user has already provided data-pool-name,
@@ -635,7 +668,7 @@ def create_cephCSIKeyring_RBDNode(self):
entity = "client.csi-rbd-node"
if cluster_name:
entity = "client.csi-rbd-node-{}".format(cluster_name)
cmd_json={}
cmd_json = {}
if self._arg_parser.restricted_auth_permission:
if rbd_pool_name == "":
raise ExecutionFailureException(
@@ -651,6 +684,8 @@ def create_cephCSIKeyring_RBDNode(self):
"caps": ["mon", "profile rbd",
"osd", "profile rbd"],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + cmd_json['caps'][0] + " to fetch/create Ceph CSI keyring for RBD provisioner")
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -666,6 +701,8 @@ def create_checkerKey(self):
"mgr", self.MIN_USER_CAP_PERMISSIONS['mgr'],
"osd", self.MIN_USER_CAP_PERMISSIONS['osd'].format(self._arg_parser.rgw_pool_prefix)],
"format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'] + "" + cmd_json['entity'] + "" + cmd_json['caps'][0] + " to fetch/create to key for rook external user")
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -676,6 +713,8 @@ def create_checkerKey(self):

def get_ceph_dashboard_link(self):
cmd_json = {"prefix": "mgr services", "format": "json"}
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + cmd_json['prefix'] + " to fetch mgr services")
ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
@@ -687,6 +726,8 @@ def get_ceph_dashboard_link(self):
def create_rgw_admin_ops_user(self):
cmd = ['radosgw-admin', 'user', 'create', '--uid', self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME, '--display-name',
'Rook RGW Admin Ops user', '--caps', 'buckets=*;users=*;usage=read;metadata=read;zone=read']
if self._arg_parser.dry_run:
return self.dry_run("running command ceph " + "".joing(cmd) + " to fetch/create rgw admin ops user for access and secret key")
try:
output = subprocess.check_output(cmd,
stderr=subprocess.PIPE)
@@ -751,7 +792,8 @@ def _gen_output_map(self):
self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = ''
# create CephFS node and provisioner keyring only when MDS exists
if self.out_map['CEPHFS_FS_NAME'] and self.out_map['CEPHFS_POOL_NAME']:
self.out_map['CSI_CEPHFS_NODE_SECRET'] = self.create_cephCSIKeyring_cephFSNode()
self.out_map['CSI_CEPHFS_NODE_SECRET'] = self.create_cephCSIKeyring_cephFSNode(
)
self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_cephFSProvisioner()
self.out_map['RGW_ENDPOINT'] = self._arg_parser.rgw_endpoint
self.out_map['RGW_TLS_CERT'] = ''
@@ -775,6 +817,9 @@ def gen_shell_out(self):
return shOut

def gen_json_out(self):
if self._arg_parser.dry_run:
return self._gen_output_map()

self._gen_output_map()
json_out = [
{
@@ -1035,14 +1080,16 @@ def test_method_main_output(self):

def test_method_create_cephCSIKeyring_cephFSProvisioner(self):
csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
print("cephCSIKeyring without restricting it to a metadata pool. {}".format(csiKeyring))
print("cephCSIKeyring without restricting it to a metadata pool. {}".format(
csiKeyring))
self.rjObj._arg_parser.restricted_auth_permission = True
self.rjObj._arg_parser.cephfs_metadata_pool_name = "myfs-metadata"
csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
print("cephCSIKeyring for a specific metadata pool. {}".format(csiKeyring))
self.rjObj._arg_parser.cluster_name = "openshift-storage"
csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
print("cephCSIKeyring for a specific metadata pool and cluster. {}".format(csiKeyring))
print("cephCSIKeyring for a specific metadata pool and cluster. {}".format(
csiKeyring))

def test_non_zero_return_and_error(self):
self.rjObj.cluster.return_val = 1
