diff --git a/.github/workflows/canary-integration-test.yml b/.github/workflows/canary-integration-test.yml
index a72f31b84ffbe..85d226a7b3168 100644
--- a/.github/workflows/canary-integration-test.yml
+++ b/.github/workflows/canary-integration-test.yml
@@ -59,6 +59,11 @@ jobs:
           kubectl -n rook-ceph cp deploy/examples/create-external-cluster-resources.py $toolbox:/etc/ceph
           timeout 10 sh -c "until kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name replicapool; do echo 'waiting for script to succeed' && sleep 1; done"
 
+      - name: dry run external script create-external-cluster-resources.py
+        run: |
+          toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
+          kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name=replicapool --dry-run
+
       - name: run external script create-external-cluster-resources.py unit tests
         run: |
           kubectl -n rook-ceph exec $(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[0].metadata.name}') -- python3 -m unittest /etc/ceph/create-external-cluster-resources.py
diff --git a/deploy/examples/create-external-cluster-resources.py b/deploy/examples/create-external-cluster-resources.py
index a53e643f74c9a..93ce6abf64201 100644
--- a/deploy/examples/create-external-cluster-resources.py
+++ b/deploy/examples/create-external-cluster-resources.py
@@ -193,6 +193,8 @@ def gen_arg_parser(cls, args_to_parse=None):
                                   help="Ceph Manager prometheus exporter endpoints (comma separated list of entries of active and standby mgrs)")
         output_group.add_argument("--monitoring-endpoint-port", default="", required=False,
                                   help="Ceph Manager prometheus exporter port")
+        output_group.add_argument("--dry-run", default=False, action='store_true',
+                                  help="Dry run the python script")
 
         upgrade_group = argP.add_argument_group('upgrade')
         upgrade_group.add_argument("--upgrade", action='store_true', default=False,
@@ -205,6 +207,10 @@ def gen_arg_parser(cls, args_to_parse=None):
             args_to_parse = sys.argv[1:]
         return argP.parse_args(args_to_parse)
 
+    def dry_run(self, msg):
+        if self._arg_parser.dry_run:
+            print("Execute: " + "'" + msg + "'")
+
     def validate_rgw_endpoint_tls_cert(self):
         if self._arg_parser.rgw_tls_cert_path:
             with open(self._arg_parser.rgw_tls_cert_path, encoding='utf8') as f:
@@ -307,6 +313,8 @@ def shutdown(self):
             self.cluster.shutdown()
 
     def get_fsid(self):
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph fsid")
         return str(self.cluster.get_fsid())
 
     def _common_cmd_json_gen(self, cmd_json):
@@ -325,6 +333,8 @@ def _common_cmd_json_gen(self, cmd_json):
 
     def get_ceph_external_mon_data(self):
         cmd_json = {"prefix": "quorum_status", "format": "json"}
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph " + cmd_json['prefix'])
         ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
         # if there is an unsuccessful attempt,
         if ret_val != 0 or len(json_out) == 0:
@@ -375,6 +385,8 @@ def _convert_hostname_to_ip(self, host_name):
         return ip
 
     def get_active_and_standby_mgrs(self):
+        if self._arg_parser.dry_run:
+            return "", self.dry_run("ceph status")
         monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port
         monitoring_endpoint_ip_list = self._arg_parser.monitoring_endpoint
         standby_mgrs = []
@@ -470,6 +482,8 @@ def create_cephCSIKeyring_cephFSProvisioner(self):
                         "caps": ["mon", "allow r", "mgr", "allow rw",
                                  "osd", "allow rw tag cephfs metadata=*"],
                         "format": "json"}
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + " ".join(cmd_json['caps']))
         ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
         # if there is an unsuccessful attempt,
         if ret_val != 0 or len(json_out) == 0:
@@ -504,6 +518,8 @@ def create_cephCSIKeyring_cephFSNode(self):
                                  "osd", "allow rw tag cephfs *=*",
                                  "mds", "allow rw"],
                         "format": "json"}
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + " ".join(cmd_json['caps']))
         ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
         # if there is an unsuccessful attempt,
         if ret_val != 0 or len(json_out) == 0:
@@ -518,7 +534,7 @@ def create_cephCSIKeyring_RBDProvisioner(self):
         entity = "client.csi-rbd-provisioner"
         if cluster_name:
             entity = "client.csi-rbd-provisioner-{}".format(cluster_name)
-        cmd_json={}
+        cmd_json = {}
         if self._arg_parser.restricted_auth_permission:
             if rbd_pool_name == "":
                 raise ExecutionFailureException(
@@ -536,6 +552,8 @@ def create_cephCSIKeyring_RBDProvisioner(self):
                                  "mgr", "allow rw",
                                  "osd", "profile rbd"],
                         "format": "json"}
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + " ".join(cmd_json['caps']))
         ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
         # if there is an unsuccessful attempt,
         if ret_val != 0 or len(json_out) == 0:
@@ -546,6 +564,8 @@ def create_cephCSIKeyring_RBDProvisioner(self):
 
     def get_cephfs_data_pool_details(self):
         cmd_json = {"prefix": "fs ls", "format": "json"}
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph " + cmd_json['prefix'])
         ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
         # if there is an unsuccessful attempt, report an error
         if ret_val != 0:
@@ -597,8 +617,10 @@ def get_cephfs_data_pool_details(self):
             return
 
         if matching_json_out:
-            self._arg_parser.cephfs_filesystem_name = str(matching_json_out['name'])
-            self._arg_parser.cephfs_metadata_pool_name = str(matching_json_out['metadata_pool'])
+            self._arg_parser.cephfs_filesystem_name = str(
+                matching_json_out['name'])
+            self._arg_parser.cephfs_metadata_pool_name = str(
+                matching_json_out['metadata_pool'])
 
         if type(matching_json_out['data_pools']) == list:
             # if the user has already provided data-pool-name,
@@ -635,7 +657,7 @@ def create_cephCSIKeyring_RBDNode(self):
         entity = "client.csi-rbd-node"
         if cluster_name:
             entity = "client.csi-rbd-node-{}".format(cluster_name)
-        cmd_json={}
+        cmd_json = {}
         if self._arg_parser.restricted_auth_permission:
             if rbd_pool_name == "":
                 raise ExecutionFailureException(
@@ -651,6 +673,8 @@ def create_cephCSIKeyring_RBDNode(self):
                         "caps": ["mon", "profile rbd",
                                  "osd", "profile rbd"],
                         "format": "json"}
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + " ".join(cmd_json['caps']))
         ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
         # if there is an unsuccessful attempt,
         if ret_val != 0 or len(json_out) == 0:
@@ -666,6 +690,8 @@ def create_checkerKey(self):
                              "mgr", self.MIN_USER_CAP_PERMISSIONS['mgr'],
                              "osd", self.MIN_USER_CAP_PERMISSIONS['osd'].format(self._arg_parser.rgw_pool_prefix)],
                     "format": "json"}
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph " + cmd_json['prefix'] + " " + cmd_json['entity'] + " " + " ".join(cmd_json['caps']))
         ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
         # if there is an unsuccessful attempt,
         if ret_val != 0 or len(json_out) == 0:
@@ -676,6 +702,8 @@ def create_checkerKey(self):
 
     def get_ceph_dashboard_link(self):
         cmd_json = {"prefix": "mgr services", "format": "json"}
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph " + cmd_json['prefix'])
         ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json)
         # if there is an unsuccessful attempt,
         if ret_val != 0 or len(json_out) == 0:
@@ -687,6 +715,8 @@ def get_ceph_dashboard_link(self):
 
     def create_rgw_admin_ops_user(self):
         cmd = ['radosgw-admin', 'user', 'create', '--uid', self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME,
               '--display-name', 'Rook RGW Admin Ops user', '--caps', 'buckets=*;users=*;usage=read;metadata=read;zone=read']
+        if self._arg_parser.dry_run:
+            return self.dry_run("ceph " + " ".join(cmd))
         try:
             output = subprocess.check_output(cmd, stderr=subprocess.PIPE)
@@ -751,7 +781,8 @@ def _gen_output_map(self):
         self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = ''
         # create CephFS node and provisioner keyring only when MDS exists
         if self.out_map['CEPHFS_FS_NAME'] and self.out_map['CEPHFS_POOL_NAME']:
-            self.out_map['CSI_CEPHFS_NODE_SECRET'] = self.create_cephCSIKeyring_cephFSNode()
+            self.out_map['CSI_CEPHFS_NODE_SECRET'] = self.create_cephCSIKeyring_cephFSNode(
+            )
             self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_cephFSProvisioner()
         self.out_map['RGW_ENDPOINT'] = self._arg_parser.rgw_endpoint
         self.out_map['RGW_TLS_CERT'] = ''
@@ -775,6 +806,9 @@ def gen_shell_out(self):
         return shOut
 
     def gen_json_out(self):
+        if self._arg_parser.dry_run:
+            return self._gen_output_map()
+
         self._gen_output_map()
         json_out = [
             {
@@ -1035,14 +1069,16 @@ def test_method_main_output(self):
 
     def test_method_create_cephCSIKeyring_cephFSProvisioner(self):
         csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
-        print("cephCSIKeyring without restricting it to a metadata pool. {}".format(csiKeyring))
+        print("cephCSIKeyring without restricting it to a metadata pool. {}".format(
+            csiKeyring))
         self.rjObj._arg_parser.restricted_auth_permission = True
         self.rjObj._arg_parser.cephfs_filesystem_name = "myfs"
         csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
         print("cephCSIKeyring for a specific metadata pool. {}".format(csiKeyring))
         self.rjObj._arg_parser.cluster_name = "openshift-storage"
         csiKeyring = self.rjObj.create_cephCSIKeyring_cephFSProvisioner()
-        print("cephCSIKeyring for a specific metadata pool and cluster. {}".format(csiKeyring))
+        print("cephCSIKeyring for a specific metadata pool and cluster. {}".format(
+            csiKeyring))
 
     def test_non_zero_return_and_error(self):
         self.rjObj.cluster.return_val = 1
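
A rough sketch of what the new flag does in practice, using the same invocation as the CI step added above. The printed lines come from the new dry_run() helper; the exact set and order of commands depends on which flags are passed and on the cluster, so the output shown here is illustrative only, not a captured log:

    # resolve the toolbox pod, then dry-run the script against the replicapool RBD pool
    toolbox=$(kubectl get pod -l app=rook-ceph-tools -n rook-ceph -o jsonpath='{.items[*].metadata.name}')
    kubectl -n rook-ceph exec $toolbox -- python3 /etc/ceph/create-external-cluster-resources.py --rbd-data-pool-name=replicapool --dry-run
    # Expected output: one "Execute: '<command>'" line per Ceph command the script
    # would otherwise have run, e.g.
    #   Execute: 'ceph fsid'
    #   Execute: 'ceph quorum_status'
    #   Execute: 'ceph status'
    # followed by similar lines for the CSI keyring and RGW admin ops user commands,
    # instead of actually creating any users or keys.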