'''
Copyright 2020 The Rook Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import errno
import sys
import json
import argparse
import unittest
import re
import requests
import subprocess
from os import linesep as LINESEP
from os import path
# backward compatibility with 2.x
try:
ModuleNotFoundError
except:
ModuleNotFoundError = ImportError
try:
import rados
except ModuleNotFoundError as noModErr:
print("Error: %s\nExiting the script..." % noModErr)
sys.exit(1)
try:
# for 2.7.x
from StringIO import StringIO
except ModuleNotFoundError:
# for 3.x
from io import StringIO
try:
# for 2.7.x
from urlparse import urlparse
except ModuleNotFoundError:
# for 3.x
from urllib.parse import urlparse
class ExecutionFailureException(Exception):
    """Raised when a ceph command, validation step, or endpoint probe fails."""
################################################
################## DummyRados ##################
################################################
# this is mainly for testing and could be used where 'rados' is not available
class DummyRados(object):
    """Test double for the 'rados' client library.

    Canned JSON responses are served from 'cmd_output_map', keyed by the
    sort_keys-serialized JSON form of the mon command, so the rest of the
    script can run where a real cluster (or the 'rados' bindings) is
    unavailable.
    """

    def __init__(self):
        # mimic a healthy, connected cluster handle
        self.return_val = 0
        self.err_message = ''
        self.state = 'connected'
        self.cmd_output_map = {}
        self.cmd_names = {}
        self._init_cmd_output_map()
        # hostname -> fabricated IP cache used by _convert_hostname_to_ip()
        self.dummy_host_ip_map = {}

    def _init_cmd_output_map(self):
        """Load the canned 'ceph status' output from test-data and register
        every supported mon command (canonical JSON key) with its response."""
        json_file_name = 'test-data/ceph-status-out'
        script_dir = path.abspath(path.dirname(__file__))
        ceph_status_str = ""
        with open(path.join(script_dir, json_file_name), 'r') as json_file:
            ceph_status_str = json_file.read()
        self.cmd_names['fs ls'] = '''{"format": "json", "prefix": "fs ls"}'''
        self.cmd_names['quorum_status'] = '''{"format": "json", "prefix": "quorum_status"}'''
        self.cmd_names['caps_change_default_pool_prefix'] = '''{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth caps"}'''
        self.cmd_names['mgr services'] = '''{"format": "json", "prefix": "mgr services"}'''
        # all the commands and their output
        self.cmd_output_map[self.cmd_names['fs ls']] = '''[{"name":"myfs","metadata_pool":"myfs-metadata","metadata_pool_id":2,"data_pool_ids":[3],"data_pools":["myfs-replicated"]}]'''
        self.cmd_output_map[self.cmd_names['quorum_status']] = '''{"election_epoch":3,"quorum":[0],"quorum_names":["a"],"quorum_leader_name":"a","quorum_age":14385,"features":{"quorum_con":"4540138292836696063","quorum_mon":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"]},"monmap":{"epoch":1,"fsid":"af4e1673-0b72-402d-990a-22d2919d0f1c","modified":"2020-05-07T03:36:39.918035Z","created":"2020-05-07T03:36:39.918035Z","min_mon_release":15,"min_mon_release_name":"octopus","features":{"persistent":["kraken","luminous","mimic","osdmap-prune","nautilus","octopus"],"optional":[]},"mons":[{"rank":0,"name":"a","public_addrs":{"addrvec":[{"type":"v2","addr":"10.110.205.174:3300","nonce":0},{"type":"v1","addr":"10.110.205.174:6789","nonce":0}]},"addr":"10.110.205.174:6789/0","public_addr":"10.110.205.174:6789/0","priority":0,"weight":0}]}}'''
        self.cmd_output_map[self.cmd_names['mgr services']] = '''{"dashboard":"https://ceph-dashboard:8443/","prometheus":"http://ceph-dashboard-db:9283/"}'''
        self.cmd_output_map['''{"caps": ["mon", "allow r, allow command quorum_status", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon":"allow r, allow command quorum_status","osd":"allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow x pool=default.rgw.buckets.index"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "profile rbd", "osd", "profile rbd"], "entity": "client.csi-rbd-node", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-node","key":"AQBOgrNeHbK1AxAAubYBeV8S1U/GPzq5SVeq6g==","caps":{"mon":"profile rbd","osd":"profile rbd"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "profile rbd", "mgr", "allow rw", "osd", "profile rbd"], "entity": "client.csi-rbd-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-rbd-provisioner","key":"AQBNgrNe1geyKxAA8ekViRdE+hss5OweYBkwNg==","caps":{"mgr":"allow rw","mon":"profile rbd","osd":"profile rbd"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs *=*", "mds", "allow rw"], "entity": "client.csi-cephfs-node", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-node","key":"AQBOgrNeENunKxAAPCmgE7R6G8DcXnaJ1F32qg==","caps":{"mds":"allow rw","mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs *=*"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=*"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-provisioner","key":"AQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs metadata=*"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=myfs"], "entity": "client.csi-cephfs-provisioner", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-provisioner","key":"BQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs metadata=myfs"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r", "mgr", "allow rw", "osd", "allow rw tag cephfs metadata=myfs"], "entity": "client.csi-cephfs-provisioner-openshift-storage", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.csi-cephfs-provisioner-openshift-storage","key":"CQBOgrNeAFgcGBAAvGqKOAD0D3xxmVY0R912dg==","caps":{"mgr":"allow rw","mon":"allow r","osd":"allow rw tag cephfs metadata=myfs"}}]'''
        self.cmd_output_map['''{"caps": ["mon", "allow r, allow command quorum_status, allow command version", "mgr", "allow command config", "osd", "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"], "entity": "client.healthchecker", "format": "json", "prefix": "auth get-or-create"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]'''
        self.cmd_output_map['''{"format": "json", "prefix": "mgr services"}'''] = '''{"dashboard": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:7000/", "prometheus": "http://rook-ceph-mgr-a-57cf9f84bc-f4jnl:9283/"}'''
        # NOTE: the original code assigned two different values to this same
        # 'auth get' key back-to-back; only the last assignment was ever
        # observable, so the dead first assignment has been dropped.
        self.cmd_output_map['''{"entity": "client.healthchecker", "format": "json", "prefix": "auth get"}'''] = '''[{"entity":"client.healthchecker","key":"AQDFkbNeft5bFRAATndLNUSEKruozxiZi3lrdA==","caps":{"mon": "allow r, allow command quorum_status, allow command version", "mgr": "allow command config", "osd": "allow rwx pool=default.rgw.meta, allow r pool=.rgw.root, allow rw pool=default.rgw.control, allow rx pool=default.rgw.log, allow x pool=default.rgw.buckets.index"}}]'''
        self.cmd_output_map[self.cmd_names['caps_change_default_pool_prefix']] = '''[{}]'''
        self.cmd_output_map['{"format": "json", "prefix": "status"}'] = ceph_status_str

    def shutdown(self):
        # nothing to tear down for the dummy handle
        pass

    def get_fsid(self):
        # fixed fsid matching the canned quorum_status output above
        return 'af4e1673-0b72-402d-990a-22d2919d0f1c'

    def conf_read_file(self):
        pass

    def connect(self):
        pass

    def pool_exists(self, pool_name):
        return True

    def mon_command(self, cmd, out):
        """Return (retval, canned output, encoded err message) for 'cmd'.

        The command is canonicalized (json round-trip with sort_keys) before
        the lookup; unknown commands raise KeyError.
        """
        json_cmd = json.loads(cmd)
        json_cmd_str = json.dumps(json_cmd, sort_keys=True)
        cmd_output = self.cmd_output_map[json_cmd_str]
        return self.return_val, \
            cmd_output, \
            "{}".format(self.err_message).encode('utf-8')

    def _convert_hostname_to_ip(self, host_name):
        """Return 'host_name' unchanged when it already is an IPv4 literal,
        otherwise a stable fabricated '172.9.x.y' address."""
        # dots are escaped so that strings like '1a2b3c4' are not mistaken
        # for IPs (the original pattern used a bare '.', matching anything)
        ip_reg_x = re.compile(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}')
        # if provided host is directly an IP address, return the same
        if ip_reg_x.match(host_name):
            return host_name
        import random
        host_ip = self.dummy_host_ip_map.get(host_name, "")
        if not host_ip:
            host_ip = "172.9.{}.{}".format(
                random.randint(0, 254), random.randint(0, 254))
            self.dummy_host_ip_map[host_name] = host_ip
        return host_ip

    @classmethod
    def Rados(cls, conffile=None):
        """Factory mirroring rados.Rados(); 'conffile' is accepted and
        ignored. (The original omitted 'cls', so calling with an explicit
        'conffile=' keyword raised a TypeError.)"""
        return DummyRados()
class RadosJSON:
    """Collects connection details and credentials from a Ceph cluster (via
    JSON-format mon commands) for consumption by an external Rook cluster."""
    # default user for cluster health checks when '--run-as-user' is not given
    EXTERNAL_USER_NAME = "client.healthchecker"
    EXTERNAL_RGW_ADMIN_OPS_USER_NAME = "rgw-admin-ops-user"
    # error text reported when a mon command succeeds but returns nothing
    EMPTY_OUTPUT_LIST = "Empty output list"
    # fallback for '--rgw-pool-prefix'
    DEFAULT_RGW_POOL_PREFIX = "default"
    # fallback for '--monitoring-endpoint-port'
    DEFAULT_MONITORING_ENDPOINT_PORT = "9283"
@classmethod
def gen_arg_parser(cls, args_to_parse=None):
argP = argparse.ArgumentParser()
common_group = argP.add_argument_group('common')
common_group.add_argument("--verbose", "-v",
action='store_true', default=False)
common_group.add_argument("--ceph-conf", "-c",
help="Provide a ceph conf file.", type=str)
common_group.add_argument("--run-as-user", "-u", default="", type=str,
help="Provides a user name to check the cluster's health status, must be prefixed by 'client.'")
common_group.add_argument("--cluster-name", default="",
help="Ceph cluster name")
common_group.add_argument("--namespace", default="",
help="Namespace where CephCluster is running")
common_group.add_argument("--rgw-pool-prefix", default="",
help="RGW Pool prefix")
common_group.add_argument("--restricted-auth-permission", default=False,
help="Restricted cephCSIKeyrings auth permissions to specific pools and cluster. Mandatory flags that need to be set are --cephfs-filesystem-name and --rbd-data-pool-name. Note: Restricting the users per pool and per cluster will require to create new users and new secrets for that users.")
output_group = argP.add_argument_group('output')
output_group.add_argument("--format", "-t", choices=["json", "bash"],
default='json', help="Provides the output format (json | bash)")
output_group.add_argument("--output", "-o", default="",
help="Output will be stored into the provided file")
output_group.add_argument("--cephfs-filesystem-name", default="",
help="Provides the name of the Ceph filesystem")
output_group.add_argument("--cephfs-metadata-pool-name", default="",
help="Provides the name of the cephfs metadata pool")
output_group.add_argument("--cephfs-data-pool-name", default="",
help="Provides the name of the cephfs data pool")
output_group.add_argument("--rbd-data-pool-name", default="", required=False,
help="Provides the name of the RBD datapool")
output_group.add_argument("--rgw-endpoint", default="", required=False,
help="Rados GateWay endpoint (in <IP>:<PORT> format)")
output_group.add_argument("--rgw-tls-cert-path", default="", required=False,
help="Rados GateWay endpoint TLS certificate")
output_group.add_argument("--rgw-skip-tls", required=False, default=False,
help="Ignore TLS certification validation when a self-signed certificate is provided (NOT RECOMMENDED")
output_group.add_argument("--monitoring-endpoint", default="", required=False,
help="Ceph Manager prometheus exporter endpoints (comma separated list of <IP> entries of active and standby mgrs)")
output_group.add_argument("--monitoring-endpoint-port", default="", required=False,
help="Ceph Manager prometheus exporter port")
upgrade_group = argP.add_argument_group('upgrade')
upgrade_group.add_argument("--upgrade", action='store_true', default=False,
help="Upgrades the 'user' with all the permissions needed for the new cluster version")
if args_to_parse:
assert type(args_to_parse) == list, \
"Argument to 'gen_arg_parser' should be a list"
else:
args_to_parse = sys.argv[1:]
return argP.parse_args(args_to_parse)
def validate_rgw_endpoint_tls_cert(self):
if self._arg_parser.rgw_tls_cert_path:
with open(self._arg_parser.rgw_tls_cert_path, encoding='utf8') as f:
contents = f.read()
return contents.rstrip()
def _check_conflicting_options(self):
if not self._arg_parser.upgrade and not self._arg_parser.rbd_data_pool_name:
raise ExecutionFailureException(
"Either '--upgrade' or '--rbd-data-pool-name <pool_name>' should be specified")
if self._arg_parser.upgrade and self._arg_parser.rbd_data_pool_name:
raise ExecutionFailureException(
"Both '--upgrade' and '--rbd-data-pool-name <pool_name>' should not be specified, choose only one")
# a user name must be provided while using '--upgrade' option
if not self._arg_parser.run_as_user and self._arg_parser.upgrade:
raise ExecutionFailureException(
"Please provide an existing user-name through '--run-as-user' (or '-u') flag while upgrading")
def _invalid_endpoint(self, endpoint_str):
try:
ipv4, port = endpoint_str.split(':')
except ValueError:
raise ExecutionFailureException(
"Not a proper endpoint: {}, <IPv4>:<PORT>, format is expected".format(endpoint_str))
ipParts = ipv4.split('.')
if len(ipParts) != 4:
raise ExecutionFailureException(
"Not a valid IP address: {}".format(ipv4))
for eachPart in ipParts:
if not eachPart.isdigit():
raise ExecutionFailureException(
"IP address parts should be numbers: {}".format(ipv4))
intPart = int(eachPart)
if intPart < 0 or intPart > 254:
raise ExecutionFailureException(
"Out of range IP addresses: {}".format(ipv4))
if not port.isdigit():
raise ExecutionFailureException("Port not valid: {}".format(port))
intPort = int(port)
if intPort < 1 or intPort > 2**16-1:
raise ExecutionFailureException(
"Out of range port number: {}".format(port))
return False
def endpoint_dial(self, endpoint_str, timeout=3, cert=None):
    """Probe 'endpoint_str' with an HTTP HEAD request over http then https.

    Returns silently when any protocol answers 200 (or when running against
    the DummyRados test double); raises ExecutionFailureException when the
    endpoint is unreachable over both protocols.

    :param timeout: per-request timeout in seconds
    :param cert: optional TLS certificate (path/bundle) passed to 'verify'
    """
    # if the 'cluster' instance is a dummy one,
    # don't try to reach out to the endpoint
    if isinstance(self.cluster, DummyRados):
        return
    protocols = ["http", "https"]
    for prefix in protocols:
        try:
            ep = "{}://{}".format(prefix, endpoint_str)
            # If verify is set to a path to a directory,
            # the directory must have been processed using the c_rehash utility supplied with OpenSSL.
            if prefix == "https" and cert and self._arg_parser.rgw_skip_tls:
                r = requests.head(ep, timeout=timeout, verify=False)
            elif prefix == "https" and cert:
                r = requests.head(ep, timeout=timeout, verify=cert)
            else:
                r = requests.head(ep, timeout=timeout)
            if r.status_code == 200:
                return
        # narrowed from a bare 'except:' -- still best-effort per protocol,
        # but no longer swallows SystemExit/KeyboardInterrupt
        except Exception:
            continue
    raise ExecutionFailureException(
        "unable to connect to endpoint: {}".format(endpoint_str))
def __init__(self, arg_list=None):
    """Parse and validate CLI arguments, then connect to the Ceph cluster.

    :param arg_list: optional list of CLI argument strings
        (defaults to sys.argv[1:])
    :raises ExecutionFailureException: on conflicting CLI options
    """
    self.out_map = {}
    self._excluded_keys = set()
    self._arg_parser = self.gen_arg_parser(args_to_parse=arg_list)
    self._check_conflicting_options()
    self.run_as_user = self._arg_parser.run_as_user
    self.output_file = self._arg_parser.output
    self.ceph_conf = self._arg_parser.ceph_conf
    # minimal caps for the health-checker user; '{0}' is the rgw pool prefix
    self.MIN_USER_CAP_PERMISSIONS = {
        'mgr': 'allow command config',
        'mon': 'allow r, allow command quorum_status, allow command version',
        'osd': "allow rwx pool={0}.rgw.meta, " +
               "allow r pool=.rgw.root, " +
               "allow rw pool={0}.rgw.control, " +
               "allow rx pool={0}.rgw.log, " +
               "allow x pool={0}.rgw.buckets.index"
    }
    # if user not provided, give a default user
    if not self.run_as_user and not self._arg_parser.upgrade:
        self.run_as_user = self.EXTERNAL_USER_NAME
    if not self._arg_parser.rgw_pool_prefix and not self._arg_parser.upgrade:
        self._arg_parser.rgw_pool_prefix = self.DEFAULT_RGW_POOL_PREFIX
    # honour an explicit ceph.conf when given, otherwise read the default one
    if self.ceph_conf:
        self.cluster = rados.Rados(conffile=self.ceph_conf)
    else:
        self.cluster = rados.Rados()
        self.cluster.conf_read_file()
    self.cluster.connect()
def shutdown(self):
if self.cluster.state == "connected":
self.cluster.shutdown()
def get_fsid(self):
return str(self.cluster.get_fsid())
def _common_cmd_json_gen(self, cmd_json):
cmd = json.dumps(cmd_json, sort_keys=True)
ret_val, cmd_out, err_msg = self.cluster.mon_command(cmd, b'')
if self._arg_parser.verbose:
print("Command Input: {}".format(cmd))
print("Return Val: {}\nCommand Output: {}\nError Message: {}\n----------\n".format(
ret_val, cmd_out, err_msg))
json_out = {}
# if there is no error (i.e; ret_val is ZERO) and 'cmd_out' is not empty
# then convert 'cmd_out' to a json output
if ret_val == 0 and cmd_out:
json_out = json.loads(cmd_out)
return ret_val, json_out, err_msg
def get_ceph_external_mon_data(self):
    """Return the quorum leader's mon endpoint as '<name>=<ip>:<port>'.

    :raises ExecutionFailureException: when 'quorum_status' fails, no mon
        matches the leader name, or only the 'v2' address type is enabled
    """
    cmd_json = {"prefix": "quorum_status", "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    # if there is an unsuccessful attempt,
    if ret_val != 0 or len(json_out) == 0:
        raise ExecutionFailureException(
            "'quorum_status' command failed.\n" +
            "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
    q_leader_name = json_out['quorum_leader_name']
    q_leader_details = {}
    # find the monmap entry whose name matches the quorum leader
    q_leader_matching_list = [l for l in json_out['monmap']['mons']
                              if l['name'] == q_leader_name]
    if len(q_leader_matching_list) == 0:
        raise ExecutionFailureException("No matching 'mon' details found")
    q_leader_details = q_leader_matching_list[0]
    # get the address vector of the quorum-leader
    q_leader_addrvec = q_leader_details.get(
        'public_addrs', {}).get('addrvec', [])
    # if the quorum-leader has only one address in the address-vector
    # and it is of type 'v2' (ie; with <IP>:3300),
    # raise an exception to make user aware that
    # they have to enable 'v1' (ie; with <IP>:6789) type as well
    if len(q_leader_addrvec) == 1 and q_leader_addrvec[0]['type'] == 'v2':
        raise ExecutionFailureException(
            "Only 'v2' address type is enabled, user should also enable 'v1' type as well")
    # 'public_addr' is of the form '<ip>:<port>/<nonce>'; strip the nonce
    ip_port = str(q_leader_details['public_addr'].split('/')[0])
    return "{}={}".format(str(q_leader_name), ip_port)
def _join_host_port(self, endpoint, port):
port = "{}".format(port)
# regex to check the given endpoint is enclosed in square brackets
ipv6_regx = re.compile(r'^\[[^]]*\]$')
# endpoint has ':' in it and if not (already) enclosed in square brackets
if endpoint.count(':') and not ipv6_regx.match(endpoint):
endpoint = '[{}]'.format(endpoint)
if not port:
return endpoint
return ':'.join([endpoint, port])
def _convert_hostname_to_ip(self, host_name):
    """Resolve 'host_name' to an IP address.

    Raises ExecutionFailureException for an empty hostname; delegates to
    the test double's implementation when running against DummyRados."""
    if not host_name:
        raise ExecutionFailureException("Empty hostname provided")
    # the dummy cluster fabricates deterministic fake IPs instead of resolving
    if isinstance(self.cluster, DummyRados):
        return self.cluster._convert_hostname_to_ip(host_name)
    import socket
    return socket.gethostbyname(host_name)
def get_active_and_standby_mgrs(self):
    """Return (comma-separated mgr IPs with the active mgr first, port) for
    the Ceph Manager prometheus exporter endpoints.

    Uses '--monitoring-endpoint' / '--monitoring-endpoint-port' when given,
    otherwise discovers them from 'ceph status'.

    :raises ExecutionFailureException: when discovery, hostname resolution,
        or endpoint validation fails
    """
    monitoring_endpoint_port = self._arg_parser.monitoring_endpoint_port
    monitoring_endpoint_ip_list = self._arg_parser.monitoring_endpoint
    standby_mgrs = []
    if not monitoring_endpoint_ip_list:
        cmd_json = {"prefix": "status", "format": "json"}
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        # if there is an unsuccessful attempt,
        if ret_val != 0 or len(json_out) == 0:
            raise ExecutionFailureException(
                "'mgr services' command failed.\n" +
                "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
        monitoring_endpoint = json_out.get('mgrmap', {}).get(
            'services', {}).get('prometheus', '')
        if not monitoring_endpoint:
            raise ExecutionFailureException(
                "'prometheus' service not found, is the exporter enabled?'.\n")
        # now check the stand-by mgr-s
        standby_arr = json_out.get('mgrmap', {}).get('standbys', [])
        for each_standby in standby_arr:
            if 'name' in each_standby.keys():
                standby_mgrs.append(each_standby['name'])
        try:
            parsed_endpoint = urlparse(monitoring_endpoint)
        except ValueError:
            raise ExecutionFailureException(
                "invalid endpoint: {}".format(monitoring_endpoint))
        monitoring_endpoint_ip_list = parsed_endpoint.hostname
        if not monitoring_endpoint_port:
            monitoring_endpoint_port = "{}".format(parsed_endpoint.port)
    # if monitoring endpoint port is not set, put a default mon port
    if not monitoring_endpoint_port:
        monitoring_endpoint_port = self.DEFAULT_MONITORING_ENDPOINT_PORT
    # user could give comma and space separated inputs (like --monitoring-endpoint="<ip1>, <ip2>")
    monitoring_endpoint_ip_list = monitoring_endpoint_ip_list.replace(
        ",", " ")
    monitoring_endpoint_ip_list_split = monitoring_endpoint_ip_list.split()
    # if monitoring-endpoint could not be found, raise an error
    if len(monitoring_endpoint_ip_list_split) == 0:
        raise ExecutionFailureException("No 'monitoring-endpoint' found")
    # first ip is treated as the main monitoring-endpoint
    monitoring_endpoint_ip = monitoring_endpoint_ip_list_split[0]
    # rest of the ip-s are added to the 'standby_mgrs' list
    standby_mgrs.extend(monitoring_endpoint_ip_list_split[1:])
    try:
        failed_ip = monitoring_endpoint_ip
        monitoring_endpoint_ip = self._convert_hostname_to_ip(
            monitoring_endpoint_ip)
        # collect all the 'stand-by' mgr ips
        mgr_ips = []
        for each_standby_mgr in standby_mgrs:
            failed_ip = each_standby_mgr
            mgr_ips.append(
                self._convert_hostname_to_ip(each_standby_mgr))
    # narrowed from a bare 'except:' so that only genuine resolution errors
    # are translated; SystemExit/KeyboardInterrupt now propagate
    except Exception:
        raise ExecutionFailureException(
            "Conversion of host: {} to IP failed. "
            "Please enter the IP addresses of all the ceph-mgrs with the '--monitoring-endpoint' flag".format(failed_ip))
    monitoring_endpoint = self._join_host_port(
        monitoring_endpoint_ip, monitoring_endpoint_port)
    self._invalid_endpoint(monitoring_endpoint)
    self.endpoint_dial(monitoring_endpoint)
    # add the validated active mgr IP into the first index
    mgr_ips.insert(0, monitoring_endpoint_ip)
    all_mgr_ips_str = ",".join(mgr_ips)
    return all_mgr_ips_str, monitoring_endpoint_port
def create_cephCSIKeyring_cephFSProvisioner(self):
'''
command: ceph auth get-or-create client.csi-cephfs-provisioner mon 'allow r' mgr 'allow rw' osd 'allow rw tag cephfs metadata=*'
'''
cluster_name = self._arg_parser.cluster_name
cephfs_filesystem = self._arg_parser.cephfs_filesystem_name
entity = "client.csi-cephfs-provisioner"
if cluster_name:
entity = "client.csi-cephfs-provisioner-{}".format(cluster_name)
cmd_json = {}
if self._arg_parser.restricted_auth_permission:
if cephfs_filesystem == "":
raise ExecutionFailureException(
"'cephfs_filesystem_name' not found, please set the '--cephfs-filesystem-name' flag")
cmd_json = {"prefix": "auth get-or-create",
"entity": entity,
"caps": ["mon", "allow r", "mgr", "allow rw",
"osd", "allow rw tag cephfs metadata={}".format(cephfs_filesystem)],
"format": "json"}
else:
cmd_json = {"prefix": "auth get-or-create",
"entity": entity,
"caps": ["mon", "allow r", "mgr", "allow rw",
"osd", "allow rw tag cephfs metadata=*"],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create client.csi-cephfs-provisioner' command failed.\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
def create_cephCSIKeyring_cephFSNode(self):
cluster_name = self._arg_parser.cluster_name
cephfs_filesystem = self._arg_parser.cephfs_filesystem_name
entity = "client.csi-cephfs-node"
if cluster_name:
entity = "client.csi-cephfs-node-{}".format(cluster_name)
cmd_json = {}
if self._arg_parser.restricted_auth_permission:
if cephfs_filesystem == "":
raise ExecutionFailureException(
"'cephfs_filesystem_name' not found, please set the '--cephfs-filesystem-name' flag")
cmd_json = {"prefix": "auth get-or-create",
"entity": entity,
"caps": ["mon", "allow r",
"mgr", "allow rw",
"osd", "allow rw tag cephfs data={}".format(cephfs_filesystem),
"mds", "allow rw"],
"format": "json"}
else:
cmd_json = {"prefix": "auth get-or-create",
"entity": entity,
"caps": ["mon", "allow r",
"mgr", "allow rw",
"osd", "allow rw tag cephfs *=*",
"mds", "allow rw"],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create client.csi-cephfs-node' command failed.\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
def create_cephCSIKeyring_RBDProvisioner(self):
rbd_pool_name = self._arg_parser.rbd_data_pool_name
cluster_name = self._arg_parser.cluster_name
entity = "client.csi-rbd-provisioner"
if cluster_name:
entity = "client.csi-rbd-provisioner-{}".format(cluster_name)
cmd_json={}
if self._arg_parser.restricted_auth_permission:
if rbd_pool_name == "":
raise ExecutionFailureException(
"'rbd_data_pool_name' not found, please set the '--rbd-data-pool-name' flag")
cmd_json = {"prefix": "auth get-or-create",
"entity": entity,
"caps": ["mon", "profile rbd",
"mgr", "allow rw",
"osd", "profile rbd pool={}".format(rbd_pool_name)],
"format": "json"}
else:
cmd_json = {"prefix": "auth get-or-create",
"entity": entity,
"caps": ["mon", "profile rbd",
"mgr", "allow rw",
"osd", "profile rbd"],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create client.csi-rbd-provisioner' command failed.\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
def get_cephfs_data_pool_details(self):
    """Resolve and validate the cephfs filesystem / metadata-pool /
    data-pool CLI arguments against the cluster's 'fs ls' output, filling
    in defaults on self._arg_parser where they were not provided.

    Silently returns when nothing was requested and no default can be
    chosen; raises ExecutionFailureException on invalid user input.
    """
    cmd_json = {"prefix": "fs ls", "format": "json"}
    ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
    # if there is an unsuccessful attempt, report an error
    if ret_val != 0:
        # if fs and data_pool arguments are not set, silently return
        if self._arg_parser.cephfs_filesystem_name == "" and self._arg_parser.cephfs_data_pool_name == "":
            return
        # if user has provided any of the
        # '--cephfs-filesystem-name' or '--cephfs-data-pool-name' arguments,
        # raise an exception as we are unable to verify the args
        raise ExecutionFailureException(
            "'fs ls' ceph call failed with error: {}".format(err_msg))
    matching_json_out = {}
    # if '--cephfs-filesystem-name' argument is provided,
    # check whether the provided filesystem-name exists or not
    if self._arg_parser.cephfs_filesystem_name:
        # get the matching list
        matching_json_out_list = [matched for matched in json_out
                                  if str(matched['name']) == self._arg_parser.cephfs_filesystem_name]
        # unable to find a matching fs-name, raise an error
        if len(matching_json_out_list) == 0:
            raise ExecutionFailureException(
                ("Filesystem provided, '{}', " +
                 "is not found in the fs-list: '{}'").format(
                    self._arg_parser.cephfs_filesystem_name,
                    [str(x['name']) for x in json_out]))
        matching_json_out = matching_json_out_list[0]
    # if cephfs filesystem name is not provided,
    # try to get a default fs name by doing the following
    else:
        # a. check if there is only one filesystem is present
        if len(json_out) == 1:
            matching_json_out = json_out[0]
        # b. or else, check if data_pool name is provided
        elif self._arg_parser.cephfs_data_pool_name:
            # and if present, check whether there exists a fs which has the data_pool
            for eachJ in json_out:
                if self._arg_parser.cephfs_data_pool_name in eachJ['data_pools']:
                    matching_json_out = eachJ
                    break
            # if there is no matching fs exists, that means provided data_pool name is invalid
            if not matching_json_out:
                raise ExecutionFailureException(
                    "Provided data_pool name, {}, does not exists".format(
                        self._arg_parser.cephfs_data_pool_name))
        # c. if nothing is set and couldn't find a default,
        else:
            # just return silently
            return
    if matching_json_out:
        self._arg_parser.cephfs_filesystem_name = str(matching_json_out['name'])
        self._arg_parser.cephfs_metadata_pool_name = str(matching_json_out['metadata_pool'])
        if type(matching_json_out['data_pools']) == list:
            # if the user has already provided data-pool-name,
            # through --cephfs-data-pool-name
            if self._arg_parser.cephfs_data_pool_name:
                # if the provided name is not matching with the one in the list
                if self._arg_parser.cephfs_data_pool_name not in matching_json_out['data_pools']:
                    raise ExecutionFailureException(
                        "{}: '{}', {}: {}".format(
                            "Provided data-pool-name",
                            self._arg_parser.cephfs_data_pool_name,
                            "doesn't match from the data-pools' list",
                            [str(x) for x in matching_json_out['data_pools']]))
            # if data_pool name is not provided,
            # then try to find a default data pool name
            else:
                # if no data_pools exist, silently return
                if len(matching_json_out['data_pools']) == 0:
                    return
                self._arg_parser.cephfs_data_pool_name = str(
                    matching_json_out['data_pools'][0])
                # if there are more than one 'data_pools' exist,
                # then warn the user that we are using the selected name
                if len(matching_json_out['data_pools']) > 1:
                    print("{}: {}\n{}: '{}'\n".format(
                        "WARNING: Multiple data pools detected",
                        [str(x) for x in matching_json_out['data_pools']],
                        "Using the data-pool",
                        self._arg_parser.cephfs_data_pool_name))
def create_cephCSIKeyring_RBDNode(self):
rbd_pool_name = self._arg_parser.rbd_data_pool_name
cluster_name = self._arg_parser.cluster_name
entity = "client.csi-rbd-node"
if cluster_name:
entity = "client.csi-rbd-node-{}".format(cluster_name)
cmd_json={}
if self._arg_parser.restricted_auth_permission:
if rbd_pool_name == "":
raise ExecutionFailureException(
"'rbd_data_pool_name' not found, please set the '--rbd-data-pool-name' flag")
cmd_json = {"prefix": "auth get-or-create",
"entity": entity,
"caps": ["mon", "profile rbd",
"osd", "profile rbd pool={}".format(rbd_pool_name)],
"format": "json"}
else:
cmd_json = {"prefix": "auth get-or-create",
"entity": entity,
"caps": ["mon", "profile rbd",
"osd", "profile rbd"],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create client.csi-rbd-node' command failed\n" +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
def create_checkerKey(self):
cmd_json = {"prefix": "auth get-or-create",
"entity": self.run_as_user,
"caps": ["mon", self.MIN_USER_CAP_PERMISSIONS['mon'],
"mgr", self.MIN_USER_CAP_PERMISSIONS['mgr'],
"osd", self.MIN_USER_CAP_PERMISSIONS['osd'].format(self._arg_parser.rgw_pool_prefix)],
"format": "json"}
ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
raise ExecutionFailureException(
"'auth get-or-create {}' command failed\n".format(self.run_as_user) +
"Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
return str(json_out[0]['key'])
def get_ceph_dashboard_link(self):
cmd_json = {"prefix": "mgr services", "format": "json"}
ret_val, json_out, _ = self._common_cmd_json_gen(cmd_json)
# if there is an unsuccessful attempt,
if ret_val != 0 or len(json_out) == 0:
return None
if not 'dashboard' in json_out:
return None
return json_out['dashboard']
def create_rgw_admin_ops_user(self):
cmd = ['radosgw-admin', 'user', 'create', '--uid', self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME, '--display-name',
'Rook RGW Admin Ops user', '--caps', 'buckets=*;users=*;usage=read;metadata=read;zone=read']
try:
output = subprocess.check_output(cmd,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as execErr:
# if the user already exists, we just query it
if execErr.returncode == errno.EEXIST:
cmd = ['radosgw-admin', 'user', 'info',
'--uid', self.EXTERNAL_RGW_ADMIN_OPS_USER_NAME
]
try:
output = subprocess.check_output(cmd,
stderr=subprocess.PIPE)
except subprocess.CalledProcessError as execErr:
err_msg = "failed to execute command %s. Output: %s. Code: %s. Error: %s" % (
cmd, execErr.output, execErr.returncode, execErr.stderr)
raise Exception(err_msg)
else:
err_msg = "failed to execute command %s. Output: %s. Code: %s. Error: %s" % (
cmd, execErr.output, execErr.returncode, execErr.stderr)
raise Exception(err_msg)
jsonoutput = json.loads(output)
return jsonoutput["keys"][0]['access_key'], jsonoutput["keys"][0]['secret_key']
    def _gen_output_map(self):
        """Populate self.out_map with everything the output generators need.

        Idempotent: returns immediately when the map is already built.
        Validates the rbd (and, when an RGW endpoint is given, the rgw)
        pools first, then creates the required keyrings and collects
        cluster details into self.out_map.
        """
        if self.out_map:
            return
        pools_to_validate = [self._arg_parser.rbd_data_pool_name]
        # if rgw_endpoint is provided, validate it
        if self._arg_parser.rgw_endpoint:
            self._invalid_endpoint(self._arg_parser.rgw_endpoint)
            self.endpoint_dial(self._arg_parser.rgw_endpoint,
                               cert=self.validate_rgw_endpoint_tls_cert())
            # standard RGW pools, derived from the configured pool prefix
            rgw_pool_to_validate = ["{0}.rgw.meta".format(self._arg_parser.rgw_pool_prefix),
                                    ".rgw.root",
                                    "{0}.rgw.control".format(
                                        self._arg_parser.rgw_pool_prefix),
                                    "{0}.rgw.log".format(
                                        self._arg_parser.rgw_pool_prefix)]
            pools_to_validate.extend(rgw_pool_to_validate)
        # fail early if any required pool is missing from the cluster
        for pool in pools_to_validate:
            if not self.cluster.pool_exists(pool):
                raise ExecutionFailureException(
                    "The provided pool, '{}', does not exist".format(pool))
        # CLUSTER_NAME is internal bookkeeping; keep it out of the shell export list
        self._excluded_keys.add('CLUSTER_NAME')
        self.get_cephfs_data_pool_details()
        self.out_map['NAMESPACE'] = self._arg_parser.namespace
        self.out_map['CLUSTER_NAME'] = self._arg_parser.cluster_name
        self.out_map['ROOK_EXTERNAL_FSID'] = self.get_fsid()
        self.out_map['ROOK_EXTERNAL_USERNAME'] = self.run_as_user
        self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'] = self.get_ceph_external_mon_data()
        self.out_map['ROOK_EXTERNAL_USER_SECRET'] = self.create_checkerKey()
        self.out_map['ROOK_EXTERNAL_DASHBOARD_LINK'] = self.get_ceph_dashboard_link()
        self.out_map['CSI_RBD_NODE_SECRET_SECRET'] = self.create_cephCSIKeyring_RBDNode()
        self.out_map['CSI_RBD_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_RBDProvisioner()
        self.out_map['CEPHFS_POOL_NAME'] = self._arg_parser.cephfs_data_pool_name
        self.out_map['CEPHFS_METADATA_POOL_NAME'] = self._arg_parser.cephfs_metadata_pool_name
        self.out_map['CEPHFS_FS_NAME'] = self._arg_parser.cephfs_filesystem_name
        self.out_map['RESTRICTED_AUTH_PERMISSION'] = self._arg_parser.restricted_auth_permission
        self.out_map['CSI_CEPHFS_NODE_SECRET'] = ''
        self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = ''
        # create CephFS node and provisioner keyring only when MDS exists
        if self.out_map['CEPHFS_FS_NAME'] and self.out_map['CEPHFS_POOL_NAME']:
            self.out_map['CSI_CEPHFS_NODE_SECRET'] = self.create_cephCSIKeyring_cephFSNode()
            self.out_map['CSI_CEPHFS_PROVISIONER_SECRET'] = self.create_cephCSIKeyring_cephFSProvisioner()
        self.out_map['RGW_ENDPOINT'] = self._arg_parser.rgw_endpoint
        self.out_map['RGW_TLS_CERT'] = ''
        self.out_map['MONITORING_ENDPOINT'], \
            self.out_map['MONITORING_ENDPOINT_PORT'] = self.get_active_and_standby_mgrs()
        self.out_map['RBD_POOL_NAME'] = self._arg_parser.rbd_data_pool_name
        self.out_map['RGW_POOL_PREFIX'] = self._arg_parser.rgw_pool_prefix
        # NOTE(review): ACCESS_KEY/SECRET_KEY exist in out_map only when an
        # RGW endpoint was supplied; consumers must not assume these keys
        if self._arg_parser.rgw_endpoint:
            self.out_map['ACCESS_KEY'], self.out_map['SECRET_KEY'] = self.create_rgw_admin_ops_user()
        if self._arg_parser.rgw_tls_cert_path:
            self.out_map['RGW_TLS_CERT'] = self.validate_rgw_endpoint_tls_cert()
def gen_shell_out(self):
self._gen_output_map()
shOutIO = StringIO()
for k, v in self.out_map.items():
if v and k not in self._excluded_keys:
shOutIO.write('export {}={}{}'.format(k, v, LINESEP))
shOut = shOutIO.getvalue()
shOutIO.close()
return shOut
def gen_json_out(self):
self._gen_output_map()
json_out = [
{
"name": "rook-ceph-mon-endpoints",
"kind": "ConfigMap",
"data": {
"data": self.out_map['ROOK_EXTERNAL_CEPH_MON_DATA'],
"maxMonId": "0",
"mapping": "{}"
}
},
{
"name": "rook-ceph-mon",
"kind": "Secret",
"data": {
"admin-secret": "admin-secret",
"fsid": self.out_map['ROOK_EXTERNAL_FSID'],
"mon-secret": "mon-secret"
},
},
{
"name": "rook-ceph-operator-creds",
"kind": "Secret",
"data": {
"userID": self.out_map['ROOK_EXTERNAL_USERNAME'],
"userKey": self.out_map['ROOK_EXTERNAL_USER_SECRET']
}
},
{
"name": "rook-csi-rbd-node",
"kind": "Secret",
"data": {
"userID": 'csi-rbd-node',
"userKey": self.out_map['CSI_RBD_NODE_SECRET_SECRET']
}
},
{
"name": "ceph-rbd",
"kind": "StorageClass",
"data": {
"pool": self.out_map['RBD_POOL_NAME']
}
},
{
"name": "monitoring-endpoint",
"kind": "CephCluster",
"data": {
"MonitoringEndpoint": self.out_map['MONITORING_ENDPOINT'],
"MonitoringPort": self.out_map['MONITORING_ENDPOINT_PORT']
}
}
]
# if 'ROOK_EXTERNAL_DASHBOARD_LINK' exists, then only add 'rook-ceph-dashboard-link' Secret
if self.out_map['ROOK_EXTERNAL_DASHBOARD_LINK']:
json_out.append({
"name": "rook-ceph-dashboard-link",
"kind": "Secret",
"data": {
"userID": 'ceph-dashboard-link',
"userKey": self.out_map['ROOK_EXTERNAL_DASHBOARD_LINK']
}
})
# if 'CSI_RBD_PROVISIONER_SECRET' exists, then only add 'rook-csi-rbd-provisioner' Secret
if self.out_map['CSI_RBD_PROVISIONER_SECRET']:
json_out.append({
"name": "rook-csi-rbd-provisioner",
"kind": "Secret",
"data": {
"userID": 'csi-rbd-provisioner',
"userKey": self.out_map['CSI_RBD_PROVISIONER_SECRET']
},
})
# if 'CSI_CEPHFS_PROVISIONER_SECRET' exists, then only add 'rook-csi-cephfs-provisioner' Secret
if self.out_map['CSI_CEPHFS_PROVISIONER_SECRET']:
json_out.append({
"name": "rook-csi-cephfs-provisioner",
"kind": "Secret",
"data": {
"adminID": 'csi-cephfs-provisioner',
"adminKey": self.out_map['CSI_CEPHFS_PROVISIONER_SECRET']
},
})
# if 'CSI_CEPHFS_NODE_SECRET' exists, then only add 'rook-csi-cephfs-node' Secret
if self.out_map['CSI_CEPHFS_NODE_SECRET']:
json_out.append({
"name": "rook-csi-cephfs-node",
"kind": "Secret",
"data": {
"adminID": 'csi-cephfs-node',
"adminKey": self.out_map['CSI_CEPHFS_NODE_SECRET']
}
})
# if 'CEPHFS_FS_NAME' exists, then only add 'cephfs' StorageClass
if self.out_map['CEPHFS_FS_NAME']:
json_out.append({
"name": "cephfs",
"kind": "StorageClass",
"data": {
"fsName": self.out_map['CEPHFS_FS_NAME'],
"pool": self.out_map['CEPHFS_POOL_NAME']
}
})
# if 'RGW_ENDPOINT' exists, then only add 'ceph-rgw' StorageClass
if self.out_map['RGW_ENDPOINT']:
json_out.append({
"name": "ceph-rgw",
"kind": "StorageClass",
"data": {
"endpoint": self.out_map['RGW_ENDPOINT'],
"poolPrefix": self.out_map['RGW_POOL_PREFIX']
}
})
json_out.append(
{
"name": "rgw-admin-ops-user",
"kind": "Secret",
"data": {
"accessKey": self.out_map['ACCESS_KEY'],
"secretKey": self.out_map['SECRET_KEY']
}
})
# if 'RGW_TLS_CERT' exists, then only add the "ceph-rgw-tls-cert" secret
if self.out_map['RGW_TLS_CERT']:
json_out.append({
"name": "ceph-rgw-tls-cert",
"kind": "Secret",
"data": {
"cert": self.out_map['RGW_TLS_CERT'],
}
})
return json.dumps(json_out)+LINESEP
    def upgrade_user_permissions(self):
        """Merge the minimal required caps into the run-as user's caps.

        Fetches the user's current mon/mgr/osd capabilities, unions them
        with MIN_USER_CAP_PERMISSIONS (required caps first, order
        preserved, duplicates dropped), and applies the merged set via
        'auth caps'. Raises ExecutionFailureException when the user does
        not exist, the rgw pool prefix cannot be determined, or the
        'auth caps' command fails.
        """
        # check whether the given user exists or not
        cmd_json = {"prefix": "auth get", "entity": "{}".format(
            self.run_as_user), "format": "json"}
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        if ret_val != 0 or len(json_out) == 0:
            raise ExecutionFailureException("'auth get {}' command failed.\n".format(self.run_as_user) +
                                            "Error: {}".format(err_msg if ret_val != 0 else self.EMPTY_OUTPUT_LIST))
        j_first = json_out[0]
        # caps dict from 'auth get': {'mon': '...', 'mgr': '...', 'osd': '...'}
        existing_caps = j_first['caps']
        osd_cap = "osd"
        cap_keys = ["mon", "mgr", "osd"]
        for eachCap in cap_keys:
            min_cap_values = self.MIN_USER_CAP_PERMISSIONS.get(eachCap, '')
            cur_cap_values = existing_caps.get(eachCap, '')
            # detect rgw-pool-prefix
            if eachCap == osd_cap:
                # if directly provided through '--rgw-pool-prefix' argument, use it
                if self._arg_parser.rgw_pool_prefix:
                    min_cap_values = min_cap_values.format(
                        self._arg_parser.rgw_pool_prefix)
                # or else try to detect one from the existing/current osd cap values
                else:
                    # captures <prefix> from ' pool=<prefix>.rgw.<anything>'
                    rc = re.compile(r' pool=([^.]+)\.rgw\.[^ ]*')
                    # 'findall()' method will give a list of prefixes
                    # and 'set' will eliminate any duplicates
                    cur_rgw_pool_prefix_list = list(
                        set(rc.findall(cur_cap_values)))
                    # ambiguous (or absent) prefix cannot be auto-detected
                    if len(cur_rgw_pool_prefix_list) != 1:
                        raise ExecutionFailureException(
                            "Unable to determine 'rgw-pool-prefx'. Please provide one with '--rgw-pool-prefix' flag")
                    min_cap_values = min_cap_values.format(
                        cur_rgw_pool_prefix_list[0])
            # split comma-separated caps into clean, non-empty tokens
            cur_cap_perm_list = [x.strip()
                                 for x in cur_cap_values.split(',') if x.strip()]
            min_cap_perm_list = [x.strip()
                                 for x in min_cap_values.split(',') if x.strip()]
            # required caps first, then the user's existing ones
            min_cap_perm_list.extend(cur_cap_perm_list)
            # eliminate duplicates without using 'set'
            # set re-orders items in the list and we have to keep the order
            new_cap_perm_list = []
            [new_cap_perm_list.append(
                x) for x in min_cap_perm_list if x not in new_cap_perm_list]
            existing_caps[eachCap] = ", ".join(new_cap_perm_list)
        cmd_json = {"prefix": "auth caps",
                    "entity": self.run_as_user,
                    "caps": ["mon", existing_caps["mon"],
                             "mgr", existing_caps["mgr"],
                             "osd", existing_caps["osd"]],
                    "format": "json"}
        ret_val, json_out, err_msg = self._common_cmd_json_gen(cmd_json)
        if ret_val != 0:
            raise ExecutionFailureException("'auth caps {}' command failed.\n".format(self.run_as_user) +
                                            "Error: {}".format(err_msg))
        print("Updated user, {}, successfully.".format(self.run_as_user))
def main(self):
generated_output = ''
if self._arg_parser.upgrade:
self.upgrade_user_permissions()
elif self._arg_parser.format == 'json':
generated_output = self.gen_json_out()
elif self._arg_parser.format == 'bash':
generated_output = self.gen_shell_out()
else:
raise ExecutionFailureException("Unsupported format: {}".format(
self._arg_parser.format))
print('{}'.format(generated_output))
if self.output_file and generated_output:
fOut = open(self.output_file, 'w')
fOut.write(generated_output)
fOut.close()
################################################
##################### MAIN #####################
################################################
if __name__ == '__main__':
    rjObj = RadosJSON()
    try:
        rjObj.main()
    except ExecutionFailureException as err:
        print("Execution Failed: {}".format(err))
        raise err
    except KeyError as kErr:
        # BUGFIX: the old print("KeyError: %s", kErr) passed '%s' as a
        # literal argument instead of formatting it
        print("KeyError: {}".format(kErr))
    except OSError as osErr:
        print("Error while trying to output the data: {}".format(osErr))
    finally:
        # always release the cluster connection/resources
        rjObj.shutdown()