xref: /4.6.0/couchbase-cli/node.py (revision b17bd8ad)
1"""
2  Implementation for rebalance, server add/remove/failover/recovery, and the node and cluster setting commands.
3"""
4
5import cluster_manager
6import time
7import os
8import sys
9import util_cli as util
10import socket
11import re
12import urlparse
13import json
14
15from usage import command_error
16from restclient import *
17from listservers import *
18from _csv import reader
19
20try:
21    import pump_bfd2
22    IS_ENTERPRISE = True
23except ImportError:
24    IS_ENTERPRISE = False
25
26MAX_LEN_PASSWORD = 24
27
28# the rest commands and associated URIs for various node operations
29
30rest_cmds = {
31    'rebalance'         :'/controller/rebalance',
32    'rebalance-stop'    :'/controller/stopRebalance',
33    'rebalance-status'  :'/pools/default/rebalanceProgress',
34    'server-add'        :'/controller/addNode',
35    'server-readd'      :'/controller/reAddNode',
36    'failover'          :'/controller/failOver',
37    'recovery'          :'/controller/setRecoveryType',
38    'cluster-init'      :'/settings/web',
39    'cluster-edit'      :'/settings/web',
40    'node-init'         :'/nodes/self/controller/settings',
41    'setting-cluster'   :'/pools/default',
42    'setting-compaction'    :'/controller/setAutoCompaction',
43    'setting-notification'  :'/settings/stats',
44    'setting-autofailover'  :'/settings/autoFailover',
45    'setting-alert'         :'/settings/alerts',
46    'setting-audit'         :'/settings/audit',
47    'setting-ldap'          :'/settings/saslauthdAuth',
48    'user-manage'           :'/settings/readOnlyUser',
49    'setting-index'         :'/settings/indexes',
50    'group-manage'          :'/pools/default/serverGroups',
51    'ssl-manage'            :'/pools/default/certificate',
52    'collect-logs-start'  : '/controller/startLogsCollection',
53    'collect-logs-stop'   : '/controller/cancelLogsCollection',
54    'collect-logs-status' : '/pools/default/tasks',
55    'admin-role-manage'   : '/settings/rbac/users',
56    'master-password'     : '/node/controller/changeMasterPassword'
57}
58
59server_no_remove = [
60    'rebalance-stop',
61    'rebalance-status',
62    'server-add',
63    'server-readd',
64    'failover',
65    'recovery',
66]
67server_no_add = [
68    'rebalance-stop',
69    'rebalance-status',
70    'failover',
71    'recovery',
72]
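# These lists gate option validation in processOpts(): commands listed in
# server_no_remove reject --server-remove, and commands listed in
# server_no_add reject --server-add and its credential options.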
73
74# Map of operations and the HTTP methods used against the REST interface
75
76methods = {
77    'rebalance'         :'POST',
78    'rebalance-stop'    :'POST',
79    'rebalance-status'  :'GET',
80    'eject-server'      :'POST',
81    'server-add'        :'POST',
82    'server-readd'      :'POST',
83    'failover'          :'POST',
84    'recovery'          :'POST',
85    'cluster-init'      :'POST',
86    'cluster-edit'      :'POST',
87    'node-init'         :'POST',
88    'setting-cluster'   :'POST',
89    'setting-compaction'    :'POST',
90    'setting-notification'  :'POST',
91    'setting-autofailover'  :'POST',
92    'setting-alert'         :'POST',
93    'setting-audit'         :'POST',
94    'setting-ldap'          :'POST',
95    'setting-index'         :'POST',
96    'user-manage'           :'POST',
97    'group-manage'          :'POST',
98    'ssl-manage'            :'GET',
99    'collect-logs-start'  : 'POST',
100    'collect-logs-stop'   : 'POST',
101    'collect-logs-status' : 'GET',
102    'admin-role-manage'   : 'PUT',
103    'master-password'     : 'POST',
104}
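# Example lookups, taken from the two tables above:
#   rest_cmds['failover'] -> '/controller/failOver'
#   methods['failover']   -> 'POST'
# runCmd() resolves both for the requested command before dispatching.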
105
106bool_to_str = lambda value: str(bool(int(value))).lower()
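# e.g. bool_to_str("1") -> "true", bool_to_str(0) -> "false"; a non-numeric
# string such as "yes" would raise ValueError from int().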
107
108# Each REST call below passes an opts map with a success_msg and an
109# error_msg so that the HTTP response can be reported properly.
110
111class Node:
112    SEP = ","
113    def __init__(self):
114        self.rest_cmd = rest_cmds['rebalance-status']
115        self.method = 'GET'
116        self.debug = False
117        self.server = ''
118        self.port = ''
119        self.user = ''
120        self.password = ''
121        self.ssl = False
122
123        self.ro_username = ''
124        self.ro_password = ''
125        self.params = {}
126        self.output = 'standard'
127        self.password_new = None
128        self.username_new = None
129        self.sa_username = None
130        self.sa_password = None
131        self.port_new = None
132        self.per_node_quota = None
133        self.cluster_index_ramsize = None
134        self.cluster_fts_ramsize = None
135        self.index_storage_setting = None
136        self.cluster_name = None
137        self.data_path = None
138        self.index_path = None
139        self.hostname = None
140        self.enable_auto_failover = None
141        self.enable_notification = None
142        self.autofailover_timeout = None
143        self.enable_email_alert = None
144
145        #compaction related settings
146        self.compaction_db_percentage = None
147        self.compaction_db_size = None
148        self.compaction_view_percentage = None
149        self.compaction_view_size = None
150        self.compaction_period_from = None
151        self.compaction_period_to = None
152        self.enable_compaction_abort = None
153        self.enable_compaction_parallel = None
154        self.purge_interval = None
155        self.gsi_compact_mode = None
156        self.gsi_compact_perc = None
157        self.gsi_compact_interval = None
158        self.gsi_compact_period_from = None
159        self.gsi_compact_period_to = None
160        self.gsi_compact_abort = None
161
162        #alert settings
163        self.email_recipient = None
164        self.email_sender = None
165        self.email_user = None
166        self.email_password = None
167        self.email_host = None
168        self.email_port = None
169        self.email_enable_encrypt = None
170        self.autofailover_node = None
171        self.autofailover_max_reached = None
172        self.autofailover_node_down = None
173        self.autofailover_cluster_small = None
174        self.autofailover_disabled = None
175        self.alert_ip_changed = None
176        self.alert_disk_space = None
177        self.alert_meta_overhead = None
178        self.alert_meta_oom = None
179        self.alert_write_failed = None
180        self.alert_audit_dropped = None
181
182        #group management
183        self.group_name = None
184        self.server_list = []
185        self.from_group = None
186        self.to_group = None
187        self.group_rename = None
188
189        #SSL certificate management
190        self.certificate_file = None
191        self.extended = False
192        self.cmd = None
193
194        self.hard_failover = None
195        self.recovery_type = None
196        self.recovery_buckets = None
197
198        # Collect logs
199        self.nodes = None
200        self.all_nodes = None
201        self.upload = False
202        self.upload_host = None
203        self.customer = None
204        self.ticket = ""
205
206        #auditing
207        self.audit_enabled = None
208        self.audit_log_path = None
209        self.audit_log_rotate_interval = None
210
211        #ldap
212        self.ldap_enabled = None
213        self.ldap_admins = ''
214        self.ldap_roadmins = ''
215        self.ldap_default = "none"
216
217        #index
218        self.max_rollback_points = None
219        self.stable_snapshot_interval = None
220        self.memory_snapshot_interval = None
221        self.index_threads = None
222        self.services = None
223        self.log_level = None
224
225        #set-roles / delete-roles
226        self.roles = None
227        self.my_roles = False
228        self.get_roles = None
229        self.set_users = None
230        self.set_names = None
231        self.delete_users = None
232
233        # master password
234        self.new_master_password = None
235        self.rotate_data_key = False
236
237    def runCmd(self, cmd, server, port,
238               user, password, ssl, opts):
239        self.rest_cmd = rest_cmds[cmd]
240        self.method = methods[cmd]
241        self.server = server
242        self.port = int(port)
243        self.user = user
244        self.password = password
245        self.ssl = ssl
246
247        servers = self.processOpts(cmd, opts)
248        if self.debug:
249            print "INFO: servers %s" % servers
250
251        if cmd == 'server-add' and not servers['add']:
252            command_error("please list one or more --server-add=HOST[:PORT],"
253                  " or use -h for more help.")
254
255        if cmd == 'server-readd' and not servers['add']:
256            command_error("please list one or more --server-add=HOST[:PORT],"
257                  " or use -h for more help.")
258
259        if cmd in ('server-add', 'rebalance'):
260            if len(servers['add']) > 0:
261                if cmd == 'rebalance':
262                    print "DEPRECATED: Adding servers with the rebalance command is " + \
263                          "deprecated and will be removed in a future release; use " + \
264                          "the server-add command to add servers instead."
265                self.groupAddServers()
266            if cmd == 'rebalance':
267                self.rebalance(servers)
268
269        elif cmd == 'server-readd':
270            self.reAddServers(servers)
271
272        elif cmd == 'rebalance-status':
273            output_result = self.rebalanceStatus()
274            print output_result
275
276        elif cmd == 'rebalance-stop':
277            output_result = self.rebalanceStop()
278            print output_result
279
280        elif cmd == 'failover':
281            if len(servers['failover']) <= 0:
282                command_error("please list one or more --server-failover=HOST[:PORT];"
283                      " or use -h for more help.")
284            if len(servers['failover']) > 1:
285                print "DEPRECATED: Failing over more than one server at a time is deprecated " + \
286                    "and will not be allowed in a future release."
287
288            self.failover(servers)
289
290        elif cmd == 'recovery':
291            if len(servers['recovery']) <= 0:
292                command_error("please list one or more --server-recovery=HOST[:PORT];"
293                      " or use -h for more help.")
294            self.recovery(servers)
295
296        elif cmd in ('cluster-init', 'cluster-edit'):
297            self.clusterInit(cmd)
298
299        elif cmd == 'master-password':
300            self.masterPassword()
301
302        elif cmd == 'node-init':
303            self.nodeInit()
304
305        elif cmd == 'setting-cluster':
306            self.clusterSetting()
307
308        elif cmd == 'setting-compaction':
309            self.compaction()
310
311        elif cmd == 'setting-notification':
312            self.notification()
313
314        elif cmd == 'setting-alert':
315            self.alert()
316
317        elif cmd == 'setting-autofailover':
318            self.autofailover()
319
320        elif cmd == 'setting-audit':
321            self.audit()
322
323        elif cmd == 'setting-ldap':
324            self.ldap()
325
326        elif cmd == 'setting-index':
327            self.index()
328
329        elif cmd == 'user-manage':
330            self.userManage()
331
332        elif cmd == 'group-manage':
333            self.groupManage()
334
335        elif cmd == 'ssl-manage':
336            self.retrieveCert()
337
338        elif cmd == 'collect-logs-start':
339            self.collectLogsStart(servers)
340
341        elif cmd == 'collect-logs-stop':
342            self.collectLogsStop()
343
344        elif cmd == 'collect-logs-status':
345            self.collectLogsStatus()
346
347        elif cmd == 'admin-role-manage':
348            self.alterRoles()
349
350    def masterPassword(self):
351        if self.new_master_password is None and not self.rotate_data_key:
352            _exitIfErrors(["ERROR: no parameters set"])
353
354        if self.new_master_password is not None:
355            opts = {
356                "error_msg": "Unable to set master password",
357                "success_msg": "Master password set"
358            }
359            rest = util.restclient_factory(self.server, self.port, {'debug':self.debug}, self.ssl)
360            rest.setParam('newPassword', self.new_master_password)
361            output_result = rest.restCmd(self.method,
362                                         '/node/controller/changeMasterPassword',
363                                         self.user,
364                                         self.password,
365                                         opts)
366            print output_result
367
368        if self.rotate_data_key:
369            opts = {
370                "error_msg": "Unable to rotate data key",
371                "success_msg": "Data key rotated"
372            }
373            rest = util.restclient_factory(self.server, self.port, {'debug':self.debug}, self.ssl)
374            output_result = rest.restCmd(self.method,
375                                         '/node/controller/rotateDataKey',
376                                         self.user,
377                                         self.password,
378                                         opts)
379            print output_result
380
381    def clusterInit(self, cmd):
382        # We need to ensure that creating the REST username/password is the
383        # last REST API that is called because once that API succeeds the
384        # cluster is initialized and cluster-init cannot be run again.
385
386        cm = cluster_manager.ClusterManager(self.server, self.port, self.user,
387                                            self.password, self.ssl)
388
389        if cmd == 'cluster-init':
390            data, errors = cm.pools()
391            _exitIfErrors(errors)
392            if data['pools'] and len(data['pools']) > 0:
393                print "Error: cluster is already initialized, use cluster-edit to change settings"
394                return
395
396        err, services = self.process_services(False)
397        if err:
398            print err
399            return
400
401        allowDefault = 'index' in services.split(',')
402        param = self.index_storage_to_param(self.index_storage_setting, allowDefault)
403
404        #set memory quota
405        if cmd == 'cluster-init':
406            if 'kv' in services.split(',') and not self.per_node_quota:
407                print "ERROR: option cluster-ramsize is not specified"
408                return
409            elif 'index' in services.split(','):
410                if not self.cluster_index_ramsize:
411                    print "ERROR: option cluster-index-ramsize is not specified"
412                    return
413                if param is None:
414                    print "ERROR: invalid index storage setting `%s`. Must be [default, memopt]" \
415                        % self.index_storage_setting
416                    return
417            elif 'fts' in services.split(',') and not self.cluster_fts_ramsize:
418                print "ERROR: option cluster-fts-ramsize is not specified"
419                return
420
421        if param is not None:
422            _, errors = cm.set_index_settings(param)
423            _exitIfErrors(errors)
424
425        opts = {
426            "error_msg": "unable to set memory quota",
427            "success_msg": "set memory quota successfully"
428        }
429        rest = util.restclient_factory(self.server,
430                                       self.port,
431                                       {'debug':self.debug},
432                                       self.ssl)
433        if self.per_node_quota:
434            rest.setParam('memoryQuota', self.per_node_quota)
435        if self.cluster_index_ramsize:
436            rest.setParam('indexMemoryQuota', self.cluster_index_ramsize)
437        if self.cluster_fts_ramsize:
438            rest.setParam('ftsMemoryQuota', self.cluster_fts_ramsize)
439        if rest.params:
440            output_result = rest.restCmd(self.method,
441                                         '/pools/default',
442                                         self.user,
443                                         self.password,
444                                         opts)
445
446        #setup services
447        if cmd == "cluster-init":
448            opts = {
449                "error_msg": "unable to setup services",
450                "success_msg": "setup services successfully"
451            }
452            rest = util.restclient_factory(self.server,
453                                           self.port,
454                                           {'debug':self.debug},
455                                           self.ssl)
456            rest.setParam('services', services)
457            output_result = rest.restCmd(self.method,
458                                         '/node/controller/setupServices',
459                                         self.user,
460                                         self.password,
461                                         opts)
462
463        # setup REST credentials/REST port
464        if cmd == 'cluster-init' or self.username_new or self.password_new or self.port_new:
465            self.enable_notification = "true"
466            self.notification(False)
467            rest = util.restclient_factory(self.server,
468                                         self.port,
469                                         {'debug':self.debug},
470                                         self.ssl)
471            if self.port_new:
472                rest.setParam('port', self.port_new)
473            else:
474                rest.setParam('port', 'SAME')
475            rest.setParam('initStatus', 'done')
476            if self.username_new:
477                rest.setParam('username', self.username_new)
478            else:
479                rest.setParam('username', self.user)
480            if self.password_new:
481                rest.setParam('password', self.password_new)
482            else:
483                rest.setParam('password', self.password)
484
485            if not (rest.getParam('username') and rest.getParam('password')):
486                print "ERROR: Both username and password are required."
487                return
488
489            if len(rest.getParam('password')) > MAX_LEN_PASSWORD:
490                print "ERROR: Password length %s exceeds the maximum of %s characters" \
491                      % (len(rest.getParam('password')), MAX_LEN_PASSWORD)
492                return
493
494            opts = {
495                "error_msg": "unable to init/modify %s" % self.server,
496                "success_msg": "init/edit %s" % self.server
497            }
498
499            output_result = rest.restCmd(self.method,
500                                         self.rest_cmd,
501                                         self.user,
502                                         self.password,
503                                         opts)
504        print output_result
505
506    def index_storage_to_param(self, value, allowDefault):
507        if (not value and allowDefault) or value == "default":
508            return "forestdb"
509        if value == "memopt":
510            return "memory_optimized"
511        return None
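    # index_storage_to_param, for reference: a missing value (when 'index' is
    # among the services) or "default" maps to "forestdb", "memopt" maps to
    # "memory_optimized", and anything else returns None, which clusterInit
    # reports as an invalid index storage setting.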
512
513    def process_services(self, data_required):
514        if not self.services:
515            self.services = "data"
516        sep = Node.SEP
517        if self.services.find(sep) < 0:
518            #backward compatible when using ";" as separator
519            sep = ";"
520        svc_list = list(set([w.strip() for w in self.services.split(sep)]))
521        svc_candidate = ["data", "index", "query", "fts"]
522        for svc in svc_list:
523            if svc not in svc_candidate:
524                return "ERROR: invalid service: %s" % svc, None
525        if data_required and "data" not in svc_list:
526            svc_list.append("data")
527        if not IS_ENTERPRISE:
528            if len(svc_list) != len(svc_candidate):
529                if len(svc_list) != 1 or "data" not in svc_list:
530                    return "ERROR: Community Edition requires that all nodes provision all services or data service only", None
531
532        services = ",".join(svc_list)
533        for old, new in [[";", ","], ["data", "kv"], ["query", "n1ql"]]:
534            services = services.replace(old, new)
535        return None, services
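    # A few illustrative process_services results (the service list is
    # de-duplicated via a set, so ordering may vary):
    #   services unset         -> (None, "kv")
    #   "data,index,query,fts" -> (None, "kv,index,n1ql,fts")
    #   "data;cache"           -> ("ERROR: invalid service: cache", None)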
536
537    def nodeInit(self):
538        rest = util.restclient_factory(self.server,
539                                     self.port,
540                                     {'debug':self.debug},
541                                     self.ssl)
542        if self.data_path:
543            rest.setParam('path', self.data_path)
544
545        if self.index_path:
546            rest.setParam('index_path', self.index_path)
547
548        opts = {
549            "error_msg": "unable to init %s" % self.server,
550            "success_msg": "init %s" % self.server
551        }
552
553        output_result = rest.restCmd(self.method,
554                                     self.rest_cmd,
555                                     self.user,
556                                     self.password,
557                                     opts)
558        if self.hostname:
559            rest = util.restclient_factory(self.server,
560                                         self.port,
561                                         {'debug':self.debug},
562                                         self.ssl)
563            if self.hostname:
564                rest.setParam('hostname', self.hostname)
565
566            opts = {
567                "error_msg": "unable to set hostname for %s" % self.server,
568                "success_msg": "set hostname for %s" % self.server
569            }
570
571            output_result = rest.restCmd('POST',
572                                         '/node/controller/rename',
573                                         self.user,
574                                         self.password,
575                                         opts)
576        print output_result
577
578    def compaction(self):
579        rest = util.restclient_factory(self.server,
580                                     self.port,
581                                     {'debug':self.debug},
582                                     self.ssl)
583
584        if self.compaction_db_percentage:
585            rest.setParam('databaseFragmentationThreshold[percentage]', self.compaction_db_percentage)
586        if self.compaction_db_size:
587            self.compaction_db_size = int(self.compaction_db_size) * 1024**2
588            rest.setParam('databaseFragmentationThreshold[size]', self.compaction_db_size)
589        if self.compaction_view_percentage:
590            rest.setParam('viewFragmentationThreshold[percentage]', self.compaction_view_percentage)
591        if self.compaction_view_size:
592            self.compaction_view_size = int(self.compaction_view_size) * 1024**2
593            rest.setParam('viewFragmentationThreshold[size]', self.compaction_view_size)
594        if self.compaction_period_from:
595            hour, minute = self.compaction_period_from.split(':')
596            if (int(hour) not in range(24)) or (int(minute) not in range(60)):
597                print "ERROR: invalid hour or minute value for compaction period"
598                return
599            else:
600                rest.setParam('allowedTimePeriod[fromHour]', int(hour))
601                rest.setParam('allowedTimePeriod[fromMinute]', int(minute))
602        if self.compaction_period_to:
603            hour, minute = self.compaction_period_to.split(':')
604            if (int(hour) not in range(24)) or (int(minute) not in range(60)):
605                print "ERROR: invalid hour or minute value for compaction period"
606                return
607            else:
608                rest.setParam('allowedTimePeriod[toHour]', int(hour))
609                rest.setParam('allowedTimePeriod[toMinute]', int(minute))
610        if self.enable_compaction_abort:
611            rest.setParam('allowedTimePeriod[abortOutside]', self.enable_compaction_abort)
612        if self.enable_compaction_parallel:
613            rest.setParam('parallelDBAndViewCompaction', self.enable_compaction_parallel)
614        else:
615            self.enable_compaction_parallel = bool_to_str(0)
616            rest.setParam('parallelDBAndViewCompaction', self.enable_compaction_parallel)
617
618        if self.compaction_period_from or self.compaction_period_to or self.enable_compaction_abort:
619            if not (self.compaction_period_from and self.compaction_period_to and \
620                    self.enable_compaction_abort):
621                print "ERROR: compaction-period-from, compaction-period-to and enable-compaction-abort have to be specified at the same time"
622                return
623        if self.purge_interval:
624            rest.setParam('purgeInterval', self.purge_interval)
625
626        if self.gsi_compact_mode is not None and self.gsi_compact_mode not in ["append", "circular"]:
627            _exitIfErrors(["ERROR: --gsi-compaction-mode must be \"append\" or \"circular\""])
628
629        if self.gsi_compact_mode == "append":
630            rest.setParam('indexCompactionMode', "full")
631            if self.gsi_compact_perc is None:
632                _exitIfErrors(["ERROR: --compaction-gsi-percentage must be specified when --gsi-compaction-mode is append"])
633
634            if self.gsi_compact_perc is not None:
635                rest.setParam('indexFragmentationThreshold[percentage]', self.gsi_compact_perc)
636        elif self.gsi_compact_mode == "circular":
637            rest.setParam('indexCompactionMode', "circular")
638
639            if self.gsi_compact_interval is None:
640                self.gsi_compact_interval = ""
641            rest.setParam('indexCircularCompaction[daysOfWeek]', self.gsi_compact_interval)
642
643            if self.gsi_compact_period_from is not None:
644                hour, minute = self.gsi_compact_period_from.split(':')
645                if (int(hour) not in range(24)) or (int(minute) not in range(60)):
646                    _exitIfErrors(["ERROR: invalid hour or minute value for gsi compaction from period"])
647                else:
648                    rest.setParam('indexCircularCompaction[interval][fromHour]', int(hour))
649                    rest.setParam('indexCircularCompaction[interval][fromMinute]', int(minute))
650            if self.gsi_compact_period_to is not None:
651                hour, minute = self.gsi_compact_period_to.split(':')
652                if (int(hour) not in range(24)) or (int(minute) not in range(60)):
653                    _exitIfErrors(["ERROR: invalid hour or minute value for gsi compaction to period"])
654                else:
655                    rest.setParam('indexCircularCompaction[interval][toHour]', int(hour))
656                    rest.setParam('indexCircularCompaction[interval][toMinute]', int(minute))
657
658            if self.gsi_compact_abort is not None:
659                rest.setParam('indexCircularCompaction[interval][abortOutside]', self.gsi_compact_abort)
660            else:
661                rest.setParam('indexCircularCompaction[interval][abortOutside]', "false")
662
663        opts = {
664            "error_msg": "unable to set compaction settings",
665            "success_msg": "set compaction settings"
666        }
667        output_result = rest.restCmd(self.method,
668                                     self.rest_cmd,
669                                     self.user,
670                                     self.password,
671                                     opts)
672        print output_result
673
674    def clusterSetting(self):
675        rest = util.restclient_factory(self.server,
676                                     self.port,
677                                     {'debug':self.debug},
678                                     self.ssl)
679        if self.per_node_quota:
680            rest.setParam('memoryQuota', self.per_node_quota)
681        if self.cluster_name is not None:
682            rest.setParam('clusterName', self.cluster_name)
683        if self.cluster_index_ramsize:
684            rest.setParam('indexMemoryQuota', self.cluster_index_ramsize)
685        if self.cluster_fts_ramsize:
686            rest.setParam('ftsMemoryQuota', self.cluster_fts_ramsize)
687        opts = {
688            "error_msg": "unable to set cluster configurations",
689            "success_msg": "set cluster settings"
690        }
691        if rest.params:
692            output_result = rest.restCmd(self.method,
693                                     self.rest_cmd,
694                                     self.user,
695                                     self.password,
696                                     opts)
697            print output_result
698        else:
699            print "Error: No parameters specified"
700
701    def notification(self, print_status=True):
702        rest = util.restclient_factory(self.server,
703                                     self.port,
704                                     {'debug':self.debug},
705                                     self.ssl)
706        if self.enable_notification:
707            rest.setParam('sendStats', self.enable_notification)
708
709        opts = {
710            "error_msg": "unable to set notification settings",
711            "success_msg": "set notification settings"
712        }
713        output_result = rest.restCmd(self.method,
714                                     '/settings/stats',
715                                     self.user,
716                                     self.password,
717                                     opts)
718
719        if print_status:
720            print output_result
721
722    def alert(self):
723        rest = util.restclient_factory(self.server,
724                                     self.port,
725                                     {'debug':self.debug},
726                                     self.ssl)
727        alert_opts = ''
728        if self.enable_email_alert:
729            rest.setParam('enabled', self.enable_email_alert)
730        if self.email_recipient:
731            rest.setParam('recipients', self.email_recipient)
732        if self.email_sender:
733            rest.setParam('sender', self.email_sender)
734        if self.email_user:
735            rest.setParam('emailUser', self.email_user)
736        if self.email_password:
737            rest.setParam('emailPass', self.email_password)
738        if self.email_host:
739            rest.setParam('emailHost', self.email_host)
740        if self.email_port:
741            rest.setParam('emailPort', self.email_port)
742        if self.email_enable_encrypt:
743            rest.setParam('emailEncrypt', self.email_enable_encrypt)
744        if self.autofailover_node:
745            alert_opts = alert_opts + 'auto_failover_node,'
746        if self.autofailover_max_reached:
747            alert_opts = alert_opts + 'auto_failover_maximum_reached,'
748        if self.autofailover_node_down:
749            alert_opts = alert_opts + 'auto_failover_other_nodes_down,'
750        if self.autofailover_cluster_small:
751            alert_opts = alert_opts + 'auto_failover_cluster_too_small,'
752        if self.autofailover_disabled:
753            alert_opts = alert_opts + 'auto_failover_disabled,'
754        if self.alert_ip_changed:
755            alert_opts = alert_opts + 'ip,'
756        if self.alert_disk_space:
757            alert_opts = alert_opts + 'disk,'
758        if self.alert_meta_overhead:
759            alert_opts = alert_opts + 'overhead,'
760        if self.alert_meta_oom:
761            alert_opts = alert_opts + 'ep_oom_errors,'
762        if self.alert_write_failed:
763            alert_opts = alert_opts + 'ep_item_commit_failed,'
764        if self.alert_audit_dropped:
765            alert_opts = alert_opts + 'audit_dropped_events,'
766
767        if alert_opts:
768            # remove last separator
769            alert_opts = alert_opts[:-1]
770            rest.setParam('alerts', alert_opts)
771
772        opts = {
773            "error_msg": "unable to set alert settings",
774            "success_msg": "set alert settings"
775        }
776        output_result = rest.restCmd(self.method,
777                                     self.rest_cmd,
778                                     self.user,
779                                     self.password,
780                                     opts)
781        print output_result
782
783    def autofailover(self):
784        rest = util.restclient_factory(self.server,
785                                     self.port,
786                                     {'debug':self.debug},
787                                     self.ssl)
788        if self.autofailover_timeout:
789            if int(self.autofailover_timeout) < 30:
790                print "ERROR: Timeout value must be at least 30 seconds."
791                return
792            else:
793                rest.setParam('timeout', self.autofailover_timeout)
794
795        if self.enable_auto_failover:
796            rest.setParam('enabled', self.enable_auto_failover)
797
798        opts = {
799            "error_msg": "unable to set auto failover settings",
800            "success_msg": "set auto failover settings"
801        }
802        output_result = rest.restCmd(self.method,
803                                     self.rest_cmd,
804                                     self.user,
805                                     self.password,
806                                     opts)
807        print output_result
808
809    def audit(self):
810        rest = util.restclient_factory(self.server,
811                                     self.port,
812                                     {'debug':self.debug},
813                                     self.ssl)
814        if self.audit_enabled:
815            rest.setParam('auditdEnabled', self.audit_enabled)
816
817        if self.audit_log_path:
818            rest.setParam('logPath', self.audit_log_path)
819        elif self.audit_enabled == "true":
820            rest.setParam('logPath', "/opt/couchbase/var/lib/couchbase/logs")
821        if self.audit_log_rotate_interval:
822            rest.setParam('rotateInterval', int(self.audit_log_rotate_interval)*60)
823        elif self.audit_enabled == "true":
824            rest.setParam('rotateInterval', 86400)
825
826        opts = {
827            "error_msg": "unable to set audit settings",
828            "success_msg": "set audit settings"
829        }
830        output_result = rest.restCmd(self.method,
831                                     self.rest_cmd,
832                                     self.user,
833                                     self.password,
834                                     opts)
835        print output_result
836
837    def ldap(self):
838        print "DEPRECATED: The setting-ldap command is deprecated and will be " + \
839              "removed in a future release. Please use admin-role-manage instead."
840        rest = util.restclient_factory(self.server,
841                                     self.port,
842                                     {'debug':self.debug},
843                                     self.ssl)
844        if self.ldap_enabled == 'true':
845            rest.setParam('enabled', 'true')
846            if self.ldap_default == 'admins':
847                rest.setParam('roAdmins', self.ldap_roadmins.replace(Node.SEP, "\n"))
848            elif self.ldap_default == 'roadmins':
849                rest.setParam('admins', self.ldap_admins.replace(Node.SEP, "\n"))
850            else:
851                rest.setParam('admins', self.ldap_admins.replace(Node.SEP,"\n"))
852                rest.setParam('roAdmins', self.ldap_roadmins.replace(Node.SEP, "\n"))
853        else:
854            rest.setParam('enabled', 'false')
855
856        opts = {
857            "error_msg": "unable to set LDAP auth settings",
858            "success_msg": "set LDAP auth settings"
859        }
860        output_result = rest.restCmd(self.method,
861                                     self.rest_cmd,
862                                     self.user,
863                                     self.password,
864                                     opts)
865        print output_result
866
867    # Role-Based Access Control
868    def alterRoles(self):
869        cm = cluster_manager.ClusterManager(self.server, self.port, self.user,
870                                            self.password, self.ssl)
871
872        # need to check arguments
873        if not (self.my_roles or self.get_roles or
874                self.set_users or self.delete_users):
875            print "ERROR: You must specify either '--my-roles', '--get-roles', " \
876                "'--set-users', or '--delete-users'"
877            return
878
879        if self.my_roles and (self.get_roles or self.set_users or self.roles or self.delete_users):
880            print "ERROR: The 'my-roles' option may not be used with any other option."
881            return
882
883        if self.get_roles and (self.my_roles or self.set_users or self.roles or self.delete_users):
884            print "ERROR: The 'get-roles' option may not be used with any other option."
885            return
886
887        if (self.set_users and self.roles is None) or (self.set_users is None and self.roles):
888            print "ERROR: You must specify lists of both users and roles for those users.\n  --set-users=[comma-delimited user list] --roles=[comma-delimited list of one or more from admin, ro_admin, cluster_admin, replication_admin, bucket_admin[bucket name or '*'], views_admin[bucket name or '*']]"
889            return
890
891        # my_roles
892        if self.my_roles:
893            data, errors = cm.myRoles()
894            if errors is None:
895                print "SUCCESS: my roles:"
896
897        # get_roles
898        elif self.get_roles:
899            data, errors = cm.getRoles()
900            if errors is None:
901                print "SUCCESS: user/role list:"
902
903        # set_users
904        elif self.set_users:
905            data, errors = cm.setRoles(self.set_users, self.roles, self.set_names)
906            if errors is None:
907                print "SUCCESS: set roles for %s. New user/role list:" % self.set_users
908
909        # delete_users
910        else:
911            data, errors = cm.deleteRoles(self.delete_users)
912            if errors is None:
913                print "SUCCESS: removed users %s. New user/role list:" % self.delete_users
914
915        _exitIfErrors(errors)
916        print json.dumps(data, indent=2)
917
918    def index(self):
919        rest = util.restclient_factory(self.server,
920                                     self.port,
921                                     {'debug':self.debug},
922                                     self.ssl)
923        if self.max_rollback_points:
924            rest.setParam("maxRollbackPoints", self.max_rollback_points)
925        if self.stable_snapshot_interval:
926            rest.setParam("stableSnapshotInterval", self.stable_snapshot_interval)
927        if self.memory_snapshot_interval:
928            rest.setParam("memorySnapshotInterval", self.memory_snapshot_interval)
929        if self.index_threads:
930            rest.setParam("indexerThreads", self.index_threads)
931        if self.log_level:
932            rest.setParam("logLevel", self.log_level)
933
934        opts = {
935            "error_msg": "unable to set index settings",
936            "success_msg": "set index settings"
937        }
938        output_result = rest.restCmd(self.method,
939                                     self.rest_cmd,
940                                     self.user,
941                                     self.password,
942                                     opts)
943        print output_result
944
945    def processOpts(self, cmd, opts):
946        """ Parse standard opts into instance fields and a per-command
947            server map. Note: the 'server' variable keeps optional
948            args (e.g. --server-add-username) aligned with their server.
949            """
950        servers = {
951            'add': {},
952            'remove': {},
953            'failover': {},
954            'recovery': {},
955            'log': {},
956        }
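        # Rough shape of the returned map, assuming util.hostport() fills in
        # the default admin port (8091) when none is given:
        #   servers['add']['10.1.1.2:8091']    = {'user': ..., 'password': ...}
        #   servers['remove']['10.1.1.3:8091'] = True
        # 'failover', 'recovery' and 'log' use the same host:port keying.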
957
958        # don't allow options that don't correspond to given commands
959
960        for o, a in opts:
961            command_error_msg = "option '%s' is not used with command '%s'" % (o, cmd)
962
963            if o in ( "-r", "--server-remove"):
964                if cmd in server_no_remove:
965                    command_error(command_error_msg)
966            elif o in ( "-a", "--server-add",
967                        "--server-add-username",
968                        "--server-add-password"):
969                if cmd in server_no_add:
970                    command_error(command_error_msg)
971
972        server = None
973        for o, a in opts:
974            if o in ("-a", "--server-add"):
975                if a == "self":
976                    a = socket.gethostbyname(socket.getfqdn())
977                server = "%s:%d" % util.hostport(a)
978                servers['add'][server] = { 'user':'', 'password':''}
979                self.server_list.append(server)
980            elif o == "--server-add-username":
981                if server:
982                    servers['add'][server]['user'] = a
983                self.sa_username = a
984            elif o == "--server-add-password":
985                if server:
986                    servers['add'][server]['password'] = a
987                self.sa_password = a
988            elif o in ( "-r", "--server-remove"):
989                server = "%s:%d" % util.hostport(a)
990                servers['remove'][server] = True
991                server = None
992            elif o == "--server-failover":
993                server = "%s:%d" % util.hostport(a)
994                servers['failover'][server] = True
995                server = None
996            elif o == "--server-recovery":
997                server = "%s:%d" % util.hostport(a)
998                servers['recovery'][server] = True
999                server = None
1000            elif o == "--nodes":
1001                for server in self.normalize_servers(a):
1002                    servers['log'][server] = True
1003            elif o in ('-o', '--output'):
1004                if a == 'json':
1005                    self.output = a
1006                server = None
1007            elif o in ('-d', '--debug'):
1008                self.debug = True
1009                server = None
1010            elif o in ('--cluster-init-password', '--cluster-password'):
1011                self.password_new = a
1012            elif o in ('--cluster-init-username', '--cluster-username'):
1013                self.username_new = a
1014            elif o in ('--cluster-init-port', '--cluster-port'):
1015                self.port_new = a
1016            elif o in ('--cluster-init-ramsize', '--cluster-ramsize'):
1017                self.per_node_quota = a
1018            elif o == '--cluster-index-ramsize':
1019                self.cluster_index_ramsize = a
1020            elif o == '--cluster-fts-ramsize':
1021                self.cluster_fts_ramsize = a
1022            elif o == '--index-storage-setting':
1023                self.index_storage_setting = a
1024            elif o == '--cluster-name':
1025                self.cluster_name = a
1026            elif o == '--enable-auto-failover':
1027                self.enable_auto_failover = bool_to_str(a)
1028            elif o == '--enable-notification':
1029                self.enable_notification = bool_to_str(a)
1030            elif o == '--auto-failover-timeout':
1031                self.autofailover_timeout = a
1032            elif o == '--compaction-db-percentage':
1033                self.compaction_db_percentage = a
1034            elif o == '--compaction-db-size':
1035                self.compaction_db_size = a
1036            elif o == '--compaction-view-percentage':
1037                self.compaction_view_percentage = a
1038            elif o == '--compaction-view-size':
1039                self.compaction_view_size = a
1040            elif o == '--compaction-period-from':
1041                self.compaction_period_from = a
1042            elif o == '--compaction-period-to':
1043                self.compaction_period_to = a
1044            elif o == '--enable-compaction-abort':
1045                self.enable_compaction_abort = bool_to_str(a)
1046            elif o == '--enable-compaction-parallel':
1047                self.enable_compaction_parallel = bool_to_str(a)
1048            elif o == '--gsi-compaction-mode':
1049                self.gsi_compact_mode = a
1050            elif o == '--compaction-gsi-percentage':
1051                self.gsi_compact_perc = a
1052            elif o == '--compaction-gsi-interval':
1053                self.gsi_compact_interval = a
1054            elif o == '--compaction-gsi-period-from':
1055                self.gsi_compact_period_from = a
1056            elif o == '--compaction-gsi-period-to':
1057                self.gsi_compact_period_to = a
1058            elif o == '--enable-gsi-compaction-abort':
1059                self.gsi_compact_abort = bool_to_str(a)
1060            elif o == '--enable-email-alert':
1061                self.enable_email_alert = bool_to_str(a)
1062            elif o == '--new-master-password':
1063                self.new_master_password = a
1064            elif o == '--rotate-data-key':
1065                self.rotate_data_key = True
1066            elif o == '--node-init-data-path':
1067                self.data_path = a
1068            elif o == '--node-init-index-path':
1069                self.index_path = a
1070            elif o == '--node-init-hostname':
1071                self.hostname = a
1072            elif o == '--email-recipients':
1073                self.email_recipient = a
1074            elif o == '--email-sender':
1075                self.email_sender = a
1076            elif o == '--email-user':
1077                self.email_user = a
1078            elif o == '--email-password':
1079                self.email_password = a
1080            elif o == '--email-host':
1081                self.email_host = a
1082            elif o == '--email-port':
1083                self.email_port = a
1084            elif o == '--enable-email-encrypt':
1085                self.email_enable_encrypt = bool_to_str(a)
1086            elif o == '--alert-auto-failover-node':
1087                self.autofailover_node = True
1088            elif o == '--alert-auto-failover-max-reached':
1089                self.autofailover_max_reached = True
1090            elif o == '--alert-auto-failover-node-down':
1091                self.autofailover_node_down = True
1092            elif o == '--alert-auto-failover-cluster-small':
1093                self.autofailover_cluster_small = True
1094            elif o == '--alert-auto-failover-disabled':
1095                self.autofailover_disabled = True
1096            elif o == '--alert-ip-changed':
1097                self.alert_ip_changed = True
1098            elif o == '--alert-disk-space':
1099                self.alert_disk_space = True
1100            elif o == '--alert-meta-overhead':
1101                self.alert_meta_overhead = True
1102            elif o == '--alert-meta-oom':
1103                self.alert_meta_oom = True
1104            elif o == '--alert-write-failed':
1105                self.alert_write_failed = True
1106            elif o == '--alert-audit-msg-dropped':
1107                self.alert_audit_dropped = True
1108            elif o == '--create':
1109                self.cmd = 'create'
1110            elif o == '--list':
1111                self.cmd = 'list'
1112            elif o == '--delete':
1113                self.cmd = 'delete'
1114            elif o == '--set':
1115                self.cmd = 'set'
1116            elif o == '--ro-username':
1117                self.ro_username = a
1118            elif o == '--ro-password':
1119                self.ro_password = a
1120            elif o == '--metadata-purge-interval':
1121                self.purge_interval = a
1122            elif o == '--group-name':
1123                self.group_name = a
1124            elif o == '--add-servers':
1125                self.server_list = self.normalize_servers(a)
1126                self.cmd = 'add-servers'
1127            elif o == '--remove-servers':
1128                self.server_list = self.normalize_servers(a)
1129                self.cmd = 'remove-servers'
1130            elif o == '--move-servers':
1131                self.server_list = self.normalize_servers(a)
1132                self.cmd = 'move-servers'
1133            elif o == '--from-group':
1134                self.from_group = a
1135            elif o == '--to-group':
1136                self.to_group = a
1137            elif o == '--rename':
1138                self.group_rename = a
1139                self.cmd = 'rename'
1140            elif o == '--retrieve-cert':
1141                self.cmd = 'retrieve'
1142                self.certificate_file = a
1143            elif o == '--regenerate-cert':
1144                self.cmd = 'regenerate'
1145                self.certificate_file = a
1146            elif o == '--cluster-cert-info':
1147                self.cmd = 'cluster-cert-info'
1148            elif o == '--extended':
1149                self.extended = True
1150            elif o == '--node-cert-info':
1151                self.cmd = 'node-cert-info'
1152            elif o == '--upload-cluster-ca':
1153                self.cmd = 'upload-cluster-ca'
1154                self.certificate_file = a
1155            elif o == '--set-node-certificate':
1156                self.cmd = 'set-node-certificate'
1157            elif o == '--force':
1158                self.hard_failover = True
1159            elif o == '--recovery-type':
1160                self.recovery_type = a
1161            elif o == '--recovery-buckets':
1162                self.recovery_buckets = a
1163            elif o == '--nodes':
1164                self.nodes = a
1165            elif o == '--all-nodes':
1166                self.all_nodes = True
1167            elif o == '--upload':
1168                self.upload = True
1169            elif o == '--upload-host':
1170                self.upload_host = a
1171            elif o == '--customer':
1172                self.customer = a
1173            elif o == '--ticket':
1174                self.ticket = a
1175            elif o == '--services':
1176                self.services = a
1177            elif o == '--audit-log-rotate-interval':
1178                self.audit_log_rotate_interval = a
1179            elif o == '--audit-log-path':
1180                self.audit_log_path = a
1181            elif o == '--audit-enabled':
1182                self.audit_enabled = bool_to_str(a)
1183            elif o == '--ldap-enabled':
1184                self.ldap_enabled = bool_to_str(a)
1185            elif o == '--ldap-admins':
1186                self.ldap_admins = a
1187            elif o == '--ldap-roadmins':
1188                self.ldap_roadmins = a
1189            elif o == '--ldap-default':
1190                self.ldap_default = a
1191            elif o == '--index-max-rollback-points':
1192                self.max_rollback_points = a
1193            elif o == '--index-stable-snapshot-interval':
1194                self.stable_snapshot_interval = a
1195            elif o == '--index-memory-snapshot-interval':
1196                self.memory_snapshot_interval = a
1197            elif o == '--index-threads':
1198                self.index_threads = a
1199            elif o == '--index-log-level':
1200                self.log_level = a
1201
1202            elif o == '--roles':
1203                self.roles = a
1204            elif o == '--my-roles':
1205                self.my_roles = True
1206            elif o == '--get-roles':
1207                self.get_roles = True
1208            elif o == '--set-users':
1209                self.set_users = a
1210            elif o == '--set-names':
1211                self.set_names = a
1212            elif o == '--delete-users':
1213                self.delete_users = a
1214
1215        return servers
1216
1217    def normalize_servers(self, server_list):
1218        slist = []
1219        sep = Node.SEP
1220        if server_list.find(sep) < 0:
1221            #backward compatible with ";" as separator
1222            sep = ";"
1223        for server in server_list.split(sep):
1224            hostport = "%s:%d" % util.hostport(server)
1225            slist.append(hostport)
1226        return slist
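    # normalize_servers accepts "," or ";" separated lists; assuming
    # util.hostport() supplies the default admin port 8091:
    #   "10.1.1.1,10.1.1.2" -> ["10.1.1.1:8091", "10.1.1.2:8091"]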
1227
1228    def reAddServers(self, servers):
1229        print "DEPRECATED: The server-readd command is deprecated and will be " + \
1230              "removed in a future release. Please use recovery instead."
1231        known_otps, eject_otps, failover_otps, readd_otps, _ = \
1232            self.getNodeOtps(to_readd=servers['add'])
1233
1234        for readd_otp in readd_otps:
1235            rest = util.restclient_factory(self.server,
1236                                         self.port,
1237                                         {'debug':self.debug},
1238                                         self.ssl)
1239            rest.setParam('otpNode', readd_otp)
1240
1241            opts = {
1242                'error_msg': "unable to re-add %s" % readd_otp,
1243                'success_msg': "re-add %s" % readd_otp
1244            }
1245            output_result = rest.restCmd('POST',
1246                                         rest_cmds['server-readd'],
1247                                         self.user,
1248                                         self.password,
1249                                         opts)
1250            print output_result
1251
1252    def getNodeOtps(self, to_eject=[], to_failover=[], to_readd=[]):
1253        """ Convert known nodes into otp node ids.
1254            """
1255        listservers = ListServers()
1256        known_nodes_list = listservers.getNodes(
1257                                listservers.getData(self.server,
1258                                                    self.port,
1259                                                    self.user,
1260                                                    self.password))
1261        known_otps = []
1262        eject_otps = []
1263        failover_otps = []
1264        readd_otps = []
1265        hostnames = []
1266
1267        for node in known_nodes_list:
1268            if node.get('otpNode') is None:
1269                raise Exception("could not access node")
1270            known_otps.append(node['otpNode'])
1271            hostnames.append(node['hostname'])
1272            if node['hostname'] in to_eject:
1273                eject_otps.append(node['otpNode'])
1274            if node['hostname'] in to_failover:
1275                if node['clusterMembership'] != 'active':
1276                    raise Exception('node %s is not active' % node['hostname'])
1277                else:
1278                    failover_otps.append((node['otpNode'], node['status']))
1279            _, host = node['otpNode'].split('@')
1280            hostport = "%s:%d" % util.hostport(host)
1281            if node['hostname'] in to_readd or hostport in to_readd:
1282                readd_otps.append(node['otpNode'])
1283
1284        return (known_otps, eject_otps, failover_otps, readd_otps, hostnames)
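    # otpNode ids take the form "<erlang name>@<host>", typically something
    # like "ns_1@10.1.1.2"; getNodeOtps splits on '@' to recover the host when
    # matching re-add candidates given as host:port.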
1285
1286    def recovery(self, servers):
1287        known_otps, eject_otps, failover_otps, readd_otps, _ = \
1288            self.getNodeOtps(to_readd=servers['recovery'])
1289        for readd_otp in readd_otps:
1290            rest = util.restclient_factory(self.server,
1291                                         self.port,
1292                                         {'debug':self.debug},
1293                                         self.ssl)
1294            opts = {
1295                'error_msg': "unable to setRecoveryType for node %s" % readd_otp,
1296                'success_msg': "setRecoveryType for node %s" % readd_otp
1297            }
1298            rest.setParam('otpNode', readd_otp)
1299            if self.recovery_type:
1300                rest.setParam('recoveryType', self.recovery_type)
1301            else:
1302                rest.setParam('recoveryType', 'delta')
1303            output_result = rest.restCmd('POST',
1304                                         '/controller/setRecoveryType',
1305                                         self.user,
1306                                         self.password,
1307                                         opts)
1308            print output_result
1309
1310    def rebalance(self, servers):
1311        known_otps, eject_otps, failover_otps, readd_otps, _ = \
1312            self.getNodeOtps(to_eject=servers['remove'])
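        # 'knownNodes' must list every node currently in the cluster and
        # 'ejectedNodes' the subset being removed; an optional
        # 'requireDeltaRecoveryBuckets' (from --recovery-buckets) is passed
        # through as-is.  After the POST, progress is polled via
        # rebalanceStatus() every 0.5s (with a 2s back-off on socket errors)
        # until the task leaves the 'running'/'unknown' states.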
1313        rest = util.restclient_factory(self.server,
1314                                     self.port,
1315                                     {'debug':self.debug},
1316                                     self.ssl)
1317        rest.setParam('knownNodes', ','.join(known_otps))
1318        rest.setParam('ejectedNodes', ','.join(eject_otps))
1319        if self.recovery_buckets:
1320            rest.setParam('requireDeltaRecoveryBuckets', self.recovery_buckets)
1321        opts = {
1322            'success_msg': 'rebalanced cluster',
1323            'error_msg': 'unable to rebalance cluster'
1324        }
1325        output_result = rest.restCmd('POST',
1326                                     rest_cmds['rebalance'],
1327                                     self.user,
1328                                     self.password,
1329                                     opts)
1330        if self.debug:
1331            print "INFO: rebalance started: %s" % output_result
1332
1333        sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
1334
1335        print "INFO: rebalancing",
1336
1337        status, error = self.rebalanceStatus(prefix='\n')
1338        while status in ['running', 'unknown']:
1339            print ".",
1340            time.sleep(0.5)
1341            try:
1342                status, error = self.rebalanceStatus(prefix='\n')
1343            except socket.error:
1344                time.sleep(2)
1345                status, error = self.rebalanceStatus(prefix='\n')
1346
1347        if error:
1348            print '\n' + error
1349            sys.exit(1)
1350        else:
1351            print '\n' + output_result
1352
1353    def rebalanceStatus(self, prefix=''):
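        # Polls /pools/default/tasks and scans for the task whose "type" is
        # "rebalance", returning (status, errorMessage-or-None).  A stale
        # "notRunning" status is reported as "unknown" so the caller keeps
        # polling.  Illustrative shape of a matching entry (assumed from the
        # fields read below; real responses carry more fields):
        #   {"type": "rebalance", "status": "running", "statusIsStale": false}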
1354        rest = util.restclient_factory(self.server,
1355                                     self.port,
1356                                     {'debug':self.debug},
1357                                     self.ssl)
1358
1359        opts = {
1360            'error_msg': "unable to obtain rebalance status",
1361            'success_msg': "retrieved rebalance status successfully"
1362        }
1363        output_result = rest.restCmd('GET',
1364                                     '/pools/default/tasks',
1365                                     self.user,
1366                                     self.password,
1367                                     opts)
1368        tasks = rest.getJson(output_result)
1369        for task in tasks:
1370            error_message = None
1371            if "errorMessage" in task:
1372                error_message = task['errorMessage']
1373
1374            if task["type"] == "rebalance":
1375                if task["status"] == "running":
1376                    return task["status"], error_message
1377                if task["status"] == "notRunning":
1378                    if "statusIsStale" in task:
1379                        if task["statusIsStale"] or task["statusIsStale"] == "true":
1380                            return "unknown", error_message
1381
1382                return task["status"], error_message
1383
1384        return "unknown", error_message
1385
1386    def rebalanceStop(self):
1387        rest = util.restclient_factory(self.server,
1388                                     self.port,
1389                                     {'debug':self.debug},
1390                                     self.ssl)
1391
1392        opts = {
1393            'success_msg': 'rebalance cluster stopped',
1394            'error_msg': 'unable to stop rebalance'
1395        }
1396        output_result = rest.restCmd('POST',
1397                                     rest_cmds['rebalance-stop'],
1398                                     self.user,
1399                                     self.password,
1400                                     opts)
1401        return output_result
1402
1403
1404    def failover(self, servers):
1405        known_otps, eject_otps, failover_otps, readd_otps, _ = \
1406            self.getNodeOtps(to_failover=servers['failover'])
1407
1408        if len(failover_otps) <= 0:
1409            command_error("specified servers are not part of the cluster: %s" %
1410                  servers['failover'].keys())
1411
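        # Hard failover (rest_cmds['failover']) is used when --force was given
        # (self.hard_failover) or the node is not 'healthy'; otherwise a
        # graceful failover is started via /controller/startGracefulFailover
        # and its progress is polled just like a rebalance.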
1412        for failover_otp, node_status in failover_otps:
1413            rest = util.restclient_factory(self.server,
1414                                         self.port,
1415                                         {'debug':self.debug},
1416                                         self.ssl)
1417            opts = {
1418                'error_msg': "unable to failover %s" % failover_otp,
1419                'success_msg': "failover %s" % failover_otp
1420            }
1421            rest.setParam('otpNode', failover_otp)
1422            if self.hard_failover or node_status != 'healthy':
1423                output_result = rest.restCmd('POST',
1424                                             rest_cmds['failover'],
1425                                             self.user,
1426                                             self.password,
1427                                             opts)
1428                print output_result
1429            else:
1430                output_result = rest.restCmd('POST',
1431                                             '/controller/startGracefulFailover',
1432                                             self.user,
1433                                             self.password,
1434                                             opts)
1435                if self.debug:
1436                    print "INFO: rebalance started: %s" % output_result
1437
1438                sys.stdout = os.fdopen(sys.stdout.fileno(), 'w', 0)
1439
1440                print "INFO: graceful failover",
1441
1442                status, error = self.rebalanceStatus(prefix='\n')
1443                while status == 'running':
1444                    print ".",
1445                    time.sleep(0.5)
1446                    try:
1447                        status, error = self.rebalanceStatus(prefix='\n')
1448                    except socket.error:
1449                        time.sleep(2)
1450                        status, error = self.rebalanceStatus(prefix='\n')
1451
1452                if error:
1453                    print '\n' + error
1454                else:
1455                    print '\n' + output_result
1456
1457    def userManage(self):
1458        if self.cmd == 'list':
1459            self.roUserList()
1460        elif self.cmd == 'delete':
1461            self.roUserDelete()
1462        elif self.cmd == 'set':
1463            self.roUserSet()
1464
1465    def roUserList(self):
1466        rest = util.restclient_factory(self.server,
1467                                     self.port,
1468                                     {'debug':self.debug},
1469                                     self.ssl)
1470        opts = { 'error_msg':'no read-only user defined'}
1471        try:
1472            output_result = rest.restCmd('GET',
1473                                         '/settings/readOnlyAdminName',
1474                                         self.user,
1475                                         self.password,
1476                                         opts)
1477            json = rest.getJson(output_result)
1478            print json
1479        except:
1480            pass
1481
1482    def roUserDelete(self):
1483        rest = util.restclient_factory(self.server,
1484                                     self.port,
1485                                     {'debug':self.debug},
1486                                     self.ssl)
1487
1488        opts = {
1489            'success_msg': 'readOnly user deleted',
1490            'error_msg': 'unable to delete readOnly user'
1491        }
1492        output_result = rest.restCmd('DELETE',
1493                                     "/settings/readOnlyUser",
1494                                     self.user,
1495                                     self.password,
1496                                     opts)
1497        print output_result
1498
1499    def roUserSet(self):
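        # Check-then-create: a GET on /settings/readOnlyAdminName that succeeds
        # means a read-only user already exists, so bail out (the bare except
        # treats any failure as "no user yet"); otherwise POST the new
        # credentials to /settings/readOnlyUser.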
1500        rest = util.restclient_factory(self.server,
1501                                     self.port,
1502                                     {'debug':self.debug},
1503                                     self.ssl)
1504        try:
1505            output_result = rest.restCmd('GET',
1506                                         '/settings/readOnlyAdminName',
1507                                         self.user,
1508                                         self.password)
1509            json = rest.getJson(output_result)
1510            print "ERROR: readonly user %s exist already. Delete it before creating a new one" % json
1511            return
1512        except:
1513            pass
1514
1515        rest = util.restclient_factory(self.server,
1516                                     self.port,
1517                                     {'debug':self.debug},
1518                                     self.ssl)
1519        if self.ro_username:
1520            rest.setParam('username', self.ro_username)
1521        if self.ro_password:
1522            rest.setParam('password', self.ro_password)
1523        opts = {
1524            'success_msg': 'readOnly user created',
1525            'error_msg': 'fail to create readOnly user'
1526        }
1527        output_result = rest.restCmd('POST',
1528                                     "/settings/readOnlyUser",
1529                                     self.user,
1530                                     self.password,
1531                                     opts)
1532        print output_result
1533
1534    def groupManage(self):
1535        if self.cmd == 'move-servers':
1536            self.groupMoveServer()
1537        elif self.cmd == 'list':
1538             self.groupList()
1539        else:
1540            if self.group_name is None:
1541                command_error("please specify --group-name for the operation")
1542            elif self.cmd == 'delete':
1543                self.groupDelete()
1544            elif self.cmd == 'create':
1545                self.groupCreate()
1546            elif self.cmd == 'add-servers':
1547                print "DEPRECATED: Adding server from group-manage is deprecated, use server-add instead"
1548                self.groupAddServers()
1549            elif self.cmd == 'rename':
1550                self.groupRename()
1551            else:
1552                print "Unknown group command:%s" % self.cmd
1553
1554    def getGroupUri(self, groupName):
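        # Fetch /pools/default/serverGroups and return the 'uri' of the group
        # whose 'name' matches groupName, or None if there is no such group.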
1555        rest = util.restclient_factory(self.server,
1556                                     self.port,
1557                                     {'debug':self.debug},
1558                                     self.ssl)
1559        output_result = rest.restCmd('GET',
1560                                     '/pools/default/serverGroups',
1561                                     self.user,
1562                                     self.password)
1563        groups = rest.getJson(output_result)
1564        for group in groups["groups"]:
1565            if groupName == group["name"]:
1566                return group["uri"]
1567        return None
1568
1569    def getServerGroups(self):
1570        rest = util.restclient_factory(self.server,
1571                                     self.port,
1572                                     {'debug':self.debug},
1573                                     self.ssl)
1574        output_result = rest.restCmd('GET',
1575                                     '/pools/default/serverGroups',
1576                                     self.user,
1577                                     self.password)
1578        return rest.getJson(output_result)
1579
1580    def groupList(self):
1581        rest = util.restclient_factory(self.server,
1582                                     self.port,
1583                                     {'debug':self.debug},
1584                                     self.ssl)
1585        output_result = rest.restCmd('GET',
1586                                     '/pools/default/serverGroups',
1587                                     self.user,
1588                                     self.password)
1589        groups = rest.getJson(output_result)
1590        found = False
1591        for group in groups["groups"]:
1592            if self.group_name is None or self.group_name == group['name']:
1593                found = True
1594                print '%s' % group['name']
1595                for node in group['nodes']:
1596                    print ' server: %s' % node["hostname"]
1597        if not found and self.group_name:
1598            print "Invalid group name: %s" % self.group_name
1599
1600    def groupCreate(self):
1601        rest = util.restclient_factory(self.server,
1602                                     self.port,
1603                                     {'debug':self.debug},
1604                                     self.ssl)
1605        rest.setParam('name', self.group_name)
1606        opts = {
1607            'error_msg': "unable to create group %s" % self.group_name,
1608            'success_msg': "group created %s" % self.group_name
1609        }
1610        output_result = rest.restCmd('POST',
1611                                     '/pools/default/serverGroups',
1612                                     self.user,
1613                                     self.password,
1614                                     opts)
1615        print output_result
1616
1617    def groupRename(self):
1618        uri = self.getGroupUri(self.group_name)
1619        if uri is None:
1620            command_error("invalid group name: %s" % self.group_name)
1621        if self.group_rename is None:
1622            command_error("please specify a new group name with --rename")
1623
1624        rest = util.restclient_factory(self.server,
1625                                     self.port,
1626                                     {'debug':self.debug},
1627                                     self.ssl)
1628        rest.setParam('name', self.group_rename)
1629        opts = {
1630            'error_msg': "unable to rename group %s" % self.group_name,
1631            'success_msg': "group renamed %s" % self.group_name
1632        }
1633        output_result = rest.restCmd('PUT',
1634                                     uri,
1635                                     self.user,
1636                                     self.password,
1637                                     opts)
1638        print output_result
1639
1640    def groupDelete(self):
1641        uri = self.getGroupUri(self.group_name)
1642        if uri is None:
1643            command_error("invalid group name: %s" % self.group_name)
1644
1645        rest = util.restclient_factory(self.server,
1646                                     self.port,
1647                                     {'debug':self.debug},
1648                                     self.ssl)
1649        rest.setParam('name', self.group_name)
1650        opts = {
1651            'error_msg': "unable to delete group %s" % self.group_name,
1652            'success_msg': "group deleted %s" % self.group_name
1653        }
1654        output_result = rest.restCmd('DELETE',
1655                                     uri,
1656                                     self.user,
1657                                     self.password,
1658                                     opts)
1659        print output_result
1660
1661    def groupAddServers(self):
1662        # If this is the first index node added then we need to make sure to
1663        # set the index storage setting.
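        # An empty 'storageMode' in the index settings means no index node has
        # been initialised yet, so the requested (or default) setting is
        # applied; a non-empty mode that differs from an explicitly requested
        # one is rejected, since the storage mode cannot be changed here.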
1664        indexStorageParam = self.index_storage_to_param(self.index_storage_setting, False)
1665        if not indexStorageParam:
1666            print "ERROR: invalid index storage setting `%s`. Must be [default, memopt]" \
1667                % self.index_storage_setting
1668            return
1669
1670        cm = cluster_manager.ClusterManager(self.server, self.port, self.user,
1671                                            self.password, self.ssl)
1672
1673        settings, errors = cm.index_settings()
1674        _exitIfErrors(errors)
1675
1676        if not settings:
1677            print "Error: unable to infer the current index storage mode"
1678            return
1679
1680        if settings['storageMode'] == "":
1681            _, errors = cm.set_index_settings(indexStorageParam)
1682            if errors:
1683                _exitIfErrors(errors)
1684        elif settings['storageMode'] != self.index_storage_setting and \
1685             self.index_storage_setting:
1686            print "Error: Cannot change index storage mode from `%s` to `%s`" % \
1687                (settings['storageMode'], self.index_storage_setting)
1688            return
1689
1690        err, services = self.process_services(False)
1691        if err:
1692            print err
1693            return
1694        for server in self.server_list:
1695            _, errors = cm.add_server(server, self.group_name, self.sa_username,
1696                                           self.sa_password, services)
1697            if errors:
1698                _exitIfErrors(errors, "Error: Failed to add server %s: " % server)
1699
1700            if self.group_name:
1701                print "Server %s added to group %s" % (server, self.group_name)
1702            else:
1703                print "Server %s added" % server
1704
1705    def groupMoveServer(self):
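        # Read-modify-write on the whole server-groups document: fetch it,
        # move the matching node entries from --from-group to --to-group in
        # the local copy, then PUT the modified document back to its own
        # 'uri'.  Minimal assumed shape, based only on the keys accessed here:
        #   {"uri": ..., "groups": [{"name": ..., "uri": ...,
        #                            "nodes": [{"hostname": ...}, ...]}, ...]}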
1706        groups = self.getServerGroups()
1707        node_info = {}
1708        for group in groups["groups"]:
1709            if self.from_group == group['name']:
1710                for server in self.server_list:
1711                    for node in group["nodes"]:
1712                        if server == node["hostname"]:
1713                            node_info[server] = node
1714                            group["nodes"].remove(node)
1715        if not node_info:
1716            print "No servers removed from group '%s'" % self.from_group
1717            return
1718
1719        for group in groups["groups"]:
1720            if self.to_group == group['name']:
1721                for server in self.server_list:
1722                    found = False
1723                    for node in group["nodes"]:
1724                        if server == node["hostname"]:
1725                            found = True
1726                            break
1727                    if not found:
1728                        group["nodes"].append(node_info[server])
1729
1730        payload = json.dumps(groups)
1731        rest = util.restclient_factory(self.server,
1732                                     self.port,
1733                                     {'debug':self.debug},
1734                                     self.ssl)
1735        rest.setPayload(payload)
1736
1737        opts = {
1738            'error_msg': "unable to move servers from group '%s' to group '%s'" % (self.from_group, self.to_group),
1739            'success_msg': "move servers from group '%s' to group '%s'" % (self.from_group, self.to_group)
1740        }
1741        output_result = rest.restCmd('PUT',
1742                                     groups["uri"],
1743                                     self.user,
1744                                     self.password,
1745                                     opts)
1746        print output_result
1747
1748    def retrieveCert(self):
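        # Dispatch for the ssl-manage sub-commands: 'retrieve'/'regenerate'
        # write the cluster certificate to the given file, the *-cert-info
        # commands print certificate details (JSON when structured data is
        # returned), 'upload-cluster-ca' uploads a certificate read from the
        # given file, and 'set-node-certificate' applies the node certificate.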
1749        if self.cmd in ['retrieve', 'regenerate', 'upload-cluster-ca'] and self.certificate_file is None:
1750            command_error("please specify certificate file name for the operation")
1751
1752        cm = cluster_manager.ClusterManager(self.server, self.port, self.user, self.password, self.ssl)
1753
1754        if self.cmd == 'retrieve':
1755            print "Warning --retrieve-cert is deprecated, use --cluster-cert-info"
1756            certificate, errors = cm.retrieve_cluster_certificate()
1757            _exitIfErrors(errors)
1758            _exitOnFileWriteFailure(self.certificate_file, certificate)
1759            print "SUCCESS: %s certificate to '%s'" % (self.cmd, self.certificate_file)
1760        elif self.cmd  == 'regenerate':
1761            certificate, errors = cm.regenerate_cluster_certificate()
1762            _exitIfErrors(errors)
1763            _exitOnFileWriteFailure(self.certificate_file, certificate)
1764            print "SUCCESS: %s certificate to '%s'" % (self.cmd, self.certificate_file)
1765        elif self.cmd == 'cluster-cert-info':
1766            certificate, errors = cm.retrieve_cluster_certificate(self.extended)
1767            _exitIfErrors(errors)
1768            if isinstance(certificate, dict):
1769                print json.dumps(certificate, sort_keys=True, indent=2)
1770            else:
1771                print certificate
1772        elif self.cmd == 'node-cert-info':
1773            certificate, errors = cm.retrieve_node_certificate('%s:%d' % (self.server, self.port))
1774            _exitIfErrors(errors)
1775            print json.dumps(certificate, sort_keys=True, indent=2)
1776        elif self.cmd == 'upload-cluster-ca':
1777            certificate = _exitOnFileReadFailure(self.certificate_file)
1778            _, errors = cm.upload_cluster_certificate(certificate)
1779            _exitIfErrors(errors)
1780            print "SUCCESS: uploaded cluster certificate to %s:%d" % (self.server, self.port)
1781        elif self.cmd == 'set-node-certificate':
1782            _, errors = cm.set_node_certificate()
1783            _exitIfErrors(errors)
1784            print "SUCCESS: node certificate set"
1785        else:
1786            print "ERROR: unknown request:", self.cmd
1787
1788    def collectLogsStart(self, servers):
1789        """Starts a cluster-wide log collection task"""
1790        if (servers['log'] is None) and (self.all_nodes is not True):
1791            command_error("please specify a list of nodes to collect logs from, " +
1792                  " or 'all-nodes'")
1793
1794        rest = util.restclient_factory(self.server, self.port,
1795                                     {'debug': self.debug}, self.ssl)
1796        if self.all_nodes:
1797            rest.setParam("nodes", "*")
1798        else:
1799            known_otps, eject_otps, failover_otps, readd_otps, hostnames = \
1800                self.getNodeOtps(to_readd=servers['log'])
1801            if not readd_otps:
1802                msg = ",".join(hostnames)
1803                command_error("invalid node name specified for collecting logs, available nodes are:\n"+msg)
1804
1805            nodelist = ",".join(readd_otps)
1806            rest.setParam("nodes", nodelist)
1807            print "NODES:", nodelist
1808
1809        if self.upload:
1810            if self.upload_host is None:
1811                command_error("please specify an upload-host when using --upload")
1812
1813            rest.setParam("uploadHost", self.upload_host)
1814
1815            if not self.customer:
1816                command_error("please specify a value for --customer when using" +
1817                      " --upload")
1818
1819            rest.setParam("customer", self.customer)
1820            rest.setParam("ticket", self.ticket)
1821
1822        opts = {
1823            'error_msg': "unable to start log collection:",
1824            'success_msg': "Log collection started"
1825        }
1826
1827        output_result = rest.restCmd(self.method, self.rest_cmd, self.user,
1828                                     self.password, opts)
1829        print output_result
1830
1831    def collectLogsStop(self):
1832        """Stops a cluster-wide log collection task"""
1833        rest = util.restclient_factory(self.server, self.port,
1834                                     {'debug': self.debug}, self.ssl)
1835
1836        opts = {
1837            'success_msg': 'log collection successfully stopped',
1838            'error_msg': 'unable to stop log collection'
1839        }
1840        output_result = rest.restCmd(self.method, self.rest_cmd, self.user,
1841                                     self.password, opts)
1842        print output_result
1843
1844    def collectLogsStatus(self):
1845        """Shows the current status of log collection task"""
1846        rest = util.restclient_factory(self.server, self.port,
1847                                     {'debug': self.debug}, self.ssl)
1848
1849        opts = {
1850            'error_msg': 'unable to obtain log collection status'
1851        }
1852        output_result = rest.restCmd(self.method, self.rest_cmd, self.user,
1853                                     self.password, opts)
1854
1855        output_json = rest.getJson(output_result)
1856
1857        for e in output_json:
1858            if (isinstance(e, dict) and ('type' in e) and
1859                (e['type'] == 'clusterLogsCollection')):
1860                print "Status:   %s" % e['status']
1861                if 'perNode' in e:
1862                    print "Details:"
1863                    for node, ns in e["perNode"].iteritems():
1864                        print '\tNode:', node
1865                        print '\tStatus:', ns['status']
1866                        for f in ["path", "statusCode", "url", "uploadStatusCode", "uploadOutput"]:
1867                            if f in ns:
1868                                print '\t', f, ":", ns[f]
1869                        print
1870                return
1871
1872    def getCommandSummary(self, cmd):
1873        """Return one-line summary info for each supported command"""
1874        command_summary = {
1875            "server-list" :"list all servers in a cluster",
1876            "server-info" :"show details on one server",
1877            "server-add" :"add one or more servers to the cluster",
1878            "server-readd" :"readd a server that was failed over",
1879            "group-manage" :"manage server groups",
1880            "rebalance" :"start a cluster rebalancing",
1881            "rebalance-stop" :"stop current cluster rebalancing",
1882            "rebalance-status" :"show status of current cluster rebalancing",
1883            "failover" :"failover one or more servers",
1884            "recovery" :"recover one or more servers",
1885            "setting-cluster" : "set cluster settings",
1886            "setting-compaction" : "set auto compaction settings",
1887            "setting-notification" : "set notification settings",
1888            "setting-alert" : "set email alert settings",
1889            "setting-autofailover" : "set auto failover settings",
1890            "collect-logs-start" : "start a cluster-wide log collection",
1891            "collect-logs-stop" : "stop a cluster-wide log collection",
1892            "collect-logs-status" : "show the status of cluster-wide log collection",
1893            "cluster-init" : "set the username,password and port of the cluster",
1894            "cluster-edit" : "modify cluster settings",
1895            "node-init" : "set node specific parameters",
1896            "ssl-manage" : "manage cluster certificate",
1897            "user-manage" : "manage read only user",
1898            "setting-index" : "set index settings",
1899            "setting-ldap" : "set ldap settings",
1900            "setting-audit" : "set audit settings",
1901            "admin-role-manage" : "set access-control roles for users"
1902        }
1903        if cmd in command_summary:
1904            return command_summary[cmd]
1905        else:
1906            return None
1907
1908    def getCommandHelp(self, cmd):
1909        """ Obtain detailed parameter help for Node commands
1910        Returns a list of pairs (arg1, arg1-information) or None if there's
1911        no help or cmd is unknown.
1912        """
1913
1914        # Some common flags for server- commands
1915        server_common = [("--server-add=HOST[:PORT]", "server to be added"),
1916                         ("--server-add-username=USERNAME",
1917                          "admin username for the server to be added"),
1918                         ("--server-add-password=PASSWORD",
1919                          "admin password for the server to be added"),
1920                         ("--group-name=GROUPNAME", "group that server belongs")]
1921
1922        services = [("--services=data,index,query,fts",
1923                     "services that server runs")]
1924
1925        if cmd == "server-add" or cmd == "rebalance":
1926            return [("--index-storage-setting=SETTING", "index storage type [default, memopt]")] \
1927            + server_common + services
1928        elif cmd == "server-readd":
1929            return server_common
1930        elif cmd == "group-manage":
1931            return [
1932            ("--group-name=GROUPNAME", "group name"),
1933            ("--create", "create a new group"),
1934            ("--delete", "delete an empty group"),
1935            ("--list", "show group/server relationship map"),
1936            ("--rename=NEWGROUPNAME", "rename group to new name"),
1937            ("--add-servers=HOST[:PORT],HOST[:PORT]",
1938             "add a list of servers to group"),
1939            ("--move-servers=HOST[:PORT],HOST[:PORT]",
1940             "move a list of servers from group"),
1941            ("--from-group=GROUPNAME", "group name to move servers from"),
1942            ("--to-group=GROUPNAME", "group name to move servers into"),
1943            ("--index-storage-setting=SETTING", "index storage type [default, memopt]")] + services
1944        elif cmd == "cluster-init" or cmd == "cluster-edit":
1945            return [
1946            ("--cluster-username=USER", "new admin username"),
1947            ("--cluster-password=PASSWORD", "new admin password"),
1948            ("--cluster-port=PORT", "new cluster REST/http port"),
1949            ("--cluster-ramsize=RAMSIZEMB", "per node data service ram quota in MB"),
1950            ("--cluster-index-ramsize=RAMSIZEMB", "per node index service ram quota in MB"),
1951            ("--cluster-fts-ramsize=RAMSIZEMB", "per node fts service ram quota in MB"),
1952            ("--index-storage-setting=SETTING", "index storage type [default, memopt]")] + services
1953        elif cmd == "node-init":
1954            return [
1955            ("--node-init-data-path=PATH", "data path for database files"),
1956            ("--node-init-index-path=PATH", "index path for view data")]
1957        elif cmd == "failover":
1958            return [
1959            ("--server-failover=HOST[:PORT]", "server to failover"),
1960            ("--force", "failover node from cluster right away")]
1961        elif cmd == "recovery":
1962            return [
1963            ("--server-recovery=HOST[:PORT]", "server to recover"),
1964            ("--recovery-type=TYPE[delta|full]",
1965             "type of recovery to be performed for a node")]
1966        elif cmd == "user-manage":
1967            return [
1968            ("--set", "create/modify a read only user"),
1969            ("--list", "list any read only user"),
1970            ("--delete", "delete read only user"),
1971            ("--ro-username=USERNAME", "readonly user name"),
1972            ("--ro-password=PASSWORD", "readonly user password")]
1973        elif cmd == "setting-compaction":
1974            return [
1975            ("", ""),
1976            ("Data/View compaction settings:", ""),
1977            ("  --compaction-db-percentage=PERC",
1978            "Starts compaction once data file fragmentation has reached this percentage"),
1979            ("  --compaction-db-size=SIZE",
1980             "Starts compaction once data file fragmentation has reached this size"),
1981            ("  --compaction-view-percentage=PERC",
1982            "Starts compaction once view file fragmentation has reached this percentage"),
1983            ("  --compaction-view-size=SIZE",
1984             "Starts compaction once view file fragmentation has reached this size"),
1985            ("  --compaction-period-from=HH:MM", "Allow compaction to run after this time"),
1986            ("  --compaction-period-to=HH:MM", "Allow compaction to run before this time"),
1987            ("  --enable-compaction-abort=[0|1]",
1988            "Abort compaction if when run outside of the accepted interval"),
1989            ("  --enable-compaction-parallel=[0|1]", "Allow view/data file compaction at the same time"),
1990            ("", ""),
1991            ("GSI index compaction settings:", ""),
1992            ("  --gsi-compaction-mode", "Sets the gsi compaction mode [append|circular]"),
1993            ("  --compaction-gsi-percentage=PERC",
1994            "Starts compaction once gsi file fragmentation has reached this percentage (Append mode only)"),
1995            ("  --compaction-gsi-interval",
1996            "A comma separated list of days compaction can run (Circular mode only)"),
1997            ("  --compaction-gsi-period-from=HH:MM",
1998            "Allow gsi compaction to run after this time (Circular mode only)"),
1999            ("  --compaction-gsi-period-to=HH:MM",
2000            "Allow gsi compaction to run before this time (Circular mode only)"),
2001            ("  --enable-gsi-compaction-abort=[0|1]",
2002            "Abort gsi compaction if when run outside of the accepted interaval (Circular mode only)")]
2003        elif cmd == "setting-alert":
2004            return [
2005            ("--enable-email-alert=[0|1]", "allow email alert"),
2006            ("--email-recipients=RECIPIENT",
2007             "email recipients, separate addresses with , or ;"),
2008            ("--email-sender=SENDER", "sender email address"),
2009            ("--email-user=USER", "email server username"),
2010            ("--email-password=PWD", "email server password"),
2011            ("--email-host=HOST", "email server host"),
2012            ("--email-port=PORT", "email server port"),
2013            ("--enable-email-encrypt=[0|1]", "email encrypt"),
2014            ("--alert-auto-failover-node", "node was auto failover"),
2015            ("--alert-auto-failover-max-reached",
2016             "maximum number of auto failover nodes was reached"),
2017            ("--alert-auto-failover-node-down",
2018             "node wasn't auto failover as other nodes are down at the same time"),
2019            ("--alert-auto-failover-cluster-small",
2020             "node wasn't auto fail over as cluster was too small"),
2021            ("--alert-auto-failover-disabled",
2022             "node was not auto-failed-over as auto-failover for one or more services running on the node is disabled"),
2023            ("--alert-ip-changed", "node ip address has changed unexpectedly"),
2024            ("--alert-disk-space",
2025             "disk space used for persistent storgage has reached at least 90% capacity"),
2026            ("--alert-meta-overhead",
2027             "metadata overhead is more than 50%"),
2028            ("--alert-meta-oom",
2029             "bucket memory on a node is entirely used for metadata"),
2030            ("--alert-write-failed",
2031             "writing data to disk for a specific bucket has failed"),
2032            ("--alert-audit-msg-dropped", "writing event to audit log has failed")]
2033        elif cmd == "setting-cluster":
2034            return [("--cluster-name=[CLUSTERNAME]", "cluster name"),
2035                    ("--cluster-ramsize=[RAMSIZEMB]", "per node data service ram quota in MB"),
2036                    ("--cluster-index-ramsize=[RAMSIZEMB]","per node index service ram quota in MB"),
2037                    ("--cluster-fts-ramsize=RAMSIZEMB", "per node fts service ram quota in MB")]
2038        elif cmd == "setting-notification":
2039            return [("--enable-notification=[0|1]", "allow notification")]
2040        elif cmd == "setting-autofailover":
2041            return [("--enable-auto-failover=[0|1]", "allow auto failover"),
2042                    ("--auto-failover-timeout=TIMEOUT (>=30)",
2043                     "specify timeout that expires to trigger auto failover")]
2044        elif cmd == "ssl-manage":
2045            return [("--cluster-cert-info", "prints cluster certificate info"),
2046                    ("--node-cert-info", "prints node certificate info"),
2047                    ("--retrieve-cert=CERTIFICATE",
2048                     "retrieve cluster certificate AND save to a pem file"),
2049                    ("--regenerate-cert=CERTIFICATE",
2050                     "regenerate cluster certificate AND save to a pem file"),
2051                    ("--set-node-certificate", "sets the node certificate"),
2052                    ("--upload-cluster-ca", "uploads a new cluster certificate")]
2053        elif cmd == "setting-audit":
2054            return [
2055            ("--audit-log-rotate-interval=[MINUTES]", "log rotation interval"),
2056            ("--audit-log-path=[PATH]", "target log directory"),
2057            ("--audit-enabled=[0|1]", "enable auditing or not")]
2058        elif cmd == "setting-ldap":
2059            return [
2060            ("--ldap-admins=", "full admins, separated by comma"),
2061            ("--ldap-roadmins=", "read only admins, separated by comma"),
2062            ("--ldap-enabled=[0|1]", "using LDAP protocol for authentication"),
2063            ("--ldap-default=[admins|roadmins|none]", "set default ldap accounts")]
2064        elif cmd == "setting-index":
2065            return [
2066            ("--index-max-rollback-points=[5]", "max rollback points"),
2067            ("--index-stable-snapshot-interval=SECONDS", "stable snapshot interval"),
2068            ("--index-memory-snapshot-interval=SECONDS", "in memory snapshot interval"),
2069            ("--index-threads=[4]", "indexer threads"),
2070            ("--index-log-level=[debug|silent|fatal|error|warn|info|verbose|timing|trace]", "indexer log level")]
2071        elif cmd == "collect-logs-start":
2072            return [
2073            ("--all-nodes", "Collect logs from all accessible cluster nodes"),
2074            ("--nodes=HOST[:PORT],HOST[:PORT]",
2075             "Collect logs from the specified subset of cluster nodes"),
2076            ("--upload", "Upload collects logs to specified host"),
2077            ("--upload-host=HOST",
2078             "Host to upload logs to (Manditory when --upload specified)"),
2079            ("--customer=CUSTOMER",
2080             "Customer name to use when uploading logs (Mandatory when --upload specified)"),
2081            ("--ticket=TICKET_NUMBER",
2082             "Ticket number to associate the uploaded logs with")]
2083        elif cmd == "admin-role-manage":
2084            return [
2085            ("--my-roles", "Return a list of roles for the current user."),
2086            ("--get-roles", "Return list of users and roles."),
2087            ("--set-users", "A comma-delimited list of user ids to set acess-control roles for"),
2088            ("--set-names", "A optional quoted, comma-delimited list names, one for each specified user id"),
2089            ("--roles", "A comma-delimited list of roles to set for users, one or more from admin, ro_admin, cluster_admin, replication_admin, bucket_admin[bucket name or '*'], views_admin[bucket name or '*']"),
2090            ("--delete-users", "A comma-delimited list of users to remove from access control")
2091            ]
2092        elif cmd == "master-password":
2093            return [
2094            ("--new-master-password", "Changes the master password on this node."),
2095            ("--rotate-data-key", "Rotates the master password data key."),
2096            ]
2097        else:
2098            return None
2099
2100    def getCommandExampleHelp(self, cmd):
2101        """ Obtain detailed example help for command
2102        Returns a list of command examples to illustrate how to use command
2103        or None if there's no example help or cmd is unknown.
2104        """
2105
2106        if cmd == "cluster-init":
2107            return [("Set data service ram quota and index ram quota",
2108"""
2109    couchbase-cli cluster-init -c 192.168.0.1:8091 \\
2110       --cluster-username=Administrator \\
2111       --cluster-password=password \\
2112       --cluster-port=8080 \\
2113       --services=data,index \\
2114       --cluster-ramsize=300 \\
2115       --cluster-index-ramsize=256 \\
2116       --index-storage-setting=memopt""")]
2117        elif cmd == "cluster-edit":
2118            return [("Change the cluster username, password, port and data service ram quota",
2119"""
2120    couchbase-cli cluster-edit -c 192.168.0.1:8091 \\
2121       --cluster-username=Administrator1 \\
2122       --cluster-password=password1 \\
2123       --cluster-port=8080 \\
2124       --cluster-ramsize=300 \\
2125       -u Administrator -p password""")]
2126        elif cmd == "node-init":
2127            return [("Set data path and hostname for an unprovisioned cluster",
2128"""
2129    couchbase-cli node-init -c 192.168.0.1:8091 \\
2130       --node-init-data-path=/tmp/data \\
2131       --node-init-index-path=/tmp/index \\
2132       --node-init-hostname=myhostname \\
2133       -u Administrator -p password"""),
2134                    ("Change the data path",
2135"""
2136     couchbase-cli node-init -c 192.168.0.1:8091 \\
2137       --node-init-data-path=/tmp \\
2138       -u Administrator -p password""")]
2139        elif cmd == "server-add":
2140            return [("Add a node to a cluster, but do not rebalance",
2141"""
2142    couchbase-cli server-add -c 192.168.0.1:8091 \\
2143       --server-add=192.168.0.2:8091 \\
2144       --server-add-username=Administrator1 \\
2145       --server-add-password=password1 \\
2147       --group-name=group1 \\
2148       --index-storage-setting=memopt \\
2149       -u Administrator -p password"""),
2150                    ("Add a node to a cluster, but do not rebalance",
2151"""
2152    couchbase-cli server-add -c 192.168.0.1:8091 \\
2153       --server-add=192.168.0.2:8091 \\
2154       --server-add-username=Administrator1 \\
2155       --server-add-password=password1 \\
2156       --group-name=group1 \\
2157       -u Administrator -p password""")]
2158        elif cmd == "rebalance":
2159            return [("Add a node to a cluster and rebalance",
2160"""
2161    couchbase-cli rebalance -c 192.168.0.1:8091 \\
2162       --server-add=192.168.0.2:8091 \\
2163       --server-add-username=Administrator1 \\
2164       --server-add-password=password1 \\
2165       --group-name=group1 \\
2166       -u Administrator -p password"""),
2167                    ("Add a node to a cluster and rebalance",
2168"""
2169    couchbase-cli rebalance -c 192.168.0.1:8091 \\
2170       --server-add=192.168.0.2:8091 \\
2171       --server-add-username=Administrator1 \\
2172       --server-add-password=password1 \\
2173       --group-name=group1 \\
2174       -u Administrator -p password"""),
2175                    ("Remove a node from a cluster and rebalance",
2176"""
2177    couchbase-cli rebalance -c 192.168.0.1:8091 \\
2178       --server-remove=192.168.0.2:8091 \\
2179       -u Administrator -p password"""),
2180                    ("Remove and add nodes from/to a cluster and rebalance",
2181"""
2182    couchbase-cli rebalance -c 192.168.0.1:8091 \\
2183      --server-remove=192.168.0.2 \\
2184      --server-add=192.168.0.4 \\
2185      --server-add-username=Administrator1 \\
2186      --server-add-password=password1 \\
2187      --group-name=group1 \\
2188      -u Administrator -p password""")
2189       ]
2190        elif cmd == "rebalance-stop":
2191            return [("Stop the current rebalancing",
2192"""
2193    couchbase-cli rebalance-stop -c 192.168.0.1:8091 \\
2194       -u Administrator -p password""")]
2195        elif cmd == "recovery":
2196            return [("Set recovery type to a server",
2197"""
2198    couchbase-cli recovery -c 192.168.0.1:8091 \\
2199       --server-recovery=192.168.0.2 \\
2200       --recovery-type=full \\
2201       -u Administrator -p password""")]
2202        elif cmd == "failover":
2203            return [("Set a failover, readd, recovery and rebalance sequence operations",
2204"""
2205    couchbase-cli failover -c 192.168.0.1:8091 \\
2206       --server-failover=192.168.0.2 \\
2207       -u Administrator -p password
2208
2209    couchbase-cli server-readd -c 192.168.0.1:8091 \\
2210       --server-add=192.168.0.2 \\
2211       -u Administrator -p password
2212
2213    couchbase-cli recovery -c 192.168.0.1:8091 \\
2214       --server-recovery=192.168.0.2 \\
2215       --recovery-type=delta \\
2216       -u Administrator -p password
2217
2218    couchbase-cli rebalance -c 192.168.0.1:8091 \\
2219       --recovery-buckets="default,bucket1" \\
2220       -u Administrator -p password""")]
2221        elif cmd == "user-manage":
2222            return [("List read only user in a cluster",
2223"""
2224    couchbase-cli user-manage --list -c 192.168.0.1:8091 \\
2225           -u Administrator -p password"""),
2226                ("Delete a read only user in a cluster",
2227"""
2228    couchbase-cli user-manage -c 192.168.0.1:8091 \\
2229        --delete --ro-username=readonlyuser \\
2230        -u Administrator -p password"""),
2231                ("Create/modify a read only user in a cluster",
2232"""
2233    couchbase-cli user-manage -c 192.168.0.1:8091 \\
2234        --set --ro-username=readonlyuser --ro-password=readonlypassword \\
2235        -u Administrator -p password""")]
2236        elif cmd == "group-manage":
2237            return [("Create a new group",
2238"""
2239    couchbase-cli group-manage -c 192.168.0.1:8091 \\
2240        --create --group-name=group1 -u Administrator -p password"""),
2241                ("Delete an empty group",
2242"""
2243    couchbase-cli group-manage -c 192.168.0.1:8091 \\
2244        --delete --group-name=group1 -u Administrator -p password"""),
2245                ("Rename an existed group",
2246"""
2247    couchbase-cli group-manage -c 192.168.0.1:8091 \\
2248        --rename=newgroup --group-name=group1 -u Administrator -p password"""),
2249                ("Show group/server map",
2250"""
2251    couchbase-cli group-manage -c 192.168.0.1:8091 \\
2252        --list -u Administrator -p password"""),
2253                ("Add a server to a group",
2254"""
2255    couchbase-cli group-manage -c 192.168.0.1:8091 \\
2256        --add-servers=10.1.1.1:8091,10.1.1.2:8091 \\
2257        --group-name=group1 \\
2258        --server-add-username=Administrator1 \\
2259        --server-add-password=password1 \\
2260        --services=data,index,query,fts \\
2261        -u Administrator -p password"""),
2262                ("Move list of servers from group1 to group2",
2263"""
2264    couchbase-cli group-manage -c 192.168.0.1:8091 \\
2265        --move-servers=10.1.1.1:8091,10.1.1.2:8091 \\
2266        --from-group=group1 \\
2267        --to-group=group2 \\
2268        -u Administrator -p password""")]
2269        elif cmd == "ssl-manage":
2270            return [("Download a cluster certificate",
2271"""
2272    couchbase-cli ssl-manage -c 192.168.0.1:8091 \\
2273        --retrieve-cert=/tmp/test.pem \\
2274        -u Administrator -p password
2275
2276    couchbase-cli ssl-manage -c 192.168.0.1:8091 \\
2277        --cluster-cert-info \\
2278        -u Administrator -p password"""),
2279                ("Regenerate AND download a cluster certificate",
2280"""
2281    couchbase-cli ssl-manage -c 192.168.0.1:8091 \\
2282        --regenerate-cert=/tmp/test.pem \\
2283        -u Administrator -p password"""),
2284                ("Download the extended cluster certificate",
2285"""
2286    couchbase-cli ssl-manage -c 192.168.0.1:8091 \\
2287        --cluster-cert-info --extended \\
2288        -u Administrator -p password"""),
2289                ("Download the current node certificate",
2290"""
2291    couchbase-cli ssl-manage -c 192.168.0.1:8091 \\
2292        --node-cert-info \\
2293        -u Administrator -p password"""),
2294                ("Upload a new cluster certificate",
2295"""
2296    couchbase-cli ssl-manage -c 192.168.0.1:8091 \\
2297        --upload-cluster-ca=/tmp/test.pem \\
2298        -u Administrator -p password"""),
2299                ("Set the new node certificate",
2300"""
2301    couchbase-cli ssl-manage -c 192.168.0.1:8091 \\
2302        --set-node-certificate \\
2303        -u Administrator -p password""")]
2304        elif cmd == "collect-logs-start":
2305            return [("Start cluster-wide log collection for whole cluster",
2306"""
2307    couchbase-cli collect-logs-start -c 192.168.0.1:8091 \\
2308        -u Administrator -p password \\
2309        --all-nodes --upload --upload-host=host.upload.com \\
2310        --customer="example inc" --ticket=12345"""),
2311                ("Start cluster-wide log collection for selected nodes",
2312"""
2313    couchbase-cli collect-logs-start -c 192.168.0.1:8091 \\
2314        -u Administrator -p password \\
2315        --nodes=10.1.2.3:8091,10.1.2.4 --upload --upload-host=host.upload.com \\
2316        --customer="example inc" --ticket=12345""")]
2317        elif cmd == "collect-logs-stop":
2318            return [("Stop cluster-wide log collection",
2319"""
2320    couchbase-cli collect-logs-stop -c 192.168.0.1:8091 \\
2321        -u Administrator -p password""")]
2322        elif cmd == "collect-logs-status":
2323            return [("Show status of cluster-wide log collection",
2324"""
2325    couchbase-cli collect-logs-status -c 192.168.0.1:8091 \\
2326        -u Administrator -p password""")]
2327        elif cmd == "setting-ldap":
2328            return [("Enable LDAP with None default",
2329"""
2330    couchbase-cli setting-ldap -c 192.168.0.1:8091 \\
2331        --ldap-enabled=1 --ldap-admins=u1,u2 --ldap-roadmins=u3,u4,u5 \\
2332        -u Administrator -p password"""),
2333                ("Enable LDAP with full admin default",
2334"""
2335    couchbase-cli setting-ldap -c 192.168.0.1:8091 \\
2336        --ldap-enabled=1 --ldap-default=admins --ldap-roadmins=u3,u4,u5 \\
2337        -u Administrator -p password"""),
2338                ("Enable LDAP with read only default",
2339"""
2340    couchbase-cli setting-ldap -c 192.168.0.1:8091 \\
2341        --ldap-enabled=1 --ldap-default=roadmins --ldap-admins=u1,u2 \\
2342        -u Administrator -p password"""),
2343                ("Disable LDAP",
2344"""
2345    couchbase-cli setting-ldap -c 192.168.0.1:8091 \\
2346        --ldap-enabled=0  -u Administrator -p password""")]
2347        elif cmd == "setting-audit":
2348            return [("Enable audit",
2349"""
2350    couchbase-cli setting-audit -c 192.168.0.1:8091 \\
2351        --audit-enabled=1 --audit-log-rotate-interval=900 \\
2352        --audit-log-path="/opt/couchbase/var/lib/couchbase/logs" \\
2353        -u Administrator -p password"""),
2354                ("Disable audit",
2355"""
2356    couchbase-cli setting-audit -c 192.168.0.1:8091 \\
2357        --audit-enabled=0 -u Administrator -p password""")]
2358        elif cmd == "setting-index":
2359            return [("Indexer setting",
2360"""
2361    couchbase-cli setting-index  -c 192.168.0.1:8091 \\
2362        --index-max-rollback-points=5 \\
2363        --index-stable-snapshot-interval=5000 \\
2364        --index-memory-snapshot-interval=200 \\
2365        --index-threads=5 \\
2366        --index-log-level=debug \\
2367        -u Administrator -p password""")]
2368
2369        elif cmd == "admin-role-manage":
2370            return [("Show the current users roles.",
2371"""
2372    couchbase-cli admin-role-manage -c 192.168.0.1:8091 --my-roles
2373            """),
2374            ("Get a list of all users and roles",
2375"""
2376    couchbase-cli admin-role-manage -c 192.168.0.1:8091 --get-roles
2377            """),
2378                    ("Make bob and mary cluster_admins, and bucket admins for the default bucket",
2379"""
2380    couchbase-cli admin-role-manage -c 192.168.0.1:8091 \\
2381        --set-users=bob,mary --set-names="Bob Smith,Mary Jones" --roles=cluster_admin,bucket_admin[default]
2382            """),
2383                    ("Make jen bucket admins for all buckets",
2384"""
2385    couchbase-cli admin-role-manage -c 192.168.0.1:8091 \\
2386        --set-users=jen --roles=bucket_admin[*]
2387            """),
2388            ("Remove all roles for bob",
2389"""
2390    couchbase-cli admin-role-manage -c 192.168.0.1:8091 --delete-users=bob
2391            """)
2392            ]
2393        elif cmd == "master-password":
2394            return [("Change the master password",
2395"""
2396    couchbase-cli master-password -c 192.168.0.1:8091 -u Administrator \\
2397        -p password --new-master-password password123
2398            """),
2399            ("Rotate the master password data key",
2400"""
2401    couchbase-cli master-password -c 192.168.0.1:8091 -u Administrator \\
2402        -p password --rotate-data-key
2403            """),
2404            ]
2405
2406        else:
2407            return None
2408
2409def _exitIfErrors(errors, prefix=""):
2410    if errors:
2411        for error in errors:
2412            print prefix + error
2413        sys.exit(1)
2414
2415def _exitOnFileWriteFailure(fname, bytes):
2416    try:
2417        fp = open(fname, 'w')
2418        fp.write(bytes)
2419        fp.close()
2420    except IOError, error:
2421        print "ERROR:", error
2422        sys.exit(1)
2423
2424def _exitOnFileReadFailure(fname):
2425    try:
2426        fp = open(fname, 'r')
2427        bytes = fp.read()
2428        fp.close()
2429        return bytes
2430    except IOError, error:
2431        print "ERROR:", error
2432        sys.exit(1)
2433