1import random
2import time
3import json, subprocess
4
5import logger
6from basetestcase import BaseTestCase
7from membase.api.rest_client import RestConnection
8from remote.remote_util import RemoteMachineShellConnection
9from testconstants import LINUX_COUCHBASE_SAMPLE_PATH, \
10    WIN_COUCHBASE_SAMPLE_PATH_C, \
11    WIN_BACKUP_C_PATH, LINUX_BACKUP_PATH, LINUX_COUCHBASE_LOGS_PATH, \
12    WIN_COUCHBASE_LOGS_PATH, WIN_TMP_PATH, WIN_TMP_PATH_RAW, \
13    WIN_BACKUP_PATH, LINUX_COUCHBASE_BIN_PATH, LINUX_ROOT_PATH, LINUX_CB_PATH,\
14    MAC_COUCHBASE_BIN_PATH, WIN_COUCHBASE_BIN_PATH, WIN_ROOT_PATH
15
16from couchbase_helper.cluster import Cluster
17from security.rbac_base import RbacBase
18from security.rbacmain import rbacmain
19from membase.helper.bucket_helper import BucketOperationHelper
20from membase.helper.cluster_helper import ClusterOperationHelper
21
22
23log = logger.Logger.get_logger()
24
25
26class CliBaseTest(BaseTestCase):
27    vbucketId = 0
28
    def setUp(self):
        """Prepare the cluster, shell connection, and CLI test parameters.

        Reads a large set of test-input parameters (mostly for cbimport /
        cbexport style tests), resolves the Couchbase bin path on the remote
        server via /diag/eval, adjusts paths per OS (linux/windows/mac and
        non-root installs), optionally rebalances extra nodes in, and creates
        one RBAC 'admin' user per bucket.
        """
        self.times_teardown_called = 1
        super(CliBaseTest, self).setUp()
        self.r = random.Random()
        self.vbucket_count = 1024
        self.cluster = Cluster()
        # XDCR-style tests need a second cluster defined in the ini file;
        # log (but do not fail) when it is absent.
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
            elif len(self.clusters_dic) == 1:
                self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
        else:
            self.log.error("**** Cluster config is setup in ini file. ****")
        self.shell = RemoteMachineShellConnection(self.master)
        if not self.skip_init_check_cbserver:
            self.rest = RestConnection(self.master)
            self.cb_version = self.rest.get_nodes_version()
            """ cli output message """
            self.cli_bucket_create_msg = "SUCCESS: Bucket created"
            self.cli_rebalance_msg = "SUCCESS: Rebalance complete"
            # 4.6.x CLI printed different success messages than later versions
            if self.cb_version[:3] == "4.6":
                self.cli_bucket_create_msg = "SUCCESS: bucket-create"
                self.cli_rebalance_msg = "SUCCESS: rebalanced cluster"
        # --- import/export (cbimport/cbexport) test parameters ---
        self.import_back = self.input.param("import_back", False)
        if self.import_back:
            if len(self.servers) < 3:
                self.fail("This test needs minimum of 3 vms to run ")
        self.test_type = self.input.param("test_type", "import")
        self.import_file = self.input.param("import_file", None)
        self.imex_type = self.input.param("imex_type", "json")
        self.format_type = self.input.param("format_type", "lines")
        self.import_method = self.input.param("import_method", "file://")
        self.force_failover = self.input.param("force_failover", False)
        self.json_invalid_errors = self.input.param("json-invalid-errors", None)
        self.field_separator = self.input.param("field-separator", "comma")
        self.key_gen = self.input.param("key-gen", True)
        self.skip_docs = self.input.param("skip-docs", None)
        self.limit_docs = self.input.param("limit-docs", None)
        self.limit_rows = self.input.param("limit-rows", None)
        self.skip_rows = self.input.param("skip-rows", None)
        self.omit_empty = self.input.param("omit-empty", None)
        self.infer_types = self.input.param("infer-types", None)
        self.fx_generator = self.input.param("fx-generator", None)
        self.fx_gen_start = self.input.param("fx-gen-start", None)
        self.secure_conn = self.input.param("secure-conn", False)
        self.no_cacert = self.input.param("no-cacert", False)
        self.no_ssl_verify = self.input.param("no-ssl-verify", False)
        self.verify_data = self.input.param("verify-data", False)
        self.field_substitutions = self.input.param("field-substitutions", None)
        self.check_preload_keys = self.input.param("check-preload-keys", True)
        self.debug_logs = self.input.param("debug-logs", False)
        self.should_fail = self.input.param("should-fail", False)
        info = self.shell.extract_remote_info()
        self.os_version = info.distribution_version.lower()
        # NOTE: shadows the `type` builtin; kept for backward compatibility
        type = info.type.lower()
        self.excluded_commands = self.input.param("excluded_commands", None)
        self.os = 'linux'
        self.full_v = None
        self.short_v = None
        self.build_number = None
        # Ask the server itself where its bin directory is (works for both
        # default and custom install locations) via the /diag/eval endpoint.
        cmd =  'curl -g {0}:8091/diag/eval -u {1}:{2} '.format(self.master.ip,
                                                              self.master.rest_username,
                                                              self.master.rest_password)
        cmd += '-d "path_config:component_path(bin)."'
        bin_path  = subprocess.check_output(cmd, shell=True)
        if "bin" not in bin_path:
            self.fail("Check if cb server install on %s" % self.master.ip)
        else:
            self.cli_command_path = bin_path.replace('"','') + "/"
        # --- default (linux, root) paths; overridden below per OS/user ---
        self.root_path = LINUX_ROOT_PATH
        self.tmp_path = "/tmp/"
        self.tmp_path_raw = "/tmp/"
        self.cmd_backup_path = LINUX_BACKUP_PATH
        self.backup_path = LINUX_BACKUP_PATH
        self.cmd_ext = ""
        self.src_file = ""
        self.des_file = ""
        self.sample_files_path = LINUX_COUCHBASE_SAMPLE_PATH
        self.log_path = LINUX_COUCHBASE_LOGS_PATH
        self.base_cb_path = LINUX_CB_PATH
        """ non root path """
        if self.nonroot:
            # non-root installs live under the ssh user's home directory
            self.sample_files_path = "/home/%s%s" % (self.master.ssh_username,
                                                     LINUX_COUCHBASE_SAMPLE_PATH)
            self.log_path = "/home/%s%s" % (self.master.ssh_username,
                                            LINUX_COUCHBASE_LOGS_PATH)
            self.base_cb_path = "/home/%s%s" % (self.master.ssh_username,
                                                LINUX_CB_PATH)
            self.root_path = "/home/%s/" % self.master.ssh_username
        if type == 'windows':
            self.os = 'windows'
            self.cmd_ext = ".exe"
            self.root_path = WIN_ROOT_PATH
            self.tmp_path = WIN_TMP_PATH
            self.tmp_path_raw = WIN_TMP_PATH_RAW
            self.cmd_backup_path = WIN_BACKUP_C_PATH
            self.backup_path = WIN_BACKUP_PATH
            self.sample_files_path = WIN_COUCHBASE_SAMPLE_PATH_C
            self.log_path = WIN_COUCHBASE_LOGS_PATH
            # commands run through cygwin, so translate the Windows path
            win_format = "C:/Program Files"
            cygwin_format = "/cygdrive/c/Program\ Files"
            if win_format in self.cli_command_path:
                self.cli_command_path = self.cli_command_path.replace(win_format,
                                                                      cygwin_format)
        if info.distribution_type.lower() == 'mac':
            self.os = 'mac'
        self.full_v, self.short_v, self.build_number = self.shell.get_cbversion(type)
        self.couchbase_usrname = "%s" % (self.input.membase_settings.rest_username)
        self.couchbase_password = "%s" % (self.input.membase_settings.rest_password)
        self.cb_login_info = "%s:%s" % (self.couchbase_usrname,
                                        self.couchbase_password)
        # path_type "local" makes commands run with `cd <bin>; ./tool`
        # instead of an absolute path
        self.path_type = self.input.param("path_type", None)
        if self.path_type is None:
            self.log.info("Test command with absolute path ")
        elif self.path_type == "local":
            self.log.info("Test command at %s dir " % self.cli_command_path)
            self.cli_command_path = "cd %s; ./" % self.cli_command_path
        self.cli_command = self.input.param("cli_command", None)
        self.command_options = self.input.param("command_options", None)
        if self.command_options is not None:
            self.command_options = self.command_options.split(";")
        # CouchbaseCliTest manages its own cluster; everyone else gets the
        # remaining servers rebalanced in when starting from a 1-node init
        if str(self.__class__).find('couchbase_clitest.CouchbaseCliTest') == -1:
            if len(self.servers) > 1 and int(self.nodes_init) == 1:
                servers_in = [self.servers[i + 1] for i in range(self.num_servers - 1)]
                self.cluster.rebalance(self.servers[:1], servers_in, [])
        # one RBAC user per bucket so CLI commands can authenticate per-bucket
        for bucket in self.buckets:
            testuser = [{'id': bucket.name, 'name': bucket.name, 'password': 'password'}]
            rolelist = [{'id': bucket.name, 'name': bucket.name, 'roles': 'admin'}]
            self.add_built_in_server_user(testuser=testuser, rolelist=rolelist)
160
161
    def tearDown(self):
        """Clean up after a CLI test.

        Disconnects the shell (after the first teardown call), removes any
        server groups other than the default "Group 1", and — when a second
        (XDCR destination) cluster is configured — removes its remote-cluster
        references, replications, and buckets before delegating to the base
        class teardown.
        """
        # shell is reused across cases; only disconnect from the second
        # teardown onward (or once at least one case has completed)
        if not self.input.param("skip_cleanup", True):
            if self.times_teardown_called > 1 :
                self.shell.disconnect()
        if self.input.param("skip_cleanup", True):
            if self.case_number > 1 or self.times_teardown_called > 1:
                self.shell.disconnect()
        self.times_teardown_called += 1
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        # remove every server group except the default one
        zones = rest.get_zone_names()
        for zone in zones:
            if zone != "Group 1":
                rest.delete_zone(zone)
        self.clusters_dic = self.input.clusters
        if self.clusters_dic:
            if len(self.clusters_dic) > 1:
                self.dest_nodes = self.clusters_dic[1]
                self.dest_master = self.dest_nodes[0]
                if self.dest_nodes and len(self.dest_nodes) > 1:
                    self.log.info("======== clean up destination cluster =======")
                    rest = RestConnection(self.dest_nodes[0])
                    rest.remove_all_remote_clusters()
                    rest.remove_all_replications()
                    BucketOperationHelper.delete_all_buckets_or_assert(self.dest_nodes, self)
                    ClusterOperationHelper.cleanup_cluster(self.dest_nodes)
            elif len(self.clusters_dic) == 1:
                self.log.error("=== need 2 cluster to setup xdcr in ini file ===")
        else:
            self.log.info("**** If run xdcr test, need cluster config is setup in ini file. ****")
        super(CliBaseTest, self).tearDown()
193
194
195    """ in sherlock, there is an extra value called runCmd in the 1st element """
196    def del_runCmd_value(self, output):
197        if "runCmd" in output[0]:
198            output = output[1:]
199        return output
200
201    def verifyCommandOutput(self, output, expect_error, message):
202        """Inspects each line of the output and checks to see if the expected error was found
203
204        Options:
205        output - A list of output lines
206        expect_error - Whether or not the command should have succeeded or failed
207        message - The success or error message
208
209        Returns a boolean indicating whether or not the error/success message was found in the output
210        """
211        if expect_error:
212            for line in output:
213                if line == "ERROR: " + message:
214                    return True
215            log.info("Did not receive expected error message `ERROR: %s`", message)
216            return False
217        else:
218            for line in output:
219                if line == "SUCCESS: " + message:
220                    return True
221            log.info("Did not receive expected success message `SUCCESS: %s`", message)
222            return False
223
224    def verifyWarningOutput(self, output, message):
225        for line in output:
226            if line == "WARNING: " + message:
227                return True
228        log.info("Did not receive expected error message `WARNING: %s`", message)
229        return False
230
231    def verifyServices(self, server, expected_services):
232        """Verifies that the services on a given node match the expected service
233
234            Options:
235            server - A TestInputServer object of the server to connect to
236            expected_services - A comma separated list of services
237
238            Returns a boolean corresponding to whether or not the expected services
239            are available on the server.
240        """
241        rest = RestConnection(server)
242        hostname = "%s:%s" % (server.ip, server.port)
243        expected_services = expected_services.replace("data", "kv")
244        expected_services = expected_services.replace("query", "n1ql")
245        expected_services = expected_services.split(",")
246
247        nodes_services = rest.get_nodes_services()
248        for node, services in nodes_services.iteritems():
249            if node.encode('ascii') == hostname:
250                if len(services) != len(expected_services):
251                    log.info("Services on %s do not match expected services (%s vs. %s)",
252                             hostname, services, expected_services)
253                    return False
254                for service in services:
255                    if service.encode("ascii") not in expected_services:
256                        log.info("Services on %s do not match expected services (%s vs. %s)",
257                                 hostname, services, expected_services)
258                        return False
259                return True
260
261        log.info("Services on %s not found, the server may not exist", hostname)
262        return False
263
264    def verifyRamQuotas(self, server, data, index, fts):
265        """Verifies that the RAM quotas for each service are set properly
266
267        Options:
268        server - A TestInputServer object of the server to connect to
269        data - An int containing the data service RAM quota, None will skip the check
270        index - An int containing the index service RAM quota, None will skip the check
271        fts - An int containing the FTS service RAM quota, None will skip the check
272
273        Returns a boolean corresponding to whether or not the RAM quotas were set properly
274        """
275        rest = RestConnection(server)
276        settings = rest.get_pools_default()
277        if data:
278            if "memoryQuota" not in settings:
279                log.info("Unable to get data service ram quota")
280                return False
281            if int(settings["memoryQuota"]) != int(data):
282                log.info("Data service memory quota does not match (%d vs %d)",
283                         int(settings["memoryQuota"]), int(data))
284                return False
285
286        if index:
287            if "indexMemoryQuota" not in settings:
288                log.info("Unable to get index service ram quota")
289                return False
290            if int(settings["indexMemoryQuota"]) != int(index):
291                log.info(
292                    "Index service memory quota does not match (%d vs %d)",
293                    int(settings["indexMemoryQuota"]), int(index))
294                return False
295
296        if fts:
297            if "ftsMemoryQuota" not in settings:
298                log.info("Unable to get fts service ram quota")
299                return False
300            if int(settings["ftsMemoryQuota"]) != int(fts):
301                log.info("FTS service memory quota does not match (%d vs %d)",
302                         int(settings["ftsMemoryQuota"]), int(fts))
303                return False
304
305        return True
306
307    def verifyBucketSettings(self, server, bucket_name, bucket_type, memory_quota,
308                             eviction_policy, replica_count, enable_index_replica,
309                             priority, enable_flush):
310        rest = RestConnection(server)
311        result = rest.get_bucket_json(bucket_name)
312
313        if bucket_type == "couchbase":
314            bucket_type = "membase"
315
316        if bucket_type is not None and bucket_type != result["bucketType"]:
317            log.info("Memory quota does not match (%s vs %s)", bucket_type,
318                     result["bucketType"])
319            return False
320
321        quota = result["quota"]["rawRAM"] / 1024 / 1024
322        if memory_quota is not None and memory_quota != quota:
323            log.info("Bucket quota does not match (%s vs %s)", memory_quota,
324                     quota)
325            return False
326
327        if eviction_policy is not None and eviction_policy != result[
328            "evictionPolicy"]:
329            log.info("Eviction policy does not match (%s vs %s)",
330                     eviction_policy, result["evictionPolicy"])
331            return False
332
333        if replica_count is not None and replica_count != result[
334            "replicaNumber"]:
335            log.info("Replica count does not match (%s vs %s)", replica_count,
336                     result["replicaNumber"])
337            return False
338
339        if enable_index_replica == 1:
340            enable_index_replica = True
341        elif enable_index_replica == 0:
342            enable_index_replica = False
343
344        if enable_index_replica is not None and enable_index_replica != result[
345            "replicaIndex"]:
346            log.info("Replica index enabled does not match (%s vs %s)",
347                     enable_index_replica, result["replicaIndex"])
348            return False
349
350        if priority == "high":
351            priority = 8
352        elif priority == "low":
353            priority = 3
354
355        if priority is not None and priority != result["threadsNumber"]:
356            log.info("Bucket priority does not match (%s vs %s)", priority,
357                     result["threadsNumber"])
358            return False
359
360        if enable_flush is not None:
361            if enable_flush == 1 and "flush" not in result["controllers"]:
362                log.info("Bucket flush is not enabled, but it should be")
363                return False
364            elif enable_flush == 0 and "flush" in result["controllers"]:
365                log.info("Bucket flush is not enabled, but it should be")
366                return False
367
368        return True
369
370    def verifyContainsBucket(self, server, name):
371        rest = RestConnection(server)
372        buckets = rest.get_buckets()
373
374        for bucket in buckets:
375            if bucket.name == name:
376                return True
377        return False
378
379    def verifyClusterName(self, server, name):
380        rest = RestConnection(server)
381        settings = rest.get_pools_default("waitChange=0")
382
383        if name is None:
384            name = ""
385
386        if "clusterName" not in settings:
387            log.info("Unable to get cluster name from server")
388            return False
389        if settings["clusterName"] != name:
390            log.info("Cluster name does not match (%s vs %s)",
391                     settings["clusterName"], name)
392            return False
393
394        return True
395
396    def isClusterInitialized(self, server):
397        """Checks whether or not the server is initialized
398
399        Options:
400        server - A TestInputServer object of the server to connect to
401
402        Checks to see whether or not the default pool was created in order to
403        determine whether or no the server was initialized. Returns a boolean value
404        to indicate initialization.
405        """
406        rest = RestConnection(server)
407        settings = rest.get_pools_info()
408        if "pools" in settings and len(settings["pools"]) > 0:
409            return True
410
411        return False
412
413    def verifyNotificationsEnabled(self, server):
414        rest = RestConnection(server)
415        enabled = rest.get_notifications()
416        if enabled:
417            return True
418        return False
419
420    def verifyIndexSettings(self, server, max_rollbacks, stable_snap_interval,
421                            mem_snap_interval,
422                            storage_mode, threads, log_level):
423        rest = RestConnection(server)
424        settings = rest.get_global_index_settings()
425
426        if storage_mode == "default":
427            storage_mode = "plasma"
428        elif storage_mode == "memopt":
429            storage_mode = "memory_optimized"
430
431        if max_rollbacks and str(settings["maxRollbackPoints"]) != str(
432                max_rollbacks):
433            log.info("Max rollbacks does not match (%s vs. %s)",
434                     str(settings["maxRollbackPoints"]), str(max_rollbacks))
435            return False
436        if stable_snap_interval and str(
437                settings["stableSnapshotInterval"]) != str(
438                stable_snap_interval):
439            log.info("Stable snapshot interval does not match (%s vs. %s)",
440                     str(settings["stableSnapshotInterval"]),
441                     str(stable_snap_interval))
442            return False
443        if mem_snap_interval and str(
444                settings["memorySnapshotInterval"]) != str(mem_snap_interval):
445            log.info("Memory snapshot interval does not match (%s vs. %s)",
446                     str(settings["memorySnapshotInterval"]),
447                     str(mem_snap_interval))
448            return False
449        if storage_mode and str(settings["storageMode"]) != str(storage_mode):
450            log.info("Storage mode does not match (%s vs. %s)",
451                     str(settings["storageMode"]), str(storage_mode))
452            return False
453        if threads and str(settings["indexerThreads"]) != str(threads):
454            log.info("Threads does not match (%s vs. %s)",
455                     str(settings["indexerThreads"]), str(threads))
456            return False
457        if log_level and str(settings["logLevel"]) != str(log_level):
458            log.info("Log level does not match (%s vs. %s)",
459                     str(settings["logLevel"]), str(log_level))
460            return False
461
462        return True
463
464    def verifyAutofailoverSettings(self, server, enabled, timeout):
465        rest = RestConnection(server)
466        settings = rest.get_autofailover_settings()
467
468        if enabled and not ((str(enabled) == "1" and settings.enabled) or (
469                str(enabled) == "0" and not settings.enabled)):
470            log.info("Enabled does not match (%s vs. %s)", str(enabled),
471                     str(settings.enabled))
472            return False
473        if timeout and str(settings.timeout) != str(timeout):
474            log.info("Timeout does not match (%s vs. %s)", str(timeout),
475                     str(settings.timeout))
476            return False
477
478        return True
479
480    def verifyAutoreprovisionSettings(self, server, enabled, max_nodes):
481        rest = RestConnection(server)
482        settings = rest.get_autoreprovision_settings()
483
484        if enabled and not ((str(enabled) == "1" and settings.enabled) or (
485                str(enabled) == "0" and not settings.enabled)):
486            log.info("Enabled does not match (%s vs. %s)", str(max_nodes),
487                     str(settings.enabled))
488            return False
489        if max_nodes and str(settings.max_nodes) != str(max_nodes):
490            log.info("max_nodes does not match (%s vs. %s)", str(max_nodes),
491                     str(settings.max_nodes))
492            return False
493
494        return True
495
496    def verifyAuditSettings(self, server, enabled, log_path, rotate_interval):
497        rest = RestConnection(server)
498        settings = rest.getAuditSettings()
499
500        if enabled and not (
501            (str(enabled) == "1" and settings["auditdEnabled"]) or (
502                str(enabled) == "0" and not settings["auditdEnabled"])):
503            log.info("Enabled does not match (%s vs. %s)", str(enabled),
504                     str(settings["auditdEnabled"]))
505            return False
506        if log_path and str(str(settings["logPath"])) != str(log_path):
507            log.info("Log path does not match (%s vs. %s)", str(log_path),
508                     str(settings["logPath"]))
509            return False
510
511        if rotate_interval and str(str(settings["rotateInterval"])) != str(
512                rotate_interval):
513            log.info("Rotate interval does not match (%s vs. %s)",
514                     str(rotate_interval), str(settings["rotateInterval"]))
515            return False
516
517        return True
518
519    def verifyPendingServer(self, server, server_to_add, group_name, services):
520        rest = RestConnection(server)
521        settings = rest.get_all_zones_info()
522        if not settings or "groups" not in settings:
523            log.info("Group settings payload appears to be invalid")
524            return False
525
526        expected_services = services.replace("data", "kv")
527        expected_services = expected_services.replace("query", "n1ql")
528        expected_services = expected_services.split(",")
529
530        for group in settings["groups"]:
531            for node in group["nodes"]:
532                if node["hostname"] == server_to_add:
533                    if node["clusterMembership"] != "inactiveAdded":
534                        log.info("Node `%s` not in pending status",
535                                 server_to_add)
536                        return False
537
538                    if group["name"] != group_name:
539                        log.info("Node `%s` not in correct group (%s vs %s)",
540                                 node["hostname"], group_name,
541                                 group["name"])
542                        return False
543
544                    if len(node["services"]) != len(expected_services):
545                        log.info("Services do not match on %s (%s vs %s) ",
546                                 node["hostname"], services,
547                                 ",".join(node["services"]))
548                        return False
549
550                    for service in node["services"]:
551                        if service not in expected_services:
552                            log.info("Services do not match on %s (%s vs %s) ",
553                                     node["hostname"], services,
554                                     ",".join(node["services"]))
555                            return False
556                    return True
557
558        log.info("Node `%s` not found in nodes list", server_to_add)
559        return False
560
561    def verifyPendingServerDoesNotExist(self, server, server_to_add):
562        rest = RestConnection(server)
563        settings = rest.get_all_zones_info()
564        if not settings or "groups" not in settings:
565            log.info("Group settings payload appears to be invalid")
566            return False
567
568        for group in settings["groups"]:
569            for node in group["nodes"]:
570                if node["hostname"] == server_to_add:
571                    return False
572
573        log.info("Node `%s` not found in nodes list", server_to_add)
574        return True
575
576    def verifyActiveServers(self, server, expected_num_servers):
577        return self._verifyServersByStatus(server, expected_num_servers,
578                                           "active")
579
580    def verifyFailedServers(self, server, expected_num_servers):
581        return self._verifyServersByStatus(server, expected_num_servers,
582                                           "inactiveFailed")
583
584    def _verifyServersByStatus(self, server, expected_num_servers, status):
585        rest = RestConnection(server)
586        settings = rest.get_pools_default()
587
588        count = 0
589        for node in settings["nodes"]:
590            if node["clusterMembership"] == status:
591                count += 1
592
593        return count == expected_num_servers
594
595    def verifyRecoveryType(self, server, recovery_servers, recovery_type):
596        rest = RestConnection(server)
597        settings = rest.get_all_zones_info()
598        if not settings or "groups" not in settings:
599            log.info("Group settings payload appears to be invalid")
600            return False
601
602        if not recovery_servers:
603            return True
604
605        num_found = 0
606        recovery_servers = recovery_servers.split(",")
607        for group in settings["groups"]:
608            for node in group["nodes"]:
609                for rs in recovery_servers:
610                    if node["hostname"] == rs:
611                        if node["recoveryType"] != recovery_type:
612                            log.info(
613                                "Node %s doesn't contain recovery type %s ",
614                                rs, recovery_type)
615                            return False
616                        else:
617                            num_found = num_found + 1
618
619        if num_found == len(recovery_servers):
620            return True
621
622        log.info("Node `%s` not found in nodes list",
623                 ",".join(recovery_servers))
624        return False
625
626    def verifyUserRoles(self, server, username, roles):
627        rest = RestConnection(server)
628        status, content, header = rbacmain(server)._retrieve_user_roles()
629        content = json.loads(content)
630        temp = rbacmain()._parse_get_user_response(content, username, username, roles)
631        return temp
632
633    def verifyLdapSettings(self, server, admins, ro_admins, default, enabled):
634        rest = RestConnection(server)
635        settings = rest.ldapRestOperationGetResponse()
636
637        if admins is None:
638            admins = []
639        else:
640            admins = admins.split(",")
641
642        if ro_admins is None:
643            ro_admins = []
644        else:
645            ro_admins = ro_admins.split(",")
646
647        if str(enabled) == "0":
648            admins = []
649            ro_admins = []
650
651        if default == "admins" and str(enabled) == "1":
652            if settings["admins"] != "asterisk":
653                log.info("Admins don't match (%s vs asterisk)",
654                         settings["admins"])
655                return False
656        elif not self._list_compare(settings["admins"], admins):
657            log.info("Admins don't match (%s vs %s)", settings["admins"],
658                     admins)
659            return False
660
661        if default == "roadmins" and str(enabled) == "1":
662            if settings["roAdmins"] != "asterisk":
663                log.info("Read only admins don't match (%s vs asterisk)",
664                         settings["roAdmins"])
665                return False
666        elif not self._list_compare(settings["roAdmins"], ro_admins):
667            log.info("Read only admins don't match (%s vs %s)",
668                     settings["roAdmins"], ro_admins)
669            return False
670
671        return True
672
673    def verifyAlertSettings(self, server, enabled, email_recipients,
674                            email_sender, email_username, email_password,
675                            email_host,
676                            email_port, encrypted, alert_af_node,
677                            alert_af_max_reached, alert_af_node_down,
678                            alert_af_small,
679                            alert_af_disable, alert_ip_changed,
680                            alert_disk_space, alert_meta_overhead,
681                            alert_meta_oom,
682                            alert_write_failed, alert_audit_dropped):
683        rest = RestConnection(server)
684        settings = rest.get_alerts_settings()
685        print settings
686
687        if not enabled:
688            if not settings["enabled"]:
689                return True
690            else:
691                log.info("Alerts should be disabled")
692                return False
693
694        if encrypted is None or encrypted == "0":
695            encrypted = False
696        else:
697            encrypted = True
698
699        if email_recipients is not None and not self._list_compare(
700                email_recipients.split(","), settings["recipients"]):
701            log.info("Email recipients don't match (%s vs %s)",
702                     email_recipients.split(","), settings["recipients"])
703            return False
704
705        if email_sender is not None and email_sender != settings["sender"]:
706            log.info("Email sender does not match (%s vs %s)", email_sender,
707                     settings["sender"])
708            return False
709
710        if email_username is not None and email_username != \
711                settings["emailServer"]["user"]:
712            log.info("Email username does not match (%s vs %s)",
713                     email_username, settings["emailServer"]["user"])
714            return False
715
716        if email_host is not None and email_host != settings["emailServer"][
717            "host"]:
718            log.info("Email host does not match (%s vs %s)", email_host,
719                     settings["emailServer"]["host"])
720            return False
721
722        if email_port is not None and email_port != settings["emailServer"][
723            "port"]:
724            log.info("Email port does not match (%s vs %s)", email_port,
725                     settings["emailServer"]["port"])
726            return False
727
728        if encrypted is not None and encrypted != settings["emailServer"][
729            "encrypt"]:
730            log.info("Email encryption does not match (%s vs %s)", encrypted,
731                     settings["emailServer"]["encrypt"])
732            return False
733
734        alerts = list()
735        if alert_af_node:
736            alerts.append('auto_failover_node')
737        if alert_af_max_reached:
738            alerts.append('auto_failover_maximum_reached')
739        if alert_af_node_down:
740            alerts.append('auto_failover_other_nodes_down')
741        if alert_af_small:
742            alerts.append('auto_failover_cluster_too_small')
743        if alert_af_disable:
744            alerts.append('auto_failover_disabled')
745        if alert_ip_changed:
746            alerts.append('ip')
747        if alert_disk_space:
748            alerts.append('disk')
749        if alert_meta_overhead:
750            alerts.append('overhead')
751        if alert_meta_oom:
752            alerts.append('ep_oom_errors')
753        if alert_write_failed:
754            alerts.append('ep_item_commit_failed')
755        if alert_audit_dropped:
756            alerts.append('audit_dropped_events')
757
758        if not self._list_compare(alerts, settings["alerts"]):
759            log.info("Alerts don't match (%s vs %s)", alerts,
760                     settings["alerts"])
761            return False
762
763        return True
764
765    def verify_node_settings(self, server, data_path, index_path, hostname):
766        rest = RestConnection(server)
767        node_settings = rest.get_nodes_self()
768
769        if data_path != node_settings.storage[0].path:
770            log.info("Data path does not match (%s vs %s)", data_path,
771                     node_settings.storage[0].path)
772            return False
773        if index_path != node_settings.storage[0].index_path:
774            log.info("Index path does not match (%s vs %s)", index_path,
775                     node_settings.storage[0].index_path)
776            return False
777        if hostname is not None:
778            if hostname != node_settings.hostname:
779                log.info("Hostname does not match (%s vs %s)", hostname,
780                         node_settings.hostname)
781                return True
782        return True
783
784    def verifyCompactionSettings(self, server, db_frag_perc, db_frag_size,
785                                 view_frag_perc, view_frag_size, from_period,
786                                 to_period, abort_outside, parallel_compact,
787                                 purgeInt):
788        rest = RestConnection(server)
789        settings = rest.get_auto_compaction_settings()
790        ac = settings["autoCompactionSettings"]
791
792        if db_frag_perc is not None and str(db_frag_perc) != str(
793                ac["databaseFragmentationThreshold"]["percentage"]):
794            log.info("DB frag perc does not match (%s vs %s)",
795                     str(db_frag_perc),
796                     str(ac["databaseFragmentationThreshold"]["percentage"]))
797            return False
798
799        if db_frag_size is not None and str(db_frag_size * 1024 ** 2) != str(
800                ac["databaseFragmentationThreshold"]["size"]):
801            log.info("DB frag size does not match (%s vs %s)",
802                     str(db_frag_size * 1024 ** 2),
803                     str(ac["databaseFragmentationThreshold"]["size"]))
804            return False
805
806        if view_frag_perc is not None and str(view_frag_perc) != str(
807                ac["viewFragmentationThreshold"]["percentage"]):
808            log.info("View frag perc does not match (%s vs %s)",
809                     str(view_frag_perc),
810                     str(ac["viewFragmentationThreshold"]["percentage"]))
811            return False
812
813        if view_frag_size is not None and str(
814                        view_frag_size * 1024 ** 2) != str(
815                ac["viewFragmentationThreshold"]["size"]):
816            log.info("View frag size does not match (%s vs %s)",
817                     str(view_frag_size * 1024 ** 2),
818                     str(ac["viewFragmentationThreshold"]["size"]))
819            return False
820
821        print from_period, to_period
822        if from_period is not None:
823            fromHour, fromMin = from_period.split(":", 1)
824            if int(fromHour) != int(ac["allowedTimePeriod"]["fromHour"]):
825                log.info("From hour does not match (%s vs %s)", str(fromHour),
826                         str(ac["allowedTimePeriod"]["fromHour"]))
827                return False
828            if int(fromMin) != int(ac["allowedTimePeriod"]["fromMinute"]):
829                log.info("From minute does not match (%s vs %s)", str(fromMin),
830                         str(ac["allowedTimePeriod"]["fromMinute"]))
831                return False
832
833        if to_period is not None:
834            toHour, toMin = to_period.split(":", 1)
835            if int(toHour) != int(ac["allowedTimePeriod"]["toHour"]):
836                log.info("To hour does not match (%s vs %s)", str(toHour),
837                         str(ac["allowedTimePeriod"]["toHour"]))
838                return False
839            if int(toMin) != int(ac["allowedTimePeriod"]["toMinute"]):
840                log.info("To minute does not match (%s vs %s)", str(toMin),
841                         str(ac["allowedTimePeriod"]["toMinute"]))
842                return False
843
844        if str(abort_outside) == "1":
845            abort_outside = True
846        elif str(abort_outside) == "0":
847            abort_outside = False
848
849        if abort_outside is not None and abort_outside != \
850                ac["allowedTimePeriod"]["abortOutside"]:
851            log.info("Abort outside does not match (%s vs %s)", abort_outside,
852                     ac["allowedTimePeriod"]["abortOutside"])
853            return False
854
855        if str(parallel_compact) == "1":
856            parallel_compact = True
857        elif str(parallel_compact) == "0":
858            parallel_compact = False
859
860        if parallel_compact is not None and parallel_compact != ac[
861            "parallelDBAndViewCompaction"]:
862            log.info("Parallel compact does not match (%s vs %s)",
863                     str(parallel_compact),
864                     str(ac["parallelDBAndViewCompaction"]))
865            return False
866
867        if purgeInt is not None and str(purgeInt) != str(
868                settings["purgeInterval"]):
869            log.info("Purge interval does not match (%s vs %s)", str(purgeInt),
870                     str(settings["purgeInterval"]))
871            return False
872
873        return True
874
875    def verify_gsi_compact_settings(self, compact_mode, compact_percent,
876                                    compact_interval,
877                                    from_period, to_period, enable_abort):
878        rest = RestConnection(self.master)
879        settings = rest.get_auto_compaction_settings()
880        ac = settings["autoCompactionSettings"]["indexFragmentationThreshold"]
881        cc = settings["autoCompactionSettings"]["indexCircularCompaction"]
882        if compact_mode is not None:
883            if compact_mode == "append":
884                self.log.info("append compactino settings %s " % ac)
885                if compact_percent is not None and \
886                                compact_percent != ac["percentage"]:
887                    raise Exception(
888                        "setting percent does not match.  Set: %s vs %s :Actual"
889                        % (compact_percent, ac["percentage"]))
890            if compact_mode == "circular":
891                self.log.info("circular compaction settings %s " % cc)
892                if enable_abort and not cc["interval"]["abortOutside"]:
893                    raise Exception("setting enable abort failed")
894                if compact_interval is not None:
895                    if compact_interval != cc["daysOfWeek"]:
896                        raise Exception(
897                            "Failed to set compaction on %s " % compact_interval)
898                    elif from_period is None and int(
899                            cc["interval"]["fromHour"]) != 0 and \
900                                    int(cc["interval"]["fromMinute"]) != 0:
901                        raise Exception(
902                            "fromHour and fromMinute should be zero")
903                if compact_interval is None:
904                    if (from_period != str(cc["interval"][
905                                                    "fromHour"]) + ":" + str(
906                                cc["interval"]["fromMinute"])) \
907                    and (to_period != str(cc["interval"]["toHour"]) + ":" + str(
908                                cc["interval"]["toMinute"])):
909                        raise Exception(
910                            "fromHour and fromMinute do not set correctly")
911        return True
912
913    def verifyGroupExists(self, server, name):
914        rest = RestConnection(server)
915        groups = rest.get_zone_names()
916        print groups
917
918        for gname, _ in groups.iteritems():
919            if name == gname:
920                return True
921
922        return False
923
924    def _list_compare(self, list1, list2):
925        if len(list1) != len(list2):
926            return False
927        for elem1 in list1:
928            found = False
929            for elem2 in list2:
930                if elem1 == elem2:
931                    found = True
932                    break
933            if not found:
934                return False
935        return True
936
937    def waitForItemCount(self, server, bucket_name, count, timeout=30):
938        rest = RestConnection(server)
939        for sec in range(timeout):
940            items = int(
941                rest.get_bucket_json(bucket_name)["basicStats"]["itemCount"])
942            if items != count:
943                time.sleep(1)
944            else:
945                return True
946        log.info("Waiting for item count to be %d timed out", count)
947        return False
948