from couchbase_helper.cluster import Cluster
from ent_backup_restore.enterprise_backup_restore_base import EnterpriseBackupMergeBase
from remote.remote_util import RemoteMachineShellConnection
from membase.api.rest_client import RestConnection


class EnterpriseBackupMergeTest(EnterpriseBackupMergeBase):
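    """Tests that run cbbackupmgr backup/merge cycles and then validate a
    restore of the resulting backup, using the helpers provided by
    EnterpriseBackupMergeBase."""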
    def setUp(self):
        super(EnterpriseBackupMergeTest, self).setUp()
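        # Kill any cbbackupmgr processes left over from a previous run on
        # the backup and restore hosts.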
        for server in [self.backupset.backup_host,
                       self.backupset.restore_cluster_host]:
            conn = RemoteMachineShellConnection(server)
            conn.extract_remote_info()
            conn.terminate_processes(conn.info, ["cbbackupmgr"])
            conn.disconnect()

    def tearDown(self):
        super(EnterpriseBackupMergeTest, self).tearDown()

    def test_multiple_backups_merges(self):
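        """Load all buckets, run the configured backup/merge action
        sequence, then restore the latest backup onto an (optionally
        reset) cluster and validate the restored data."""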
        self.log.info("*** start loading items to all buckets")
        self.expected_error = self.input.param("expected_error", None)
        if int(self.active_resident_threshold) > 0:
            self.log.info("Disable auto-compaction to speed up reaching DGM")
            RestConnection(self.master).disable_auto_compaction()
        if self.expires:
            for bucket in self.buckets:
                cb = self._get_python_sdk_client(self.master.ip, bucket,
                                                 self.backupset.cluster_host)
                for i in range(1, self.num_items + 1):
                    cb.upsert("doc" + str(i), {"key": "value"})
        else:
            self._load_all_buckets(self.master, self.initial_load_gen,
                                   "create", self.expires)
        self.log.info("*** done loading items to all buckets")
        self.backup_create_validate()
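        # Run the parameterized backup/merge action sequence the requested
        # number of times.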
        for i in range(1, self.number_of_repeats + 1):
            self.do_backup_merge_actions()
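        # Restore a single backup: the window's start and end both point
        # at the latest backup taken.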
        start = self.number_of_backups_taken
        end = self.number_of_backups_taken
        if self.reset_restore_cluster:
            self.log.info("*** start resetting cluster")
            self.backup_reset_clusters(self.cluster_to_restore)
            if self.same_cluster:
                self._initialize_nodes(Cluster(),
                                       self.servers[:self.nodes_init])
            else:
                rest = RestConnection(self.input.clusters[0][0])
                rest.force_eject_node()
                master_services = self.get_services(
                    [self.backupset.cluster_host],
                    self.services_init, start_node=0)
                info = rest.get_nodes_self()
                if info.memoryQuota and int(info.memoryQuota) > 0:
                    self.quota = info.memoryQuota
                rest.init_node()
            self.log.info("Done resetting cluster")
        self.sleep(10)
        # Add the built-in user cbadminbucket to the second cluster.
        self.add_built_in_server_user(
            node=self.input.clusters[0][:self.nodes_init][0])

        self.backupset.start = start
        self.backupset.end = end
        self.log.info("*** start restore validation")
        self.backup_restore_validate(compare_uuid=False,
                                     seqno_compare_function=">=",
                                     expected_error=self.expected_error)

    def test_multiple_backups_merge_with_tombstoning(self):
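        """Back up, delete the loaded docs, wait for the tombstones to be
        purged, back up again, merge everything into a single backup, then
        restore and validate that merged backup."""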
        self.log.info("*** start loading items to all buckets")
        self.expected_error = self.input.param("expected_error", None)
        if int(self.active_resident_threshold) > 0:
            self.log.info("Disable auto-compaction to speed up reaching DGM")
            RestConnection(self.master).disable_auto_compaction()
        if self.expires:
            for bucket in self.buckets:
                cb = self._get_python_sdk_client(self.master.ip, bucket,
                                                 self.backupset.cluster_host)
                for i in range(1, self.num_items + 1):
                    cb.upsert("doc" + str(i), {"key": "value"})
        else:
            self._load_all_buckets(self.master, self.initial_load_gen,
                                   "create", self.expires)
        self.log.info("*** done loading items to all buckets")
        self.backup_create_validate()
        self.backup()
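        # Set the metadata (tombstone) purge interval, delete the docs,
        # and wait for the purge window to pass so the deletions become
        # eligible for purging.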
        self.set_meta_purge_interval()
        self._load_all_buckets(self.master, self.delete_gen, "delete",
                               self.expires)
        self.sleep(360, "Wait 6 minutes for the metadata purge "
                        "interval to elapse")
        self.compact_buckets()
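        # Take a second backup after compaction has purged the tombstones.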
        self.backup()
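        # Merge the full range of backups into a single backup.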
        self.backupset.start = 1
        self.backupset.end = len(self.backups)
        self.merge()
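        # As above, restore just the latest (merged) backup.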
        start = self.number_of_backups_taken
        end = self.number_of_backups_taken
        if self.reset_restore_cluster:
            self.log.info("*** start resetting cluster")
            self.backup_reset_clusters(self.cluster_to_restore)
            if self.same_cluster:
                self._initialize_nodes(Cluster(),
                                       self.servers[:self.nodes_init])
            else:
                self._initialize_nodes(Cluster(),
                                       self.input.clusters[0][:self.nodes_init])
            self.log.info("Done resetting cluster")
        self.sleep(10)
        self.backupset.start = start
        self.backupset.end = end
        self.log.info("*** start restore validation")
        self.backup_restore_validate(compare_uuid=False,
                                     seqno_compare_function=">=",
                                     expected_error=self.expected_error)