from TestInput import TestInputSingleton
import time
import logger
import string
import random
import math
from lib.remote.remote_util import RemoteMachineShellConnection
from membase.api.rest_client import RestConnection
from membase.helper.bucket_helper import BucketOperationHelper
from couchbase_helper.documentgenerator import BlobGenerator, DocumentGenerator
from basetestcase import BaseTestCase
from memcached.helper.data_helper import MemcachedClientHelper, VBucketAwareMemcached
from couchbase_helper.stats_tools import StatsCommon

class MemorySanity(BaseTestCase):

    def setUp(self):
        super(MemorySanity, self).setUp()
        self.kv_verify = self.input.param('kv_verify', True)
        self.log.info("==============  MemorySanityTest setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
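        # BlobGenerator produces binary values of self.value_size bytes with
        # keys prefixed "loadOne"; value_size and num_items are assumed here to
        # come from the test input params populated by BaseTestCase.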
        self.gen_create = BlobGenerator('loadOne', 'loadOne_', self.value_size, end=self.num_items)

    def tearDown(self):
        self.log.info("==============  teardown was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        super(MemorySanity, self).tearDown()

    """
        This test creates a bucket, applies an initial front-end load,
        and records memory stats; it then deletes the bucket and recreates
        the same scenario for the specified number of repetitions, checking
        after the last repetition that memory usage matches what it was at
        the end of the very first front-end load.
    """
    def repetitive_create_delete(self):
        self.repetitions = self.input.param("repetition_count", 1)
        self.bufferspace = self.input.param("bufferspace", 100000)
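        # "bufferspace" is the slack (in bytes) allowed over the initial
        # mem_used snapshot before the final assertion fails.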
        # the first front-end load
        self._load_all_buckets(self.master, self.gen_create, "create", 0,
                               batch_size=10000, pause_secs=5, timeout_secs=100)
        self._wait_for_stats_all_buckets(self.servers)
        rest = RestConnection(self.servers[0])
        max_data_sizes = {}
        initial_memory_usage = {}
        self.sleep(30)
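        # Snapshot the most recent ep_max_size and mem_used samples for each
        # bucket; these serve as the baseline for the repetitions below.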
        for bucket in self.buckets:
            max_data_sizes[bucket.name] = rest.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["ep_max_size"][-1]
            self.log.info("Initial max_data_size of bucket '{0}': {1}".format(bucket.name, max_data_sizes[bucket.name]))
            initial_memory_usage[bucket.name] = rest.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["mem_used"][-1]
            self.log.info("Initial memory consumption of bucket '{0}' with load: {1}".format(bucket.name, initial_memory_usage[bucket.name]))
        mem_usage = {}
        self.sleep(10)
        # the repetitions: delete all buckets, recreate them, reload, re-measure
        for i in range(self.repetitions):
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            del self.buckets[:]
            self._bucket_creation()
            self._load_all_buckets(self.master, self.gen_create, "create", 0,
                                   batch_size=10000, pause_secs=5, timeout_secs=100)
            self._wait_for_stats_all_buckets(self.servers)
            self.sleep(30)
            for bucket in self.buckets:
                mem_usage[bucket.name] = rest.fetch_bucket_stats(bucket.name)["op"]["samples"]["mem_used"][-1]
                self.log.info("Bucket '{0}': memory used after attempt {1} = {2}, difference from initial snapshot: {3}"\
                              .format(bucket.name, i + 1, mem_usage[bucket.name],
                                      mem_usage[bucket.name] - initial_memory_usage[bucket.name]))
            self.sleep(10)
        if self.repetitions > 0:
            self.log.info("After {0} repetitive delete-create-load cycles, the memory consumption difference is:"\
                          .format(self.repetitions))
            for bucket in self.buckets:
                self.log.info("{0} :: Initial: {1} :: Now: {2} :: Difference: {3}"\
                              .format(bucket.name, initial_memory_usage[bucket.name], mem_usage[bucket.name],
                                      mem_usage[bucket.name] - initial_memory_usage[bucket.name]))
                msg = "Memory used now is much greater than the initial usage!"
                self.assertTrue(mem_usage[bucket.name] <= initial_memory_usage[bucket.name] + self.bufferspace, msg)
        else:
            self.log.info("Verification skipped, as there weren't any repetitions.")

    '''
    Test based on MB-8432.
    Steps:
    create a bucket with a 100MB quota,
    load enough data to put the bucket into DGM,
    check that the "tc_malloc_allocated" stat stays within the 100MB bound.
    '''
    def memory_quota_default_bucket(self):
        resident_ratio = self.input.param("resident_ratio", 50)
        delta_items = 200000
        mc = MemcachedClientHelper.direct_client(self.master, self.default_bucket_name)

        self.log.info("LOAD PHASE")
        end_time = time.time() + self.wait_timeout * 30
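        # Keep loading batches of delta_items docs until the active resident
        # ratio drops below the target, i.e. the bucket goes into DGM.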
        while (int(mc.stats()["vb_active_perc_mem_resident"]) == 0 or\
               int(mc.stats()["vb_active_perc_mem_resident"]) > resident_ratio) and\
              time.time() < end_time:
            self.log.info("Resident ratio is %s" % mc.stats()["vb_active_perc_mem_resident"])
            gen = DocumentGenerator('test_docs', '{{"age": {0}}}', xrange(5),
                                    start=self.num_items, end=(self.num_items + delta_items))
            self._load_all_buckets(self.master, gen, 'create', 0)
            self.num_items += delta_items
            self.log.info("Resident ratio is %s" % mc.stats()["vb_active_perc_mem_resident"])
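        # mem_used is reported in bytes; convert to MB before comparing
        # against the bucket RAM quota.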
        memory_mb = int(mc.stats("memory")["mem_used"]) / (1024 * 1024)
        self.log.info("Memory used is %s MB" % memory_mb)
        self.assertTrue(memory_mb <= self.quota, "Memory used %s MB should be within the quota of %s MB" % (
                                                  memory_mb, self.quota))


    def random_str_generator(self, size=4, chars=string.ascii_uppercase + string.digits):
        return ''.join(random.choice(chars) for _ in range(size))

    """
    Test to load items of a specified size, then
    append to a selected set of keys until the
    items reach a desired size.
    """
    def test_items_append(self):
        self.desired_item_size = self.input.param("desired_item_size", 2048)
        self.append_size = self.input.param("append_size", 1024)
        self.fixed_append_size = self.input.param("fixed_append_size", True)
        self.append_ratio = self.input.param("append_ratio", 0.5)
        self._load_all_buckets(self.master, self.gen_create, "create", 0,
                               batch_size=1000, pause_secs=5, timeout_secs=100)

        for bucket in self.buckets:
            self.value_size = self.input.param("value_size", 512)
            verify_dict = {}
            vkeys, dkeys = bucket.kvs[1].key_set()
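            # key_set() returns the keys the client-side KV store tracks as
            # valid (vkeys) and deleted (dkeys).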

            key_count = len(vkeys)
            app_ratio = self.append_ratio * key_count
            selected_keys = []
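            # Select roughly the first append_ratio fraction of the valid keys.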
            i = 0
            for key in vkeys:
                i += 1
                if i >= app_ratio:
                    break
                selected_keys.append(key)

            awareness = VBucketAwareMemcached(RestConnection(self.master), bucket.name)
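            # VBucketAwareMemcached routes each key to the node that owns its
            # vbucket, so the appends below land on the correct server.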
            if self.kv_verify:
                for key in selected_keys:
                    value = awareness.memcached(key).get(key)[2]
                    verify_dict[key] = value

            self.log.info("Bucket: {0}".format(bucket.name))
            self.log.info("Appending to items whose initial size was "
                            + "{0} until they equal or cross a size of {1}".format(self.value_size, self.desired_item_size))
            self.log.info("Item-appending of {0} items starting ..".format(len(selected_keys)))

            index = 3
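            # With fixed_append_size disabled, append sizes grow as powers of
            # two, starting at 2 ** 3 = 8 bytes.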
            while self.value_size < self.desired_item_size:
                str_len = self.append_size
                if not self.fixed_append_size:
                    str_len = int(math.pow(2, index))

                for key in selected_keys:
                    random_string = self.random_str_generator(str_len)
                    awareness.memcached(key).append(key, random_string)
                    if self.kv_verify:
                        verify_dict[key] = verify_dict[key] + random_string
                self.value_size += str_len
                self.log.info("Size of {0} items was increased to {1} bytes".format(len(selected_keys), self.value_size))
                index += 1

            self.log.info("The appending of {0} items ended".format(len(selected_keys)))

        for bucket in self.buckets:
            msg = "Bucket:{0}".format(bucket.name)
            self.log.info("VERIFICATION <" + msg + ">: Phase 0 - Check the gap between "
                      + "mem_used by the bucket and total_allocated_bytes")
            stats = StatsCommon()
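            # StatsCommon.get_stats() gathers the requested stat from the
            # "memory" stat group on every server and returns a per-server map.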
            mem_used_stats = stats.get_stats(self.servers, bucket, 'memory', 'mem_used')
            total_allocated_bytes_stats = stats.get_stats(self.servers, bucket, 'memory', 'total_allocated_bytes')
            total_fragmentation_bytes_stats = stats.get_stats(self.servers, bucket, 'memory', 'total_fragmentation_bytes')

            for server in self.servers:
                self.log.info("In {0} bucket {1}, total_fragmentation_bytes + total_allocated_bytes = {2}"
                              .format(server.ip, bucket.name, (int(total_fragmentation_bytes_stats[server]) + int(total_allocated_bytes_stats[server]))))
                self.log.info("In {0} bucket {1}, mem_used = {2}".format(server.ip, bucket.name, mem_used_stats[server]))
                self.log.info("In {0} bucket {1}, the ratio of memory actually allocated by memcached to mem_used is {2}"
                              .format(server.ip, bucket.name, float(int(total_fragmentation_bytes_stats[server]) + int(total_allocated_bytes_stats[server])) / float(mem_used_stats[server])))

            self.log.info("VERIFICATION <" + msg + ">: Phase 1 - Check if any of the "
                    + "selected keys have a value smaller than the desired value size")
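            # NOTE: 'awareness' and 'selected_keys' still refer to the last
            # bucket processed in the append loop above, so this verification
            # assumes a single bucket (or identical key sets across buckets).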
            for key in selected_keys:
                value = awareness.memcached(key).get(key)[2]
                if len(value) < self.desired_item_size:
                    self.fail("Failed to append enough to make the value size surpass "
                                + "{0}: key {1} has size {2}".format(self.desired_item_size, key, len(value)))

            if self.kv_verify:
                self.log.info("VERIFICATION <" + msg + ">: Phase 2 - Check that the content "
                        + "after the appends matches what's expected")
                for k in verify_dict:
                    if awareness.memcached(k).get(k)[2] != verify_dict[k]:
                        self.fail("Content at key {0} is not what's expected.".format(k))
                self.log.info("VERIFICATION <" + msg + ">: Successful")

        shell = RemoteMachineShellConnection(self.master)
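        # Dump the raw allocator stats via cbstats for post-test inspection of
        # memory fragmentation.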
        shell.execute_cbstats("", "raw", keyname="allocator", vbid="")
        shell.disconnect()