/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */

/**
 * @copyright 2013 Couchbase, Inc.
 *
 * @author Filipe Manana  <filipe@couchbase.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy of
 * the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 **/

#include "view_tests.h"


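/*
 * Decodes a pre-built version 1 index header binary and verifies every
 * decoded field (partition bitmasks, per-partition sequence numbers,
 * unindexable/replica/pending-transition lists and the b-tree root states)
 * against the values used to build the test blob. The decoded header is
 * returned so callers can re-encode it and compare against the original
 * binary.
 */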
static index_header_t *test_index_header_decoding_v1(const char *header_bin,
                                                     size_t header_bin_size)
{
    uint16_t active[] = { 3,7,19,23,27,31,35,39,43,47,51,55,59,63 };
    uint16_t passive[] = {
        0,1,2,4,5,6,8,9,10,12,13,14,16,17,18,20,21,22,24,25,
        26,28,29,30,32,33,34,36,37,38,40,41,42,44,45,46,48,49,
        50,52,53,54,56,57,60,62
    };
    uint16_t cleanup[] = { 11,15,58,61 };
    uint16_t unindexable[] = { 0,63 };
    uint16_t replicas_on_transfer[] = { 5, 10, 60, 62 };
    uint16_t pending_active[] = { 11,15 };
    uint16_t pending_passive[] = { 58,61 };
    uint16_t pending_unindexable[] = { 15,58 };
    index_header_t *header = NULL;
    bitmap_t expected_active, expected_passive, expected_cleanup;
    unsigned i;
    int ii;
    uint16_t jj;
    int num_unindexable;
    int num_reps;
    int num_pending_active;
    int num_pending_passive;
    int num_pending_unindexable;

    cb_assert(decode_index_header(header_bin, header_bin_size, &header) == COUCHSTORE_SUCCESS);
    cb_assert(header != NULL);

    cb_assert(header->version == 1);
    cb_assert(memcmp(header->signature, header_bin, 16) == 0);
    cb_assert(header->num_partitions == 64);
    cb_assert(header->num_views == 2);

    memset(&expected_active, 0, sizeof(expected_active));
    for (i = 0; i < (sizeof(active) / sizeof(active[0])); ++i) {
        set_bit(&expected_active, active[i]);
    }
    cb_assert(memcmp(&header->active_bitmask, &expected_active, sizeof(expected_active)) == 0);

    memset(&expected_passive, 0, sizeof(expected_passive));
    for (i = 0; i < (sizeof(passive) / sizeof(passive[0])); ++i) {
        set_bit(&expected_passive, passive[i]);
    }
    cb_assert(memcmp(&header->passive_bitmask, &expected_passive, sizeof(expected_passive)) == 0);

    memset(&expected_cleanup, 0, sizeof(expected_cleanup));
    for (i = 0; i < (sizeof(cleanup) / sizeof(cleanup[0])); ++i) {
        set_bit(&expected_cleanup, cleanup[i]);
    }
    cb_assert(memcmp(&header->cleanup_bitmask, &expected_cleanup, sizeof(expected_cleanup)) == 0);

    cb_assert(sorted_list_size(header->seqs) == 58);
    for (jj = 0; jj < 64; ++jj) {
        part_seq_t rs, *pseq;

        switch (jj) {
            /* unindexable */
        case 0:
        case 63:
            /* cleanup */
        case 11:
        case 15:
        case 58:
        case 61:
            continue;
        default:
            break;
        }

        rs.part_id = jj;

        pseq = (part_seq_t *) sorted_list_get(header->seqs, &rs);
        cb_assert(pseq != NULL);
        cb_assert(pseq->part_id == jj);
        cb_assert(pseq->seq == 1221);
    }

    num_unindexable = sizeof(unindexable) / sizeof(unindexable[0]);
    cb_assert(sorted_list_size(header->unindexable_seqs) == num_unindexable);
    for (ii = 0; ii < num_unindexable; ++ii) {
        part_seq_t rs, *pseq;
        rs.part_id = unindexable[ii];

        pseq = (part_seq_t *) sorted_list_get(header->unindexable_seqs, &rs);
        cb_assert(pseq != NULL);
        cb_assert(pseq->part_id == unindexable[ii]);
        cb_assert(pseq->seq == 1221);
    }

    cb_assert(header->id_btree_state->pointer == 1617507);
    cb_assert(header->id_btree_state->subtreesize == 1286028);
    cb_assert(header->id_btree_state->reduce_value.size == 133);
    /* TODO: once view reduction decoding is done, test the exact reduction value. */

    cb_assert(header->view_states[0]->pointer == 2901853);
    cb_assert(header->view_states[0]->subtreesize == 1284202);
    cb_assert(header->view_states[0]->reduce_value.size == 140);
    /* TODO: once view reduction decoding is done, test the exact reduction value. */

    cb_assert(header->view_states[1]->pointer == 4180175);
    cb_assert(header->view_states[1]->subtreesize == 1278451);
    cb_assert(header->view_states[1]->reduce_value.size == 140);
    /* TODO: once view reduction decoding is done, test the exact reduction value. */

    cb_assert(header->has_replica == 1);
    cb_assert(header->replicas_on_transfer != NULL);

    num_reps = (sizeof(replicas_on_transfer) / sizeof(replicas_on_transfer[0]));

    cb_assert(sorted_list_size(header->replicas_on_transfer) == num_reps);
    for (ii = 0; ii < num_reps; ++ii) {
        uint16_t *part_id = sorted_list_get(header->replicas_on_transfer,
                                            &replicas_on_transfer[ii]);
        cb_assert(part_id != NULL);
        cb_assert(*part_id == replicas_on_transfer[ii]);
    }

    num_pending_active = sizeof(pending_active) / sizeof(pending_active[0]);
    cb_assert(sorted_list_size(header->pending_transition.active) == num_pending_active);
    for (ii = 0; ii < num_pending_active; ++ii) {
        uint16_t *part_id = sorted_list_get(header->pending_transition.active,
                                            &pending_active[ii]);
        cb_assert(part_id != NULL);
        cb_assert(*part_id == pending_active[ii]);
    }

    num_pending_passive = sizeof(pending_passive) / sizeof(pending_passive[0]);
    cb_assert(sorted_list_size(header->pending_transition.passive) == num_pending_passive);
    for (ii = 0; ii < num_pending_passive; ++ii) {
        uint16_t *part_id = sorted_list_get(header->pending_transition.passive,
                                            &pending_passive[ii]);
        cb_assert(part_id != NULL);
        cb_assert(*part_id == pending_passive[ii]);
    }

    num_pending_unindexable = sizeof(pending_unindexable) / sizeof(pending_unindexable[0]);
    cb_assert(sorted_list_size(header->pending_transition.unindexable) == num_pending_unindexable);
    for (ii = 0; ii < num_pending_unindexable; ++ii) {
        uint16_t *part_id = sorted_list_get(header->pending_transition.unindexable,
                                            &pending_unindexable[ii]);
        cb_assert(part_id != NULL);
        cb_assert(*part_id == pending_unindexable[ii]);
    }

    return header;
}

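/*
 * Same as test_index_header_decoding_v1(), but for a version 2 header:
 * in addition to the fields checked for v1, it verifies the exact b-tree
 * reduce values and the per-partition version (failover log) entries.
 */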
static index_header_t *test_index_header_decoding_v2(const char *header_bin,
                                                     size_t header_bin_size)
{
    uint16_t active[] = { 3, 4, 21, 24 };
    uint16_t passive[] = { 1, 5, 14, 28, 31 };
    uint16_t cleanup[] = { 2, 13 };
    uint16_t unindexable[] = { 3, 31 };
    uint16_t replicas_on_transfer[] = { 5 };
    uint16_t pending_active[] = { 14, 28 };
    uint16_t pending_passive[] = { 1 };
    uint16_t pending_unindexable[] = { 1, 28 };
    index_header_t *header = NULL;
    bitmap_t expected_active, expected_passive, expected_cleanup;
    unsigned i;
    int ii;
    uint16_t jj;
    int num_unindexable;
    int num_reps;
    int num_pending_active;
    int num_pending_passive;
    int num_pending_unindexable;

    cb_assert(decode_index_header(header_bin, header_bin_size, &header) == COUCHSTORE_SUCCESS);
    cb_assert(header != NULL);

    cb_assert(header->version == 2);
    cb_assert(memcmp(header->signature, header_bin, 16) == 0);
    cb_assert(header->num_partitions == 32);
    cb_assert(header->num_views == 2);

    memset(&expected_active, 0, sizeof(expected_active));
    for (i = 0; i < (sizeof(active) / sizeof(active[0])); ++i) {
        set_bit(&expected_active, active[i]);
    }
    cb_assert(memcmp(&header->active_bitmask, &expected_active, sizeof(expected_active)) == 0);

    memset(&expected_passive, 0, sizeof(expected_passive));
    for (i = 0; i < (sizeof(passive) / sizeof(passive[0])); ++i) {
        set_bit(&expected_passive, passive[i]);
    }
    cb_assert(memcmp(&header->passive_bitmask, &expected_passive, sizeof(expected_passive)) == 0);

    memset(&expected_cleanup, 0, sizeof(expected_cleanup));
    for (i = 0; i < (sizeof(cleanup) / sizeof(cleanup[0])); ++i) {
        set_bit(&expected_cleanup, cleanup[i]);
    }
    cb_assert(memcmp(&header->cleanup_bitmask, &expected_cleanup, sizeof(expected_cleanup)) == 0);

    cb_assert(sorted_list_size(header->seqs) == 32);
    for (jj = 0; jj < 32; ++jj) {
        part_seq_t rs, *pseq;
        rs.part_id = jj;
        pseq = (part_seq_t *) sorted_list_get(header->seqs, &rs);
        cb_assert(pseq != NULL);
        cb_assert(pseq->part_id == jj);
        cb_assert(pseq->seq == jj * jj);
    }

    num_unindexable = sizeof(unindexable) / sizeof(unindexable[0]);
    cb_assert(sorted_list_size(header->unindexable_seqs) == num_unindexable);
    for (ii = 0; ii < num_unindexable; ++ii) {
        part_seq_t rs, *pseq;
        rs.part_id = unindexable[ii];

        pseq = (part_seq_t *) sorted_list_get(header->unindexable_seqs, &rs);
        cb_assert(pseq != NULL);
        cb_assert(pseq->part_id == unindexable[ii]);
        cb_assert(pseq->seq == unindexable[ii] * unindexable[ii]);
    }

    cb_assert(header->id_btree_state->pointer == 123);
    cb_assert(header->id_btree_state->subtreesize == 567);
    cb_assert(header->id_btree_state->reduce_value.size == 6);
    cb_assert(memcmp(
        header->id_btree_state->reduce_value.buf, "redval",
        header->id_btree_state->reduce_value.size) == 0);

    cb_assert(header->view_states[0]->pointer == 2345);
    cb_assert(header->view_states[0]->subtreesize == 789);
    cb_assert(header->view_states[0]->reduce_value.size == 7);
    cb_assert(memcmp(
        header->view_states[0]->reduce_value.buf, "redval2",
        header->view_states[0]->reduce_value.size) == 0);

    cb_assert(header->view_states[1]->pointer == 3456);
    cb_assert(header->view_states[1]->subtreesize == 8901);
    cb_assert(header->view_states[1]->reduce_value.size == 7);
    cb_assert(memcmp(
        header->view_states[1]->reduce_value.buf, "redval3",
        header->view_states[1]->reduce_value.size) == 0);

    cb_assert(header->has_replica == 0);
    cb_assert(header->replicas_on_transfer != NULL);

    num_reps = (sizeof(replicas_on_transfer) / sizeof(replicas_on_transfer[0]));

    cb_assert(sorted_list_size(header->replicas_on_transfer) == num_reps);
    for (ii = 0; ii < num_reps; ++ii) {
        uint16_t *part_id = sorted_list_get(header->replicas_on_transfer,
                                            &replicas_on_transfer[ii]);
        cb_assert(part_id != NULL);
        cb_assert(*part_id == replicas_on_transfer[ii]);
    }

    num_pending_active = sizeof(pending_active) / sizeof(pending_active[0]);
    cb_assert(sorted_list_size(header->pending_transition.active) == num_pending_active);
    for (ii = 0; ii < num_pending_active; ++ii) {
        uint16_t *part_id = sorted_list_get(header->pending_transition.active,
                                            &pending_active[ii]);
        cb_assert(part_id != NULL);
        cb_assert(*part_id == pending_active[ii]);
    }

    num_pending_passive = sizeof(pending_passive) / sizeof(pending_passive[0]);
    cb_assert(sorted_list_size(header->pending_transition.passive) == num_pending_passive);
    for (ii = 0; ii < num_pending_passive; ++ii) {
        uint16_t *part_id = sorted_list_get(header->pending_transition.passive,
                                            &pending_passive[ii]);
        cb_assert(part_id != NULL);
        cb_assert(*part_id == pending_passive[ii]);
    }

    num_pending_unindexable = sizeof(pending_unindexable) / sizeof(pending_unindexable[0]);
    cb_assert(sorted_list_size(header->pending_transition.unindexable) == num_pending_unindexable);
    for (ii = 0; ii < num_pending_unindexable; ++ii) {
        uint16_t *part_id = sorted_list_get(header->pending_transition.unindexable,
                                            &pending_unindexable[ii]);
        cb_assert(part_id != NULL);
        cb_assert(*part_id == pending_unindexable[ii]);
    }

    cb_assert(sorted_list_size(header->part_versions) == 32);
    for (jj = 0; jj < 32; ++jj) {
        part_version_t rs, *pver;
        rs.part_id = jj;
        pver = (part_version_t *) sorted_list_get(header->part_versions, &rs);
        cb_assert(pver != NULL);
        cb_assert(pver->part_id == jj);
        cb_assert(pver->num_failover_log == 2);
        cb_assert(memcmp(pver->failover_log[0].uuid, "auuid123", 8) == 0);
        cb_assert(pver->failover_log[0].seq == jj);
        cb_assert(memcmp(pver->failover_log[1].uuid, "another1", 8) == 0);
        cb_assert(pver->failover_log[1].seq == jj * jj);
    }

    return header;
}

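/* Encodes a header into a newly allocated buffer, asserting success. */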
static void test_index_header_encoding(const index_header_t *header,
                                       char **buffer,
                                       size_t *size)
{
    couchstore_error_t res;

    res = encode_index_header(header, buffer, size);
    cb_assert(res == COUCHSTORE_SUCCESS);
}

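/*
 * Round-trip test for version 1 headers: decode a known binary, re-encode
 * the result and check it is byte-for-byte identical to the original, then
 * repeat the cycle once more with the re-encoded buffer.
 */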
void test_index_headers_v1(void)
{
    index_header_t *header;
    index_header_t *header2;
    char *header_bin2 = NULL;
    size_t header_bin2_size = 0;
    char *header_bin3 = NULL;
    size_t header_bin3_size = 0;

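    /*
     * Serialized version 1 index header used as test input. The decoder
     * test asserts that the decoded signature matches the first 16 bytes
     * of this buffer.
     */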
    unsigned char header_bin[] = {
        5,226,251,160,170,107,207,39,248,218,139,62,137,58,95,46,204,10,12,1,0,64,0,
        254,1,0,218,1,0,0,136,5,1,4,0,136,254,127,0,218,127,0,8,0,83,119,9,1,254,128,
        0,222,128,0,0,36,5,121,20,136,0,0,58,0,1,1,11,12,4,197,0,2,13,8,0,3,13,8,0,4,
        13,8,0,5,13,8,0,6,13,8,0,7,13,8,0,8,13,8,0,9,13,8,0,10,13,8,0,12,13,8,0,13,
        13,8,0,14,13,8,0,16,13,8,0,17,13,8,0,18,13,8,0,19,13,8,0,20,13,8,0,21,13,8,0,
        22,13,8,0,23,13,8,0,24,13,8,0,25,13,8,0,26,13,8,0,27,13,8,0,28,13,8,0,29,13,
        8,0,30,13,8,0,31,13,8,0,32,13,8,0,33,13,8,0,34,13,8,0,35,13,8,37,19,12,4,197,
        0,37,13,16,0,38,13,8,0,39,13,8,0,40,13,8,0,41,13,8,0,42,13,8,0,43,13,8,0,44,
        13,8,0,45,13,8,0,46,13,8,0,47,13,8,0,48,13,8,0,49,13,8,0,50,13,8,0,51,13,8,0,
        52,13,8,0,53,13,8,0,54,13,8,0,55,13,8,0,56,13,8,0,57,13,8,0,59,13,8,0,60,13,
        8,0,62,13,8,64,145,0,0,0,24,174,99,0,0,0,19,159,140,0,0,1,49,254,101,3,226,
        101,3,0,255,13,1,32,2,0,152,0,0,0,44,71,93,1,148,8,152,106,0,254,148,0,254,
        148,0,1,148,24,0,5,55,56,49,52,52,5,154,8,63,200,207,1,154,4,129,243,254,154,
        0,254,154,0,46,154,0,112,1,0,4,0,5,0,10,0,60,0,62,0,2,0,11,0,15,0,2,0,58,0,
        61,0,2,0,15,0,58,105,173,44,0,0,4,197,0,63,0,0,0,0,4,197
    };

    fprintf(stderr, "Decoding an index header v1...\n");
    header = test_index_header_decoding_v1((const char*)header_bin, sizeof(header_bin));

    fprintf(stderr, "Encoding the previously decoded header...\n");
    test_index_header_encoding(header, &header_bin2, &header_bin2_size);

    cb_assert(header_bin2_size == sizeof(header_bin));
    cb_assert(memcmp(header_bin2, header_bin, header_bin2_size) == 0);

    fprintf(stderr, "Decoding the previously encoded header...\n");
    header2 = test_index_header_decoding_v1(header_bin2, header_bin2_size);

    fprintf(stderr, "Encoding the previously decoded header...\n");
    test_index_header_encoding(header2, &header_bin3, &header_bin3_size);

    cb_assert(header_bin3_size == sizeof(header_bin));
    cb_assert(memcmp(header_bin3, header_bin, header_bin3_size) == 0);

    free_index_header(header);
    free_index_header(header2);
    free(header_bin2);
    free(header_bin3);
}


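/*
 * Round-trip test for version 2 headers, following the same
 * decode/encode/compare cycle as test_index_headers_v1().
 */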
void test_index_headers_v2(void)
{
    index_header_t *header;
    index_header_t *header2;
    char *header_bin2 = NULL;
    size_t header_bin2_size = 0;
    char *header_bin3 = NULL;
    size_t header_bin3_size = 0;

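    /*
     * Serialized version 2 index header used as test input. As with the v1
     * blob, the decoder test asserts that the decoded signature matches the
     * first 16 bytes of this buffer.
     */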
    unsigned char header_bin[] = {
        0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,237,14,12,2,0,32,0,254,1,0,234,1,0,12,1,32,0,
        24,254,127,0,234,127,0,16,0,144,0,64,34,254,128,0,238,128,0,12,0,0,32,4,61,
        130,0,1,5,140,8,1,0,2,5,8,8,4,0,3,5,8,8,9,0,4,5,8,8,16,0,5,5,8,8,25,0,6,5,8,
        8,36,0,7,5,8,8,49,0,8,5,8,8,64,0,9,5,8,8,81,0,10,5,8,8,100,0,11,5,8,8,121,0,
        12,5,8,8,144,0,13,5,8,8,169,0,14,5,8,8,196,0,15,5,8,8,225,0,16,1,8,12,1,0,0,
        17,5,8,8,33,0,18,5,8,8,68,0,19,5,8,8,105,0,20,5,8,8,144,0,21,5,8,8,185,0,22,
        5,8,8,228,0,23,1,8,4,2,17,41,196,12,2,64,0,25,5,16,8,113,0,26,5,8,8,164,0,27,
        5,8,8,217,0,28,1,8,12,3,16,0,29,5,8,8,73,0,30,5,8,8,132,0,31,5,8,0,193,9,112,
        4,0,123,1,14,32,2,55,114,101,100,118,97,108,2,9,125,4,9,41,1,21,4,3,21,9,21,
        0,50,9,21,4,13,128,1,21,4,34,197,9,21,52,51,0,0,1,0,5,0,2,0,14,0,28,0,1,37,
        62,16,1,0,28,0,2,53,62,17,102,65,234,32,2,97,117,117,105,100,49,50,51,1,66,1,
        1,28,97,110,111,116,104,101,114,49,1,12,5,1,0,1,66,36,0,0,1,58,36,0,33,160,
        62,72,0,0,2,58,36,0,33,188,62,36,0,0,3,58,36,0,33,216,62,36,0,0,4,58,36,0,33,
        244,62,36,0,0,5,58,36,0,8,25,0,6,66,36,0,0,6,58,36,0,65,44,62,72,0,0,7,58,36,
        0,65,72,62,36,0,0,8,58,36,0,8,64,0,9,66,36,0,0,9,58,36,0,65,128,62,72,0,0,10,
        58,36,0,65,156,62,36,0,0,11,58,36,0,65,184,62,36,0,0,12,58,36,0,65,212,62,36,
        0,0,13,58,36,0,65,240,62,36,0,0,14,58,36,0,97,12,62,36,0,0,15,58,36,0,97,40,
        62,36,0,0,16,54,36,0,101,68,62,36,0,0,17,58,36,0,97,96,62,36,0,0,18,58,36,0,
        97,124,62,36,0,0,19,58,36,0,97,152,62,36,0,0,20,58,36,0,97,180,62,36,0,0,21,
        58,36,0,97,208,62,36,0,0,22,58,36,0,97,236,62,36,0,0,23,54,36,0,133,8,62,36,
        0,0,24,58,36,0,129,36,62,36,0,0,25,58,36,0,129,64,62,36,0,0,26,58,36,0,129,
        92,62,36,0,0,27,58,36,0,129,120,62,36,0,0,28,54,36,0,133,148,62,36,0,0,29,58,
        36,0,129,176,62,36,0,0,30,58,36,0,129,204,62,36,0,0,31,58,36,0,0,193
    };

    fprintf(stderr, "Decoding an index header v2...\n");
    header = test_index_header_decoding_v2(
        (const char*)header_bin, sizeof(header_bin));

    fprintf(stderr, "Encoding the previously decoded header...\n");
    test_index_header_encoding(header, &header_bin2, &header_bin2_size);

    cb_assert(header_bin2_size == sizeof(header_bin));
    cb_assert(memcmp(header_bin2, header_bin, header_bin2_size) == 0);

    fprintf(stderr, "Decoding the previously encoded header...\n");
    header2 = test_index_header_decoding_v2(header_bin2, header_bin2_size);

    fprintf(stderr, "Encoding the previously decoded header...\n");
    test_index_header_encoding(header2, &header_bin3, &header_bin3_size);

    cb_assert(header_bin3_size == sizeof(header_bin));
    cb_assert(memcmp(header_bin3, header_bin, header_bin3_size) == 0);

    free_index_header(header);
    free_index_header(header2);
    free(header_bin2);
    free(header_bin3);
}