1 // Copyright 2005 Google Inc. All Rights Reserved.
2 //
3 // Redistribution and use in source and binary forms, with or without
4 // modification, are permitted provided that the following conditions are
5 // met:
6 //
7 //     * Redistributions of source code must retain the above copyright
8 // notice, this list of conditions and the following disclaimer.
9 //     * Redistributions in binary form must reproduce the above
10 // copyright notice, this list of conditions and the following disclaimer
11 // in the documentation and/or other materials provided with the
12 // distribution.
13 //     * Neither the name of Google Inc. nor the names of its
14 // contributors may be used to endorse or promote products derived from
15 // this software without specific prior written permission.
16 //
17 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 
29 #include "snappy.h"
30 #include "snappy-internal.h"
31 #include "snappy-sinksource.h"
32 
33 #if !defined(SNAPPY_HAVE_SSSE3)
34 // __SSSE3__ is defined by GCC and Clang. Visual Studio doesn't target SIMD
35 // support between SSE2 and AVX (so SSSE3 instructions require AVX support), and
36 // defines __AVX__ when AVX support is available.
37 #if defined(__SSSE3__) || defined(__AVX__)
38 #define SNAPPY_HAVE_SSSE3 1
39 #else
40 #define SNAPPY_HAVE_SSSE3 0
41 #endif
42 #endif  // !defined(SNAPPY_HAVE_SSSE3)
43 
44 #if !defined(SNAPPY_HAVE_BMI2)
45 // __BMI2__ is defined by GCC and Clang. Visual Studio doesn't target BMI2
46 // specifically, but it does define __AVX2__ when AVX2 support is available.
47 // Fortunately, AVX2 was introduced in Haswell, just like BMI2.
48 //
49 // BMI2 is not defined as a subset of AVX2 (unlike SSSE3 and AVX above). So,
50 // GCC and Clang can build code with AVX2 enabled but BMI2 disabled, in which
51 // case issuing BMI2 instructions results in a compiler error.
52 #if defined(__BMI2__) || (defined(_MSC_VER) && defined(__AVX2__))
53 #define SNAPPY_HAVE_BMI2 1
54 #else
55 #define SNAPPY_HAVE_BMI2 0
56 #endif
57 #endif  // !defined(SNAPPY_HAVE_BMI2)
58 
59 #if SNAPPY_HAVE_SSSE3
60 // Please do not replace with <x86intrin.h> or with headers that assume more
61 // advanced SSE versions without checking with all the OWNERS.
62 #include <tmmintrin.h>
63 #endif
64 
65 #if SNAPPY_HAVE_BMI2
66 // Please do not replace with <x86intrin.h> or with headers that assume more
67 // advanced SSE versions without checking with all the OWNERS.
68 #include <immintrin.h>
69 #endif
70 
71 #include <stdio.h>
72 
73 #include <algorithm>
74 #include <string>
75 #include <vector>
76 
77 namespace snappy {
78 
79 using internal::COPY_1_BYTE_OFFSET;
80 using internal::COPY_2_BYTE_OFFSET;
81 using internal::LITERAL;
82 using internal::char_table;
83 using internal::kMaximumTagLength;
84 
85 // Any hash function will produce a valid compressed bitstream, but a good
86 // hash function reduces the number of collisions and thus yields better
87 // compression for compressible input, and more speed for incompressible
88 // input. Of course, it doesn't hurt if the hash function is reasonably fast
89 // either, as it gets called a lot.
90 static inline uint32 HashBytes(uint32 bytes, int shift) {
91   uint32 kMul = 0x1e35a7bd;
92   return (bytes * kMul) >> shift;
93 }
94 static inline uint32 Hash(const char* p, int shift) {
95   return HashBytes(UNALIGNED_LOAD32(p), shift);
96 }
97 
98 size_t MaxCompressedLength(size_t source_len) {
99   // Compressed data can be defined as:
100   //    compressed := item* literal*
101   //    item       := literal* copy
102   //
103   // The trailing literal sequence has a space blowup of at most 62/60
104   // since a literal of length 60 needs one tag byte + one extra byte
105   // for length information.
106   //
107   // Item blowup is trickier to measure.  Suppose the "copy" op copies
108   // 4 bytes of data.  Because of a special check in the encoding code,
109   // we produce a 4-byte copy only if the offset is < 65536.  Therefore
110   // the copy op takes 3 bytes to encode, and this type of item leads
111   // to at most the 62/60 blowup for representing literals.
112   //
113   // Suppose the "copy" op copies 5 bytes of data.  If the offset is big
114   // enough, it will take 5 bytes to encode the copy op.  Therefore the
115   // worst case here is a one-byte literal followed by a five-byte copy.
116   // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
117   //
118   // This last factor dominates the blowup, so the final estimate is:
119   return 32 + source_len + source_len/6;
120 }
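
// As a concrete check of the bound above: a 1 MiB (1048576-byte) input can
// expand to at most 32 + 1048576 + 1048576/6 = 1223370 bytes, i.e. roughly a
// 17% worst-case blowup plus a small constant.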
121 
122 namespace {
123 
124 void UnalignedCopy64(const void* src, void* dst) {
125   char tmp[8];
126   memcpy(tmp, src, 8);
127   memcpy(dst, tmp, 8);
128 }
129 
130 void UnalignedCopy128(const void* src, void* dst) {
131   // memcpy gets vectorized when the appropriate compiler options are used.
132   // For example, x86 compilers targeting SSE2+ will optimize to an SSE2 load
133   // and store.
134   char tmp[16];
135   memcpy(tmp, src, 16);
136   memcpy(dst, tmp, 16);
137 }
138 
139 // Copy [src, src+(op_limit-op)) to [op, op_limit) a byte at a time. Used
140 // for handling COPY operations where the input and output regions may overlap.
141 // For example, suppose:
142 //    src       == "ab"
143 //    op        == src + 2
144 //    op_limit  == op + 20
145 // After IncrementalCopySlow(src, op, op_limit), the result will have eleven
146 // copies of "ab"
147 //    ababababababababababab
148 // Note that this does not match the semantics of either memcpy() or memmove().
149 inline char* IncrementalCopySlow(const char* src, char* op,
150                                  char* const op_limit) {
151   // TODO: Remove pragma when LLVM is aware this
152   // function is only called in cold regions and when cold regions don't get
153   // vectorized or unrolled.
154 #ifdef __clang__
155 #pragma clang loop unroll(disable)
156 #endif
157   while (op < op_limit) {
158     *op++ = *src++;
159   }
160   return op_limit;
161 }
162 
163 #if SNAPPY_HAVE_SSSE3
164 
165 // This is a table of shuffle control masks that can be used as the source
166 // operand for PSHUFB to permute the contents of the destination XMM register
167 // into a repeating byte pattern.
168 alignas(16) const char pshufb_fill_patterns[7][16] = {
169   {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
170   {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1},
171   {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0},
172   {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3},
173   {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0},
174   {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3},
175   {0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1},
176 };
177 
178 #endif  // SNAPPY_HAVE_SSSE3
179 
180 // Copy [src, src+(op_limit-op)) to [op, op_limit) but faster than
181 // IncrementalCopySlow. buf_limit is the address past the end of the writable
182 // region of the buffer.
183 inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
184                              char* const buf_limit) {
185   // Terminology:
186   //
187   // slop = buf_limit - op
188   // pat  = op - src
189   // len  = limit - op
190   assert(src < op);
191   assert(op <= op_limit);
192   assert(op_limit <= buf_limit);
193   // NOTE: The compressor always emits 4 <= len <= 64. It is ok to assume that
194   // when optimizing this function, but we also have to handle other cases in
195   // case the input does not satisfy these conditions.
196 
197   size_t pattern_size = op - src;
198   // The cases are split into different branches to allow the branch predictor,
199   // FDO, and static prediction hints to work better. For each input we list the
200   // ratio of invocations that match each condition.
201   //
202   // input        slop < 16   pat < 8  len > 16
203   // ------------------------------------------
204   // html|html4|cp   0%         1.01%    27.73%
205   // urls            0%         0.88%    14.79%
206   // jpg             0%        64.29%     7.14%
207   // pdf             0%         2.56%    58.06%
208   // txt[1-4]        0%         0.23%     0.97%
209   // pb              0%         0.96%    13.88%
210   // bin             0.01%     22.27%    41.17%
211   //
212   // It is very rare that we don't have enough slop for doing block copies. It
213   // is also rare that we need to expand a pattern. Small patterns are common
214   // for incompressible formats and for those we are plenty fast already.
215   // Lengths are normally not greater than 16 but they vary depending on the
216   // input. In general, always predicting len <= 16 would be an acceptable
217   // prediction.
218   //
219   // In order to be fast we want a pattern >= 8 bytes and an unrolled loop
220   // copying 2x 8 bytes at a time.
221 
222   // Handle the uncommon case where pattern is less than 8 bytes.
223   if (SNAPPY_PREDICT_FALSE(pattern_size < 8)) {
224 #if SNAPPY_HAVE_SSSE3
225     // Load the first eight bytes into a 128-bit XMM register, then use PSHUFB
226     // to permute the register's contents in-place into a repeating sequence of
227     // the first "pattern_size" bytes.
228     // For example, suppose:
229     //    src       == "abc"
230     //    op        == src + 3
231     // After _mm_shuffle_epi8(), "pattern" will have five copies of "abc"
232     // followed by one byte of slop: abcabcabcabcabca.
233     //
234     // The non-SSE fallback implementation suffers from store-forwarding stalls
235     // because its loads and stores partly overlap. By expanding the pattern
236     // in-place, we avoid the penalty.
237     if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 16)) {
238       const __m128i shuffle_mask = _mm_load_si128(
239           reinterpret_cast<const __m128i*>(pshufb_fill_patterns)
240           + pattern_size - 1);
241       const __m128i pattern = _mm_shuffle_epi8(
242           _mm_loadl_epi64(reinterpret_cast<const __m128i*>(src)), shuffle_mask);
243       // Uninitialized bytes are masked out by the shuffle mask.
244       // TODO: remove annotation and macro defs once MSan is fixed.
245       SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(&pattern, sizeof(pattern));
246       pattern_size *= 16 / pattern_size;
247       char* op_end = std::min(op_limit, buf_limit - 15);
248       while (op < op_end) {
249         _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern);
250         op += pattern_size;
251       }
252       if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
253     }
254     return IncrementalCopySlow(src, op, op_limit);
255 #else  // !SNAPPY_HAVE_SSSE3
256     // If plenty of buffer space remains, expand the pattern to at least 8
257     // bytes. The way the following loop is written, we need 8 bytes of buffer
258     // space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10
259     // bytes if pattern_size is 2.  Precisely encoding that is probably not
260     // worthwhile; instead, invoke the slow path if we cannot write 11 bytes
261     // (because 11 are required in the worst case).
262     if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 11)) {
263       while (pattern_size < 8) {
264         UnalignedCopy64(src, op);
265         op += pattern_size;
266         pattern_size *= 2;
267       }
268       if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
269     } else {
270       return IncrementalCopySlow(src, op, op_limit);
271     }
272 #endif  // SNAPPY_HAVE_SSSE3
273   }
274   assert(pattern_size >= 8);
275 
276   // Copy 2x 8 bytes at a time. Because op - src can be < 16, a single
277   // UnalignedCopy128 might overwrite data in op. UnalignedCopy64 is safe
278   // because expanding the pattern to at least 8 bytes guarantees that
279   // op - src >= 8.
280   //
281   // Typically, the op_limit is the gating factor so try to simplify the loop
282   // based on that.
283   if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 16)) {
284     // There are at least one and at most four 16-byte blocks. Writing four
285     // conditionals instead of a loop allows FDO to lay out the code with
286     // respect to the actual probabilities of each length.
287     // TODO: Replace with loop with trip count hint.
288     UnalignedCopy64(src, op);
289     UnalignedCopy64(src + 8, op + 8);
290 
291     if (op + 16 < op_limit) {
292       UnalignedCopy64(src + 16, op + 16);
293       UnalignedCopy64(src + 24, op + 24);
294     }
295     if (op + 32 < op_limit) {
296       UnalignedCopy64(src + 32, op + 32);
297       UnalignedCopy64(src + 40, op + 40);
298     }
299     if (op + 48 < op_limit) {
300       UnalignedCopy64(src + 48, op + 48);
301       UnalignedCopy64(src + 56, op + 56);
302     }
303     return op_limit;
304   }
305 
306   // Fall back to doing as much as we can with the available slop in the
307   // buffer. This code path is relatively cold however so we save code size by
308   // avoiding unrolling and vectorizing.
309   //
310   // TODO: Remove pragma when cold regions don't get vectorized
311   // or unrolled.
312 #ifdef __clang__
313 #pragma clang loop unroll(disable)
314 #endif
315   for (char *op_end = buf_limit - 16; op < op_end; op += 16, src += 16) {
316     UnalignedCopy64(src, op);
317     UnalignedCopy64(src + 8, op + 8);
318   }
319   if (op >= op_limit)
320     return op_limit;
321 
322   // We only take this branch if we didn't have enough slop and we can do a
323   // single 8 byte copy.
324   if (SNAPPY_PREDICT_FALSE(op <= buf_limit - 8)) {
325     UnalignedCopy64(src, op);
326     src += 8;
327     op += 8;
328   }
329   return IncrementalCopySlow(src, op, op_limit);
330 }
331 
332 }  // namespace
333 
334 template <bool allow_fast_path>
335 static inline char* EmitLiteral(char* op,
336                                 const char* literal,
337                                 int len) {
338   // The vast majority of copies are below 16 bytes, for which a
339   // call to memcpy is overkill. This fast path can sometimes
340   // copy up to 15 bytes too much, but that is okay in the
341   // main loop, since we have a bit to go on for both sides:
342   //
343   //   - The input will always have kInputMarginBytes = 15 extra
344   //     available bytes, as long as we're in the main loop, and
345   //     if not, allow_fast_path = false.
346   //   - The output will always have 32 spare bytes (see
347   //     MaxCompressedLength).
348   assert(len > 0);      // Zero-length literals are disallowed
349   int n = len - 1;
350   if (allow_fast_path && len <= 16) {
351     // Fits in tag byte
352     *op++ = LITERAL | (n << 2);
353 
354     UnalignedCopy128(literal, op);
355     return op + len;
356   }
357 
358   if (n < 60) {
359     // Fits in tag byte
360     *op++ = LITERAL | (n << 2);
361   } else {
362     int count = (Bits::Log2Floor(n) >> 3) + 1;
363     assert(count >= 1);
364     assert(count <= 4);
365     *op++ = LITERAL | ((59 + count) << 2);
366     // Encode in upcoming bytes.
367     // Write 4 bytes, though we may care about only 1 of them. The output buffer
368     // is guaranteed to have at least 3 more bytes left as 'len >= 61' holds
369     // here and there is a memcpy of size 'len' below.
370     LittleEndian::Store32(op, n);
371     op += count;
372   }
373   memcpy(op, literal, len);
374   return op + len;
375 }
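
// Worked example of the encoding above: a 5-byte literal stores n = 4
// directly in the tag, so it is emitted as the byte LITERAL | (4 << 2) == 0x10
// followed by the 5 literal bytes. A 100-byte literal uses the long form:
// count == 1, tag == LITERAL | (60 << 2), then a single length byte holding
// n == 99, then the literal bytes.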
376 
377 template <bool len_less_than_12>
378 static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) {
379   assert(len <= 64);
380   assert(len >= 4);
381   assert(offset < 65536);
382   assert(len_less_than_12 == (len < 12));
383 
384   if (len_less_than_12 && SNAPPY_PREDICT_TRUE(offset < 2048)) {
385     // offset fits in 11 bits.  The 3 highest go in the top of the first byte,
386     // and the rest go in the second byte.
387     *op++ = COPY_1_BYTE_OFFSET + ((len - 4) << 2) + ((offset >> 3) & 0xe0);
388     *op++ = offset & 0xff;
389   } else {
390     // Write 4 bytes, though we only care about 3 of them.  The output buffer
391     // is required to have some slack, so the extra byte won't overrun it.
392     uint32 u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
393     LittleEndian::Store32(op, u);
394     op += 3;
395   }
396   return op;
397 }
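
// Worked example, assuming the usual tag values COPY_1_BYTE_OFFSET == 1 and
// COPY_2_BYTE_OFFSET == 2 from snappy-internal.h: a copy with len == 6 and
// offset == 100 takes the short form, i.e. the tag byte
// 1 + ((6 - 4) << 2) + ((100 >> 3) & 0xe0) == 0x09 followed by the low offset
// byte 0x64. A copy with len == 30 and offset == 3000 takes the long form:
// tag 2 + ((30 - 1) << 2) == 0x76 followed by the offset as a 16-bit
// little-endian value.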
398 
399 template <bool len_less_than_12>
400 static inline char* EmitCopy(char* op, size_t offset, size_t len) {
401   assert(len_less_than_12 == (len < 12));
402   if (len_less_than_12) {
403     return EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len);
404   } else {
405     // A special case for len <= 64 might help, but so far measurements suggest
406     // it's in the noise.
407 
408     // Emit 64 byte copies but make sure to keep at least four bytes reserved.
409     while (SNAPPY_PREDICT_FALSE(len >= 68)) {
410       op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 64);
411       len -= 64;
412     }
413 
414     // One or two copies will now finish the job.
415     if (len > 64) {
416       op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 60);
417       len -= 60;
418     }
419 
420     // Emit remainder.
421     if (len < 12) {
422       op = EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len);
423     } else {
424       op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, len);
425     }
426     return op;
427   }
428 }
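
// Illustration of the splitting above: len == 150 is emitted as copies of
// 64 + 64 + 22, and len == 66 as 60 + 6. Splitting at 60 when 64 < len < 68
// guarantees that the final copy is never shorter than the 4-byte minimum a
// copy op can encode.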
429 
430 bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
431   uint32 v = 0;
432   const char* limit = start + n;
433   if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
434     *result = v;
435     return true;
436   } else {
437     return false;
438   }
439 }
440 
441 namespace {
442 uint32 CalculateTableSize(uint32 input_size) {
443   static_assert(
444       kMaxHashTableSize >= kMinHashTableSize,
445       "kMaxHashTableSize should be greater or equal to kMinHashTableSize.");
446   if (input_size > kMaxHashTableSize) {
447     return kMaxHashTableSize;
448   }
449   if (input_size < kMinHashTableSize) {
450     return kMinHashTableSize;
451   }
452   // This is equivalent to Log2Ceiling(input_size), assuming input_size > 1.
453   // 2 << Log2Floor(x - 1) is equivalent to 1 << (1 + Log2Floor(x - 1)).
454   return 2u << Bits::Log2Floor(input_size - 1);
455 }
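
// For example, a 5000-byte fragment (assuming it lies between
// kMinHashTableSize and kMaxHashTableSize) gets an 8192-entry table, since
// 2 << Log2Floor(4999) == 2 << 12; sizes outside that range are clamped to
// the bounds above.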
456 }  // namespace
457 
458 namespace internal {
459 WorkingMemory::WorkingMemory(size_t input_size) {
460   const size_t max_fragment_size = std::min(input_size, kBlockSize);
461   const size_t table_size = CalculateTableSize(max_fragment_size);
462   size_ = table_size * sizeof(*table_) + max_fragment_size +
463           MaxCompressedLength(max_fragment_size);
464   mem_ = std::allocator<char>().allocate(size_);
465   table_ = reinterpret_cast<uint16*>(mem_);
466   input_ = mem_ + table_size * sizeof(*table_);
467   output_ = input_ + max_fragment_size;
468 }
469 
470 WorkingMemory::~WorkingMemory() {
471   std::allocator<char>().deallocate(mem_, size_);
472 }
473 
474 uint16* WorkingMemory::GetHashTable(size_t fragment_size,
475                                     int* table_size) const {
476   const size_t htsize = CalculateTableSize(fragment_size);
477   memset(table_, 0, htsize * sizeof(*table_));
478   *table_size = htsize;
479   return table_;
480 }
481 }  // end namespace internal
482 
483 // For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
484 // equal UNALIGNED_LOAD32(p + offset).  Motivation: On x86-64 hardware we have
485 // empirically found that overlapping loads such as
486 //  UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
487 // are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
488 //
489 // We have different versions for 64- and 32-bit; ideally we would avoid the
490 // two functions and just inline the UNALIGNED_LOAD64 call into
491 // GetUint32AtOffset, but GCC (at least as of 4.6) is seemingly not clever
492 // enough to avoid loading the value multiple times then. For 64-bit, the load
493 // is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
494 // done at GetUint32AtOffset() time.
495 
496 #ifdef ARCH_K8
497 
498 typedef uint64 EightBytesReference;
499 
500 static inline EightBytesReference GetEightBytesAt(const char* ptr) {
501   return UNALIGNED_LOAD64(ptr);
502 }
503 
504 static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
505   assert(offset >= 0);
506   assert(offset <= 4);
507   return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
508 }
509 
510 #else
511 
512 typedef const char* EightBytesReference;
513 
514 static inline EightBytesReference GetEightBytesAt(const char* ptr) {
515   return ptr;
516 }
517 
518 static inline uint32 GetUint32AtOffset(const char* v, int offset) {
519   assert(offset >= 0);
520   assert(offset <= 4);
521   return UNALIGNED_LOAD32(v + offset);
522 }
523 
524 #endif
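
// For example, on a little-endian 64-bit (ARCH_K8) build,
// GetUint32AtOffset(v, 2) is just v >> 16, so the four bytes at p + 2 are
// recovered from the single 8-byte load performed in GetEightBytesAt(p)
// without touching memory again.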
525 
526 // Flat array compression that does not emit the "uncompressed length"
527 // prefix. Compresses "input" string to the "*op" buffer.
528 //
529 // REQUIRES: "input" is at most "kBlockSize" bytes long.
530 // REQUIRES: "op" points to an array of memory that is at least
531 // "MaxCompressedLength(input.size())" in size.
532 // REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
533 // REQUIRES: "table_size" is a power of two
534 //
535 // Returns an "end" pointer into "op" buffer.
536 // "end - op" is the compressed size of "input".
537 namespace internal {
538 char* CompressFragment(const char* input,
539                        size_t input_size,
540                        char* op,
541                        uint16* table,
542                        const int table_size) {
543   // "ip" is the input pointer, and "op" is the output pointer.
544   const char* ip = input;
545   assert(input_size <= kBlockSize);
546   assert((table_size & (table_size - 1)) == 0);  // table must be power of two
547   const int shift = 32 - Bits::Log2Floor(table_size);
548   assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
549   const char* ip_end = input + input_size;
550   const char* base_ip = ip;
551   // Bytes in [next_emit, ip) will be emitted as literal bytes.  Or
552   // [next_emit, ip_end) after the main loop.
553   const char* next_emit = ip;
554 
555   const size_t kInputMarginBytes = 15;
556   if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
557     const char* ip_limit = input + input_size - kInputMarginBytes;
558 
559     for (uint32 next_hash = Hash(++ip, shift); ; ) {
560       assert(next_emit < ip);
561       // The body of this loop calls EmitLiteral once and then EmitCopy one or
562       // more times.  (The exception is that when we're close to exhausting
563       // the input we goto emit_remainder.)
564       //
565       // In the first iteration of this loop we're just starting, so
566       // there's nothing to copy, so calling EmitLiteral once is
567       // necessary.  And we only start a new iteration when the
568       // current iteration has determined that a call to EmitLiteral will
569       // precede the next call to EmitCopy (if any).
570       //
571       // Step 1: Scan forward in the input looking for a 4-byte-long match.
572       // If we get close to exhausting the input then goto emit_remainder.
573       //
574       // Heuristic match skipping: If 32 bytes are scanned with no matches
575       // found, start looking only at every other byte. If 32 more bytes are
576       // scanned (or skipped), look at every third byte, etc.. When a match is
577       // found, immediately go back to looking at every byte. This is a small
578       // loss (~5% performance, ~0.1% density) for compressible data due to more
579       // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
580       // win since the compressor quickly "realizes" the data is incompressible
581       // and doesn't bother looking for matches everywhere.
582       //
583       // The "skip" variable keeps track of how many bytes there are since the
584       // last match; dividing it by 32 (ie. right-shifting by five) gives the
585       // number of bytes to move ahead for each iteration.
586       uint32 skip = 32;
587 
588       const char* next_ip = ip;
589       const char* candidate;
590       do {
591         ip = next_ip;
592         uint32 hash = next_hash;
593         assert(hash == Hash(ip, shift));
594         uint32 bytes_between_hash_lookups = skip >> 5;
595         skip += bytes_between_hash_lookups;
596         next_ip = ip + bytes_between_hash_lookups;
597         if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
598           goto emit_remainder;
599         }
600         next_hash = Hash(next_ip, shift);
601         candidate = base_ip + table[hash];
602         assert(candidate >= base_ip);
603         assert(candidate < ip);
604 
605         table[hash] = ip - base_ip;
606       } while (SNAPPY_PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
607                                  UNALIGNED_LOAD32(candidate)));
608 
609       // Step 2: A 4-byte match has been found.  We'll later see if more
610       // than 4 bytes match.  But, prior to the match, input
611       // bytes [next_emit, ip) are unmatched.  Emit them as "literal bytes."
612       assert(next_emit + 16 <= ip_end);
613       op = EmitLiteral</*allow_fast_path=*/true>(op, next_emit, ip - next_emit);
614 
615       // Step 3: Call EmitCopy, and then see if another EmitCopy could
616       // be our next move.  Repeat until we find no match for the
617       // input immediately after what was consumed by the last EmitCopy call.
618       //
619       // If we exit this loop normally then we need to call EmitLiteral next,
620       // though we don't yet know how big the literal will be.  We handle that
621       // by proceeding to the next iteration of the main loop.  We also can exit
622       // this loop via goto if we get close to exhausting the input.
623       EightBytesReference input_bytes;
624       uint32 candidate_bytes = 0;
625 
626       do {
627         // We have a 4-byte match at ip, and no need to emit any
628         // "literal bytes" prior to ip.
629         const char* base = ip;
630         std::pair<size_t, bool> p =
631             FindMatchLength(candidate + 4, ip + 4, ip_end);
632         size_t matched = 4 + p.first;
633         ip += matched;
634         size_t offset = base - candidate;
635         assert(0 == memcmp(base, candidate, matched));
636         if (p.second) {
637           op = EmitCopy</*len_less_than_12=*/true>(op, offset, matched);
638         } else {
639           op = EmitCopy</*len_less_than_12=*/false>(op, offset, matched);
640         }
641         next_emit = ip;
642         if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
643           goto emit_remainder;
644         }
645         // We are now looking for a 4-byte match again.  We read
646         // table[Hash(ip, shift)] for that.  To improve compression,
647         // we also update table[Hash(ip - 1, shift)] and table[Hash(ip, shift)].
648         input_bytes = GetEightBytesAt(ip - 1);
649         uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
650         table[prev_hash] = ip - base_ip - 1;
651         uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
652         candidate = base_ip + table[cur_hash];
653         candidate_bytes = UNALIGNED_LOAD32(candidate);
654         table[cur_hash] = ip - base_ip;
655       } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
656 
657       next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
658       ++ip;
659     }
660   }
661 
662  emit_remainder:
663   // Emit the remaining bytes as a literal
664   if (next_emit < ip_end) {
665     op = EmitLiteral</*allow_fast_path=*/false>(op, next_emit,
666                                                 ip_end - next_emit);
667   }
668 
669   return op;
670 }
671 }  // end namespace internal
672 
673 // Called back at every compression call to trace parameters and sizes.
674 static inline void Report(const char */* algorithm */, size_t /* compressed_size */,
675                           size_t /* uncompressed_size */) {}
676 
677 // Signature of output types needed by decompression code.
678 // The decompression code is templatized on a type that obeys this
679 // signature so that we do not pay virtual function call overhead in
680 // the middle of a tight decompression loop.
681 //
682 // class DecompressionWriter {
683 //  public:
684 //   // Called before decompression
685 //   void SetExpectedLength(size_t length);
686 //
687 //   // Called after decompression
688 //   bool CheckLength() const;
689 //
690 //   // Called repeatedly during decompression
691 //   bool Append(const char* ip, size_t length);
692 //   bool AppendFromSelf(uint32 offset, size_t length);
693 //
694 //   // The rules for how TryFastAppend differs from Append are somewhat
695 //   // convoluted:
696 //   //
697 //   //  - TryFastAppend is allowed to decline (return false) at any
698 //   //    time, for any reason -- just "return false" would be
699 //   //    a perfectly legal implementation of TryFastAppend.
700 //   //    The intention is for TryFastAppend to allow a fast path
701 //   //    in the common case of a small append.
702 //   //  - TryFastAppend is allowed to read up to <available> bytes
703 //   //    from the input buffer, whereas Append is allowed to read
704 //   //    <length>. However, if it returns true, it must leave
705 //   //    at least five (kMaximumTagLength) bytes in the input buffer
706 //   //    afterwards, so that there is always enough space to read the
707 //   //    next tag without checking for a refill.
708 //   //  - TryFastAppend must always return decline (return false)
709 //   //    if <length> is 61 or more, as in this case the literal length is not
710 //   //    decoded fully. In practice, this should not be a big problem,
711 //   //    as it is unlikely that one would implement a fast path accepting
712 //   //    this much data.
713 //   //
714 //   bool TryFastAppend(const char* ip, size_t available, size_t length);
715 // };
716 
717 static inline uint32 ExtractLowBytes(uint32 v, int n) {
718   assert(n >= 0);
719   assert(n <= 4);
720 #if SNAPPY_HAVE_BMI2
721   return _bzhi_u32(v, 8 * n);
722 #else
723   // This needs to be wider than uint32 otherwise `mask << 32` will be
724   // undefined.
725   uint64 mask = 0xffffffff;
726   return v & ~(mask << (8 * n));
727 #endif
728 }
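
// For example, ExtractLowBytes(0x12345678, 2) == 0x5678: with BMI2 this is a
// single BZHI keeping the low 16 bits, and the portable fallback masks with
// ~(0xffffffffull << 16).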
729 
730 static inline bool LeftShiftOverflows(uint8 value, uint32 shift) {
731   assert(shift < 32);
732   static const uint8 masks[] = {
733       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  //
734       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  //
735       0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,  //
736       0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe};
737   return (value & masks[shift]) != 0;
738 }
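
// For example, LeftShiftOverflows(0x80, 25) is true: bit 7 shifted left by 25
// would land on bit 32, so value << shift no longer fits in a uint32.
// ReadUncompressedLength() below relies on this to reject varints that would
// overflow the 32-bit length.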
739 
740 // Helper class for decompression
741 class SnappyDecompressor {
742  private:
743   Source*       reader_;         // Underlying source of bytes to decompress
744   const char*   ip_;             // Points to next buffered byte
745   const char*   ip_limit_;       // Points just past buffered bytes
746   uint32        peeked_;         // Bytes peeked from reader (need to skip)
747   bool          eof_;            // Hit end of input without an error?
748   char          scratch_[kMaximumTagLength];  // See RefillTag().
749 
750   // Ensure that all of the tag metadata for the next tag is available
751   // in [ip_..ip_limit_-1].  Also ensures that [ip,ip+4] is readable even
752   // if (ip_limit_ - ip_ < 5).
753   //
754   // Returns true on success, false on error or end of input.
755   bool RefillTag();
756 
757  public:
758   explicit SnappyDecompressor(Source* reader)
759       : reader_(reader),
760         ip_(NULL),
761         ip_limit_(NULL),
762         peeked_(0),
763         eof_(false) {
764   }
765 
766   ~SnappyDecompressor() {
767     // Advance past any bytes we peeked at from the reader
768     reader_->Skip(peeked_);
769   }
770 
771   // Returns true iff we have hit the end of the input without an error.
772   bool eof() const {
773     return eof_;
774   }
775 
776   // Read the uncompressed length stored at the start of the compressed data.
777   // On success, stores the length in *result and returns true.
778   // On failure, returns false.
779   bool ReadUncompressedLength(uint32* result) {
780     assert(ip_ == NULL);       // Must not have read anything yet
781     // Length is encoded in 1..5 bytes
782     *result = 0;
783     uint32 shift = 0;
784     while (true) {
785       if (shift >= 32) return false;
786       size_t n;
787       const char* ip = reader_->Peek(&n);
788       if (n == 0) return false;
789       const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
790       reader_->Skip(1);
791       uint32 val = c & 0x7f;
792       if (LeftShiftOverflows(static_cast<uint8>(val), shift)) return false;
793       *result |= val << shift;
794       if (c < 128) {
795         break;
796       }
797       shift += 7;
798     }
799     return true;
800   }
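
  // For reference, the length preamble parsed above is a little-endian
  // base-128 varint: a length of 64 is stored as the single byte 0x40, and
  // 2097150 (0x1FFFFE) as the bytes 0xFE 0xFF 0x7F, where the high bit of
  // each byte means "more bytes follow".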
801 
802   // Process the next item found in the input.
803   // Returns true if successful, false on error or end of input.
804   template <class Writer>
805 #if defined(__GNUC__) && defined(__x86_64__)
806   __attribute__((aligned(32)))
807 #endif
808   void DecompressAllTags(Writer* writer) {
809     // On x86, pad the function body to start 16 bytes later. This function has
810     // a couple of hotspots that are highly sensitive to alignment: we have
811     // observed regressions by more than 20% in some metrics just by moving the
812     // exact same code to a different position in the benchmark binary.
813     //
814     // Putting this code on a 32-byte-aligned boundary + 16 bytes makes us hit
815     // the "lucky" case consistently. Unfortunately, this is a very brittle
816     // workaround, and future differences in code generation may reintroduce
817     // this regression. If you experience a big, difficult to explain, benchmark
818     // performance regression here, first try removing this hack.
819 #if defined(__GNUC__) && defined(__x86_64__)
820     // Two 8-byte "NOP DWORD ptr [EAX + EAX*1 + 00000000H]" instructions.
821     asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00");
822     asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00");
823 #endif
824 
825     const char* ip = ip_;
826     // We could have put this refill fragment only at the beginning of the loop.
827     // However, duplicating it at the end of each branch gives the compiler more
828     // scope to optimize the <ip_limit_ - ip> expression based on the local
829     // context, which overall increases speed.
830     #define MAYBE_REFILL() \
831         if (ip_limit_ - ip < kMaximumTagLength) { \
832           ip_ = ip; \
833           if (!RefillTag()) return; \
834           ip = ip_; \
835         }
836 
837     MAYBE_REFILL();
838     for ( ;; ) {
839       const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
840 
841       // Ratio of iterations that have LITERAL vs non-LITERAL for different
842       // inputs.
843       //
844       // input          LITERAL  NON_LITERAL
845       // -----------------------------------
846       // html|html4|cp   23%        77%
847       // urls            36%        64%
848       // jpg             47%        53%
849       // pdf             19%        81%
850       // txt[1-4]        25%        75%
851       // pb              24%        76%
852       // bin             24%        76%
853       if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) {
854         size_t literal_length = (c >> 2) + 1u;
855         if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
856           assert(literal_length < 61);
857           ip += literal_length;
858           // NOTE: There is no MAYBE_REFILL() here, as TryFastAppend()
859           // will not return true unless there's already at least five spare
860           // bytes in addition to the literal.
861           continue;
862         }
863         if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) {
864           // Long literal.
865           const size_t literal_length_length = literal_length - 60;
866           literal_length =
867               ExtractLowBytes(LittleEndian::Load32(ip), literal_length_length) +
868               1;
869           ip += literal_length_length;
870         }
871 
872         size_t avail = ip_limit_ - ip;
873         while (avail < literal_length) {
874           if (!writer->Append(ip, avail)) return;
875           literal_length -= avail;
876           reader_->Skip(peeked_);
877           size_t n;
878           ip = reader_->Peek(&n);
879           avail = n;
880           peeked_ = avail;
881           if (avail == 0) return;  // Premature end of input
882           ip_limit_ = ip + avail;
883         }
884         if (!writer->Append(ip, literal_length)) {
885           return;
886         }
887         ip += literal_length;
888         MAYBE_REFILL();
889       } else {
890         const size_t entry = char_table[c];
891         const size_t trailer =
892             ExtractLowBytes(LittleEndian::Load32(ip), entry >> 11);
893         const size_t length = entry & 0xff;
894         ip += entry >> 11;
895 
896         // copy_offset/256 is encoded in bits 8..10.  By just fetching
897         // those bits, we get copy_offset (since the bit-field starts at
898         // bit 8).
899         const size_t copy_offset = entry & 0x700;
900         if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
901           return;
902         }
903         MAYBE_REFILL();
904       }
905     }
906 
907 #undef MAYBE_REFILL
908   }
909 };
910 
911 bool SnappyDecompressor::RefillTag() {
912   const char* ip = ip_;
913   if (ip == ip_limit_) {
914     // Fetch a new fragment from the reader
915     reader_->Skip(peeked_);   // All peeked bytes are used up
916     size_t n;
917     ip = reader_->Peek(&n);
918     peeked_ = n;
919     eof_ = (n == 0);
920     if (eof_) return false;
921     ip_limit_ = ip + n;
922   }
923 
924   // Read the tag character
925   assert(ip < ip_limit_);
926   const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
927   const uint32 entry = char_table[c];
928   const uint32 needed = (entry >> 11) + 1;  // +1 byte for 'c'
929   assert(needed <= sizeof(scratch_));
930 
931   // Read more bytes from reader if needed
932   uint32 nbuf = ip_limit_ - ip;
933   if (nbuf < needed) {
934     // Stitch together bytes from ip and reader to form the word
935     // contents.  We store the needed bytes in "scratch_".  They
936     // will be consumed immediately by the caller since we do not
937     // read more than we need.
938     memmove(scratch_, ip, nbuf);
939     reader_->Skip(peeked_);  // All peeked bytes are used up
940     peeked_ = 0;
941     while (nbuf < needed) {
942       size_t length;
943       const char* src = reader_->Peek(&length);
944       if (length == 0) return false;
945       uint32 to_add = std::min<uint32>(needed - nbuf, length);
946       memcpy(scratch_ + nbuf, src, to_add);
947       nbuf += to_add;
948       reader_->Skip(to_add);
949     }
950     assert(nbuf == needed);
951     ip_ = scratch_;
952     ip_limit_ = scratch_ + needed;
953   } else if (nbuf < kMaximumTagLength) {
954     // Have enough bytes, but move into scratch_ so that we do not
955     // read past end of input
956     memmove(scratch_, ip, nbuf);
957     reader_->Skip(peeked_);  // All peeked bytes are used up
958     peeked_ = 0;
959     ip_ = scratch_;
960     ip_limit_ = scratch_ + nbuf;
961   } else {
962     // Pass pointer to buffer returned by reader_.
963     ip_ = ip;
964   }
965   return true;
966 }
967 
968 template <typename Writer>
969 static bool InternalUncompress(Source* r, Writer* writer) {
970   // Read the uncompressed length from the front of the compressed input
971   SnappyDecompressor decompressor(r);
972   uint32 uncompressed_len = 0;
973   if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
974 
975   return InternalUncompressAllTags(&decompressor, writer, r->Available(),
976                                    uncompressed_len);
977 }
978 
979 template <typename Writer>
980 static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
981                                       Writer* writer,
982                                       uint32 compressed_len,
983                                       uint32 uncompressed_len) {
984   Report("snappy_uncompress", compressed_len, uncompressed_len);
985 
986   writer->SetExpectedLength(uncompressed_len);
987 
988   // Process the entire input
989   decompressor->DecompressAllTags(writer);
990   writer->Flush();
991   return (decompressor->eof() && writer->CheckLength());
992 }
993 
994 bool GetUncompressedLength(Source* source, uint32* result) {
995   SnappyDecompressor decompressor(source);
996   return decompressor.ReadUncompressedLength(result);
997 }
998 
999 size_t Compress(Source* reader, Sink* writer) {
1000   size_t written = 0;
1001   size_t N = reader->Available();
1002   const size_t uncompressed_size = N;
1003   char ulength[Varint::kMax32];
1004   char* p = Varint::Encode32(ulength, N);
1005   writer->Append(ulength, p-ulength);
1006   written += (p - ulength);
1007 
1008   internal::WorkingMemory wmem(N);
1009 
1010   while (N > 0) {
1011     // Get next block to compress (without copying if possible)
1012     size_t fragment_size;
1013     const char* fragment = reader->Peek(&fragment_size);
1014     assert(fragment_size != 0);  // premature end of input
1015     const size_t num_to_read = std::min(N, kBlockSize);
1016     size_t bytes_read = fragment_size;
1017 
1018     size_t pending_advance = 0;
1019     if (bytes_read >= num_to_read) {
1020       // Buffer returned by reader is large enough
1021       pending_advance = num_to_read;
1022       fragment_size = num_to_read;
1023     } else {
1024       char* scratch = wmem.GetScratchInput();
1025       memcpy(scratch, fragment, bytes_read);
1026       reader->Skip(bytes_read);
1027 
1028       while (bytes_read < num_to_read) {
1029         fragment = reader->Peek(&fragment_size);
1030         size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read);
1031         memcpy(scratch + bytes_read, fragment, n);
1032         bytes_read += n;
1033         reader->Skip(n);
1034       }
1035       assert(bytes_read == num_to_read);
1036       fragment = scratch;
1037       fragment_size = num_to_read;
1038     }
1039     assert(fragment_size == num_to_read);
1040 
1041     // Get encoding table for compression
1042     int table_size;
1043     uint16* table = wmem.GetHashTable(num_to_read, &table_size);
1044 
1045     // Compress input_fragment and append to dest
1046     const int max_output = MaxCompressedLength(num_to_read);
1047 
1048     // Need a scratch buffer for the output, in case the byte sink doesn't
1049     // have room for us directly.
1050 
1051     // Since we encode kBlockSize regions followed by a region
1052     // which is <= kBlockSize in length, a previously allocated
1053     // scratch_output[] region is big enough for this iteration.
1054     char* dest = writer->GetAppendBuffer(max_output, wmem.GetScratchOutput());
1055     char* end = internal::CompressFragment(fragment, fragment_size, dest, table,
1056                                            table_size);
1057     writer->Append(dest, end - dest);
1058     written += (end - dest);
1059 
1060     N -= num_to_read;
1061     reader->Skip(pending_advance);
1062   }
1063 
1064   Report("snappy_compress", written, uncompressed_size);
1065 
1066   return written;
1067 }
1068 
1069 // -----------------------------------------------------------------------
1070 // IOVec interfaces
1071 // -----------------------------------------------------------------------
1072 
1073 // A type that writes to an iovec.
1074 // Note that this is not a "ByteSink", but a type that matches the
1075 // Writer template argument to SnappyDecompressor::DecompressAllTags().
1076 class SnappyIOVecWriter {
1077  private:
1078   // output_iov_end_ is set to iov + count and used to determine when
1079   // the end of the iovs is reached.
1080   const struct iovec* output_iov_end_;
1081 
1082 #if !defined(NDEBUG)
1083   const struct iovec* output_iov_;
1084 #endif  // !defined(NDEBUG)
1085 
1086   // Current iov that is being written into.
1087   const struct iovec* curr_iov_;
1088 
1089   // Pointer to current iov's write location.
1090   char* curr_iov_output_;
1091 
1092   // Remaining bytes to write into curr_iov_output.
1093   size_t curr_iov_remaining_;
1094 
1095   // Total bytes decompressed into output_iov_ so far.
1096   size_t total_written_;
1097 
1098   // Maximum number of bytes that will be decompressed into output_iov_.
1099   size_t output_limit_;
1100 
1101   static inline char* GetIOVecPointer(const struct iovec* iov, size_t offset) {
1102     return reinterpret_cast<char*>(iov->iov_base) + offset;
1103   }
1104 
1105  public:
1106   // Does not take ownership of iov. iov must be valid during the
1107   // entire lifetime of the SnappyIOVecWriter.
1108   inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count)
1109       : output_iov_end_(iov + iov_count),
1110 #if !defined(NDEBUG)
1111         output_iov_(iov),
1112 #endif  // !defined(NDEBUG)
1113         curr_iov_(iov),
1114         curr_iov_output_(iov_count ? reinterpret_cast<char*>(iov->iov_base)
1115                                    : nullptr),
1116         curr_iov_remaining_(iov_count ? iov->iov_len : 0),
1117         total_written_(0),
1118         output_limit_(-1) {}
1119 
1120   inline void SetExpectedLength(size_t len) {
1121     output_limit_ = len;
1122   }
1123 
1124   inline bool CheckLength() const {
1125     return total_written_ == output_limit_;
1126   }
1127 
1128   inline bool Append(const char* ip, size_t len) {
1129     if (total_written_ + len > output_limit_) {
1130       return false;
1131     }
1132 
1133     return AppendNoCheck(ip, len);
1134   }
1135 
1136   inline bool AppendNoCheck(const char* ip, size_t len) {
1137     while (len > 0) {
1138       if (curr_iov_remaining_ == 0) {
1139         // This iovec is full. Go to the next one.
1140         if (curr_iov_ + 1 >= output_iov_end_) {
1141           return false;
1142         }
1143         ++curr_iov_;
1144         curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
1145         curr_iov_remaining_ = curr_iov_->iov_len;
1146       }
1147 
1148       const size_t to_write = std::min(len, curr_iov_remaining_);
1149       memcpy(curr_iov_output_, ip, to_write);
1150       curr_iov_output_ += to_write;
1151       curr_iov_remaining_ -= to_write;
1152       total_written_ += to_write;
1153       ip += to_write;
1154       len -= to_write;
1155     }
1156 
1157     return true;
1158   }
1159 
1160   inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
1161     const size_t space_left = output_limit_ - total_written_;
1162     if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
1163         curr_iov_remaining_ >= 16) {
1164       // Fast path, used for the majority (about 95%) of invocations.
1165       UnalignedCopy128(ip, curr_iov_output_);
1166       curr_iov_output_ += len;
1167       curr_iov_remaining_ -= len;
1168       total_written_ += len;
1169       return true;
1170     }
1171 
1172     return false;
1173   }
1174 
1175   inline bool AppendFromSelf(size_t offset, size_t len) {
1176     // See SnappyArrayWriter::AppendFromSelf for an explanation of
1177     // the "offset - 1u" trick.
1178     if (offset - 1u >= total_written_) {
1179       return false;
1180     }
1181     const size_t space_left = output_limit_ - total_written_;
1182     if (len > space_left) {
1183       return false;
1184     }
1185 
1186     // Locate the iovec from which we need to start the copy.
1187     const iovec* from_iov = curr_iov_;
1188     size_t from_iov_offset = curr_iov_->iov_len - curr_iov_remaining_;
1189     while (offset > 0) {
1190       if (from_iov_offset >= offset) {
1191         from_iov_offset -= offset;
1192         break;
1193       }
1194 
1195       offset -= from_iov_offset;
1196       --from_iov;
1197 #if !defined(NDEBUG)
1198       assert(from_iov >= output_iov_);
1199 #endif  // !defined(NDEBUG)
1200       from_iov_offset = from_iov->iov_len;
1201     }
1202 
1203     // Copy <len> bytes starting from the iovec pointed to by from_iov to
1204     // the current iovec.
1205     while (len > 0) {
1206       assert(from_iov <= curr_iov_);
1207       if (from_iov != curr_iov_) {
1208         const size_t to_copy =
1209             std::min(from_iov->iov_len - from_iov_offset, len);
1210         AppendNoCheck(GetIOVecPointer(from_iov, from_iov_offset), to_copy);
1211         len -= to_copy;
1212         if (len > 0) {
1213           ++from_iov;
1214           from_iov_offset = 0;
1215         }
1216       } else {
1217         size_t to_copy = curr_iov_remaining_;
1218         if (to_copy == 0) {
1219           // This iovec is full. Go to the next one.
1220           if (curr_iov_ + 1 >= output_iov_end_) {
1221             return false;
1222           }
1223           ++curr_iov_;
1224           curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
1225           curr_iov_remaining_ = curr_iov_->iov_len;
1226           continue;
1227         }
1228         if (to_copy > len) {
1229           to_copy = len;
1230         }
1231 
1232         IncrementalCopy(GetIOVecPointer(from_iov, from_iov_offset),
1233                         curr_iov_output_, curr_iov_output_ + to_copy,
1234                         curr_iov_output_ + curr_iov_remaining_);
1235         curr_iov_output_ += to_copy;
1236         curr_iov_remaining_ -= to_copy;
1237         from_iov_offset += to_copy;
1238         total_written_ += to_copy;
1239         len -= to_copy;
1240       }
1241     }
1242 
1243     return true;
1244   }
1245 
1246   inline void Flush() {}
1247 };
1248 
1249 bool RawUncompressToIOVec(const char* compressed, size_t compressed_length,
1250                           const struct iovec* iov, size_t iov_cnt) {
1251   ByteArraySource reader(compressed, compressed_length);
1252   return RawUncompressToIOVec(&reader, iov, iov_cnt);
1253 }
1254 
1255 bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov,
1256                           size_t iov_cnt) {
1257   SnappyIOVecWriter output(iov, iov_cnt);
1258   return InternalUncompress(compressed, &output);
1259 }
1260 
1261 // -----------------------------------------------------------------------
1262 // Flat array interfaces
1263 // -----------------------------------------------------------------------
1264 
1265 // A type that writes to a flat array.
1266 // Note that this is not a "ByteSink", but a type that matches the
1267 // Writer template argument to SnappyDecompressor::DecompressAllTags().
1268 class SnappyArrayWriter {
1269  private:
1270   char* base_;
1271   char* op_;
1272   char* op_limit_;
1273 
1274  public:
1275   inline explicit SnappyArrayWriter(char* dst)
1276       : base_(dst),
1277         op_(dst),
1278         op_limit_(dst) {
1279   }
1280 
1281   inline void SetExpectedLength(size_t len) {
1282     op_limit_ = op_ + len;
1283   }
1284 
1285   inline bool CheckLength() const {
1286     return op_ == op_limit_;
1287   }
1288 
1289   inline bool Append(const char* ip, size_t len) {
1290     char* op = op_;
1291     const size_t space_left = op_limit_ - op;
1292     if (space_left < len) {
1293       return false;
1294     }
1295     memcpy(op, ip, len);
1296     op_ = op + len;
1297     return true;
1298   }
1299 
1300   inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
1301     char* op = op_;
1302     const size_t space_left = op_limit_ - op;
1303     if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
1304       // Fast path, used for the majority (about 95%) of invocations.
1305       UnalignedCopy128(ip, op);
1306       op_ = op + len;
1307       return true;
1308     } else {
1309       return false;
1310     }
1311   }
1312 
1313   inline bool AppendFromSelf(size_t offset, size_t len) {
1314     char* const op_end = op_ + len;
1315 
1316     // Check if we try to append from before the start of the buffer.
1317     // Normally this would just be a check for "produced < offset",
1318     // but "produced <= offset - 1u" is equivalent for every case
1319     // except the one where offset==0, where the right side will wrap around
1320     // to a very big number. This is convenient, as offset==0 is another
1321     // invalid case that we also want to catch, so that we do not go
1322     // into an infinite loop.
1323     if (Produced() <= offset - 1u || op_end > op_limit_) return false;
1324     op_ = IncrementalCopy(op_ - offset, op_, op_end, op_limit_);
1325 
1326     return true;
1327   }
1328   inline size_t Produced() const {
1329     assert(op_ >= base_);
1330     return op_ - base_;
1331   }
1332   inline void Flush() {}
1333 };
1334 
1335 bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
1336   ByteArraySource reader(compressed, n);
1337   return RawUncompress(&reader, uncompressed);
1338 }
1339 
1340 bool RawUncompress(Source* compressed, char* uncompressed) {
1341   SnappyArrayWriter output(uncompressed);
1342   return InternalUncompress(compressed, &output);
1343 }
1344 
1345 bool Uncompress(const char* compressed, size_t n, std::string* uncompressed) {
1346   size_t ulength;
1347   if (!GetUncompressedLength(compressed, n, &ulength)) {
1348     return false;
1349   }
1350   // On 32-bit builds: max_size() < kuint32max.  Check for that instead
1351   // of crashing (e.g., consider externally specified compressed data).
1352   if (ulength > uncompressed->max_size()) {
1353     return false;
1354   }
1355   STLStringResizeUninitialized(uncompressed, ulength);
1356   return RawUncompress(compressed, n, string_as_array(uncompressed));
1357 }
1358 
1359 // A Writer that drops everything on the floor and just does validation
1360 class SnappyDecompressionValidator {
1361  private:
1362   size_t expected_;
1363   size_t produced_;
1364 
1365  public:
1366   inline SnappyDecompressionValidator() : expected_(0), produced_(0) { }
1367   inline void SetExpectedLength(size_t len) {
1368     expected_ = len;
1369   }
CheckLength() const1370   inline bool CheckLength() const {
1371     return expected_ == produced_;
1372   }
Append(const char* , size_t len)1373   inline bool Append(const char* /* ip */, size_t len) {
1374     produced_ += len;
1375     return produced_ <= expected_;
1376   }
TryFastAppend(const char* , size_t , size_t )1377   inline bool TryFastAppend(const char* /* ip */, size_t /* available */, size_t /* length */) {
1378     return false;
1379   }
AppendFromSelf(size_t offset, size_t len)1380   inline bool AppendFromSelf(size_t offset, size_t len) {
1381     // See SnappyArrayWriter::AppendFromSelf for an explanation of
1382     // the "offset - 1u" trick.
1383     if (produced_ <= offset - 1u) return false;
1384     produced_ += len;
1385     return produced_ <= expected_;
1386   }
Flush()1387   inline void Flush() {}
1388 };
1389 
IsValidCompressedBuffer(const char* compressed, size_t n)1390 bool IsValidCompressedBuffer(const char* compressed, size_t n) {
1391   ByteArraySource reader(compressed, n);
1392   SnappyDecompressionValidator writer;
1393   return InternalUncompress(&reader, &writer);
1394 }
1395 
IsValidCompressed(Source* compressed)1396 bool IsValidCompressed(Source* compressed) {
1397   SnappyDecompressionValidator writer;
1398   return InternalUncompress(compressed, &writer);
1399 }
1400 
RawCompress(const char* input, size_t input_length, char* compressed, size_t* compressed_length)1401 void RawCompress(const char* input,
1402                  size_t input_length,
1403                  char* compressed,
1404                  size_t* compressed_length) {
1405   ByteArraySource reader(input, input_length);
1406   UncheckedByteArraySink writer(compressed);
1407   Compress(&reader, &writer);
1408 
1409   // Compute how many bytes were added
1410   *compressed_length = (writer.CurrentDestination() - compressed);
1411 }
1412 
Compress(const char* input, size_t input_length, std::string* compressed)1413 size_t Compress(const char* input, size_t input_length,
1414                 std::string* compressed) {
1415   // Pre-grow the buffer to the max length of the compressed output
1416   STLStringResizeUninitialized(compressed, MaxCompressedLength(input_length));
1417 
1418   size_t compressed_length;
1419   RawCompress(input, input_length, string_as_array(compressed),
1420               &compressed_length);
1421   compressed->resize(compressed_length);
1422   return compressed_length;
1423 }
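
// Illustrative round-trip use of the two std::string entry points defined
// above (a sketch only; "input" stands for an arbitrary std::string and this
// block is a comment, not part of the library):
//
//   std::string input = ..., compressed, restored;
//   snappy::Compress(input.data(), input.size(), &compressed);
//   if (snappy::Uncompress(compressed.data(), compressed.size(), &restored)) {
//     assert(restored == input);
//   }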

// -----------------------------------------------------------------------
// Sink interface
// -----------------------------------------------------------------------

// A type that decompresses into a Sink. The template parameter
// Allocator must export one method "char* Allocate(int size);", which
// allocates a buffer of "size" and appends that to the destination, and a
// method "void Flush(size_t size);", which is called once at the end of
// decompression so that the generated blocks can be handed off (see
// SnappySinkAllocator below for a concrete implementation).
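//
// A minimal allocator satisfying this contract might look like the sketch
// below (purely illustrative; "ExampleHeapAllocator" is hypothetical and not
// used anywhere in this file). Note that it never frees its blocks; a real
// allocator, such as SnappySinkAllocator further down, transfers ownership
// of the blocks to a Sink in Flush().
//
//   class ExampleHeapAllocator {
//    public:
//     char* Allocate(int size) { return new char[size]; }
//     void Flush(size_t /* size */) {}
//   };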
template <typename Allocator>
class SnappyScatteredWriter {
  Allocator allocator_;

  // We need random access into the data generated so far.  Therefore
  // we keep track of all of the generated data as an array of blocks.
  // All of the blocks except the last have length kBlockSize.
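  // Byte position "pos" of the output therefore lives at
  // blocks_[pos >> kBlockLog][pos & (kBlockSize - 1)], which is exactly how
  // SlowAppendFromSelf() below indexes into previously emitted data.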
  std::vector<char*> blocks_;
  size_t expected_;

  // Total size of all fully generated blocks so far
  size_t full_size_;

  // Pointer into current output block
  char* op_base_;       // Base of output block
  char* op_ptr_;        // Pointer to next unfilled byte in block
  char* op_limit_;      // Pointer just past block

  inline size_t Size() const {
    return full_size_ + (op_ptr_ - op_base_);
  }

  bool SlowAppend(const char* ip, size_t len);
  bool SlowAppendFromSelf(size_t offset, size_t len);

 public:
  inline explicit SnappyScatteredWriter(const Allocator& allocator)
      : allocator_(allocator),
        full_size_(0),
        op_base_(NULL),
        op_ptr_(NULL),
        op_limit_(NULL) {
  }

  inline void SetExpectedLength(size_t len) {
    assert(blocks_.empty());
    expected_ = len;
  }

  inline bool CheckLength() const {
    return Size() == expected_;
  }

  // Return the number of bytes actually uncompressed so far
  inline size_t Produced() const {
    return Size();
  }

  inline bool Append(const char* ip, size_t len) {
    size_t avail = op_limit_ - op_ptr_;
    if (len <= avail) {
      // Fast path
      memcpy(op_ptr_, ip, len);
      op_ptr_ += len;
      return true;
    } else {
      return SlowAppend(ip, len);
    }
  }

  inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
    char* op = op_ptr_;
    const int space_left = op_limit_ - op;
    if (length <= 16 && available >= 16 + kMaximumTagLength &&
        space_left >= 16) {
      // Fast path, used for the majority (about 95%) of invocations.
      UnalignedCopy128(ip, op);
      op_ptr_ = op + length;
      return true;
    } else {
      return false;
    }
  }

  inline bool AppendFromSelf(size_t offset, size_t len) {
    char* const op_end = op_ptr_ + len;
    // See SnappyArrayWriter::AppendFromSelf for an explanation of
    // the "offset - 1u" trick.
    if (SNAPPY_PREDICT_TRUE(offset - 1u < size_t(op_ptr_ - op_base_) &&
                            op_end <= op_limit_)) {
      // Fast path: src and dst in current block.
      op_ptr_ = IncrementalCopy(op_ptr_ - offset, op_ptr_, op_end, op_limit_);
      return true;
    }
    return SlowAppendFromSelf(offset, len);
  }

  // Called at the end of the decompression. We ask the allocator to
  // write all blocks to the sink.
  inline void Flush() { allocator_.Flush(Produced()); }
};

template<typename Allocator>
bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
  size_t avail = op_limit_ - op_ptr_;
  while (len > avail) {
    // Completely fill this block
    memcpy(op_ptr_, ip, avail);
    op_ptr_ += avail;
    assert(op_limit_ - op_ptr_ == 0);
    full_size_ += (op_ptr_ - op_base_);
    len -= avail;
    ip += avail;

    // Bounds check
    if (full_size_ + len > expected_) {
      return false;
    }

    // Make new block
    size_t bsize = std::min<size_t>(kBlockSize, expected_ - full_size_);
    op_base_ = allocator_.Allocate(bsize);
    op_ptr_ = op_base_;
    op_limit_ = op_base_ + bsize;
    blocks_.push_back(op_base_);
    avail = bsize;
  }

  memcpy(op_ptr_, ip, len);
  op_ptr_ += len;
  return true;
}

template<typename Allocator>
bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
                                                          size_t len) {
  // Overflow check
  // See SnappyArrayWriter::AppendFromSelf for an explanation of
  // the "offset - 1u" trick.
  const size_t cur = Size();
  if (offset - 1u >= cur) return false;
  if (expected_ - cur < len) return false;

  // Currently we shouldn't ever hit this path because Compress() chops the
  // input into blocks and does not create cross-block copies. However, it is
  // nice if we do not rely on that, since we can get better compression if we
  // allow cross-block copies and thus might want to change the compressor in
  // the future.
  size_t src = cur - offset;
  while (len-- > 0) {
    char c = blocks_[src >> kBlockLog][src & (kBlockSize-1)];
    Append(&c, 1);
    src++;
  }
  return true;
}

class SnappySinkAllocator {
 public:
  explicit SnappySinkAllocator(Sink* dest): dest_(dest) {}
  ~SnappySinkAllocator() {}

  char* Allocate(int size) {
    Datablock block(new char[size], size);
    blocks_.push_back(block);
    return block.data;
  }

  // We flush only at the end, because the writer wants
  // random access to the blocks and once we hand a
  // block over to the sink, we can't access it anymore.
  // Also, we never write more than has actually been written
  // to the blocks.
  void Flush(size_t size) {
    size_t size_written = 0;
    size_t block_size;
    for (size_t i = 0; i < blocks_.size(); ++i) {
      block_size = std::min<size_t>(blocks_[i].size, size - size_written);
      dest_->AppendAndTakeOwnership(blocks_[i].data, block_size,
                                    &SnappySinkAllocator::Deleter, NULL);
      size_written += block_size;
    }
    blocks_.clear();
  }

 private:
  struct Datablock {
    char* data;
    size_t size;
    Datablock(char* p, size_t s) : data(p), size(s) {}
  };

  static void Deleter(void* /* arg */, const char* bytes, size_t /* size */) {
    delete[] bytes;
  }

  Sink* dest_;
  std::vector<Datablock> blocks_;

  // Note: copying this object is allowed
};

size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) {
  SnappySinkAllocator allocator(uncompressed);
  SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
  InternalUncompress(compressed, &writer);
  return writer.Produced();
}

bool Uncompress(Source* compressed, Sink* uncompressed) {
  // Read the uncompressed length from the front of the compressed input
  SnappyDecompressor decompressor(compressed);
  uint32 uncompressed_len = 0;
  if (!decompressor.ReadUncompressedLength(&uncompressed_len)) {
    return false;
  }

  char c;
  size_t allocated_size;
  char* buf = uncompressed->GetAppendBufferVariable(
      1, uncompressed_len, &c, 1, &allocated_size);

  const size_t compressed_len = compressed->Available();
  // If we can get a flat buffer, then use it; otherwise do block-by-block
  // uncompression.
  if (allocated_size >= uncompressed_len) {
    SnappyArrayWriter writer(buf);
    bool result = InternalUncompressAllTags(&decompressor, &writer,
                                            compressed_len, uncompressed_len);
    uncompressed->Append(buf, writer.Produced());
    return result;
  } else {
    SnappySinkAllocator allocator(uncompressed);
    SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
    return InternalUncompressAllTags(&decompressor, &writer, compressed_len,
                                     uncompressed_len);
  }
}

}  // namespace snappy