Diffstat (limited to 'thirdparty/snappy/snappy-internal.h')
-rw-r--r--  thirdparty/snappy/snappy-internal.h  109
1 file changed, 93 insertions, 16 deletions
diff --git a/thirdparty/snappy/snappy-internal.h b/thirdparty/snappy/snappy-internal.h
index 0653dc65..b8355c08 100644
--- a/thirdparty/snappy/snappy-internal.h
+++ b/thirdparty/snappy/snappy-internal.h
@@ -70,11 +70,12 @@ char* CompressFragment(const char* input,
uint16* table,
const int table_size);
-// Return the largest n such that
+// Find the largest n such that
//
// s1[0,n-1] == s2[0,n-1]
// and n <= (s2_limit - s2).
//
+// Return make_pair(n, n < 8).
// Does not read *s2_limit or beyond.
// Does not read *(s1 + (s2_limit - s2)) or beyond.
// Requires that s2_limit >= s2.
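As a reading aid for the contract above, here is a minimal byte-by-byte sketch of the same behaviour. It is an illustration only: the helper name FindMatchLengthSlow and the main() driver are not part of snappy, and the real implementations below use word-at-a-time loads instead.

#include <cassert>
#include <cstddef>
#include <utility>

// Byte-by-byte illustration of the contract: return (n, n < 8), where n is the
// length of the common prefix of s1 and s2, never reading *s2_limit or beyond.
static std::pair<std::size_t, bool> FindMatchLengthSlow(const char* s1,
                                                        const char* s2,
                                                        const char* s2_limit) {
  assert(s2_limit >= s2);
  std::size_t n = 0;
  while (s2 + n < s2_limit && s1[n] == s2[n]) ++n;
  return std::pair<std::size_t, bool>(n, n < 8);
}

int main() {
  const char a[] = "abcdefgh12345678";
  const char b[] = "abcdefgh12345999";
  // 13 bytes match ("abcdefgh12345"), so the result is (13, false).
  std::pair<std::size_t, bool> r = FindMatchLengthSlow(a, b, b + 16);
  assert(r.first == 13 && !r.second);
  // Only 2 bytes match here, so the second element is true.
  const char c[] = "abX";
  std::pair<std::size_t, bool> r2 = FindMatchLengthSlow("abc", c, c + 3);
  assert(r2.first == 2 && r2.second);
  return 0;
}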
@@ -82,11 +83,28 @@ char* CompressFragment(const char* input,
// Separate implementation for x86_64, for speed. Uses the fact that
// x86_64 is little endian.
#if defined(ARCH_K8)
-static inline int FindMatchLength(const char* s1,
- const char* s2,
- const char* s2_limit) {
+static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
+ const char* s2,
+ const char* s2_limit) {
assert(s2_limit >= s2);
- int matched = 0;
+ size_t matched = 0;
+
+ // This block isn't necessary for correctness; we could just start looping
+ // immediately. As an optimization though, it is useful. It creates some not
+ // uncommon code paths that determine, without extra effort, whether the match
+ // length is less than 8. In short, we are hoping to avoid a conditional
+ // branch, and perhaps get better code layout from the C++ compiler.
+ if (PREDICT_TRUE(s2 <= s2_limit - 8)) {
+ uint64 a1 = UNALIGNED_LOAD64(s1);
+ uint64 a2 = UNALIGNED_LOAD64(s2);
+ if (a1 != a2) {
+ return std::pair<size_t, bool>(Bits::FindLSBSetNonZero64(a1 ^ a2) >> 3,
+ true);
+ } else {
+ matched = 8;
+ s2 += 8;
+ }
+ }
// Find out how long the match is. We loop over the data 64 bits at a
// time until we find a 64-bit block that doesn't match; then we find
@@ -97,14 +115,11 @@ static inline int FindMatchLength(const char* s1,
s2 += 8;
matched += 8;
} else {
- // On current (mid-2008) Opteron models there is a 3% more
- // efficient code sequence to find the first non-matching byte.
- // However, what follows is ~10% better on Intel Core 2 and newer,
- // and we expect AMD's bsf instruction to improve.
uint64 x = UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 + matched);
int matching_bits = Bits::FindLSBSetNonZero64(x);
matched += matching_bits >> 3;
- return matched;
+ assert(matched >= 8);
+ return std::pair<size_t, bool>(matched, false);
}
}
while (PREDICT_TRUE(s2 < s2_limit)) {
@@ -112,15 +127,15 @@ static inline int FindMatchLength(const char* s1,
++s2;
++matched;
} else {
- return matched;
+ return std::pair<size_t, bool>(matched, matched < 8);
}
}
- return matched;
+ return std::pair<size_t, bool>(matched, matched < 8);
}
#else
-static inline int FindMatchLength(const char* s1,
- const char* s2,
- const char* s2_limit) {
+static inline std::pair<size_t, bool> FindMatchLength(const char* s1,
+ const char* s2,
+ const char* s2_limit) {
// Implementation based on the x86-64 version, above.
assert(s2_limit >= s2);
int matched = 0;
@@ -140,10 +155,72 @@ static inline int FindMatchLength(const char* s1,
++matched;
}
}
- return matched;
+ return std::pair<size_t, bool>(matched, matched < 8);
}
#endif
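The x86_64 path above relies on a little-endian observation: XOR two 64-bit loads of the inputs, and the index of the lowest set bit, divided by 8, is the number of leading bytes that matched. A standalone sketch of that trick follows; __builtin_ctzll (GCC/Clang) stands in for Bits::FindLSBSetNonZero64, and MatchingBytes is an illustrative name, not snappy code.

#include <cassert>
#include <cstdint>
#include <cstring>

// On a little-endian machine, byte i of the input lands in bits [8*i, 8*i+8)
// of a 64-bit load, so after XOR-ing two loads the index of the lowest set
// bit, divided by 8, equals the number of leading bytes that matched.
static int MatchingBytes(const char* p, const char* q) {
  std::uint64_t a, b;
  std::memcpy(&a, p, 8);  // stand-in for UNALIGNED_LOAD64
  std::memcpy(&b, q, 8);
  std::uint64_t x = a ^ b;
  assert(x != 0);                  // caller guarantees the two blocks differ
  return __builtin_ctzll(x) >> 3;  // stand-in for Bits::FindLSBSetNonZero64
}

int main() {
  // "abcdeXgh" vs "abcdeYgh": the first difference is at index 5.
  assert(MatchingBytes("abcdeXgh", "abcdeYgh") == 5);
  return 0;
}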
+// Lookup tables for decompression code. Give --snappy_dump_decompression_table
+// to the unit test to recompute char_table.
+
+enum {
+ LITERAL = 0,
+ COPY_1_BYTE_OFFSET = 1, // 3 bit length + 3 bits of offset in opcode
+ COPY_2_BYTE_OFFSET = 2,
+ COPY_4_BYTE_OFFSET = 3
+};
+static const int kMaximumTagLength = 5; // COPY_4_BYTE_OFFSET plus the actual offset.
+
+// Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits
+static const uint32 wordmask[] = {
+ 0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
+};
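A quick check of the table's intent: wordmask[i] keeps only the bottom 8*i bits of a 32-bit value. The snippet below only verifies the mask values themselves; how the decompressor applies them is outside this header.

#include <cassert>
#include <cstdint>

static const std::uint32_t wordmask[] = {
  0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
};

int main() {
  // wordmask[i] extracts the bottom 8*i bits of a 32-bit value.
  std::uint32_t v = 0x44332211u;
  assert((v & wordmask[0]) == 0u);
  assert((v & wordmask[1]) == 0x11u);
  assert((v & wordmask[2]) == 0x2211u);
  assert((v & wordmask[3]) == 0x332211u);
  assert((v & wordmask[4]) == 0x44332211u);
  return 0;
}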
+
+// Data stored per entry in lookup table:
+//      Range   Bits-used       Description
+//      ------------------------------------
+//      1..64   0..7            Literal/copy length encoded in opcode byte
+//      0..7    8..10           Copy offset encoded in opcode byte / 256
+//      0..4    11..13          Extra bytes after opcode
+//
+// We use eight bits for the length even though 7 would have sufficed
+// because of efficiency reasons:
+// (1) Extracting a byte is faster than a bit-field
+// (2) It properly aligns copy offset so we do not need a <<8
+static const uint16 char_table[256] = {
+ 0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
+ 0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
+ 0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
+ 0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
+ 0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
+ 0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
+ 0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
+ 0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
+ 0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
+ 0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
+ 0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
+ 0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
+ 0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
+ 0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
+ 0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
+ 0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
+ 0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
+ 0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
+ 0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
+ 0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
+ 0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
+ 0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
+ 0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
+ 0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
+ 0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
+ 0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
+ 0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
+ 0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
+ 0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
+ 0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
+ 0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
+ 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
+};
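To make the bit layout documented above concrete, here is a worked decode of one entry. The field extraction below is an illustration of the documented layout, not the decompressor's actual code. Tag byte 0x01 has type COPY_1_BYTE_OFFSET, and char_table[0x01] is 0x0804.

#include <cassert>
#include <cstdint>

int main() {
  // char_table[0x01] == 0x0804: tag byte 0x01 is a COPY_1_BYTE_OFFSET.
  const std::uint16_t entry = 0x0804;
  const int length      = entry & 0xff;         // bits 0..7: literal/copy length
  const int offset_high = (entry >> 8) & 0x7;   // bits 8..10: copy offset / 256
  const int extra_bytes = (entry >> 11) & 0x7;  // bits 11..13: bytes after the tag
  assert(length == 4);       // the shortest 1-byte-offset copy is 4 bytes long
  assert(offset_high == 0);  // no high offset bits come from this tag byte
  assert(extra_bytes == 1);  // one trailing byte carries the low offset bits
  return 0;
}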
+
} // end namespace internal
} // end namespace snappy