Diffstat (limited to 'thirdparty/snappy/snappy.cc')
-rw-r--r--  thirdparty/snappy/snappy.cc  347
1 files changed, 271 insertions, 76 deletions
diff --git a/thirdparty/snappy/snappy.cc b/thirdparty/snappy/snappy.cc
index 4d4eb42a..f8d0d23d 100644
--- a/thirdparty/snappy/snappy.cc
+++ b/thirdparty/snappy/snappy.cc
@@ -82,6 +82,7 @@ enum {
COPY_2_BYTE_OFFSET = 2,
COPY_4_BYTE_OFFSET = 3
};
+static const int kMaximumTagLength = 5; // COPY_4_BYTE_OFFSET plus the actual offset.
// Copy "len" bytes from "src" to "op", one byte at a time. Used for
// handling COPY operations where the input and output regions may
@@ -94,8 +95,8 @@ enum {
// ababababababababababab
// Note that this does not match the semantics of either memcpy()
// or memmove().
-static inline void IncrementalCopy(const char* src, char* op, int len) {
- DCHECK_GT(len, 0);
+static inline void IncrementalCopy(const char* src, char* op, ssize_t len) {
+ assert(len > 0);
do {
*op++ = *src++;
} while (--len > 0);
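
As a side note on the semantics documented above: a minimal, self-contained sketch (hypothetical buffer and function names, not part of this change) of the overlap behaviour that neither memcpy() nor memmove() provides.

#include <cassert>

// Byte-at-a-time copy with the same overlap semantics as IncrementalCopy():
// the source is allowed to trail the destination and re-read bytes that the
// loop itself just wrote.
static void incremental_copy_sketch(const char* src, char* op, long len) {
  do {
    *op++ = *src++;
  } while (--len > 0);
}

int main() {
  char buf[32] = "ab";                         // seed the two-byte pattern
  incremental_copy_sketch(buf, buf + 2, 20);   // src trails op by 2 bytes
  for (int i = 0; i < 22; ++i) {
    assert(buf[i] == "ab"[i % 2]);             // "abababab..." as documented
  }
  return 0;
}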
@@ -136,9 +137,7 @@ namespace {
const int kMaxIncrementCopyOverflow = 10;
-} // namespace
-
-static inline void IncrementalCopyFastPath(const char* src, char* op, int len) {
+inline void IncrementalCopyFastPath(const char* src, char* op, ssize_t len) {
while (op - src < 8) {
UnalignedCopy64(src, op);
len -= op - src;
@@ -152,6 +151,8 @@ static inline void IncrementalCopyFastPath(const char* src, char* op, int len) {
}
}
+} // namespace
+
static inline char* EmitLiteral(char* op,
const char* literal,
int len,
@@ -195,17 +196,17 @@ static inline char* EmitLiteral(char* op,
}
static inline char* EmitCopyLessThan64(char* op, size_t offset, int len) {
- DCHECK_LE(len, 64);
- DCHECK_GE(len, 4);
- DCHECK_LT(offset, 65536);
+ assert(len <= 64);
+ assert(len >= 4);
+ assert(offset < 65536);
if ((len < 12) && (offset < 2048)) {
size_t len_minus_4 = len - 4;
assert(len_minus_4 < 8); // Must fit in 3 bits
- *op++ = COPY_1_BYTE_OFFSET | ((len_minus_4) << 2) | ((offset >> 8) << 5);
+ *op++ = COPY_1_BYTE_OFFSET + ((len_minus_4) << 2) + ((offset >> 8) << 5);
*op++ = offset & 0xff;
} else {
- *op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2);
+ *op++ = COPY_2_BYTE_OFFSET + ((len-1) << 2);
LittleEndian::Store16(op, offset);
op += 2;
}
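
For reference, the two tag layouts emitted here can be unpacked as in the hedged sketch below (a decoder-side view with illustrative names, not the library's own code). Note also that switching from | to + above does not change the emitted bytes, since the three fields occupy disjoint bits.

#include <cstdint>
#include <cstdio>

enum { COPY_1_BYTE_OFFSET_TAG = 1, COPY_2_BYTE_OFFSET_TAG = 2 };

// Unpack a copy tag of the two shapes produced by EmitCopyLessThan64():
//   1-byte offset: bits 0..1 = tag, bits 2..4 = len-4, bits 5..7 = offset>>8,
//                  next byte = low 8 offset bits (offset < 2048, 4 <= len < 12)
//   2-byte offset: bits 0..1 = tag, bits 2..7 = len-1,
//                  next two bytes = 16-bit little-endian offset
static void DecodeCopyTag(const uint8_t* p, int* len, unsigned* offset) {
  const uint8_t tag = p[0];
  if ((tag & 3) == COPY_1_BYTE_OFFSET_TAG) {
    *len = ((tag >> 2) & 7) + 4;
    *offset = (unsigned(tag >> 5) << 8) | p[1];
  } else {
    *len = (tag >> 2) + 1;
    *offset = p[1] | (unsigned(p[2]) << 8);
  }
}

int main() {
  // Encode a copy of length 7 at offset 0x123 exactly as the fast branch does.
  uint8_t buf[2];
  buf[0] = COPY_1_BYTE_OFFSET_TAG + ((7 - 4) << 2) + ((0x123 >> 8) << 5);
  buf[1] = 0x123 & 0xff;
  int len; unsigned offset;
  DecodeCopyTag(buf, &len, &offset);
  printf("len=%d offset=0x%x\n", len, offset);   // prints: len=7 offset=0x123
  return 0;
}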
@@ -253,8 +254,6 @@ uint16* WorkingMemory::GetHashTable(size_t input_size, int* table_size) {
while (htsize < kMaxHashTableSize && htsize < input_size) {
htsize <<= 1;
}
- CHECK_EQ(0, htsize & (htsize - 1)) << ": must be power of two";
- CHECK_LE(htsize, kMaxHashTableSize) << ": hash table too large";
uint16* table;
if (htsize <= ARRAYSIZE(small_table_)) {
@@ -294,8 +293,8 @@ static inline EightBytesReference GetEightBytesAt(const char* ptr) {
}
static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
- DCHECK_GE(offset, 0);
- DCHECK_LE(offset, 4);
+ assert(offset >= 0);
+ assert(offset <= 4);
return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
}
@@ -308,8 +307,8 @@ static inline EightBytesReference GetEightBytesAt(const char* ptr) {
}
static inline uint32 GetUint32AtOffset(const char* v, int offset) {
- DCHECK_GE(offset, 0);
- DCHECK_LE(offset, 4);
+ assert(offset >= 0);
+ assert(offset <= 4);
return UNALIGNED_LOAD32(v + offset);
}
@@ -334,10 +333,10 @@ char* CompressFragment(const char* input,
const int table_size) {
// "ip" is the input pointer, and "op" is the output pointer.
const char* ip = input;
- CHECK_LE(input_size, kBlockSize);
- CHECK_EQ(table_size & (table_size - 1), 0) << ": table must be power of two";
+ assert(input_size <= kBlockSize);
+ assert((table_size & (table_size - 1)) == 0); // table must be power of two
const int shift = 32 - Bits::Log2Floor(table_size);
- DCHECK_EQ(static_cast<int>(kuint32max >> shift), table_size - 1);
+ assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
const char* ip_end = input + input_size;
const char* base_ip = ip;
// Bytes in [next_emit, ip) will be emitted as literal bytes. Or
@@ -349,7 +348,7 @@ char* CompressFragment(const char* input,
const char* ip_limit = input + input_size - kInputMarginBytes;
for (uint32 next_hash = Hash(++ip, shift); ; ) {
- DCHECK_LT(next_emit, ip);
+ assert(next_emit < ip);
// The body of this loop calls EmitLiteral once and then EmitCopy one or
// more times. (The exception is that when we're close to exhausting
// the input we goto emit_remainder.)
@@ -382,7 +381,7 @@ char* CompressFragment(const char* input,
do {
ip = next_ip;
uint32 hash = next_hash;
- DCHECK_EQ(hash, Hash(ip, shift));
+ assert(hash == Hash(ip, shift));
uint32 bytes_between_hash_lookups = skip++ >> 5;
next_ip = ip + bytes_between_hash_lookups;
if (PREDICT_FALSE(next_ip > ip_limit)) {
@@ -390,8 +389,8 @@ char* CompressFragment(const char* input,
}
next_hash = Hash(next_ip, shift);
candidate = base_ip + table[hash];
- DCHECK_GE(candidate, base_ip);
- DCHECK_LT(candidate, ip);
+ assert(candidate >= base_ip);
+ assert(candidate < ip);
table[hash] = ip - base_ip;
} while (PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
@@ -400,7 +399,7 @@ char* CompressFragment(const char* input,
// Step 2: A 4-byte match has been found. We'll later see if more
// than 4 bytes match. But, prior to the match, input
// bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
- DCHECK_LE(next_emit + 16, ip_end);
+ assert(next_emit + 16 <= ip_end);
op = EmitLiteral(op, next_emit, ip - next_emit, true);
// Step 3: Call EmitCopy, and then see if another EmitCopy could
@@ -421,7 +420,7 @@ char* CompressFragment(const char* input,
int matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
ip += matched;
size_t offset = base - candidate;
- DCHECK_EQ(0, memcmp(base, candidate, matched));
+ assert(0 == memcmp(base, candidate, matched));
op = EmitCopy(op, offset, matched);
// We could immediately start working at ip now, but to improve
// compression we first update table[Hash(ip - 1, ...)].
@@ -471,21 +470,26 @@ char* CompressFragment(const char* input,
// bool Append(const char* ip, size_t length);
// bool AppendFromSelf(uint32 offset, size_t length);
//
-// // The difference between TryFastAppend and Append is that TryFastAppend
-// // is allowed to read up to <available> bytes from the input buffer,
-// // whereas Append is allowed to read <length>.
+// // The rules for how TryFastAppend differs from Append are somewhat
+// // convoluted:
// //
-// // Also, TryFastAppend is allowed to return false, declining the append,
-// // without it being a fatal error -- just "return false" would be
-// // a perfectly legal implementation of TryFastAppend. The intention
-// // is for TryFastAppend to allow a fast path in the common case of
-// // a small append.
+// // - TryFastAppend is allowed to decline (return false) at any
+// // time, for any reason -- just "return false" would be
+// // a perfectly legal implementation of TryFastAppend.
+// // The intention is for TryFastAppend to allow a fast path
+// // in the common case of a small append.
+// // - TryFastAppend is allowed to read up to <available> bytes
+// // from the input buffer, whereas Append is allowed to read
+// // <length>. However, if it returns true, it must leave
+// // at least five (kMaximumTagLength) bytes in the input buffer
+// // afterwards, so that there is always enough space to read the
+// // next tag without checking for a refill.
+// // - TryFastAppend must always decline (return false)
+// // if <length> is 61 or more, as in this case the literal length is not
+// // decoded fully. In practice, this should not be a big problem,
+// // as it is unlikely that one would implement a fast path accepting
+// // this much data.
// //
-// // NOTE(user): TryFastAppend must always return decline (return false)
-// // if <length> is 61 or more, as in this case the literal length is not
-// // decoded fully. In practice, this should not be a big problem,
-// // as it is unlikely that one would implement a fast path accepting
-// // this much data.
// bool TryFastAppend(const char* ip, size_t available, size_t length);
// };
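
A minimal legal Writer under this contract could look like the hedged sketch below; it mirrors the validator further down in this file by only counting bytes, and it exercises the always-permitted option of declining TryFastAppend.

#include <cstddef>

// Smallest Writer that satisfies the interface comment above: track how many
// bytes have been produced and never take the fast path.
class CountingWriter {
 public:
  void SetExpectedLength(size_t len) { expected_ = len; }
  bool CheckLength() const { return produced_ == expected_; }
  bool Append(const char* /*ip*/, size_t len) {
    produced_ += len;
    return produced_ <= expected_;
  }
  bool TryFastAppend(const char* /*ip*/, size_t /*available*/, size_t /*len*/) {
    return false;  // declining is always legal, per the contract above
  }
  bool AppendFromSelf(size_t offset, size_t len) {
    // "offset - 1u" rejects both offset > produced_ and offset == 0.
    if (produced_ <= offset - 1u) return false;
    produced_ += len;
    return produced_ <= expected_;
  }
 private:
  size_t expected_ = 0;
  size_t produced_ = 0;
};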
@@ -554,9 +558,9 @@ static uint16 MakeEntry(unsigned int extra,
unsigned int len,
unsigned int copy_offset) {
// Check that all of the fields fit within the allocated space
- DCHECK_EQ(extra, extra & 0x7); // At most 3 bits
- DCHECK_EQ(copy_offset, copy_offset & 0x7); // At most 3 bits
- DCHECK_EQ(len, len & 0x7f); // At most 7 bits
+ assert(extra == (extra & 0x7)); // At most 3 bits
+ assert(copy_offset == (copy_offset & 0x7)); // At most 3 bits
+ assert(len == (len & 0x7f)); // At most 7 bits
return len | (copy_offset << 8) | (extra << 11);
}
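
The packed layout asserted above (len in bits 0..6, copy_offset in bits 8..10, extra in bits 11..13) is what RefillTag() later relies on when it computes needed = (entry >> 11) + 1. A hedged unpacking sketch, with illustrative names:

#include <cstdint>

struct TagEntry {
  unsigned len;          // 7 bits
  unsigned copy_offset;  // 3 bits
  unsigned extra;        // 3 bits: tag bytes needed beyond the tag byte itself
};

// Inverse of MakeEntry(): pull the three fields back out of a char_table value.
static TagEntry UnpackEntry(uint16_t entry) {
  TagEntry e;
  e.len = entry & 0x7f;
  e.copy_offset = (entry >> 8) & 0x7;
  e.extra = (entry >> 11) & 0x7;
  return e;
}
// RefillTag() needs e.extra + 1 bytes in total, which kMaximumTagLength (5)
// bounds from above: a COPY_4_BYTE_OFFSET tag byte plus a 4-byte offset.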
@@ -614,9 +618,15 @@ static void ComputeTable() {
}
// Check that each entry was initialized exactly once.
- CHECK_EQ(assigned, 256);
+ if (assigned != 256) {
+ fprintf(stderr, "ComputeTable: assigned only %d of 256\n", assigned);
+ abort();
+ }
for (int i = 0; i < 256; i++) {
- CHECK_NE(dst[i], 0xffff);
+ if (dst[i] == 0xffff) {
+ fprintf(stderr, "ComputeTable: did not assign byte %d\n", i);
+ abort();
+ }
}
if (FLAGS_snappy_dump_decompression_table) {
@@ -631,7 +641,11 @@ static void ComputeTable() {
// Check that computed table matched recorded table
for (int i = 0; i < 256; i++) {
- CHECK_EQ(dst[i], char_table[i]);
+ if (dst[i] != char_table[i]) {
+ fprintf(stderr, "ComputeTable: byte %d: computed (%x), expect (%x)\n",
+ i, static_cast<int>(dst[i]), static_cast<int>(char_table[i]));
+ abort();
+ }
}
}
#endif /* !NDEBUG */
@@ -644,7 +658,7 @@ class SnappyDecompressor {
const char* ip_limit_; // Points just past buffered bytes
uint32 peeked_; // Bytes peeked from reader (need to skip)
bool eof_; // Hit end of input without an error?
- char scratch_[5]; // Temporary buffer for PeekFast() boundaries
+ char scratch_[kMaximumTagLength]; // See RefillTag().
// Ensure that all of the tag metadata for the next tag is available
// in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
@@ -676,7 +690,7 @@ class SnappyDecompressor {
// On success, stores the length in *result and returns true.
// On failure, returns false.
bool ReadUncompressedLength(uint32* result) {
- DCHECK(ip_ == NULL); // Must not have read anything yet
+ assert(ip_ == NULL); // Must not have read anything yet
// Length is encoded in 1..5 bytes
*result = 0;
uint32 shift = 0;
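
The "1..5 bytes" preamble is a little-endian base-128 varint: seven payload bits per byte, with the high bit set on every byte except the last. A standalone, bounds-checked sketch of the same decoding loop (illustrative names, not the library's implementation):

#include <cstddef>
#include <cstdint>

// Decode the uncompressed-length preamble from at most the first five bytes
// of a buffer. Returns false on truncated input or an over-long encoding.
static bool ParseVarint32(const uint8_t* p, size_t n, uint32_t* result) {
  uint32_t value = 0;
  for (uint32_t shift = 0, i = 0; shift <= 28 && i < n; shift += 7, ++i) {
    const uint8_t c = p[i];
    value |= uint32_t(c & 0x7f) << shift;
    if ((c & 0x80) == 0) {   // continuation bit clear: this was the last byte
      *result = value;
      return true;
    }
  }
  return false;
}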
@@ -707,7 +721,7 @@ class SnappyDecompressor {
// scope to optimize the <ip_limit_ - ip> expression based on the local
// context, which overall increases speed.
#define MAYBE_REFILL() \
- if (ip_limit_ - ip < 5) { \
+ if (ip_limit_ - ip < kMaximumTagLength) { \
ip_ = ip; \
if (!RefillTag()) return; \
ip = ip_; \
@@ -720,9 +734,11 @@ class SnappyDecompressor {
if ((c & 0x3) == LITERAL) {
size_t literal_length = (c >> 2) + 1u;
if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
- DCHECK_LT(literal_length, 61);
+ assert(literal_length < 61);
ip += literal_length;
- MAYBE_REFILL();
+ // NOTE(user): There is no MAYBE_REFILL() here, as TryFastAppend()
+ // will not return true unless there's already at least five spare
+ // bytes in addition to the literal.
continue;
}
if (PREDICT_FALSE(literal_length >= 61)) {
@@ -787,11 +803,11 @@ bool SnappyDecompressor::RefillTag() {
}
// Read the tag character
- DCHECK_LT(ip, ip_limit_);
+ assert(ip < ip_limit_);
const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
const uint32 entry = char_table[c];
const uint32 needed = (entry >> 11) + 1; // +1 byte for 'c'
- DCHECK_LE(needed, sizeof(scratch_));
+ assert(needed <= sizeof(scratch_));
// Read more bytes from reader if needed
uint32 nbuf = ip_limit_ - ip;
@@ -812,10 +828,10 @@ bool SnappyDecompressor::RefillTag() {
nbuf += to_add;
reader_->Skip(to_add);
}
- DCHECK_EQ(nbuf, needed);
+ assert(nbuf == needed);
ip_ = scratch_;
ip_limit_ = scratch_ + needed;
- } else if (nbuf < 5) {
+ } else if (nbuf < kMaximumTagLength) {
// Have enough bytes, but move into scratch_ so that we do not
// read past end of input
memmove(scratch_, ip, nbuf);
@@ -831,27 +847,18 @@ bool SnappyDecompressor::RefillTag() {
}
template <typename Writer>
-static bool InternalUncompress(Source* r,
- Writer* writer,
- uint32 max_len) {
+static bool InternalUncompress(Source* r, Writer* writer) {
// Read the uncompressed length from the front of the compressed input
SnappyDecompressor decompressor(r);
uint32 uncompressed_len = 0;
if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
- return InternalUncompressAllTags(
- &decompressor, writer, uncompressed_len, max_len);
+ return InternalUncompressAllTags(&decompressor, writer, uncompressed_len);
}
template <typename Writer>
static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
Writer* writer,
- uint32 uncompressed_len,
- uint32 max_len) {
- // Protect against possible DoS attack
- if (static_cast<uint64>(uncompressed_len) > max_len) {
- return false;
- }
-
+ uint32 uncompressed_len) {
writer->SetExpectedLength(uncompressed_len);
// Process the entire input
@@ -880,7 +887,7 @@ size_t Compress(Source* reader, Sink* writer) {
// Get next block to compress (without copying if possible)
size_t fragment_size;
const char* fragment = reader->Peek(&fragment_size);
- DCHECK_NE(fragment_size, 0) << ": premature end of input";
+ assert(fragment_size != 0); // premature end of input
const size_t num_to_read = min(N, kBlockSize);
size_t bytes_read = fragment_size;
@@ -907,11 +914,11 @@ size_t Compress(Source* reader, Sink* writer) {
bytes_read += n;
reader->Skip(n);
}
- DCHECK_EQ(bytes_read, num_to_read);
+ assert(bytes_read == num_to_read);
fragment = scratch;
fragment_size = num_to_read;
}
- DCHECK_EQ(fragment_size, num_to_read);
+ assert(fragment_size == num_to_read);
// Get encoding table for compression
int table_size;
@@ -946,6 +953,183 @@ size_t Compress(Source* reader, Sink* writer) {
}
// -----------------------------------------------------------------------
+// IOVec interfaces
+// -----------------------------------------------------------------------
+
+// A type that writes to an iovec.
+// Note that this is not a "ByteSink", but a type that matches the
+// Writer template argument to SnappyDecompressor::DecompressAllTags().
+class SnappyIOVecWriter {
+ private:
+ const struct iovec* output_iov_;
+ const size_t output_iov_count_;
+
+ // We are currently writing into output_iov_[curr_iov_index_].
+ int curr_iov_index_;
+
+ // Bytes written to output_iov_[curr_iov_index_] so far.
+ size_t curr_iov_written_;
+
+ // Total bytes decompressed into output_iov_ so far.
+ size_t total_written_;
+
+ // Maximum number of bytes that will be decompressed into output_iov_.
+ size_t output_limit_;
+
+ inline char* GetIOVecPointer(int index, size_t offset) {
+ return reinterpret_cast<char*>(output_iov_[index].iov_base) +
+ offset;
+ }
+
+ public:
+ // Does not take ownership of iov. iov must be valid during the
+ // entire lifetime of the SnappyIOVecWriter.
+ inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count)
+ : output_iov_(iov),
+ output_iov_count_(iov_count),
+ curr_iov_index_(0),
+ curr_iov_written_(0),
+ total_written_(0),
+ output_limit_(-1) {
+ }
+
+ inline void SetExpectedLength(size_t len) {
+ output_limit_ = len;
+ }
+
+ inline bool CheckLength() const {
+ return total_written_ == output_limit_;
+ }
+
+ inline bool Append(const char* ip, size_t len) {
+ if (total_written_ + len > output_limit_) {
+ return false;
+ }
+
+ while (len > 0) {
+ assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
+ if (curr_iov_written_ >= output_iov_[curr_iov_index_].iov_len) {
+ // This iovec is full. Go to the next one.
+ if (curr_iov_index_ + 1 >= output_iov_count_) {
+ return false;
+ }
+ curr_iov_written_ = 0;
+ ++curr_iov_index_;
+ }
+
+ const size_t to_write = std::min(
+ len, output_iov_[curr_iov_index_].iov_len - curr_iov_written_);
+ memcpy(GetIOVecPointer(curr_iov_index_, curr_iov_written_),
+ ip,
+ to_write);
+ curr_iov_written_ += to_write;
+ total_written_ += to_write;
+ ip += to_write;
+ len -= to_write;
+ }
+
+ return true;
+ }
+
+ inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
+ const size_t space_left = output_limit_ - total_written_;
+ if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
+ output_iov_[curr_iov_index_].iov_len - curr_iov_written_ >= 16) {
+ // Fast path, used for the majority (about 95%) of invocations.
+ char* ptr = GetIOVecPointer(curr_iov_index_, curr_iov_written_);
+ UnalignedCopy64(ip, ptr);
+ UnalignedCopy64(ip + 8, ptr + 8);
+ curr_iov_written_ += len;
+ total_written_ += len;
+ return true;
+ }
+
+ return false;
+ }
+
+ inline bool AppendFromSelf(size_t offset, size_t len) {
+ if (offset > total_written_ || offset == 0) {
+ return false;
+ }
+ const size_t space_left = output_limit_ - total_written_;
+ if (len > space_left) {
+ return false;
+ }
+
+ // Locate the iovec from which we need to start the copy.
+ int from_iov_index = curr_iov_index_;
+ size_t from_iov_offset = curr_iov_written_;
+ while (offset > 0) {
+ if (from_iov_offset >= offset) {
+ from_iov_offset -= offset;
+ break;
+ }
+
+ offset -= from_iov_offset;
+ --from_iov_index;
+ assert(from_iov_index >= 0);
+ from_iov_offset = output_iov_[from_iov_index].iov_len;
+ }
+
+ // Copy <len> bytes starting from the iovec pointed to by from_iov_index to
+ // the current iovec.
+ while (len > 0) {
+ assert(from_iov_index <= curr_iov_index_);
+ if (from_iov_index != curr_iov_index_) {
+ const size_t to_copy = std::min(
+ output_iov_[from_iov_index].iov_len - from_iov_offset,
+ len);
+ Append(GetIOVecPointer(from_iov_index, from_iov_offset), to_copy);
+ len -= to_copy;
+ if (len > 0) {
+ ++from_iov_index;
+ from_iov_offset = 0;
+ }
+ } else {
+ assert(curr_iov_written_ <= output_iov_[curr_iov_index_].iov_len);
+ size_t to_copy = std::min(output_iov_[curr_iov_index_].iov_len -
+ curr_iov_written_,
+ len);
+ if (to_copy == 0) {
+ // This iovec is full. Go to the next one.
+ if (curr_iov_index_ + 1 >= output_iov_count_) {
+ return false;
+ }
+ ++curr_iov_index_;
+ curr_iov_written_ = 0;
+ continue;
+ }
+ if (to_copy > len) {
+ to_copy = len;
+ }
+ IncrementalCopy(GetIOVecPointer(from_iov_index, from_iov_offset),
+ GetIOVecPointer(curr_iov_index_, curr_iov_written_),
+ to_copy);
+ curr_iov_written_ += to_copy;
+ from_iov_offset += to_copy;
+ total_written_ += to_copy;
+ len -= to_copy;
+ }
+ }
+
+ return true;
+ }
+
+};
+
+bool RawUncompressToIOVec(const char* compressed, size_t compressed_length,
+ const struct iovec* iov, size_t iov_cnt) {
+ ByteArraySource reader(compressed, compressed_length);
+ return RawUncompressToIOVec(&reader, iov, iov_cnt);
+}
+
+bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov,
+ size_t iov_cnt) {
+ SnappyIOVecWriter output(iov, iov_cnt);
+ return InternalUncompress(compressed, &output);
+}
+
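A hedged usage sketch for the new entry points, scattering one compressed block across two caller-owned buffers. Buffer names and sizes are illustrative, and it assumes the declarations are exported through snappy.h in namespace snappy, like the rest of the public API.

#include <sys/uio.h>
#include <cstddef>
#include <vector>
#include "snappy.h"   // assumed location of the RawUncompressToIOVec declarations

// Decompress directly into two pre-sized buffers without an intermediate copy.
// Returns false if the output does not fit in the iovecs or the input is corrupt.
bool DecompressIntoTwoBuffers(const char* compressed, size_t compressed_length,
                              std::vector<char>* head, std::vector<char>* tail) {
  struct iovec iov[2];
  iov[0].iov_base = head->data();
  iov[0].iov_len = head->size();
  iov[1].iov_base = tail->data();
  iov[1].iov_len = tail->size();
  return snappy::RawUncompressToIOVec(compressed, compressed_length, iov, 2);
}
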
+// -----------------------------------------------------------------------
// Flat array interfaces
// -----------------------------------------------------------------------
@@ -986,7 +1170,7 @@ class SnappyArrayWriter {
inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
char* op = op_;
const size_t space_left = op_limit_ - op;
- if (len <= 16 && available >= 16 && space_left >= 16) {
+ if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
// Fast path, used for the majority (about 95%) of invocations.
UnalignedCopy64(ip, op);
UnalignedCopy64(ip + 8, op + 8);
@@ -1001,7 +1185,16 @@ class SnappyArrayWriter {
char* op = op_;
const size_t space_left = op_limit_ - op;
- if (op - base_ <= offset - 1u) { // -1u catches offset==0
+ // Check if we try to append from before the start of the buffer.
+ // Normally this would just be a check for "produced < offset",
+ // but "produced <= offset - 1u" is equivalent for every case
+ // except the one where offset==0, where the right side will wrap around
+ // to a very big number. This is convenient, as offset==0 is another
+ // invalid case that we also want to catch, so that we do not go
+ // into an infinite loop.
+ assert(op >= base_);
+ size_t produced = op - base_;
+ if (produced <= offset - 1u) {
return false;
}
if (len <= 16 && offset >= 8 && space_left >= 16) {
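
A short worked check of the "offset - 1u" trick described in the new comment (illustrative numbers only):

#include <cassert>
#include <cstddef>

int main() {
  const size_t produced = 10;
  // offset 12 reaches before the start of the buffer: 10 <= 11, rejected.
  assert(produced <= size_t(12) - 1u);
  // offset 7 is within what has been produced: 10 <= 6 is false, accepted.
  assert(!(produced <= size_t(7) - 1u));
  // offset 0 wraps to the maximum size_t value, so it is rejected as well.
  assert(produced <= size_t(0) - 1u);
  return 0;
}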
@@ -1031,7 +1224,7 @@ bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
bool RawUncompress(Source* compressed, char* uncompressed) {
SnappyArrayWriter output(uncompressed);
- return InternalUncompress(compressed, &output, kuint32max);
+ return InternalUncompress(compressed, &output);
}
bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
@@ -1039,9 +1232,9 @@ bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
if (!GetUncompressedLength(compressed, n, &ulength)) {
return false;
}
- // Protect against possible DoS attack
- if ((static_cast<uint64>(ulength) + uncompressed->size()) >
- uncompressed->max_size()) {
+ // On 32-bit builds: max_size() < kuint32max. Check for that instead
+ // of crashing (e.g., consider externally specified compressed data).
+ if (ulength > uncompressed->max_size()) {
return false;
}
STLStringResizeUninitialized(uncompressed, ulength);
@@ -1071,7 +1264,9 @@ class SnappyDecompressionValidator {
return false;
}
inline bool AppendFromSelf(size_t offset, size_t len) {
- if (produced_ <= offset - 1u) return false; // -1u catches offset==0
+ // See SnappyArrayWriter::AppendFromSelf for an explanation of
+ // the "offset - 1u" trick.
+ if (produced_ <= offset - 1u) return false;
produced_ += len;
return produced_ <= expected_;
}
@@ -1080,7 +1275,7 @@ class SnappyDecompressionValidator {
bool IsValidCompressedBuffer(const char* compressed, size_t n) {
ByteArraySource reader(compressed, n);
SnappyDecompressionValidator writer;
- return InternalUncompress(&reader, &writer, kuint32max);
+ return InternalUncompress(&reader, &writer);
}
void RawCompress(const char* input,