path: root/thirdparty/brotli/enc/port.h
Diffstat (limited to 'thirdparty/brotli/enc/port.h')
-rw-r--r--  thirdparty/brotli/enc/port.h  120
1 file changed, 73 insertions(+), 47 deletions(-)
diff --git a/thirdparty/brotli/enc/port.h b/thirdparty/brotli/enc/port.h
index e73df63a..cd908ec2 100644
--- a/thirdparty/brotli/enc/port.h
+++ b/thirdparty/brotli/enc/port.h
@@ -4,14 +4,16 @@
See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/
-// Macros for endianness, branch prediction and unaligned loads and stores.
+/* Macros for endianness, branch prediction and unaligned loads and stores. */
#ifndef BROTLI_ENC_PORT_H_
#define BROTLI_ENC_PORT_H_
#include <assert.h>
-#include <string.h>
-#include "./types.h"
+#include <string.h> /* memcpy */
+
+#include "../common/port.h"
+#include "../common/types.h"
#if defined OS_LINUX || defined OS_CYGWIN
#include <endian.h>
@@ -24,9 +26,9 @@
#define __LITTLE_ENDIAN LITTLE_ENDIAN
#endif
-// define the macro IS_LITTLE_ENDIAN
-// using the above endian definitions from endian.h if
-// endian.h was included
+/* define the macro IS_LITTLE_ENDIAN
+ using the above endian definitions from endian.h if
+ endian.h was included */
#ifdef __BYTE_ORDER
#if __BYTE_ORDER == __LITTLE_ENDIAN
#define IS_LITTLE_ENDIAN
@@ -37,49 +39,36 @@
#if defined(__LITTLE_ENDIAN__)
#define IS_LITTLE_ENDIAN
#endif
-#endif // __BYTE_ORDER
+#endif /* __BYTE_ORDER */
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
#define IS_LITTLE_ENDIAN
#endif
-// Enable little-endian optimization for x64 architecture on Windows.
+/* Enable little-endian optimization for x64 architecture on Windows. */
#if (defined(_WIN32) || defined(_WIN64)) && defined(_M_X64)
#define IS_LITTLE_ENDIAN
#endif
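
The detection chain above converges on a single IS_LITTLE_ENDIAN feature macro. A minimal sketch of the kind of byte-order-dependent fast path it enables; read_le32 is a hypothetical helper, not part of this header:

#include <stdint.h>
#include <string.h>

static uint32_t read_le32(const uint8_t *p) {
#if defined(IS_LITTLE_ENDIAN)
  uint32_t v;
  memcpy(&v, p, sizeof v);  /* bytes are already in LE order in memory */
  return v;
#else
  /* Portable byte-by-byte assembly for big-endian targets. */
  return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
         ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
#endif
}
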
-/* Compatibility with non-clang compilers. */
-#ifndef __has_builtin
-#define __has_builtin(x) 0
-#endif
-
-#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ > 95) || \
- (defined(__llvm__) && __has_builtin(__builtin_expect))
-#define PREDICT_FALSE(x) (__builtin_expect(x, 0))
-#define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1))
-#else
-#define PREDICT_FALSE(x) (x)
-#define PREDICT_TRUE(x) (x)
-#endif
-
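
The PREDICT_TRUE/PREDICT_FALSE branch hints are removed here; they are presumably provided by the new ../common/port.h include instead. For reference, a self-contained sketch of how such a hint is used; checked_length is made up:

#include <stddef.h>

#ifndef PREDICT_FALSE
#define PREDICT_FALSE(x) (x)  /* local fallback so the sketch stands alone */
#endif

static size_t checked_length(const unsigned char *buf, size_t len) {
  if (PREDICT_FALSE(buf == NULL)) return 0;  /* cold, rarely taken path */
  return len;                                /* straight-line hot path */
}
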
-// Portable handling of unaligned loads, stores, and copies.
-// On some platforms, like ARM, the copy functions can be more efficient
-// then a load and a store.
+/* Portable handling of unaligned loads, stores, and copies.
+ On some platforms, like ARM, the copy functions can be more efficient
+   than a load and a store. */
#if defined(ARCH_PIII) || \
defined(ARCH_ATHLON) || defined(ARCH_K8) || defined(_ARCH_PPC)
-// x86 and x86-64 can perform unaligned loads/stores directly;
-// modern PowerPC hardware can also do unaligned integer loads and stores;
-// but note: the FPU still sends unaligned loads and stores to a trap handler!
+/* x86 and x86-64 can perform unaligned loads/stores directly;
+ modern PowerPC hardware can also do unaligned integer loads and stores;
+ but note: the FPU still sends unaligned loads and stores to a trap handler!
+*/
-#define BROTLI_UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32_t *>(_p))
-#define BROTLI_UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64_t *>(_p))
+#define BROTLI_UNALIGNED_LOAD32(_p) (*(const uint32_t *)(_p))
+#define BROTLI_UNALIGNED_LOAD64(_p) (*(const uint64_t *)(_p))
#define BROTLI_UNALIGNED_STORE32(_p, _val) \
- (*reinterpret_cast<uint32_t *>(_p) = (_val))
+ (*(uint32_t *)(_p) = (_val))
#define BROTLI_UNALIGNED_STORE64(_p, _val) \
- (*reinterpret_cast<uint64_t *>(_p) = (_val))
+ (*(uint64_t *)(_p) = (_val))
#elif defined(__arm__) && \
!defined(__ARM_ARCH_5__) && \
@@ -93,50 +82,87 @@
!defined(__ARM_ARCH_6ZK__) && \
!defined(__ARM_ARCH_6T2__)
-// ARMv7 and newer support native unaligned accesses, but only of 16-bit
-// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
-// do an unaligned read and rotate the words around a bit, or do the reads very
-// slowly (trip through kernel mode).
+/* ARMv7 and newer support native unaligned accesses, but only of 16-bit
+ and 32-bit values (not 64-bit); older versions either raise a fatal signal,
+ do an unaligned read and rotate the words around a bit, or do the reads very
+ slowly (trip through kernel mode). */
-#define BROTLI_UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32_t *>(_p))
+#define BROTLI_UNALIGNED_LOAD32(_p) (*(const uint32_t *)(_p))
#define BROTLI_UNALIGNED_STORE32(_p, _val) \
- (*reinterpret_cast<uint32_t *>(_p) = (_val))
+ (*(uint32_t *)(_p) = (_val))
-inline uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
+static BROTLI_INLINE uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
uint64_t t;
memcpy(&t, p, sizeof t);
return t;
}
-inline void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
+static BROTLI_INLINE void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
memcpy(p, &v, sizeof v);
}
#else
-// These functions are provided for architectures that don't support
-// unaligned loads and stores.
+/* These functions are provided for architectures that don't support
+   unaligned loads and stores. */
-inline uint32_t BROTLI_UNALIGNED_LOAD32(const void *p) {
+static BROTLI_INLINE uint32_t BROTLI_UNALIGNED_LOAD32(const void *p) {
uint32_t t;
memcpy(&t, p, sizeof t);
return t;
}
-inline uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
+static BROTLI_INLINE uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
uint64_t t;
memcpy(&t, p, sizeof t);
return t;
}
-inline void BROTLI_UNALIGNED_STORE32(void *p, uint32_t v) {
+static BROTLI_INLINE void BROTLI_UNALIGNED_STORE32(void *p, uint32_t v) {
memcpy(p, &v, sizeof v);
}
-inline void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
+static BROTLI_INLINE void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
memcpy(p, &v, sizeof v);
}
#endif
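
All three branches above implement the same contract: a 32- or 64-bit load or store at an arbitrarily aligned pointer. A standalone sketch of the memcpy fallback idiom; load_u32 is illustrative only:

#include <stdint.h>
#include <string.h>

static uint32_t load_u32(const void *p) {
  uint32_t v;
  memcpy(&v, p, sizeof v);  /* fixed-size memcpy: compilers lower this to a
                               single register load where alignment allows */
  return v;
}

On x86/x86-64 this typically compiles to the same instruction as the direct-dereference branch above, which is why the memcpy form is the safe default.
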
-#endif // BROTLI_ENC_PORT_H_
+#if !defined(__cplusplus) && !defined(c_plusplus) && __STDC_VERSION__ >= 199901L
+#define BROTLI_RESTRICT restrict
+#elif BROTLI_GCC_VERSION > 295 || defined(__llvm__)
+#define BROTLI_RESTRICT __restrict
+#else
+#define BROTLI_RESTRICT
+#endif
+
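
BROTLI_RESTRICT resolves to the C99 restrict qualifier where available, to __restrict on older GCC and clang, and to nothing otherwise. A hedged example of the kind of signature it is intended for, assuming this header is included; xor_into is hypothetical:

#include <stddef.h>
#include <stdint.h>

/* Promising that dst and src never alias lets the compiler vectorize the
   loop without emitting runtime overlap checks. */
static void xor_into(uint8_t *BROTLI_RESTRICT dst,
                     const uint8_t *BROTLI_RESTRICT src, size_t n) {
  size_t i;
  for (i = 0; i < n; ++i) dst[i] ^= src[i];
}
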
+#define _TEMPLATE(T) \
+ static BROTLI_INLINE T brotli_min_ ## T (T a, T b) { return a < b ? a : b; } \
+ static BROTLI_INLINE T brotli_max_ ## T (T a, T b) { return a > b ? a : b; }
+_TEMPLATE(double) _TEMPLATE(float) _TEMPLATE(int)
+_TEMPLATE(size_t) _TEMPLATE(uint32_t) _TEMPLATE(uint8_t)
+#undef _TEMPLATE
+#define BROTLI_MIN(T, A, B) (brotli_min_ ## T((A), (B)))
+#define BROTLI_MAX(T, A, B) (brotli_max_ ## T((A), (B)))
+
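
_TEMPLATE stamps out one typed min/max pair per listed type, and BROTLI_MIN(T, A, B) dispatches by pasting T into the helper's name, so both operands must already have exactly type T. An assumed call site; clamp_window is made up:

static size_t clamp_window(size_t input_len, size_t max_window) {
  return BROTLI_MIN(size_t, input_len, max_window);  /* calls brotli_min_size_t */
}
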
+#define BROTLI_SWAP(T, A, I, J) { \
+ T __brotli_swap_tmp = (A)[(I)]; \
+ (A)[(I)] = (A)[(J)]; \
+ (A)[(J)] = __brotli_swap_tmp; \
+}
+
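
BROTLI_SWAP is a plain temporary-variable swap of two indices in the same array. Illustrative usage, assuming this header is included:

static void swap_ends(uint32_t dist[4]) {
  BROTLI_SWAP(uint32_t, dist, 0, 3);  /* exchanges dist[0] and dist[3] */
}
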
+#define BROTLI_ENSURE_CAPACITY(M, T, A, C, R) { \
+ if (C < (R)) { \
+ size_t _new_size = (C == 0) ? (R) : C; \
+ T* new_array; \
+ while (_new_size < (R)) _new_size *= 2; \
+ new_array = BROTLI_ALLOC((M), T, _new_size); \
+    if (!BROTLI_IS_OOM(M) && C != 0) \
+ memcpy(new_array, A, C * sizeof(T)); \
+ BROTLI_FREE((M), A); \
+ A = new_array; \
+ C = _new_size; \
+ } \
+}
+
+#endif /* BROTLI_ENC_PORT_H_ */
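
For reference, a sketch of how BROTLI_ENSURE_CAPACITY is meant to be driven. MemoryManager, BROTLI_ALLOC, BROTLI_IS_OOM and BROTLI_FREE come from the encoder's memory layer, not this header, and push_u32 is a hypothetical helper:

static void push_u32(MemoryManager *m, uint32_t **arr,
                     size_t *capacity, size_t *size, uint32_t value) {
  /* Grows *capacity by doubling until *size + 1 fits, reallocating and
     copying the old contents. */
  BROTLI_ENSURE_CAPACITY(m, uint32_t, *arr, *capacity, *size + 1);
  if (BROTLI_IS_OOM(m)) return;  /* allocation failed; leave *size alone */
  (*arr)[(*size)++] = value;
}
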