| OLD | NEW |
| (Empty) | |
| 1 /* Copyright 2013 Google Inc. All Rights Reserved. |
| 2 |
| 3 Distributed under MIT license. |
| 4 See file LICENSE for detail or copy at https://opensource.org/licenses/MIT |
| 5 */ |
| 6 |
| 7 // Macros for endianness, branch prediction and unaligned loads and stores. |
| 8 |
| 9 #ifndef BROTLI_ENC_PORT_H_ |
| 10 #define BROTLI_ENC_PORT_H_ |
| 11 |
| 12 #include <assert.h> |
| 13 #include <string.h> |
| 14 #include "./types.h" |
| 15 |
| 16 #if defined OS_LINUX || defined OS_CYGWIN |
| 17 #include <endian.h> |
| 18 #elif defined OS_FREEBSD |
| 19 #include <machine/endian.h> |
| 20 #elif defined OS_MACOSX |
| 21 #include <machine/endian.h> |
| 22 /* Let's try and follow the Linux convention */ |
| 23 #define __BYTE_ORDER BYTE_ORDER |
| 24 #define __LITTLE_ENDIAN LITTLE_ENDIAN |
| 25 #endif |
| 26 |
| 27 // define the macro IS_LITTLE_ENDIAN |
| 28 // using the above endian definitions from endian.h if |
| 29 // endian.h was included |
| 30 #ifdef __BYTE_ORDER |
| 31 #if __BYTE_ORDER == __LITTLE_ENDIAN |
| 32 #define IS_LITTLE_ENDIAN |
| 33 #endif |
| 34 |
| 35 #else |
| 36 |
| 37 #if defined(__LITTLE_ENDIAN__) |
| 38 #define IS_LITTLE_ENDIAN |
| 39 #endif |
| 40 #endif // __BYTE_ORDER |
| 41 |
| 42 #if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) |
| 43 #define IS_LITTLE_ENDIAN |
| 44 #endif |
| 45 |
| 46 // Enable little-endian optimization for x64 architecture on Windows. |
| 47 #if (defined(_WIN32) || defined(_WIN64)) && defined(_M_X64) |
| 48 #define IS_LITTLE_ENDIAN |
| 49 #endif |
| 50 |
| 51 /* Compatibility with non-clang compilers. */ |
| 52 #ifndef __has_builtin |
| 53 #define __has_builtin(x) 0 |
| 54 #endif |
| 55 |
| 56 #if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ > 95) || \ |
| 57 (defined(__llvm__) && __has_builtin(__builtin_expect)) |
| 58 #define PREDICT_FALSE(x) (__builtin_expect(x, 0)) |
| 59 #define PREDICT_TRUE(x) (__builtin_expect(!!(x), 1)) |
| 60 #else |
| 61 #define PREDICT_FALSE(x) (x) |
| 62 #define PREDICT_TRUE(x) (x) |
| 63 #endif |
| 64 |
| 65 // Portable handling of unaligned loads, stores, and copies. |
| 66 // On some platforms, like ARM, the copy functions can be more efficient |
| 67 // then a load and a store. |
| 68 |
| 69 #if defined(ARCH_PIII) || \ |
| 70 defined(ARCH_ATHLON) || defined(ARCH_K8) || defined(_ARCH_PPC) |
| 71 |
| 72 // x86 and x86-64 can perform unaligned loads/stores directly; |
| 73 // modern PowerPC hardware can also do unaligned integer loads and stores; |
| 74 // but note: the FPU still sends unaligned loads and stores to a trap handler! |
| 75 |
// x86, x86-64 and modern PowerPC handle unaligned scalar loads/stores in
// hardware.  The previous macros dereferenced a reinterpret_cast'ed pointer,
// which violates the strict-aliasing and alignment rules (undefined
// behavior).  A fixed-size memcpy compiles to the same single unaligned
// mov/load on these targets, so the well-defined form costs nothing.
inline uint32_t BROTLI_UNALIGNED_LOAD32(const void *p) {
  uint32_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
  uint64_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void BROTLI_UNALIGNED_STORE32(void *p, uint32_t v) {
  memcpy(p, &v, sizeof v);
}

inline void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
  memcpy(p, &v, sizeof v);
}
| 83 |
| 84 #elif defined(__arm__) && \ |
| 85 !defined(__ARM_ARCH_5__) && \ |
| 86 !defined(__ARM_ARCH_5T__) && \ |
| 87 !defined(__ARM_ARCH_5TE__) && \ |
| 88 !defined(__ARM_ARCH_5TEJ__) && \ |
| 89 !defined(__ARM_ARCH_6__) && \ |
| 90 !defined(__ARM_ARCH_6J__) && \ |
| 91 !defined(__ARM_ARCH_6K__) && \ |
| 92 !defined(__ARM_ARCH_6Z__) && \ |
| 93 !defined(__ARM_ARCH_6ZK__) && \ |
| 94 !defined(__ARM_ARCH_6T2__) |
| 95 |
| 96 // ARMv7 and newer support native unaligned accesses, but only of 16-bit |
| 97 // and 32-bit values (not 64-bit); older versions either raise a fatal signal, |
| 98 // do an unaligned read and rotate the words around a bit, or do the reads very |
| 99 // slowly (trip through kernel mode). |
| 100 |
// ARMv7+ supports native unaligned 32-bit accesses, but dereferencing a
// reinterpret_cast'ed pointer is still undefined behavior (strict aliasing
// and alignment rules).  memcpy of a fixed 4 bytes compiles to the same
// single unaligned access on these cores, so use the well-defined form.
inline uint32_t BROTLI_UNALIGNED_LOAD32(const void *p) {
  uint32_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void BROTLI_UNALIGNED_STORE32(void *p, uint32_t v) {
  memcpy(p, &v, sizeof v);
}
| 104 |
// Reads a 64-bit value from a possibly unaligned address.  Going through
// memcpy keeps the access well-defined; the compiler lowers it to the
// cheapest load sequence the target supports (pre-ARMv7 cores have no
// native unaligned 64-bit access).
inline uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
  uint64_t result;
  memcpy(&result, p, sizeof(result));
  return result;
}
| 110 |
// Writes a 64-bit value to a possibly unaligned address; memcpy keeps the
// store well-defined regardless of the pointer's alignment.
inline void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
  memcpy(p, &v, sizeof(v));
}
| 114 |
| 115 #else |
| 116 |
| 117 // These functions are provided for architectures that don't support |
| 118 // unaligned loads and stores. |
| 119 |
// Portable unaligned 32-bit read for targets without native unaligned
// access: memcpy is always well-defined and the compiler optimizes the
// fixed-size copy into the best available instruction sequence.
inline uint32_t BROTLI_UNALIGNED_LOAD32(const void *p) {
  uint32_t result;
  memcpy(&result, p, sizeof(result));
  return result;
}
| 125 |
// Portable unaligned 64-bit read, companion to BROTLI_UNALIGNED_LOAD32
// above; the fixed-size memcpy is well-defined for any source alignment.
inline uint64_t BROTLI_UNALIGNED_LOAD64(const void *p) {
  uint64_t result;
  memcpy(&result, p, sizeof(result));
  return result;
}
| 131 |
// Portable unaligned 32-bit write; memcpy is valid for any destination
// alignment on every architecture.
inline void BROTLI_UNALIGNED_STORE32(void *p, uint32_t v) {
  memcpy(p, &v, sizeof(v));
}
| 135 |
// Portable unaligned 64-bit write, companion to BROTLI_UNALIGNED_STORE32
// above.
inline void BROTLI_UNALIGNED_STORE64(void *p, uint64_t v) {
  memcpy(p, &v, sizeof(v));
}
| 139 |
| 140 #endif |
| 141 |
| 142 #endif // BROTLI_ENC_PORT_H_ |
| OLD | NEW |