/*
 * Speed-optimized CRC64 using slicing-by-four algorithm
 *
 * This uses only i386 instructions, but it is optimized for i686 and later
 * (including e.g. Pentium II/III/IV, Athlon XP, and Core 2).
 *
 * Authors: Igor Pavlov (original CRC32 assembly code)
 *          Lasse Collin (CRC64 adaptation of the modified CRC32 code)
 *
 * This file has been put into the public domain.
 * You can do whatever you want with this file.
 *
 * This code needs lzma_crc64_table, which can be created using the
 * following C code:

uint64_t lzma_crc64_table[4][256];

void
init_table(void)
{
	// ECMA-182
	static const uint64_t poly64 = UINT64_C(0xC96C5795D7870F42);

	for (size_t s = 0; s < 4; ++s) {
		for (size_t b = 0; b < 256; ++b) {
			uint64_t r = s == 0 ? b : lzma_crc64_table[s - 1][b];

			for (size_t i = 0; i < 8; ++i) {
				if (r & 1)
					r = (r >> 1) ^ poly64;
				else
					r >>= 1;
			}

			lzma_crc64_table[s][b] = r;
		}
	}
}

 * The prototype of the CRC64 function:
 * extern uint64_t lzma_crc64(const uint8_t *buf, size_t size, uint64_t crc);
 */
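
/*
 * A minimal usage sketch: the crc argument carries the value returned by
 * a previous call, so a stream can be processed in chunks starting from
 * zero, for example:
 *
 *	uint64_t crc = 0;
 *	crc = lzma_crc64(buf, size, crc);
 *	crc = lzma_crc64(buf2, size2, crc);
 */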

/*
 * On some systems, the functions need to be prefixed. The prefix is
 * usually an underscore.
 */
#ifndef __USER_LABEL_PREFIX__
#	define __USER_LABEL_PREFIX__
#endif
#define MAKE_SYM_CAT(prefix, sym) prefix ## sym
#define MAKE_SYM(prefix, sym) MAKE_SYM_CAT(prefix, sym)
#define LZMA_CRC64 MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64)
#define LZMA_CRC64_TABLE MAKE_SYM(__USER_LABEL_PREFIX__, lzma_crc64_table)
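
/*
 * For example, when __USER_LABEL_PREFIX__ is defined as _ (as GCC does
 * on Darwin and on 32-bit Windows), LZMA_CRC64 expands to _lzma_crc64;
 * on typical ELF systems the prefix is empty and the plain name
 * lzma_crc64 is used.
 */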

/*
 * Solaris assembler doesn't have .p2align, and Darwin uses .align
 * differently than GNU/Linux and Solaris.
 */
#if defined(__APPLE__) || defined(__MSDOS__)
#	define ALIGN(pow2, abs) .align pow2
#else
#	define ALIGN(pow2, abs) .align abs
#endif
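
/*
 * For example, ALIGN(4, 16) requests 16-byte alignment either way:
 * with the Darwin and DJGPP assemblers .align takes a power of two
 * (.align 4), while elsewhere it takes the number of bytes (.align 16).
 */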

	.text
	.globl	LZMA_CRC64

#if !defined(__APPLE__) && !defined(_WIN32) && !defined(__CYGWIN__) \
		&& !defined(__MSDOS__)
	.type	LZMA_CRC64, @function
#endif

	ALIGN(4, 16)
LZMA_CRC64:
	/*
	 * Register usage:
	 * %eax	crc LSB
	 * %edx	crc MSB
	 * %esi	buf
	 * %edi	size or buf + size
	 * %ebx	lzma_crc64_table
	 * %ebp	Table index
	 * %ecx	Temporary
	 */
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	pushl	%ebp
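	/*
	 * The four pushes above plus the return address occupy 0x14 bytes,
	 * so the first stack argument is now found at 0x14(%esp).
	 */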
	movl	0x14(%esp), %esi /* buf */
	movl	0x18(%esp), %edi /* size */
	movl	0x1C(%esp), %eax /* crc LSB */
	movl	0x20(%esp), %edx /* crc MSB */

	/*
	 * Store the address of lzma_crc64_table to %ebx. This is needed to
	 * get position-independent code (PIC).
	 *
	 * The PIC macro is defined by libtool, while __PIC__ is defined
	 * by GCC but only on some systems. Testing for both makes it simpler
	 * to test this code without libtool, and keeps the code working also
	 * when built with libtool but using something other than GCC.
	 *
	 * I understood that libtool may define PIC on Windows even though
	 * the code in Windows DLLs is not PIC in the sense that it is in
	 * ELF binaries, so we need a separate check to always use the
	 * non-PIC code on Windows.
	 */
#if (!defined(PIC) && !defined(__PIC__)) \
		|| (defined(_WIN32) || defined(__CYGWIN__))
	/* Not PIC */
	movl	$ LZMA_CRC64_TABLE, %ebx
#elif defined(__APPLE__)
	/* Mach-O */
	call	.L_get_pc
.L_pic:
	leal	.L_lzma_crc64_table$non_lazy_ptr-.L_pic(%ebx), %ebx
	movl	(%ebx), %ebx
#else
	/* ELF */
	call	.L_get_pc
	addl	$_GLOBAL_OFFSET_TABLE_, %ebx
	movl	LZMA_CRC64_TABLE@GOT(%ebx), %ebx
#endif

	/* Complement the initial value. */
	notl	%eax
	notl	%edx

.L_align:
	/*
	 * Check if there is enough input to use slicing-by-four.
	 * We need eight bytes, because the loop pre-reads four bytes.
	 */
	cmpl	$8, %edi
	jb	.L_rest

	/* Check if we have reached alignment of four bytes. */
	testl	$3, %esi
	jz	.L_slice
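
	/*
	 * In C terms, the byte-at-a-time update below corresponds roughly
	 * to the usual table-driven step (little-endian i386 assumed):
	 *
	 *	crc = lzma_crc64_table[0][(crc ^ *buf++) & 0xFF]
	 *			^ (crc >> 8);
	 */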
	/* Calculate CRC of the next input byte. */
	movzbl	(%esi), %ebp
	incl	%esi
	movzbl	%al, %ecx
	xorl	%ecx, %ebp
	shrdl	$8, %edx, %eax
	xorl	(%ebx, %ebp, 8), %eax
	shrl	$8, %edx
	xorl	4(%ebx, %ebp, 8), %edx
	decl	%edi
	jmp	.L_align

.L_slice:
	/*
	 * If we get here, there are at least eight bytes of aligned input
	 * available. Make %edi a multiple of four bytes. Store the possible
	 * remainder over the "size" variable in the argument stack.
	 */
	movl	%edi, 0x18(%esp)
	andl	$-4, %edi
	subl	%edi, 0x18(%esp)

	/*
	 * Let %edi be buf + size - 4 while running the main loop. This way
	 * we can compare for equality to determine when to exit the loop.
	 */
	addl	%esi, %edi
	subl	$4, %edi

	/* Read in the first four aligned bytes. */
	movl	(%esi), %ecx
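
	/*
	 * In C terms, each iteration of the loop below corresponds roughly
	 * to the following slicing-by-four step (little-endian i386
	 * assumed; the table rows are 0x800 bytes apart):
	 *
	 *	uint32_t tmp = (uint32_t)crc ^ *(const uint32_t *)buf;
	 *	buf += 4;
	 *	crc = lzma_crc64_table[3][tmp & 0xFF]
	 *			^ lzma_crc64_table[2][(tmp >> 8) & 0xFF]
	 *			^ (crc >> 32)
	 *			^ lzma_crc64_table[1][(tmp >> 16) & 0xFF]
	 *			^ lzma_crc64_table[0][tmp >> 24];
	 */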
.L_loop:
	xorl	%eax, %ecx
	movzbl	%cl, %ebp
	movl	0x1800(%ebx, %ebp, 8), %eax
	xorl	%edx, %eax
	movl	0x1804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	xorl	0x1000(%ebx, %ebp, 8), %eax
	xorl	0x1004(%ebx, %ebp, 8), %edx
	shrl	$16, %ecx
	movzbl	%cl, %ebp
	xorl	0x0800(%ebx, %ebp, 8), %eax
	xorl	0x0804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	addl	$4, %esi
	xorl	(%ebx, %ebp, 8), %eax
	xorl	4(%ebx, %ebp, 8), %edx

	/* Check for end of aligned input. */
	cmpl	%edi, %esi

	/*
	 * Copy the next four input bytes to %ecx. It is slightly faster
	 * to read them here than at the top of the loop.
	 */
	movl	(%esi), %ecx
	jb	.L_loop

	/*
	 * Process the remaining four bytes, which we have already
	 * copied to %ecx.
	 */
	xorl	%eax, %ecx
	movzbl	%cl, %ebp
	movl	0x1800(%ebx, %ebp, 8), %eax
	xorl	%edx, %eax
	movl	0x1804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	xorl	0x1000(%ebx, %ebp, 8), %eax
	xorl	0x1004(%ebx, %ebp, 8), %edx
	shrl	$16, %ecx
	movzbl	%cl, %ebp
	xorl	0x0800(%ebx, %ebp, 8), %eax
	xorl	0x0804(%ebx, %ebp, 8), %edx
	movzbl	%ch, %ebp
	addl	$4, %esi
	xorl	(%ebx, %ebp, 8), %eax
	xorl	4(%ebx, %ebp, 8), %edx

	/* Copy the number of remaining bytes to %edi. */
	movl	0x18(%esp), %edi

.L_rest:
	/* Check for end of input. */
	testl	%edi, %edi
	jz	.L_return

	/* Calculate CRC of the next input byte. */
	movzbl	(%esi), %ebp
	incl	%esi
	movzbl	%al, %ecx
	xorl	%ecx, %ebp
	shrdl	$8, %edx, %eax
	xorl	(%ebx, %ebp, 8), %eax
	shrl	$8, %edx
	xorl	4(%ebx, %ebp, 8), %edx
	decl	%edi
	jmp	.L_rest

.L_return:
	/* Complement the final value. */
	notl	%eax
	notl	%edx

	popl	%ebp
	popl	%edi
	popl	%esi
	popl	%ebx
	ret

#if defined(PIC) || defined(__PIC__)
	ALIGN(4, 16)
.L_get_pc:
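	/*
	 * A call to this helper leaves its own return address on the
	 * stack; copying that address into %ebx gives the caller the
	 * program counter it needs for PIC address calculations.
	 */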
	movl	(%esp), %ebx
	ret
#endif

#if defined(__APPLE__) && (defined(PIC) || defined(__PIC__))
	/* Mach-O PIC */
	.section __IMPORT,__pointers,non_lazy_symbol_pointers
.L_lzma_crc64_table$non_lazy_ptr:
	.indirect_symbol LZMA_CRC64_TABLE
	.long 0

#elif defined(_WIN32) || defined(__CYGWIN__)
#	ifdef DLL_EXPORT
	/* This is the equivalent of __declspec(dllexport). */
	.section .drectve
	.ascii " -export:lzma_crc64"
#	endif

#elif !defined(__MSDOS__)
	/* ELF */
	.size	LZMA_CRC64, .-LZMA_CRC64
#endif

/*
 * This is needed to support non-executable stack. It's ugly to
 * use __linux__ here, but I don't know a way to detect when
 * we are using GNU assembler.
 */
#if defined(__ELF__) && defined(__linux__)
	.section	.note.GNU-stack,"",@progbits
#endif