OLD | NEW |
(Empty) | |
| 1 #if defined(__arm__) |
| 2 #include "arm_arch.h" |
| 3 |
| 4 .text |
| 5 .fpu neon |
| 6 .code 32 |
| 7 .globl gcm_init_v8 |
| 8 .type gcm_init_v8,%function |
| 9 .align 4 |
| 10 gcm_init_v8: |
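| @ gcm_init_v8: r0 = Htable pointer, r1 = H (the hash subkey). Converts H |
| @ to the "twisted" form used below (H shifted left one bit, with a |
| @ conditional xor of the 0xc2..01 constant, i.e. the bit-reflected GHASH |
| @ reduction polynomial x^128+x^7+x^2+x+1), then computes H^2 and the |
| @ xor-of-halves values needed for Karatsuba, filling Htable[0..2]. |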
| 11 vld1.64 {q9},[r1] @ load input H |
| 12 vmov.i8 q11,#0xe1 |
| 13 vshl.i64 q11,q11,#57 @ 0xc2.0 |
| 14 vext.8 q3,q9,q9,#8 |
| 15 vshr.u64 q10,q11,#63 |
| 16 vdup.32 q9,d18[1] |
| 17 vext.8 q8,q10,q11,#8 @ t0=0xc2....01 |
| 18 vshr.u64 q10,q3,#63 |
| 19 vshr.s32 q9,q9,#31 @ broadcast carry bit |
| 20 vand q10,q10,q8 |
| 21 vshl.i64 q3,q3,#1 |
| 22 vext.8 q10,q10,q10,#8 |
| 23 vand q8,q8,q9 |
| 24 vorr q3,q3,q10 @ H<<<=1 |
| 25 veor q12,q3,q8 @ twisted H |
| 26 vst1.64 {q12},[r0]! @ store Htable[0] |
| 27 |
| 28 @ calculate H^2 |
| 29 vext.8 q8,q12,q12,#8 @ Karatsuba pre-processing |
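| @ The .byte sequences below (and throughout this file) are the |
| @ polynomial-multiply instructions (vmull.p64, written as pmull/pmull2) |
| @ emitted as raw opcodes; the intended instruction is given in each |
| @ comment. Presumably this keeps the file assembling with toolchains |
| @ whose assembler lacks the Crypto-extension mnemonics. |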
| 30 .byte 0xa8,0x0e,0xa8,0xf2 @ pmull q0,q12,q12 |
| 31 veor q8,q8,q12 |
| 32 .byte 0xa9,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q12 |
| 33 .byte 0xa0,0x2e,0xa0,0xf2 @ pmull q1,q8,q8 |
| 34 |
| 35 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing |
| 36 veor q10,q0,q2 |
| 37 veor q1,q1,q9 |
| 38 veor q1,q1,q10 |
| 39 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase |
| 40 |
| 41 vmov d4,d3 @ Xh|Xm - 256-bit result |
| 42 vmov d3,d0 @ Xm is rotated Xl |
| 43 veor q0,q1,q10 |
| 44 |
| 45 vext.8 q10,q0,q0,#8 @ 2nd phase |
| 46 .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 |
| 47 veor q10,q10,q2 |
| 48 veor q14,q0,q10 |
| 49 |
| 50 vext.8 q9,q14,q14,#8 @ Karatsuba pre-processing |
| 51 veor q9,q9,q14 |
| 52 vext.8 q13,q8,q9,#8 @ pack Karatsuba pre-processed |
| 53 vst1.64 {q13,q14},[r0] @ store Htable[1..2] |
| 54 |
| 55 bx lr |
| 56 .size gcm_init_v8,.-gcm_init_v8 |
| 57 .globl gcm_gmult_v8 |
| 58 .type gcm_gmult_v8,%function |
| 59 .align 4 |
| 60 gcm_gmult_v8: |
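| @ gcm_gmult_v8: r0 = Xi, r1 = Htable. Multiplies the current hash value |
| @ Xi by H in GF(2^128) (one GHASH step with no new input data) and |
| @ writes the reduced result back to [r0]. |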
| 61 vld1.64 {q9},[r0] @ load Xi |
| 62 vmov.i8 q11,#0xe1 |
| 63 vld1.64 {q12,q13},[r1] @ load twisted H, ... |
| 64 vshl.u64 q11,q11,#57 |
| 65 #ifndef __ARMEB__ |
| 66 vrev64.8 q9,q9 |
| 67 #endif |
| 68 vext.8 q3,q9,q9,#8 |
| 69 |
| 70 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo |
| 71 veor q9,q9,q3 @ Karatsuba pre-processing |
| 72 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi |
| 73 .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) |
| 74 |
| 75 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing |
| 76 veor q10,q0,q2 |
| 77 veor q1,q1,q9 |
| 78 veor q1,q1,q10 |
| 79 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction |
| 80 |
| 81 vmov d4,d3 @ Xh|Xm - 256-bit result |
| 82 vmov d3,d0 @ Xm is rotated Xl |
| 83 veor q0,q1,q10 |
| 84 |
| 85 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction |
| 86 .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 |
| 87 veor q10,q10,q2 |
| 88 veor q0,q0,q10 |
| 89 |
| 90 #ifndef __ARMEB__ |
| 91 vrev64.8 q0,q0 |
| 92 #endif |
| 93 vext.8 q0,q0,q0,#8 |
| 94 vst1.64 {q0},[r0] @ write out Xi |
| 95 |
| 96 bx lr |
| 97 .size gcm_gmult_v8,.-gcm_gmult_v8 |
| 98 .globl gcm_ghash_v8 |
| 99 .type gcm_ghash_v8,%function |
| 100 .align 4 |
| 101 gcm_ghash_v8: |
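| @ gcm_ghash_v8: r0 = Xi, r1 = Htable, r2 = inp, r3 = len (bytes, a |
| @ multiple of the 16-byte block size). Folds len bytes of input into Xi, |
| @ two blocks per iteration of the main loop using H^2 and H, with a |
| @ single-block tail when len/16 is odd. |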
| 102 vstmdb sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so |
| 103 vld1.64 {q0},[r0] @ load [rotated] Xi |
| 104 @ "[rotated]" means that |
| 105 @ loaded value would have |
| 106 @ to be rotated in order to |
| 107 @ make it appear as in |
| 108 @ algorithm specification |
| 109 subs r3,r3,#32 @ see if r3 is 32 or larger |
| 110 mov r12,#16 @ r12 is used as post- |
| 111 @ increment for input pointer; |
| 112 @ as loop is modulo-scheduled |
| 113 @ r12 is zeroed just in time |
| 114 @ to preclude overstepping |
| 115 @ inp[len], which means that |
| 116 @ last block[s] are actually |
| 117 @ loaded twice, but last |
| 118 @ copy is not processed |
| 119 vld1.64 {q12,q13},[r1]! @ load twisted H, ..., H^2 |
| 120 vmov.i8 q11,#0xe1 |
| 121 vld1.64 {q14},[r1] |
| 122 moveq r12,#0 @ is it time to zero r12? |
| 123 vext.8 q0,q0,q0,#8 @ rotate Xi |
| 124 vld1.64 {q8},[r2]! @ load [rotated] I[0] |
| 125 vshl.u64 q11,q11,#57 @ compose 0xc2.0 constant |
| 126 #ifndef __ARMEB__ |
| 127 vrev64.8 q8,q8 |
| 128 vrev64.8 q0,q0 |
| 129 #endif |
| 130 vext.8 q3,q8,q8,#8 @ rotate I[0] |
| 131 blo .Lodd_tail_v8 @ r3 was less than 32 |
| 132 vld1.64 {q9},[r2],r12 @ load [rotated] I[1] |
| 133 #ifndef __ARMEB__ |
| 134 vrev64.8 q9,q9 |
| 135 #endif |
| 136 vext.8 q7,q9,q9,#8 |
| 137 veor q3,q3,q0 @ I[i]^=Xi |
| 138 .byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 |
| 139 veor q9,q9,q7 @ Karatsuba pre-processing |
| 140 .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 |
| 141 b .Loop_mod2x_v8 |
| 142 |
| 143 .align 4 |
| 144 .Loop_mod2x_v8: |
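| @ Each iteration folds two blocks: Xi = (Xi ^ I[i])·H^2 ^ I[i+1]·H, |
| @ accumulated before a single reduction, so only one reduction is |
| @ paid per 32 bytes of input. |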
| 145 vext.8 q10,q3,q3,#8 |
| 146 subs r3,r3,#32 @ is there more data? |
| 147 .byte 0x86,0x0e,0xac,0xf2 @ pmull q0,q14,q3 @ H^2.lo·Xi.lo |
| 148 movlo r12,#0 @ is it time to zero r12? |
| 149 |
| 150 .byte 0xa2,0xae,0xaa,0xf2 @ pmull q5,q13,q9 |
| 151 veor q10,q10,q3 @ Karatsuba pre-processing |
| 152 .byte 0x87,0x4e,0xad,0xf2 @ pmull2 q2,q14,q3 @ H^2.hi·Xi.hi |
| 153 veor q0,q0,q4 @ accumulate |
| 154 .byte 0xa5,0x2e,0xab,0xf2 @ pmull2 q1,q13,q10 @ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi) |
| 155 vld1.64 {q8},[r2],r12 @ load [rotated] I[i+2] |
| 156 |
| 157 veor q2,q2,q6 |
| 158 moveq r12,#0 @ is it time to zero r12? |
| 159 veor q1,q1,q5 |
| 160 |
| 161 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing |
| 162 veor q10,q0,q2 |
| 163 veor q1,q1,q9 |
| 164 vld1.64 {q9},[r2],r12 @ load [rotated] I[i+3] |
| 165 #ifndef __ARMEB__ |
| 166 vrev64.8 q8,q8 |
| 167 #endif |
| 168 veor q1,q1,q10 |
| 169 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction |
| 170 |
| 171 #ifndef __ARMEB__ |
| 172 vrev64.8 q9,q9 |
| 173 #endif |
| 174 vmov d4,d3 @ Xh|Xm - 256-bit result |
| 175 vmov d3,d0 @ Xm is rotated Xl |
| 176 vext.8 q7,q9,q9,#8 |
| 177 vext.8 q3,q8,q8,#8 |
| 178 veor q0,q1,q10 |
| 179 .byte 0x8e,0x8e,0xa8,0xf2 @ pmull q4,q12,q7 @ H·Ii+1 |
| 180 veor q3,q3,q2 @ accumulate q3 early |
| 181 |
| 182 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction |
| 183 .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 |
| 184 veor q3,q3,q10 |
| 185 veor q9,q9,q7 @ Karatsuba pre-processing |
| 186 veor q3,q3,q0 |
| 187 .byte 0x8f,0xce,0xa9,0xf2 @ pmull2 q6,q12,q7 |
| 188 bhs .Loop_mod2x_v8 @ there was at least 32 more bytes |
| 189 |
| 190 veor q2,q2,q10 |
| 191 vext.8 q3,q8,q8,#8 @ re-construct q3 |
| 192 adds r3,r3,#32 @ re-construct r3 |
| 193 veor q0,q0,q2 @ re-construct q0 |
| 194 beq .Ldone_v8 @ is r3 zero? |
| 195 .Lodd_tail_v8: |
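| @ Final single 16-byte block (reached when len was not a multiple |
| @ of 32): Xi = (Xi ^ I[last])·H, reduced. |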
| 196 vext.8 q10,q0,q0,#8 |
| 197 veor q3,q3,q0 @ inp^=Xi |
| 198 veor q9,q8,q10 @ q9 is rotated inp^Xi |
| 199 |
| 200 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo |
| 201 veor q9,q9,q3 @ Karatsuba pre-processing |
| 202 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi |
| 203 .byte 0xa2,0x2e,0xaa,0xf2 @ pmull q1,q13,q9 @ (H.lo+H.hi)·(Xi.lo+Xi.hi) |
| 204 |
| 205 vext.8 q9,q0,q2,#8 @ Karatsuba post-processing |
| 206 veor q10,q0,q2 |
| 207 veor q1,q1,q9 |
| 208 veor q1,q1,q10 |
| 209 .byte 0x26,0x4e,0xe0,0xf2 @ pmull q10,q0,q11 @ 1st phase of reduction |
| 210 |
| 211 vmov d4,d3 @ Xh|Xm - 256-bit result |
| 212 vmov d3,d0 @ Xm is rotated Xl |
| 213 veor q0,q1,q10 |
| 214 |
| 215 vext.8 q10,q0,q0,#8 @ 2nd phase of reduction |
| 216 .byte 0x26,0x0e,0xa0,0xf2 @ pmull q0,q0,q11 |
| 217 veor q10,q10,q2 |
| 218 veor q0,q0,q10 |
| 219 |
| 220 .Ldone_v8: |
| 221 #ifndef __ARMEB__ |
| 222 vrev64.8 q0,q0 |
| 223 #endif |
| 224 vext.8 q0,q0,q0,#8 |
| 225 vst1.64 {q0},[r0] @ write out Xi |
| 226 |
| 227 vldmia sp!,{d8,d9,d10,d11,d12,d13,d14,d15} @ 32-bit ABI says so |
| 228 bx lr |
| 229 .size gcm_ghash_v8,.-gcm_ghash_v8 |
| 230 .byte 71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0 |
| 231 .align 2 |
| 232 .align 2 |
| 233 #endif |