//===-------------------- UnwindRegistersRestore.s ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//


#if __i386__
    .text
    .globl __ZN9libunwind13Registers_x866jumptoEv
    .private_extern __ZN9libunwind13Registers_x866jumptoEv
__ZN9libunwind13Registers_x866jumptoEv:
#
#  void libunwind::Registers_x86::jumpto()
#
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +
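#
# eip and eax cannot be loaded directly once the other registers are
# live, so both are staged just below the new esp and recovered with
# pop/ret after the stack switch.
#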
    movl   4(%esp), %eax
    # set up eax and ret on new stack location
    movl  28(%eax), %edx   # edx holds new stack pointer
    subl   $8,%edx
    movl  %edx, 28(%eax)
    movl   0(%eax), %ebx
    movl  %ebx, 0(%edx)
    movl  40(%eax), %ebx
    movl  %ebx, 4(%edx)
    # we now have ret and eax pushed onto where new stack will be
    # restore all registers
    movl   4(%eax), %ebx
    movl   8(%eax), %ecx
    movl  12(%eax), %edx
    movl  16(%eax), %edi
    movl  20(%eax), %esi
    movl  24(%eax), %ebp
    movl  28(%eax), %esp
    # skip ss
    # skip eflags
    pop   %eax   # eax was already pushed on new stack
    ret          # eip was already pushed on new stack
    # skip cs
    # skip ds
    # skip es
    # skip fs
    # skip gs

#elif __x86_64__

    .text
    .globl __ZN9libunwind16Registers_x86_646jumptoEv
    .private_extern __ZN9libunwind16Registers_x86_646jumptoEv
__ZN9libunwind16Registers_x86_646jumptoEv:
#
#  void libunwind::Registers_x86_64::jumpto()
#
# On entry, thread_state pointer is in rdi

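# The new rdi and rip are staged just below the new rsp (same trick as
# the i386 path above) and recovered with pop/ret once every other
# register has been restored.
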
    movq  56(%rdi), %rax   # rax holds new stack pointer
    subq  $16, %rax
    movq  %rax, 56(%rdi)
    movq  32(%rdi), %rbx   # store new rdi on new stack
    movq  %rbx, 0(%rax)
    movq  128(%rdi), %rbx  # store new rip on new stack
    movq  %rbx, 8(%rax)
    # restore all registers
    movq    0(%rdi), %rax
    movq    8(%rdi), %rbx
    movq   16(%rdi), %rcx
    movq   24(%rdi), %rdx
    # restore rdi later
    movq   40(%rdi), %rsi
    movq   48(%rdi), %rbp
    # restore rsp later
    movq   64(%rdi), %r8
    movq   72(%rdi), %r9
    movq   80(%rdi), %r10
    movq   88(%rdi), %r11
    movq   96(%rdi), %r12
    movq  104(%rdi), %r13
    movq  112(%rdi), %r14
    movq  120(%rdi), %r15
    # skip rflags
    # skip cs
    # skip fs
    # skip gs
    movq  56(%rdi), %rsp   # cut back rsp to new location
    pop   %rdi             # rdi was saved here earlier
    ret                    # rip was saved here


#elif __ppc__

    .text
    .globl __ZN9libunwind13Registers_ppc6jumptoEv
    .private_extern __ZN9libunwind13Registers_ppc6jumptoEv
__ZN9libunwind13Registers_ppc6jumptoEv:
;
;  void libunwind::Registers_ppc::jumpto()
;
; On entry:
;  thread_state pointer is in r3
;

    ; restore integral registers
    ; skip r0 for now
    ; skip r1 for now
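    ; r0, r4, and r5 are needed as scratch during the vector restore,
    ; r3 holds the thread_state pointer, and r1 is the stack switch
    ; itself, so all five are restored at the very end (see Lnovec)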
    lwz  r2, 16(r3)
    ; skip r3 for now
    ; skip r4 for now
    ; skip r5 for now
    lwz  r6, 32(r3)
    lwz  r7, 36(r3)
    lwz  r8, 40(r3)
    lwz  r9, 44(r3)
    lwz  r10, 48(r3)
    lwz  r11, 52(r3)
    lwz  r12, 56(r3)
    lwz  r13, 60(r3)
    lwz  r14, 64(r3)
    lwz  r15, 68(r3)
    lwz  r16, 72(r3)
    lwz  r17, 76(r3)
    lwz  r18, 80(r3)
    lwz  r19, 84(r3)
    lwz  r20, 88(r3)
    lwz  r21, 92(r3)
    lwz  r22, 96(r3)
    lwz  r23,100(r3)
    lwz  r24,104(r3)
    lwz  r25,108(r3)
    lwz  r26,112(r3)
    lwz  r27,116(r3)
    lwz  r28,120(r3)
    lwz  r29,124(r3)
    lwz  r30,128(r3)
    lwz  r31,132(r3)

    ; restore float registers
    lfd  f0, 160(r3)
    lfd  f1, 168(r3)
    lfd  f2, 176(r3)
    lfd  f3, 184(r3)
    lfd  f4, 192(r3)
    lfd  f5, 200(r3)
    lfd  f6, 208(r3)
    lfd  f7, 216(r3)
    lfd  f8, 224(r3)
    lfd  f9, 232(r3)
    lfd  f10,240(r3)
    lfd  f11,248(r3)
    lfd  f12,256(r3)
    lfd  f13,264(r3)
    lfd  f14,272(r3)
    lfd  f15,280(r3)
    lfd  f16,288(r3)
    lfd  f17,296(r3)
    lfd  f18,304(r3)
    lfd  f19,312(r3)
    lfd  f20,320(r3)
    lfd  f21,328(r3)
    lfd  f22,336(r3)
    lfd  f23,344(r3)
    lfd  f24,352(r3)
    lfd  f25,360(r3)
    lfd  f26,368(r3)
    lfd  f27,376(r3)
    lfd  f28,384(r3)
    lfd  f29,392(r3)
    lfd  f30,400(r3)
    lfd  f31,408(r3)

    ; restore vector registers if any are in use
    lwz    r5, 156(r3)       ; test VRsave
    cmpwi  r5, 0
    beq    Lnovec

    subi   r4,r1,16
    rlwinm r4,r4,0,0,27      ; mask off low 4 bits
    ; r4 is now a 16-byte aligned pointer into the red zone
    ; the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer
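    ; (the Darwin PPC ABI reserves a red zone below the stack pointer,
    ; so r4 can point there without allocating a stack frame)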


#define LOAD_VECTOR_UNALIGNEDl(_index) \
    andis. r0,r5,(1<<(15-_index))    @\
    beq    Ldone ## _index           @\
    lwz    r0, 424+_index*16(r3)     @\
    stw    r0, 0(r4)                 @\
    lwz    r0, 424+_index*16+4(r3)   @\
    stw    r0, 4(r4)                 @\
    lwz    r0, 424+_index*16+8(r3)   @\
    stw    r0, 8(r4)                 @\
    lwz    r0, 424+_index*16+12(r3)  @\
    stw    r0, 12(r4)                @\
    lvx    v ## _index,0,r4          @\
Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index) \
    andi.  r0,r5,(1<<(31-_index))    @\
    beq    Ldone ## _index           @\
    lwz    r0, 424+_index*16(r3)     @\
    stw    r0, 0(r4)                 @\
    lwz    r0, 424+_index*16+4(r3)   @\
    stw    r0, 4(r4)                 @\
    lwz    r0, 424+_index*16+8(r3)   @\
    stw    r0, 8(r4)                 @\
    lwz    r0, 424+_index*16+12(r3)  @\
    stw    r0, 12(r4)                @\
    lvx    v ## _index,0,r4          @\
Ldone ## _index:
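
; VRsave has one bit per vector register (bit 0 = v0 ... bit 31 = v31):
; andis. tests the high halfword (v0-v15) and andi. the low halfword
; (v16-v31), so only registers marked live are reloaded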

    LOAD_VECTOR_UNALIGNEDl(0)
    LOAD_VECTOR_UNALIGNEDl(1)
    LOAD_VECTOR_UNALIGNEDl(2)
    LOAD_VECTOR_UNALIGNEDl(3)
    LOAD_VECTOR_UNALIGNEDl(4)
    LOAD_VECTOR_UNALIGNEDl(5)
    LOAD_VECTOR_UNALIGNEDl(6)
    LOAD_VECTOR_UNALIGNEDl(7)
    LOAD_VECTOR_UNALIGNEDl(8)
    LOAD_VECTOR_UNALIGNEDl(9)
    LOAD_VECTOR_UNALIGNEDl(10)
    LOAD_VECTOR_UNALIGNEDl(11)
    LOAD_VECTOR_UNALIGNEDl(12)
    LOAD_VECTOR_UNALIGNEDl(13)
    LOAD_VECTOR_UNALIGNEDl(14)
    LOAD_VECTOR_UNALIGNEDl(15)
    LOAD_VECTOR_UNALIGNEDh(16)
    LOAD_VECTOR_UNALIGNEDh(17)
    LOAD_VECTOR_UNALIGNEDh(18)
    LOAD_VECTOR_UNALIGNEDh(19)
    LOAD_VECTOR_UNALIGNEDh(20)
    LOAD_VECTOR_UNALIGNEDh(21)
    LOAD_VECTOR_UNALIGNEDh(22)
    LOAD_VECTOR_UNALIGNEDh(23)
    LOAD_VECTOR_UNALIGNEDh(24)
    LOAD_VECTOR_UNALIGNEDh(25)
    LOAD_VECTOR_UNALIGNEDh(26)
    LOAD_VECTOR_UNALIGNEDh(27)
    LOAD_VECTOR_UNALIGNEDh(28)
    LOAD_VECTOR_UNALIGNEDh(29)
    LOAD_VECTOR_UNALIGNEDh(30)
    LOAD_VECTOR_UNALIGNEDh(31)

Lnovec:
    lwz    r0, 136(r3)   ; __cr
    mtocrf 255,r0
    lwz    r0, 148(r3)   ; __ctr
    mtctr  r0
    lwz    r0, 0(r3)     ; __srr0, the PC to resume at
    mtctr  r0            ; note: clobbers the __ctr loaded above; ctr must
                         ; carry the target PC for the bctr below
    lwz    r0, 8(r3)     ; do r0 now
    lwz    r5, 28(r3)    ; do r5 now
    lwz    r4, 24(r3)    ; do r4 now
    lwz    r1, 12(r3)    ; do sp now
    lwz    r3, 20(r3)    ; do r3 last
    bctr

#elif __arm64__

    .text
    .globl __ZN9libunwind15Registers_arm646jumptoEv
    .private_extern __ZN9libunwind15Registers_arm646jumptoEv
__ZN9libunwind15Registers_arm646jumptoEv:
;
;  void libunwind::Registers_arm64::jumpto()
;
; On entry:
;  thread_state pointer is in x0
;
    ; skip restore of x0,x1 for now
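    ; (x0 holds the thread_state pointer while the loads below run, so
    ; x0 and x1 are reloaded from offset 0 only as the very last step)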
    ldp  x2, x3,  [x0, #0x010]
    ldp  x4, x5,  [x0, #0x020]
    ldp  x6, x7,  [x0, #0x030]
    ldp  x8, x9,  [x0, #0x040]
    ldp  x10,x11, [x0, #0x050]
    ldp  x12,x13, [x0, #0x060]
    ldp  x14,x15, [x0, #0x070]
    ldp  x16,x17, [x0, #0x080]
    ldp  x18,x19, [x0, #0x090]
    ldp  x20,x21, [x0, #0x0A0]
    ldp  x22,x23, [x0, #0x0B0]
    ldp  x24,x25, [x0, #0x0C0]
    ldp  x26,x27, [x0, #0x0D0]
    ldp  x28,fp,  [x0, #0x0E0]
    ldr  lr,      [x0, #0x100]   ; restore pc into lr
    ldr  x1,      [x0, #0x0F8]
    mov  sp,x1                   ; restore sp

    ldp  d0, d1,  [x0, #0x110]
    ldp  d2, d3,  [x0, #0x120]
    ldp  d4, d5,  [x0, #0x130]
    ldp  d6, d7,  [x0, #0x140]
    ldp  d8, d9,  [x0, #0x150]
    ldp  d10,d11, [x0, #0x160]
    ldp  d12,d13, [x0, #0x170]
    ldp  d14,d15, [x0, #0x180]
    ldp  d16,d17, [x0, #0x190]
    ldp  d18,d19, [x0, #0x1A0]
    ldp  d20,d21, [x0, #0x1B0]
    ldp  d22,d23, [x0, #0x1C0]
    ldp  d24,d25, [x0, #0x1D0]
    ldp  d26,d27, [x0, #0x1E0]
    ldp  d28,d29, [x0, #0x1F0]
    ldr  d30,     [x0, #0x200]
    ldr  d31,     [x0, #0x208]

    ldp  x0, x1,  [x0, #0x000]   ; restore x0,x1
    ret  lr                      ; jump to pc



#endif