// Copyright 2014 The Crashpad Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#if defined(__i386__) || defined(__x86_64__)

// namespace crashpad {
// void CaptureContext(x86_thread_state_t* x86_thread_state);
// }  // namespace crashpad
#define CAPTURECONTEXT_SYMBOL __ZN8crashpad14CaptureContextEP16x86_thread_state
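
// A minimal usage sketch, assuming the commented-out C++ declaration above is
// what callers see:
//   x86_thread_state_t state;
//   crashpad::CaptureContext(&state);
// On return, state holds the register state as of CaptureContext()’s exit.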

  .section __TEXT,__text,regular,pure_instructions
  .private_extern CAPTURECONTEXT_SYMBOL
  .globl CAPTURECONTEXT_SYMBOL
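  // (On Mach-O, .align takes a power of two, so .align 4 requests 16-byte
  // alignment; 0x90 is the x86 nop opcode used as padding.)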
  .align 4, 0x90
CAPTURECONTEXT_SYMBOL:

#if defined(__i386__)

  .cfi_startproc

  pushl %ebp
  .cfi_def_cfa_offset 8
  .cfi_offset %ebp, -8
  movl %esp, %ebp
  .cfi_def_cfa_register %ebp

  // Note that 16-byte stack alignment is not maintained because this function
  // does not call out to any other.

  // pushfl first, because some instructions (but probably none used here)
  // affect %eflags. %eflags will be in -4(%ebp).
  pushfl

  // Save the original value of %eax, and use %eax to hold the
  // x86_thread_state* argument. The original value of %eax will be in
  // -8(%ebp).
  pushl %eax
  movl 8(%ebp), %eax
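
  // Stack frame at this point (a sketch for reference):
  //   8(%ebp): the x86_thread_state* argument
  //   4(%ebp): return address pushed by the caller’s call instruction
  //    (%ebp): the caller’s %ebp
  //  -4(%ebp): %eflags saved by pushfl
  //  -8(%ebp): the original %eax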

  // Initialize the header identifying the x86_thread_state_t structure as
  // carrying an x86_thread_state32_t (flavor x86_THREAD_STATE32) of size
  // x86_THREAD_STATE32_COUNT 32-bit values.
  movl $1, (%eax)  // x86_thread_state->tsh.flavor
  movl $16, 4(%eax)  // x86_thread_state->tsh.count
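  // In C, this would be roughly (a sketch using the Mach constants named in
  // the comment above):
  //   x86_thread_state->tsh.flavor = x86_THREAD_STATE32;       // 1
  //   x86_thread_state->tsh.count = x86_THREAD_STATE32_COUNT;  // 16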

  // General-purpose registers whose values haven’t changed can be captured
  // directly.
  movl %ebx, 12(%eax)  // x86_thread_state->uts.ts32.__ebx
  movl %ecx, 16(%eax)  // x86_thread_state->uts.ts32.__ecx
  movl %edx, 20(%eax)  // x86_thread_state->uts.ts32.__edx
  movl %edi, 24(%eax)  // x86_thread_state->uts.ts32.__edi
  movl %esi, 28(%eax)  // x86_thread_state->uts.ts32.__esi

  // Now that the original value of %edx has been saved, it can be repurposed
  // to hold other registers’ values.

  // The original %eax was saved on the stack above.
  movl -8(%ebp), %edx
  movl %edx, 8(%eax)  // x86_thread_state->uts.ts32.__eax

  // The original %ebp was saved on the stack in this function’s prologue.
  movl (%ebp), %edx
  movl %edx, 32(%eax)  // x86_thread_state->uts.ts32.__ebp

  // %esp was saved in %ebp in this function’s prologue, but the caller’s %esp
  // is 8 more than this value: 4 for the original %ebp saved on the stack in
  // this function’s prologue, and 4 for the return address saved on the stack
  // by the call instruction that reached this function.
  leal 8(%ebp), %edx
  movl %edx, 36(%eax)  // x86_thread_state->uts.ts32.__esp

  // The original %eflags was saved on the stack above.
  movl -4(%ebp), %edx
  movl %edx, 44(%eax)  // x86_thread_state->uts.ts32.__eflags

  // %eip can’t be accessed directly, but the return address saved on the
  // stack by the call instruction that reached this function can be used.
  movl 4(%ebp), %edx
  movl %edx, 48(%eax)  // x86_thread_state->uts.ts32.__eip

  // The segment registers are 16 bits wide, but x86_thread_state declares
  // them as unsigned 32-bit values, so zero the top half.
  xorl %edx, %edx
  movw %ss, %dx
  movl %edx, 40(%eax)  // x86_thread_state->uts.ts32.__ss
  movw %cs, %dx
  movl %edx, 52(%eax)  // x86_thread_state->uts.ts32.__cs
  movw %ds, %dx
  movl %edx, 56(%eax)  // x86_thread_state->uts.ts32.__ds
  movw %es, %dx
  movl %edx, 60(%eax)  // x86_thread_state->uts.ts32.__es
  movw %fs, %dx
  movl %edx, 64(%eax)  // x86_thread_state->uts.ts32.__fs
  movw %gs, %dx
  movl %edx, 68(%eax)  // x86_thread_state->uts.ts32.__gs
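  // movw writes only %dx, the low 16 bits of %edx, so the single xorl above
  // keeps the high half zero for each of the six stores.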

  // Clean up by restoring clobbered registers, even those considered volatile
  // by the ABI, so that the captured context represents the state at this
  // function’s exit.
  popl %eax
  popfl

  popl %ebp

  ret

  .cfi_endproc

#elif defined(__x86_64__)

  .cfi_startproc

  pushq %rbp
  .cfi_def_cfa_offset 16
  .cfi_offset %rbp, -16
  movq %rsp, %rbp
  .cfi_def_cfa_register %rbp

  // Note that 16-byte stack alignment is not maintained because this function
  // does not call out to any other.

  // pushfq first, because some instructions (but probably none used here)
  // affect %rflags. %rflags will be in -8(%rbp).
  pushfq
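
  // Stack frame at this point (a sketch for reference; the x86_thread_state*
  // argument arrives in %rdi, not on the stack):
  //   8(%rbp): return address pushed by the caller’s call instruction
  //    (%rbp): the caller’s %rbp
  //  -8(%rbp): %rflags saved by pushfq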

  // Initialize the header identifying the x86_thread_state_t structure as
  // carrying an x86_thread_state64_t (flavor x86_THREAD_STATE64) of size
  // x86_THREAD_STATE64_COUNT 32-bit values.
  movl $4, (%rdi)  // x86_thread_state->tsh.flavor
  movl $42, 4(%rdi)  // x86_thread_state->tsh.count
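  // In C, this would be roughly (a sketch using the Mach constants named in
  // the comment above):
  //   x86_thread_state->tsh.flavor = x86_THREAD_STATE64;       // 4
  //   x86_thread_state->tsh.count = x86_THREAD_STATE64_COUNT;  // 42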

  // General-purpose registers whose values haven’t changed can be captured
  // directly.
  movq %rax, 8(%rdi)  // x86_thread_state->uts.ts64.__rax
  movq %rbx, 16(%rdi)  // x86_thread_state->uts.ts64.__rbx
  movq %rcx, 24(%rdi)  // x86_thread_state->uts.ts64.__rcx
  movq %rdx, 32(%rdi)  // x86_thread_state->uts.ts64.__rdx
  movq %rsi, 48(%rdi)  // x86_thread_state->uts.ts64.__rsi
  movq %r8, 72(%rdi)  // x86_thread_state->uts.ts64.__r8
  movq %r9, 80(%rdi)  // x86_thread_state->uts.ts64.__r9
  movq %r10, 88(%rdi)  // x86_thread_state->uts.ts64.__r10
  movq %r11, 96(%rdi)  // x86_thread_state->uts.ts64.__r11
  movq %r12, 104(%rdi)  // x86_thread_state->uts.ts64.__r12
  movq %r13, 112(%rdi)  // x86_thread_state->uts.ts64.__r13
  movq %r14, 120(%rdi)  // x86_thread_state->uts.ts64.__r14
  movq %r15, 128(%rdi)  // x86_thread_state->uts.ts64.__r15

  // Because of the calling convention, there’s no way to recover the value of
  // the caller’s %rdi as it existed prior to calling this function. This
  // function captures a snapshot of the register state at its return, which
  // involves %rdi containing a pointer to its first argument. Callers that
  // require the value of %rdi prior to calling this function should obtain it
  // separately. For example:
  //   uint64_t rdi;
  //   asm("movq %%rdi, %0" : "=m"(rdi));
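  // (A sketch: the asm statement must come before the call to this function,
  // since the call itself loads %rdi with the x86_thread_state* argument.)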
  movq %rdi, 40(%rdi)  // x86_thread_state->uts.ts64.__rdi

  // Now that the original value of %rax has been saved, it can be repurposed
  // to hold other registers’ values.

  // The original %rbp was saved on the stack in this function’s prologue.
  movq (%rbp), %rax
  movq %rax, 56(%rdi)  // x86_thread_state->uts.ts64.__rbp

  // %rsp was saved in %rbp in this function’s prologue, but the caller’s %rsp
  // is 16 more than this value: 8 for the original %rbp saved on the stack in
  // this function’s prologue, and 8 for the return address saved on the stack
  // by the call instruction that reached this function.
  leaq 16(%rbp), %rax
  movq %rax, 64(%rdi)  // x86_thread_state->uts.ts64.__rsp

  // %rip can’t be accessed directly, but the return address saved on the
  // stack by the call instruction that reached this function can be used.
  movq 8(%rbp), %rax
  movq %rax, 136(%rdi)  // x86_thread_state->uts.ts64.__rip

  // The original %rflags was saved on the stack above.
  movq -8(%rbp), %rax
  movq %rax, 144(%rdi)  // x86_thread_state->uts.ts64.__rflags

  // The segment registers are 16 bits wide, but x86_thread_state declares
  // them as unsigned 64-bit values, so zero the top portion.
  xorq %rax, %rax
  movw %cs, %ax
  movq %rax, 152(%rdi)  // x86_thread_state->uts.ts64.__cs
  movw %fs, %ax
  movq %rax, 160(%rdi)  // x86_thread_state->uts.ts64.__fs
  movw %gs, %ax
  movq %rax, 168(%rdi)  // x86_thread_state->uts.ts64.__gs
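  // movw writes only %ax, the low 16 bits of %rax, so the single xorq above
  // keeps the upper bits zero for each of the three stores.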

  // Clean up by restoring clobbered registers, even those considered volatile
  // by the ABI, so that the captured context represents the state at this
  // function’s exit.
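  // The original %rax was stored to 8(%rdi) (__rax) above, so it can be
  // reloaded from the structure here.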
  movq 8(%rdi), %rax
  popfq

  popq %rbp

  ret

  .cfi_endproc

#endif

  .subsections_via_symbols

#endif