OLD | NEW |
1 // Copyright 2014 The Crashpad Authors. All rights reserved. | 1 // Copyright 2014 The Crashpad Authors. All rights reserved. |
2 // | 2 // |
3 // Licensed under the Apache License, Version 2.0 (the "License"); | 3 // Licensed under the Apache License, Version 2.0 (the "License"); |
4 // you may not use this file except in compliance with the License. | 4 // you may not use this file except in compliance with the License. |
5 // You may obtain a copy of the License at | 5 // You may obtain a copy of the License at |
6 // | 6 // |
7 // http://www.apache.org/licenses/LICENSE-2.0 | 7 // http://www.apache.org/licenses/LICENSE-2.0 |
8 // | 8 // |
9 // Unless required by applicable law or agreed to in writing, software | 9 // Unless required by applicable law or agreed to in writing, software |
10 // distributed under the License is distributed on an "AS IS" BASIS, | 10 // distributed under the License is distributed on an "AS IS" BASIS, |
(...skipping 14 matching lines...) |
25 | 25 |
26 namespace crashpad { | 26 namespace crashpad { |
27 namespace test { | 27 namespace test { |
28 namespace { | 28 namespace { |
29 | 29 |
30 // If the context structure has fields that tell whether it’s valid, such as | 30 // If the context structure has fields that tell whether it’s valid, such as |
31 // magic numbers or size fields, sanity-checks those fields for validity with | 31 // magic numbers or size fields, sanity-checks those fields for validity with |
32 // fatal gtest assertions. For other fields, where it’s possible to reason about | 32 // fatal gtest assertions. For other fields, where it’s possible to reason about |
33 // their validity based solely on their contents, sanity-checks via nonfatal | 33 // their validity based solely on their contents, sanity-checks via nonfatal |
34 // gtest assertions. | 34 // gtest assertions. |
35 void SanityCheckContext(NativeCPUContext* context) { | 35 void SanityCheckContext(const NativeCPUContext& context) { |
36 #if defined(ARCH_CPU_X86) | 36 #if defined(ARCH_CPU_X86) |
37 ASSERT_EQ(x86_THREAD_STATE32, context->tsh.flavor); | 37 ASSERT_EQ(x86_THREAD_STATE32, context.tsh.flavor); |
38 ASSERT_EQ(implicit_cast<int>(x86_THREAD_STATE32_COUNT), context->tsh.count); | 38 ASSERT_EQ(implicit_cast<int>(x86_THREAD_STATE32_COUNT), context.tsh.count); |
39 #elif defined(ARCH_CPU_X86_64) | 39 #elif defined(ARCH_CPU_X86_64) |
40 ASSERT_EQ(x86_THREAD_STATE64, context->tsh.flavor); | 40 ASSERT_EQ(x86_THREAD_STATE64, context.tsh.flavor); |
41 ASSERT_EQ(implicit_cast<int>(x86_THREAD_STATE64_COUNT), context->tsh.count); | 41 ASSERT_EQ(implicit_cast<int>(x86_THREAD_STATE64_COUNT), context.tsh.count); |
42 #endif | 42 #endif |
43 | 43 |
44 #if defined(ARCH_CPU_X86_FAMILY) | 44 #if defined(ARCH_CPU_X86_FAMILY) |
45 // The segment registers are only capable of storing 16-bit quantities, but | 45 // The segment registers are only capable of storing 16-bit quantities, but |
46 // the context structure provides native integer-width fields for them. Ensure | 46 // the context structure provides native integer-width fields for them. Ensure |
47 // that the high bits are all clear. | 47 // that the high bits are all clear. |
48 // | 48 // |
49 // Many bit positions in the flags register are reserved and will always read | 49 // Many bit positions in the flags register are reserved and will always read |
50 // a known value. Most reservd bits are always 0, but bit 1 is always 1. Check | 50 // a known value. Most reserved bits are always 0, but bit 1 is always 1. |
51 // that the reserved bits are all set to their expected values. Note that the | 51 // Check that the reserved bits are all set to their expected values. Note |
52 // set of reserved bits may be relaxed over time with newer CPUs, and that | 52 // that the set of reserved bits may be relaxed over time with newer CPUs, and |
53 // this test may need to be changed to reflect these developments. The current | 53 // that this test may need to be changed to reflect these developments. The |
54 // set of reserved bits are 1, 3, 5, 15, and 22 and higher. See Intel Software | 54 // current set of reserved bits are 1, 3, 5, 15, and 22 and higher. See Intel |
55 // Developer’s Manual, Volume 1: Basic Architecture (253665-051), 3.4.3 | 55 // Software Developer’s Manual, Volume 1: Basic Architecture (253665-051), |
56 // “EFLAGS Register”, and AMD Architecture Programmer’s Manual, Volume 2: | 56 // 3.4.3 “EFLAGS Register”, and AMD Architecture Programmer’s Manual, Volume |
57 // System Programming (24593-3.24), 3.1.6 “RFLAGS Register”. | 57 // 2: System Programming (24593-3.24), 3.1.6 “RFLAGS Register”. |
58 #if defined(ARCH_CPU_X86) | 58 #if defined(ARCH_CPU_X86) |
59 EXPECT_EQ(0u, context->uts.ts32.__cs & ~0xffff); | 59 EXPECT_EQ(0u, context.uts.ts32.__cs & ~0xffff); |
60 EXPECT_EQ(0u, context->uts.ts32.__ds & ~0xffff); | 60 EXPECT_EQ(0u, context.uts.ts32.__ds & ~0xffff); |
61 EXPECT_EQ(0u, context->uts.ts32.__es & ~0xffff); | 61 EXPECT_EQ(0u, context.uts.ts32.__es & ~0xffff); |
62 EXPECT_EQ(0u, context->uts.ts32.__fs & ~0xffff); | 62 EXPECT_EQ(0u, context.uts.ts32.__fs & ~0xffff); |
63 EXPECT_EQ(0u, context->uts.ts32.__gs & ~0xffff); | 63 EXPECT_EQ(0u, context.uts.ts32.__gs & ~0xffff); |
64 EXPECT_EQ(0u, context->uts.ts32.__ss & ~0xffff); | 64 EXPECT_EQ(0u, context.uts.ts32.__ss & ~0xffff); |
65 EXPECT_EQ(2u, context->uts.ts32.__eflags & 0xffc0802a); | 65 EXPECT_EQ(2u, context.uts.ts32.__eflags & 0xffc0802a); |
66 #elif defined(ARCH_CPU_X86_64) | 66 #elif defined(ARCH_CPU_X86_64) |
67 EXPECT_EQ(0u, context->uts.ts64.__cs & ~UINT64_C(0xffff)); | 67 EXPECT_EQ(0u, context.uts.ts64.__cs & ~UINT64_C(0xffff)); |
68 EXPECT_EQ(0u, context->uts.ts64.__fs & ~UINT64_C(0xffff)); | 68 EXPECT_EQ(0u, context.uts.ts64.__fs & ~UINT64_C(0xffff)); |
69 EXPECT_EQ(0u, context->uts.ts64.__gs & ~UINT64_C(0xffff)); | 69 EXPECT_EQ(0u, context.uts.ts64.__gs & ~UINT64_C(0xffff)); |
70 EXPECT_EQ(2u, context->uts.ts64.__rflags & UINT64_C(0xffffffffffc0802a)); | 70 EXPECT_EQ(2u, context.uts.ts64.__rflags & UINT64_C(0xffffffffffc0802a)); |
71 #endif | 71 #endif |
72 #endif | 72 #endif |
73 } | 73 } |
74 | 74 |
75 // A CPU-independent function to return the program counter. | 75 // A CPU-independent function to return the program counter. |
76 uintptr_t ProgramCounterFromContext(NativeCPUContext* context) { | 76 uintptr_t ProgramCounterFromContext(const NativeCPUContext& context) { |
77 #if defined(ARCH_CPU_X86) | 77 #if defined(ARCH_CPU_X86) |
78 return context->uts.ts32.__eip; | 78 return context.uts.ts32.__eip; |
79 #elif defined(ARCH_CPU_X86_64) | 79 #elif defined(ARCH_CPU_X86_64) |
80 return context->uts.ts64.__rip; | 80 return context.uts.ts64.__rip; |
81 #endif | 81 #endif |
82 } | 82 } |
83 | 83 |
84 // A CPU-independent function to return the stack pointer. | 84 // A CPU-independent function to return the stack pointer. |
85 uintptr_t StackPointerFromContext(NativeCPUContext* context) { | 85 uintptr_t StackPointerFromContext(const NativeCPUContext& context) { |
86 #if defined(ARCH_CPU_X86) | 86 #if defined(ARCH_CPU_X86) |
87 return context->uts.ts32.__esp; | 87 return context.uts.ts32.__esp; |
88 #elif defined(ARCH_CPU_X86_64) | 88 #elif defined(ARCH_CPU_X86_64) |
89 return context->uts.ts64.__rsp; | 89 return context.uts.ts64.__rsp; |
90 #endif | 90 #endif |
91 } | 91 } |
92 | 92 |
93 void TestCaptureContext() { | 93 void TestCaptureContext() { |
94 NativeCPUContext context_1; | 94 NativeCPUContext context_1; |
95 CaptureContext(&context_1); | 95 CaptureContext(&context_1); |
96 | 96 |
97 { | 97 { |
98 SCOPED_TRACE("context_1"); | 98 SCOPED_TRACE("context_1"); |
99 ASSERT_NO_FATAL_FAILURE(SanityCheckContext(&context_1)); | 99 ASSERT_NO_FATAL_FAILURE(SanityCheckContext(context_1)); |
100 } | 100 } |
101 | 101 |
102 // The program counter reference value is this function’s address. The | 102 // The program counter reference value is this function’s address. The |
103 // captured program counter should be slightly greater than or equal to the | 103 // captured program counter should be slightly greater than or equal to the |
104 // reference program counter. | 104 // reference program counter. |
105 uintptr_t pc = ProgramCounterFromContext(&context_1); | 105 uintptr_t pc = ProgramCounterFromContext(context_1); |
106 #if !__has_feature(address_sanitizer) | 106 #if !__has_feature(address_sanitizer) |
107 // AddressSanitizer can cause enough code bloat that the “nearby” check would | 107 // AddressSanitizer can cause enough code bloat that the “nearby” check would |
108 // likely fail. | 108 // likely fail. |
109 const uintptr_t kReferencePC = | 109 const uintptr_t kReferencePC = |
110 reinterpret_cast<uintptr_t>(TestCaptureContext); | 110 reinterpret_cast<uintptr_t>(TestCaptureContext); |
111 EXPECT_LT(pc - kReferencePC, 64u); | 111 EXPECT_LT(pc - kReferencePC, 64u); |
112 #endif | 112 #endif |
113 | 113 |
114 // Declare sp and context_2 here because all local variables need to be | 114 // Declare sp and context_2 here because all local variables need to be |
115 // declared before computing the stack pointer reference value, so that the | 115 // declared before computing the stack pointer reference value, so that the |
116 // reference value can be the lowest value possible. | 116 // reference value can be the lowest value possible. |
117 uintptr_t sp; | 117 uintptr_t sp; |
118 NativeCPUContext context_2; | 118 NativeCPUContext context_2; |
119 | 119 |
120 // The stack pointer reference value is the lowest address of a local variable | 120 // The stack pointer reference value is the lowest address of a local variable |
121 // in this function. The captured program counter will be slightly less than | 121 // in this function. The captured program counter will be slightly less than |
122 // or equal to the reference stack pointer. | 122 // or equal to the reference stack pointer. |
123 const uintptr_t kReferenceSP = | 123 const uintptr_t kReferenceSP = |
124 std::min(std::min(reinterpret_cast<uintptr_t>(&context_1), | 124 std::min(std::min(reinterpret_cast<uintptr_t>(&context_1), |
125 reinterpret_cast<uintptr_t>(&context_2)), | 125 reinterpret_cast<uintptr_t>(&context_2)), |
126 std::min(reinterpret_cast<uintptr_t>(&pc), | 126 std::min(reinterpret_cast<uintptr_t>(&pc), |
127 reinterpret_cast<uintptr_t>(&sp))); | 127 reinterpret_cast<uintptr_t>(&sp))); |
128 sp = StackPointerFromContext(&context_1); | 128 sp = StackPointerFromContext(context_1); |
129 EXPECT_LT(kReferenceSP - sp, 512u); | 129 EXPECT_LT(kReferenceSP - sp, 512u); |
130 | 130 |
131 // Capture the context again, expecting that the stack pointer stays the same | 131 // Capture the context again, expecting that the stack pointer stays the same |
132 // and the program counter increases. Strictly speaking, there’s no guarantee | 132 // and the program counter increases. Strictly speaking, there’s no guarantee |
133 // that these conditions will hold, although they do for known compilers even | 133 // that these conditions will hold, although they do for known compilers even |
134 // under typical optimization. | 134 // under typical optimization. |
135 CaptureContext(&context_2); | 135 CaptureContext(&context_2); |
136 | 136 |
137 { | 137 { |
138 SCOPED_TRACE("context_2"); | 138 SCOPED_TRACE("context_2"); |
139 ASSERT_NO_FATAL_FAILURE(SanityCheckContext(&context_2)); | 139 ASSERT_NO_FATAL_FAILURE(SanityCheckContext(context_2)); |
140 } | 140 } |
141 | 141 |
142 EXPECT_EQ(sp, StackPointerFromContext(&context_2)); | 142 EXPECT_EQ(sp, StackPointerFromContext(context_2)); |
143 EXPECT_GT(ProgramCounterFromContext(&context_2), pc); | 143 EXPECT_GT(ProgramCounterFromContext(context_2), pc); |
144 } | 144 } |
145 | 145 |
146 TEST(CaptureContextMac, CaptureContext) { | 146 TEST(CaptureContextMac, CaptureContext) { |
147 ASSERT_NO_FATAL_FAILURE(TestCaptureContext()); | 147 ASSERT_NO_FATAL_FAILURE(TestCaptureContext()); |
148 } | 148 } |
149 | 149 |
150 } // namespace | 150 } // namespace |
151 } // namespace test | 151 } // namespace test |
152 } // namespace crashpad | 152 } // namespace crashpad |
OLD | NEW |