OLD | NEW |
1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2009 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "chrome_frame/vtable_patch_manager.h" | 5 #include "chrome_frame/vtable_patch_manager.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <atlcomcli.h> |
8 | 9 |
| 10 #include "base/atomicops.h" |
| 11 #include "base/lock.h" |
9 #include "base/logging.h" | 12 #include "base/logging.h" |
10 #include "base/scoped_ptr.h" | 13 #include "base/scoped_ptr.h" |
11 | 14 |
12 #include "chrome_frame/function_stub.h" | 15 #include "chrome_frame/function_stub.h" |
13 | 16 |
14 namespace vtable_patch { | 17 namespace vtable_patch { |
15 | 18 |
| 19 // The number of times we retry a patch/unpatch operation in case of |
| 20 // VM races with other 3rd party software trying to patch the same thing. |
| 21 const int kMaxRetries = 3; |
| 22 |
| 23 // We hold a lock over all patching operations to make sure that we don't |
| 24 // e.g. race on VM operations to the same patches, or to physical pages |
| 25 // shared across different VTABLEs. |
| 26 Lock patch_lock_; |
| 27 |
| 28 namespace internal { |
| 29 // Because other parties in our process might be attempting to patch the same |
| 30 // virtual tables at the same time, we have a race to modify the VM protections |
| 31 // on the pages. We also need to do a compare/swap type operation when we |
| 32 // modify the function, so as to be sure that we grab the most recent value. |
| 33 // Hence the SEH blocks and the nasty-looking compare/swap operation. |
| 34 bool ReplaceFunctionPointer(void** entry, void* new_proc, void* curr_proc) { |
| 35 __try { |
| 36 base::subtle::Atomic32 prev_value; |
| 37 |
| 38 prev_value = base::subtle::NoBarrier_CompareAndSwap( |
| 39 reinterpret_cast<base::subtle::Atomic32 volatile*>(entry), |
| 40 reinterpret_cast<base::subtle::Atomic32>(curr_proc), |
| 41 reinterpret_cast<base::subtle::Atomic32>(new_proc)); |
| 42 |
| 43 return curr_proc == reinterpret_cast<void*>(prev_value); |
| 44 } __except(EXCEPTION_EXECUTE_HANDLER) { |
| 45 // Oops, we took exception on access. |
| 46 } |
| 47 |
| 48 return false; |
| 49 } |
| 50 |
 | 51 } // namespace internal |
| 52 |
16 // Convenient definition of a VTABLE | 53 // Convenient definition of a VTABLE |
17 typedef PROC* Vtable; | 54 typedef PROC* Vtable; |
18 | 55 |
19 // Returns a pointer to the VTable of a COM interface. | 56 // Returns a pointer to the VTable of a COM interface. |
20 // @param unknown [in] The pointer of the COM interface. | 57 // @param unknown [in] The pointer of the COM interface. |
21 inline Vtable GetIFVTable(void* unknown) { | 58 inline Vtable GetIFVTable(void* unknown) { |
22 return reinterpret_cast<Vtable>(*reinterpret_cast<void**>(unknown)); | 59 return reinterpret_cast<Vtable>(*reinterpret_cast<void**>(unknown)); |
23 } | 60 } |
24 | 61 |
25 HRESULT PatchInterfaceMethods(void* unknown, MethodPatchInfo* patches) { | 62 HRESULT PatchInterfaceMethods(void* unknown, MethodPatchInfo* patches) { |
26 // Do some sanity checking of the input arguments. | 63 // Do some sanity checking of the input arguments. |
27 if (NULL == unknown || NULL == patches) { | 64 if (NULL == unknown || NULL == patches) { |
28 NOTREACHED(); | 65 NOTREACHED(); |
29 return E_INVALIDARG; | 66 return E_INVALIDARG; |
30 } | 67 } |
31 | 68 |
32 Vtable vtable = GetIFVTable(unknown); | 69 Vtable vtable = GetIFVTable(unknown); |
33 DCHECK(vtable); | 70 DCHECK(vtable); |
34 | 71 |
| 72 // All VM operations, patching and manipulation of MethodPatchInfo |
| 73 // is done under a global lock, to ensure multiple threads don't |
| 74 // race, whether on an individual patch, or on VM operations to |
| 75 // the same physical pages. |
| 76 AutoLock lock(patch_lock_); |
| 77 |
35 for (MethodPatchInfo* it = patches; it->index_ != -1; ++it) { | 78 for (MethodPatchInfo* it = patches; it->index_ != -1; ++it) { |
36 if (it->stub_ != NULL) { | 79 if (it->stub_ != NULL) { |
37 // If this DCHECK fires it means that we are using the same VTable | 80 // If this DCHECK fires it means that we are using the same VTable |
38 // information to patch two different interfaces. | 81 // information to patch two different interfaces, or we've lost a |
39 DCHECK(false); | 82 // race with another thread who's patching the same interface. |
40 DLOG(ERROR) << "Attempting to patch two different VTables with the " | 83 DLOG(WARNING) << "Attempting to patch two different VTables with the " |
41 << "same VTable information"; | 84 "same VTable information, or patching the same interface on " |
| 85 "multiple threads"; |
42 continue; | 86 continue; |
43 } | 87 } |
44 | 88 |
45 PROC original_fn = vtable[it->index_]; | 89 PROC original_fn = vtable[it->index_]; |
46 FunctionStub* stub = FunctionStub::FromCode(original_fn); | 90 FunctionStub* stub = NULL; |
| 91 |
| 92 #ifndef NDEBUG |
 | 93 stub = FunctionStub::FromCode(original_fn); |
47 if (stub != NULL) { | 94 if (stub != NULL) { |
48 DLOG(ERROR) << "attempt to patch a function that's already patched"; | 95 DLOG(ERROR) << "attempt to patch a function that's already patched"; |
49 DCHECK(stub->absolute_target() == | 96 DCHECK(stub->destination_function() == |
50 reinterpret_cast<uintptr_t>(it->method_)) << | 97 reinterpret_cast<uintptr_t>(it->method_)) << |
51 "patching the same method multiple times with different hooks?"; | 98 "patching the same method multiple times with different hooks?"; |
52 continue; | 99 continue; |
53 } | 100 } |
| 101 #endif |
54 | 102 |
55 stub = FunctionStub::Create(reinterpret_cast<uintptr_t>(original_fn), | 103 stub = FunctionStub::Create(reinterpret_cast<uintptr_t>(original_fn), |
56 it->method_); | 104 it->method_); |
57 if (!stub) { | 105 if (!stub) { |
58 NOTREACHED(); | 106 NOTREACHED(); |
59 return E_OUTOFMEMORY; | 107 return E_OUTOFMEMORY; |
| 108 } |
| 109 |
| 110 // Do the VM operations and the patching in a loop, to try and ensure |
| 111 // we succeed even if there's a VM operation or a patch race against |
| 112 // other 3rd parties patching. |
| 113 bool succeeded = false; |
| 114 for (int i = 0; !succeeded && i < kMaxRetries; ++i) { |
| 115 DWORD protect = 0; |
| 116 if (!::VirtualProtect(&vtable[it->index_], sizeof(PROC), |
| 117 PAGE_EXECUTE_READWRITE, &protect)) { |
| 118 HRESULT hr = AtlHresultFromLastError(); |
| 119 DLOG(ERROR) << "VirtualProtect failed 0x" << std::hex << hr; |
| 120 |
| 121 // Go around again in the feeble hope that this is |
| 122 // a temporary problem. |
| 123 continue; |
| 124 } |
| 125 original_fn = vtable[it->index_]; |
| 126 stub->set_argument(reinterpret_cast<uintptr_t>(original_fn)); |
| 127 succeeded = internal::ReplaceFunctionPointer( |
| 128 reinterpret_cast<void**>(&vtable[it->index_]), stub->code(), |
| 129 original_fn); |
| 130 |
| 131 if (!::VirtualProtect(&vtable[it->index_], sizeof(PROC), protect, |
| 132 &protect)) { |
| 133 DLOG(ERROR) << "VirtualProtect failed to restore protection"; |
| 134 } |
| 135 } |
| 136 |
| 137 if (!succeeded) { |
| 138 FunctionStub::Destroy(stub); |
| 139 stub = NULL; |
| 140 |
| 141 DLOG(ERROR) << "Failed to patch VTable."; |
| 142 return E_FAIL; |
60 } else { | 143 } else { |
61 DWORD protect = 0; | 144 // Success, save the stub we created. |
62 if (::VirtualProtect(&vtable[it->index_], sizeof(PROC), | 145 it->stub_ = stub; |
63 PAGE_EXECUTE_READWRITE, &protect)) { | |
64 it->stub_ = stub; // save the stub | |
65 vtable[it->index_] = stub->code(); | |
66 ::VirtualProtect(&vtable[it->index_], sizeof(PROC), protect, | |
67 &protect); | |
68 } else { | |
69 NOTREACHED(); | |
70 } | |
71 } | 146 } |
72 } | 147 } |
73 | 148 |
74 return S_OK; | 149 return S_OK; |
75 } | 150 } |
76 | 151 |
77 HRESULT UnpatchInterfaceMethods(MethodPatchInfo* patches) { | 152 HRESULT UnpatchInterfaceMethods(MethodPatchInfo* patches) { |
| 153 AutoLock lock(patch_lock_); |
| 154 |
78 for (MethodPatchInfo* it = patches; it->index_ != -1; ++it) { | 155 for (MethodPatchInfo* it = patches; it->index_ != -1; ++it) { |
79 if (it->stub_) { | 156 if (it->stub_) { |
80 DCHECK(it->stub_->absolute_target() == | 157 DCHECK(it->stub_->destination_function() == |
81 reinterpret_cast<uintptr_t>(it->method_)); | 158 reinterpret_cast<uintptr_t>(it->method_)); |
82 // Modify the stub to just jump directly to the original function. | 159 // Modify the stub to just jump directly to the original function. |
83 it->stub_->BypassStub(reinterpret_cast<void*>(it->stub_->argument())); | 160 it->stub_->BypassStub(reinterpret_cast<void*>(it->stub_->argument())); |
84 it->stub_ = NULL; | 161 it->stub_ = NULL; |
85 // Leave the stub in memory so that we won't break any possible chains. | 162 // Leave the stub in memory so that we won't break any possible chains. |
| 163 |
| 164 // TODO(siggi): why not restore the original VTBL pointer here, provided |
| 165 // we haven't been chained? |
86 } else { | 166 } else { |
87 DLOG(WARNING) << "attempt to unpatch a function that wasn't patched"; | 167 DLOG(WARNING) << "attempt to unpatch a function that wasn't patched"; |
88 } | 168 } |
89 } | 169 } |
90 | 170 |
91 return S_OK; | 171 return S_OK; |
92 } | 172 } |
93 | 173 |
94 // Disabled for now as we're not using it atm. | 174 // Disabled for now as we're not using it atm. |
95 #if 0 | 175 #if 0 |
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
153 } | 233 } |
154 patch_list_.clear(); | 234 patch_list_.clear(); |
155 patch_list_lock_.Release(); | 235 patch_list_lock_.Release(); |
156 | 236 |
157 return true; | 237 return true; |
158 } | 238 } |
159 | 239 |
160 #endif // disabled DynamicPatchManager | 240 #endif // disabled DynamicPatchManager |
161 | 241 |
162 } // namespace vtable_patch | 242 } // namespace vtable_patch |
OLD | NEW |