OLD | NEW |
---|---|
1 // Copyright 2012 Google Inc. All Rights Reserved. | 1 // Copyright 2012 Google Inc. All Rights Reserved. |
2 // | 2 // |
3 // Licensed under the Apache License, Version 2.0 (the "License"); | 3 // Licensed under the Apache License, Version 2.0 (the "License"); |
4 // you may not use this file except in compliance with the License. | 4 // you may not use this file except in compliance with the License. |
5 // You may obtain a copy of the License at | 5 // You may obtain a copy of the License at |
6 // | 6 // |
7 // http://www.apache.org/licenses/LICENSE-2.0 | 7 // http://www.apache.org/licenses/LICENSE-2.0 |
8 // | 8 // |
9 // Unless required by applicable law or agreed to in writing, software | 9 // Unless required by applicable law or agreed to in writing, software |
10 // distributed under the License is distributed on an "AS IS" BASIS, | 10 // distributed under the License is distributed on an "AS IS" BASIS, |
(...skipping 15 matching lines...) Expand all Loading... | |
26 | 26 |
27 namespace { | 27 namespace { |
28 | 28 |
29 #ifdef _WIN64 | 29 #ifdef _WIN64 |
30 // A lock under which the shadow instance is modified. | 30 // A lock under which the shadow instance is modified. |
31 base::Lock shadow_instance_lock; | 31 base::Lock shadow_instance_lock; |
32 | 32 |
33 // The pointer for the exception handler to know what shadow object is | 33 // The pointer for the exception handler to know what shadow object is |
34 // currently used. Under shadow_instance_lock. | 34 // currently used. Under shadow_instance_lock. |
35 // TODO(loskutov): eliminate this by enforcing Shadow to be a singleton. | 35 // TODO(loskutov): eliminate this by enforcing Shadow to be a singleton. |
36 const Shadow* shadow_instance; | 36 const Shadow* shadow_instance = nullptr; |
37 | 37 |
38 // The exception handler, intended to map the pages for shadow and page_bits | 38 // The exception handler, intended to map the pages for shadow and page_bits |
39 // on demand. When a page fault happens, the operating system calls | 39 // on demand. When a page fault happens, the operating system calls |
40 // this handler, and if the page is inside shadow or page_bits, it gets | 40 // this handler, and if the page is inside shadow or page_bits, it gets |
41 // committed seamlessly for the caller, and then execution continues. | 41 // committed seamlessly for the caller, and then execution continues. |
42 // Otherwise, the OS keeps searching for an appropriate handler. | 42 // Otherwise, the OS keeps searching for an appropriate handler. |
43 LONG NTAPI ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) { | 43 LONG NTAPI ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) { |
44 DCHECK_NE(static_cast<const Shadow*>(nullptr), shadow_instance); | |
44 // Only handle access violations. | 45 // Only handle access violations. |
45 if (exception_pointers->ExceptionRecord->ExceptionCode != | 46 if (exception_pointers->ExceptionRecord->ExceptionCode != |
46 EXCEPTION_ACCESS_VIOLATION) { | 47 EXCEPTION_ACCESS_VIOLATION) { |
47 return EXCEPTION_CONTINUE_SEARCH; | 48 return EXCEPTION_CONTINUE_SEARCH; |
48 } | 49 } |
49 | 50 |
50 // Only handle access violations that land within the shadow memory | 51 // Only handle access violations that land within the shadow memory |
51 // or the page bits. | 52 // or the page bits. |
52 | 53 |
53 void* addr = reinterpret_cast<void*>( | 54 void* addr = reinterpret_cast<void*>( |
(...skipping 15 matching lines...) Expand all Loading... | |
69 // This is an access violation while trying to read from the shadow. Commit | 70 // This is an access violation while trying to read from the shadow. Commit |
70 // the relevant page and let execution continue. | 71 // the relevant page and let execution continue. |
71 | 72 |
72 // Commit the page. | 73 // Commit the page. |
73 void* result = ::VirtualAlloc(addr, 1, MEM_COMMIT, PAGE_READWRITE); | 74 void* result = ::VirtualAlloc(addr, 1, MEM_COMMIT, PAGE_READWRITE); |
74 | 75 |
75 return result != nullptr | 76 return result != nullptr |
76 ? EXCEPTION_CONTINUE_EXECUTION | 77 ? EXCEPTION_CONTINUE_EXECUTION |
77 : EXCEPTION_CONTINUE_SEARCH; | 78 : EXCEPTION_CONTINUE_SEARCH; |
78 } | 79 } |
80 | |
81 // Check if |addr| is in a memory region that has been committed. | |
82 bool AddressIsInCommittedMemory(const void* addr) { | |
83 MEMORY_BASIC_INFORMATION memory_info = {}; | |
84 SIZE_T mem_status = ::VirtualQuery(addr, &memory_info, sizeof(memory_info)); | |
85 DCHECK_GT(mem_status, 0u); | |
86 if (memory_info.State != MEM_COMMIT) | |
87 return false; | |
88 return true; | |
89 } | |
79 #endif // defined _WIN64 | 90 #endif // defined _WIN64 |
80 | 91 |
81 static const size_t kPageSize = GetPageSize(); | 92 static const size_t kPageSize = GetPageSize(); |
82 | 93 |
83 // Converts an address to a page index and bit mask. | 94 // Converts an address to a page index and bit mask. |
84 inline void AddressToPageMask(const void* address, | 95 inline void AddressToPageMask(const void* address, |
85 size_t* index, | 96 size_t* index, |
86 uint8_t* mask) { | 97 uint8_t* mask) { |
87 DCHECK_NE(static_cast<size_t*>(nullptr), index); | 98 DCHECK_NE(static_cast<size_t*>(nullptr), index); |
88 DCHECK_NE(static_cast<uint8_t*>(nullptr), mask); | 99 DCHECK_NE(static_cast<uint8_t*>(nullptr), mask); |
(...skipping 119 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
208 } | 219 } |
209 | 220 |
210 auto cursor = shadow_ + i; | 221 auto cursor = shadow_ + i; |
211 MEMORY_BASIC_INFORMATION info = {}; | 222 MEMORY_BASIC_INFORMATION info = {}; |
212 while (i < length_) { | 223 while (i < length_) { |
213 auto next_cursor = cursor; | 224 auto next_cursor = cursor; |
214 while (cursor < shadow_ + length_) { | 225 while (cursor < shadow_ + length_) { |
215 auto ret = ::VirtualQuery(cursor, &info, sizeof(info)); | 226 auto ret = ::VirtualQuery(cursor, &info, sizeof(info)); |
216 DCHECK_GT(ret, 0u); | 227 DCHECK_GT(ret, 0u); |
217 next_cursor = static_cast<uint8_t*>(info.BaseAddress) + info.RegionSize; | 228 next_cursor = static_cast<uint8_t*>(info.BaseAddress) + info.RegionSize; |
218 if (info.Type == MEM_COMMIT) | 229 if (info.State == MEM_COMMIT) |
219 break; | 230 break; |
220 cursor = next_cursor; | 231 cursor = next_cursor; |
221 } | 232 } |
222 i = cursor - shadow_; | 233 i = cursor - shadow_; |
223 next_cursor = std::min(next_cursor, shadow_ + length_); | 234 next_cursor = std::min(next_cursor, shadow_ + length_); |
224 auto next_i = next_cursor - shadow_; | 235 auto next_i = next_cursor - shadow_; |
225 for (; i < next_i; ++i) { | 236 for (; i < next_i; ++i) { |
226 if ((i >= shadow_begin && i < shadow_end) || | 237 if ((i >= shadow_begin && i < shadow_end) || |
227 (i >= page_bits_begin && i < page_bits_end) || | 238 (i >= page_bits_begin && i < page_bits_end) || |
228 (i >= this_begin && i < this_end)) { | 239 (i >= this_begin && i < this_end)) { |
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
277 Init(true, mem, length); | 288 Init(true, mem, length); |
278 } | 289 } |
279 | 290 |
280 void Shadow::Init(bool own_memory, void* shadow, size_t length) { | 291 void Shadow::Init(bool own_memory, void* shadow, size_t length) { |
281 #ifdef _WIN64 | 292 #ifdef _WIN64 |
282 { | 293 { |
283 base::AutoLock lock(shadow_instance_lock); | 294 base::AutoLock lock(shadow_instance_lock); |
284 shadow_instance = this; | 295 shadow_instance = this; |
285 } | 296 } |
286 exception_handler_ = | 297 exception_handler_ = |
287 AddVectoredExceptionHandler(TRUE, ShadowExceptionHandler); | 298 ::AddVectoredExceptionHandler(TRUE, ShadowExceptionHandler); |
288 #endif | 299 #endif |
289 | 300 |
290 // Handle the case of a failed allocation. | 301 // Handle the case of a failed allocation. |
291 if (shadow == nullptr) { | 302 if (shadow == nullptr) { |
292 own_memory_ = false; | 303 own_memory_ = false; |
293 shadow_ = nullptr; | 304 shadow_ = nullptr; |
294 length = 0; | 305 length = 0; |
295 return; | 306 return; |
296 } | 307 } |
297 | 308 |
(...skipping 579 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
877 } | 888 } |
878 | 889 |
879 bool Shadow::ScanLeftForBracketingBlockStart( | 890 bool Shadow::ScanLeftForBracketingBlockStart( |
880 size_t initial_nesting_depth, size_t cursor, size_t* location) const { | 891 size_t initial_nesting_depth, size_t cursor, size_t* location) const { |
881 DCHECK_NE(static_cast<size_t*>(NULL), location); | 892 DCHECK_NE(static_cast<size_t*>(NULL), location); |
882 | 893 |
883 static const size_t kLowerBound = kAddressLowerBound / kShadowRatio; | 894 static const size_t kLowerBound = kAddressLowerBound / kShadowRatio; |
884 | 895 |
885 size_t left = cursor; | 896 size_t left = cursor; |
886 int nesting_depth = static_cast<int>(initial_nesting_depth); | 897 int nesting_depth = static_cast<int>(initial_nesting_depth); |
898 #ifdef _WIN64 | |
899 if (!AddressIsInCommittedMemory((&shadow_[left]))) | |
chrisha
2016/10/13 20:22:49
This is going to cost a VirtualQuery per value of
Sébastien Marchand
2016/10/13 21:05:45
No, in the loop I'm evaluating the following condi
Sébastien Marchand
2016/10/14 19:04:14
Ha, didn't realized that VirtualQuery return the i
| |
900 return false; | |
901 uint8_t* page_begin = ::common::AlignDown(&shadow_[left], kPageSize); | |
902 #endif | |
887 if (ShadowMarkerHelper::IsBlockEnd(shadow_[left])) | 903 if (ShadowMarkerHelper::IsBlockEnd(shadow_[left])) |
888 --nesting_depth; | 904 --nesting_depth; |
889 while (true) { | 905 while (true) { |
906 #ifdef _WIN64 | |
907 if (&shadow_[left] < page_begin && | |
908 !AddressIsInCommittedMemory((&shadow_[left]))) { | |
909 return false; | |
910 } else { | |
911 page_begin = ::common::AlignDown(&shadow_[left], kPageSize); | |
912 } | |
913 #endif | |
890 if (ShadowMarkerHelper::IsBlockStart(shadow_[left])) { | 914 if (ShadowMarkerHelper::IsBlockStart(shadow_[left])) { |
891 if (nesting_depth == 0) { | 915 if (nesting_depth == 0) { |
892 *location = left; | 916 *location = left; |
893 return true; | 917 return true; |
894 } | 918 } |
895 // If this is not a nested block then there's no hope of finding a | 919 // If this is not a nested block then there's no hope of finding a |
896 // block containing the original cursor. | 920 // block containing the original cursor. |
897 if (!ShadowMarkerHelper::IsNestedBlockStart(shadow_[left])) | 921 if (!ShadowMarkerHelper::IsNestedBlockStart(shadow_[left])) |
898 return false; | 922 return false; |
899 --nesting_depth; | 923 --nesting_depth; |
(...skipping 142 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1042 size_t initial_nesting_depth, | 1066 size_t initial_nesting_depth, |
1043 const void* addr, | 1067 const void* addr, |
1044 CompactBlockInfo* info) const { | 1068 CompactBlockInfo* info) const { |
1045 DCHECK_NE(static_cast<void*>(NULL), addr); | 1069 DCHECK_NE(static_cast<void*>(NULL), addr); |
1046 DCHECK_NE(static_cast<CompactBlockInfo*>(NULL), info); | 1070 DCHECK_NE(static_cast<CompactBlockInfo*>(NULL), info); |
1047 | 1071 |
1048 // Convert the address to an offset in the shadow memory. | 1072 // Convert the address to an offset in the shadow memory. |
1049 size_t left = reinterpret_cast<uintptr_t>(addr) / kShadowRatio; | 1073 size_t left = reinterpret_cast<uintptr_t>(addr) / kShadowRatio; |
1050 size_t right = left; | 1074 size_t right = left; |
1051 | 1075 |
1076 #ifdef _WIN64 | |
1077 if (!AddressIsInCommittedMemory((&shadow_[left]))) | |
1078 return false; | |
1079 #endif | |
1080 | |
1052 if (!ScanLeftForBracketingBlockStart(initial_nesting_depth, left, &left)) | 1081 if (!ScanLeftForBracketingBlockStart(initial_nesting_depth, left, &left)) |
1053 return false; | 1082 return false; |
1054 if (!ScanRightForBracketingBlockEnd(initial_nesting_depth, right, &right)) | 1083 if (!ScanRightForBracketingBlockEnd(initial_nesting_depth, right, &right)) |
1055 return false; | 1084 return false; |
1056 ++right; | 1085 ++right; |
1057 | 1086 |
1058 uint8_t* block = reinterpret_cast<uint8_t*>(left * kShadowRatio); | 1087 uint8_t* block = reinterpret_cast<uint8_t*>(left * kShadowRatio); |
1059 info->header = reinterpret_cast<BlockHeader*>(block); | 1088 info->header = reinterpret_cast<BlockHeader*>(block); |
1060 info->block_size = static_cast<uint32_t>((right - left) * kShadowRatio); | 1089 info->block_size = static_cast<uint32_t>((right - left) * kShadowRatio); |
1061 | 1090 |
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1134 auto end_of_region = start_of_region + memory_info.RegionSize; | 1163 auto end_of_region = start_of_region + memory_info.RegionSize; |
1135 | 1164 |
1136 // If the region isn't committed and readable memory then skip it. | 1165 // If the region isn't committed and readable memory then skip it. |
1137 if (memory_info.State != MEM_COMMIT) { | 1166 if (memory_info.State != MEM_COMMIT) { |
1138 // If the next region is beyond the part of the shadow being scanned | 1167 // If the next region is beyond the part of the shadow being scanned |
1139 // then bail early (be careful to handle overflow here). | 1168 // then bail early (be careful to handle overflow here). |
1140 if (end_of_region > shadow_upper_bound || end_of_region == nullptr) | 1169 if (end_of_region > shadow_upper_bound || end_of_region == nullptr) |
1141 return false; | 1170 return false; |
1142 | 1171 |
1143 // Step to the beginning of the next region and try again. | 1172 // Step to the beginning of the next region and try again. |
1144 shadow_cursor_ = start_of_region; | 1173 shadow_cursor_ = end_of_region; |
chrisha
2016/10/13 20:22:49
Oops! My bad. New unittest for this?
Sébastien Marchand
2016/10/14 19:04:14
Addressing this in https://codereview.chromium.org
| |
1145 continue; | 1174 continue; |
1146 } | 1175 } |
1147 | 1176 |
1148 // Getting here then |start_of_region| and |end_of_region| are a part of | 1177 // Getting here then |start_of_region| and |end_of_region| are a part of |
1149 // the shadow that should be scanned. Calculate where to stop for this | 1178 // the shadow that should be scanned. Calculate where to stop for this |
1150 // region, taking care to handle overflow. | 1179 // region, taking care to handle overflow. |
1151 if (!end_of_region) { | 1180 if (!end_of_region) { |
1152 end_of_region = shadow_upper_bound; | 1181 end_of_region = shadow_upper_bound; |
1153 } else { | 1182 } else { |
1154 end_of_region = std::min(shadow_upper_bound, end_of_region); | 1183 end_of_region = std::min(shadow_upper_bound, end_of_region); |
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1206 // Advance the shadow cursor. | 1235 // Advance the shadow cursor. |
1207 ++shadow_cursor_; | 1236 ++shadow_cursor_; |
1208 } // while (shadow_cursor_ < end_of_region) | 1237 } // while (shadow_cursor_ < end_of_region) |
1209 } | 1238 } |
1210 | 1239 |
1211 return false; | 1240 return false; |
1212 } | 1241 } |
1213 | 1242 |
1214 } // namespace asan | 1243 } // namespace asan |
1215 } // namespace agent | 1244 } // namespace agent |
OLD | NEW |