Chromium Code Reviews

Diff: util/mach/task_memory_test.cc

Issue 558313002: Add a MappedMemory interface to TaskMemory and use it in MachOImageSymbolTableReader (Closed) Base URL: https://chromium.googlesource.com/crashpad/crashpad@master
Patch Set: Created 6 years, 3 months ago
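
The tests in this patch exercise a new ReadMapped() method on TaskMemory that returns an owning TaskMemory::MappedMemory object with data() and ReadCString() accessors. The authoritative declaration lives in util/mach/task_memory.h, which is not shown in this file; what follows is only a rough sketch of the interface as it is used by the tests below, so any detail not directly exercised here (parameter types, the exact smart-pointer return type) is an assumption.

// Sketch only: the shape of the interface as inferred from its use in this
// test, not the real declaration from util/mach/task_memory.h.
class TaskMemory {
 public:
  // A copy of a range of another task’s memory, mapped into the reading
  // process. The mapping is assumed to be released when the object is
  // destroyed (see the MappedMemoryDeallocates test below).
  class MappedMemory {
   public:
    ~MappedMemory();

    // Returns a pointer to the mapped copy of the requested data.
    const void* data() const;

    // Reads a NUL-terminated string beginning at |offset| within the mapping.
    // Fails if |offset| is out of range or if no terminator is found before
    // the end of the mapping.
    bool ReadCString(size_t offset, std::string* string) const;
  };

  explicit TaskMemory(mach_port_t task);

  // Copies |size| bytes at |address| in the target task into |buffer|.
  bool Read(mach_vm_address_t address, size_t size, void* buffer);

  // Maps |size| bytes at |address| in the target task into the current task
  // and returns an object that owns the mapping, or an empty pointer on
  // failure.
  scoped_ptr<MappedMemory> ReadMapped(mach_vm_address_t address, size_t size);
};

In the diff that follows, lines prefixed with + are added by this patch.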
 // Copyright 2014 The Crashpad Authors. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
 //
 //     http://www.apache.org/licenses/LICENSE-2.0
 //
 // Unless required by applicable law or agreed to in writing, software
 // distributed under the License is distributed on an "AS IS" BASIS,
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
 #include "util/mach/task_memory.h"
 
-#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <string.h>
 
 #include <algorithm>
 #include <string>
 
+#include "base/mac/scoped_mach_port.h"
 #include "base/mac/scoped_mach_vm.h"
+#include "base/memory/scoped_ptr.h"
 #include "gtest/gtest.h"
 #include "util/test/mac/mach_errors.h"
 
 namespace {
 
 using namespace crashpad;
 using namespace crashpad::test;
 
 TEST(TaskMemory, ReadSelf) {
   vm_address_t address = 0;
   const vm_size_t kSize = 4 * PAGE_SIZE;
   kern_return_t kr =
       vm_allocate(mach_task_self(), &address, kSize, VM_FLAGS_ANYWHERE);
   ASSERT_EQ(KERN_SUCCESS, kr) << MachErrorMessage(kr, "vm_allocate");
   base::mac::ScopedMachVM vm_owner(address, mach_vm_round_page(kSize));
 
   char* region = reinterpret_cast<char*>(address);
   for (size_t index = 0; index < kSize; ++index) {
     region[index] = (index % 256) ^ ((index >> 8) % 256);
   }
 
   TaskMemory memory(mach_task_self());
+
+  // This tests using both the Read() and ReadMapped() interfaces.
   std::string result(kSize, '\0');
+  scoped_ptr<TaskMemory::MappedMemory> mapped;
 
   // Ensure that the entire region can be read.
   ASSERT_TRUE(memory.Read(address, kSize, &result[0]));
   EXPECT_EQ(0, memcmp(region, &result[0], kSize));
+  ASSERT_TRUE((mapped = memory.ReadMapped(address, kSize)));
+  EXPECT_EQ(0, memcmp(region, mapped->data(), kSize));
 
   // Ensure that a read of length 0 succeeds and doesn’t touch the result.
   result.assign(kSize, '\0');
   std::string zeroes = result;
   ASSERT_TRUE(memory.Read(address, 0, &result[0]));
   EXPECT_EQ(zeroes, result);
+  ASSERT_TRUE((mapped = memory.ReadMapped(address, 0)));
 
   // Ensure that a read starting at an unaligned address works.
   ASSERT_TRUE(memory.Read(address + 1, kSize - 1, &result[0]));
   EXPECT_EQ(0, memcmp(region + 1, &result[0], kSize - 1));
+  ASSERT_TRUE((mapped = memory.ReadMapped(address + 1, kSize - 1)));
+  EXPECT_EQ(0, memcmp(region + 1, mapped->data(), kSize - 1));
 
   // Ensure that a read ending at an unaligned address works.
   ASSERT_TRUE(memory.Read(address, kSize - 1, &result[0]));
   EXPECT_EQ(0, memcmp(region, &result[0], kSize - 1));
+  ASSERT_TRUE((mapped = memory.ReadMapped(address, kSize - 1)));
+  EXPECT_EQ(0, memcmp(region, mapped->data(), kSize - 1));
 
   // Ensure that a read starting and ending at unaligned addresses works.
   ASSERT_TRUE(memory.Read(address + 1, kSize - 2, &result[0]));
   EXPECT_EQ(0, memcmp(region + 1, &result[0], kSize - 2));
+  ASSERT_TRUE((mapped = memory.ReadMapped(address + 1, kSize - 2)));
+  EXPECT_EQ(0, memcmp(region + 1, mapped->data(), kSize - 2));
 
   // Ensure that a read of exactly one page works.
   ASSERT_TRUE(memory.Read(address + PAGE_SIZE, PAGE_SIZE, &result[0]));
   EXPECT_EQ(0, memcmp(region + PAGE_SIZE, &result[0], PAGE_SIZE));
+  ASSERT_TRUE((mapped = memory.ReadMapped(address + PAGE_SIZE, PAGE_SIZE)));
+  EXPECT_EQ(0, memcmp(region + PAGE_SIZE, mapped->data(), PAGE_SIZE));
 
   // Ensure that a read of a single byte works.
   ASSERT_TRUE(memory.Read(address + 2, 1, &result[0]));
   EXPECT_EQ(region[2], result[0]);
+  ASSERT_TRUE((mapped = memory.ReadMapped(address + 2, 1)));
+  EXPECT_EQ(region[2], reinterpret_cast<const char*>(mapped->data())[0]);
 
   // Ensure that a read of length zero works and doesn’t touch the data.
   result[0] = 'M';
   ASSERT_TRUE(memory.Read(address + 3, 0, &result[0]));
   EXPECT_EQ('M', result[0]);
+  ASSERT_TRUE((mapped = memory.ReadMapped(address + 3, 0)));
 }
 
 TEST(TaskMemory, ReadSelfUnmapped) {
   vm_address_t address = 0;
   const vm_size_t kSize = 2 * PAGE_SIZE;
   kern_return_t kr =
       vm_allocate(mach_task_self(), &address, kSize, VM_FLAGS_ANYWHERE);
   ASSERT_EQ(KERN_SUCCESS, kr) << MachErrorMessage(kr, "vm_allocate");
   base::mac::ScopedMachVM vm_owner(address, mach_vm_round_page(kSize));
 
(...skipping 11 matching lines...)
   TaskMemory memory(mach_task_self());
   std::string result(kSize, '\0');
 
   EXPECT_FALSE(memory.Read(address, kSize, &result[0]));
   EXPECT_FALSE(memory.Read(address + 1, kSize - 1, &result[0]));
   EXPECT_FALSE(memory.Read(address + PAGE_SIZE, 1, &result[0]));
   EXPECT_FALSE(memory.Read(address + PAGE_SIZE - 1, 2, &result[0]));
   EXPECT_TRUE(memory.Read(address, PAGE_SIZE, &result[0]));
   EXPECT_TRUE(memory.Read(address + PAGE_SIZE - 1, 1, &result[0]));
 
+  // Do the same thing with the ReadMapped() interface.
+  scoped_ptr<TaskMemory::MappedMemory> mapped;
+  EXPECT_FALSE((mapped = memory.ReadMapped(address, kSize)));
+  EXPECT_FALSE((mapped = memory.ReadMapped(address + 1, kSize - 1)));
+  EXPECT_FALSE((mapped = memory.ReadMapped(address + PAGE_SIZE, 1)));
+  EXPECT_FALSE((mapped = memory.ReadMapped(address + PAGE_SIZE - 1, 2)));
+  EXPECT_TRUE((mapped = memory.ReadMapped(address, PAGE_SIZE)));
+  EXPECT_TRUE((mapped = memory.ReadMapped(address + PAGE_SIZE - 1, 1)));
+
   // Repeat the test with an unmapped page instead of an unreadable one. This
   // portion of the test may be flaky in the presence of other threads, if
   // another thread maps something in the region that is deallocated here.
   kr = vm_deallocate(mach_task_self(), address + PAGE_SIZE, PAGE_SIZE);
   ASSERT_EQ(KERN_SUCCESS, kr) << MachErrorMessage(kr, "vm_deallocate");
   vm_owner.reset(address, PAGE_SIZE);
 
   EXPECT_FALSE(memory.Read(address, kSize, &result[0]));
   EXPECT_FALSE(memory.Read(address + 1, kSize - 1, &result[0]));
   EXPECT_FALSE(memory.Read(address + PAGE_SIZE, 1, &result[0]));
   EXPECT_FALSE(memory.Read(address + PAGE_SIZE - 1, 2, &result[0]));
   EXPECT_TRUE(memory.Read(address, PAGE_SIZE, &result[0]));
   EXPECT_TRUE(memory.Read(address + PAGE_SIZE - 1, 1, &result[0]));
+
+  // Do the same thing with the ReadMapped() interface.
+  EXPECT_FALSE((mapped = memory.ReadMapped(address, kSize)));
+  EXPECT_FALSE((mapped = memory.ReadMapped(address + 1, kSize - 1)));
+  EXPECT_FALSE((mapped = memory.ReadMapped(address + PAGE_SIZE, 1)));
+  EXPECT_FALSE((mapped = memory.ReadMapped(address + PAGE_SIZE - 1, 2)));
+  EXPECT_TRUE((mapped = memory.ReadMapped(address, PAGE_SIZE)));
+  EXPECT_TRUE((mapped = memory.ReadMapped(address + PAGE_SIZE - 1, 1)));
 }
 
 // This function consolidates the cast from a char* to mach_vm_address_t in one
 // location when reading from the current task.
 bool ReadCStringSelf(TaskMemory* memory,
                      const char* pointer,
                      std::string* result) {
   return memory->ReadCString(reinterpret_cast<mach_vm_address_t>(pointer),
                              result);
 }
(...skipping 241 matching lines...)
   ASSERT_TRUE(ReadCStringSizeLimitedSelf(
       &memory, &string_long[0], string_long.size() + 2, &result));
   EXPECT_FALSE(result.empty());
   EXPECT_EQ(kStringLongSize, result.size());
   EXPECT_EQ(string_long, result);
 
   ASSERT_FALSE(ReadCStringSizeLimitedSelf(
       &memory, &string_long[0], string_long.size(), &result));
 }
 
+bool IsAddressMapped(mach_vm_address_t address) {
+  mach_vm_address_t region_address = address;
+  mach_vm_size_t region_size;
+  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+  vm_region_basic_info_64 info;
+  mach_port_t object;
+  kern_return_t kr = mach_vm_region(mach_task_self(),
+                                    &region_address,
+                                    &region_size,
+                                    VM_REGION_BASIC_INFO_64,
+                                    reinterpret_cast<vm_region_info_t>(&info),
+                                    &count,
+                                    &object);
+  if (kr == KERN_SUCCESS) {
+    // |object| will be MACH_PORT_NULL (10.9.4 xnu-2422.110.17/osfmk/vm/vm_map.c
+    // vm_map_region()), but the interface acts as if it might carry a send
+    // right, so treat it as documented.
+    base::mac::ScopedMachSendRight object_owner(object);
+
+    return address >= region_address && address <= region_address + region_size;
+  }
+
+  if (kr == KERN_INVALID_ADDRESS) {
+    return false;
+  }
+
+  ADD_FAILURE() << MachErrorMessage(kr, "mach_vm_region");
+  return false;
+}
+
+TEST(TaskMemory, MappedMemoryDeallocates) {
+  // This tests that once a TaskMemory::MappedMemory object is destroyed, it
+  // releases the mapped memory that it owned. Technically, this test is not
+  // valid because after the mapping is released, something else (on another
+  // thread) might wind up mapped in the same address. In the test environment,
+  // hopefully there are either no other threads or they’re all quiescent, so
+  // nothing else should wind up mapped in the address.
+
+  TaskMemory memory(mach_task_self());
+  scoped_ptr<TaskMemory::MappedMemory> mapped;
+
+  static const char kTestBuffer[] = "hello!";
+  mach_vm_address_t test_address =
+      reinterpret_cast<mach_vm_address_t>(&kTestBuffer);
+  ASSERT_TRUE((mapped = memory.ReadMapped(test_address, sizeof(kTestBuffer))));
+  EXPECT_EQ(0, memcmp(kTestBuffer, mapped->data(), sizeof(kTestBuffer)));
+
+  mach_vm_address_t mapped_address =
+      reinterpret_cast<mach_vm_address_t>(mapped->data());
+  EXPECT_TRUE(IsAddressMapped(mapped_address));
+
+  mapped.reset();
+  EXPECT_FALSE(IsAddressMapped(mapped_address));
+
+  // This is the same but with a big buffer that’s definitely larger than a
+  // single page. This makes sure that the whole mapped region winds up being
+  // deallocated.
+  const size_t kBigSize = 4 * PAGE_SIZE;
+  scoped_ptr<char[]> big_buffer(new char[kBigSize]);
+  test_address = reinterpret_cast<mach_vm_address_t>(&big_buffer[0]);
+  ASSERT_TRUE((mapped = memory.ReadMapped(test_address, kBigSize)));
+
+  mapped_address = reinterpret_cast<mach_vm_address_t>(mapped->data());
+  mach_vm_address_t mapped_last_address = mapped_address + kBigSize - 1;
+  EXPECT_TRUE(IsAddressMapped(mapped_address));
+  EXPECT_TRUE(IsAddressMapped(mapped_address + PAGE_SIZE));
+  EXPECT_TRUE(IsAddressMapped(mapped_last_address));
+
+  mapped.reset();
+  EXPECT_FALSE(IsAddressMapped(mapped_address));
+  EXPECT_FALSE(IsAddressMapped(mapped_address + PAGE_SIZE));
+  EXPECT_FALSE(IsAddressMapped(mapped_last_address));
+}
+
+TEST(TaskMemory, MappedMemoryReadCString) {
+  // This tests the behavior of TaskMemory::MappedMemory::ReadCString().
+  TaskMemory memory(mach_task_self());
+  scoped_ptr<TaskMemory::MappedMemory> mapped;
+
+  static const char kTestBuffer[] = "0\0" "2\0" "45\0" "789";
+  const mach_vm_address_t kTestAddress =
+      reinterpret_cast<mach_vm_address_t>(&kTestBuffer);
+  ASSERT_TRUE((mapped = memory.ReadMapped(kTestAddress, 10)));
+
+  std::string string;
+  ASSERT_TRUE(mapped->ReadCString(0, &string));
+  EXPECT_EQ("0", string);
+  ASSERT_TRUE(mapped->ReadCString(1, &string));
+  EXPECT_EQ("", string);
+  ASSERT_TRUE(mapped->ReadCString(2, &string));
+  EXPECT_EQ("2", string);
+  ASSERT_TRUE(mapped->ReadCString(3, &string));
+  EXPECT_EQ("", string);
+  ASSERT_TRUE(mapped->ReadCString(4, &string));
+  EXPECT_EQ("45", string);
+  ASSERT_TRUE(mapped->ReadCString(5, &string));
+  EXPECT_EQ("5", string);
+  ASSERT_TRUE(mapped->ReadCString(6, &string));
+  EXPECT_EQ("", string);
+
+  // kTestBuffer’s NUL terminator was not read, so these will see an
+  // unterminated string and fail.
+  EXPECT_FALSE(mapped->ReadCString(7, &string));
+  EXPECT_FALSE(mapped->ReadCString(8, &string));
+  EXPECT_FALSE(mapped->ReadCString(9, &string));
+
+  // These are out of the range of what was read, so they will fail.
+  EXPECT_FALSE(mapped->ReadCString(10, &string));
+  EXPECT_FALSE(mapped->ReadCString(11, &string));
+
+  // Read it again, this time with a length long enough to include the NUL
+  // terminator.
+  ASSERT_TRUE((mapped = memory.ReadMapped(kTestAddress, 11)));
+
+  ASSERT_TRUE(mapped->ReadCString(6, &string));
+  EXPECT_EQ("", string);
+
+  // These should now succeed.
+  ASSERT_TRUE(mapped->ReadCString(7, &string));
+  EXPECT_EQ("789", string);
+  ASSERT_TRUE(mapped->ReadCString(8, &string));
+  EXPECT_EQ("89", string);
+  ASSERT_TRUE(mapped->ReadCString(9, &string));
+  EXPECT_EQ("9", string);
+  EXPECT_TRUE(mapped->ReadCString(10, &string));
+  EXPECT_EQ("", string);
+
+  // These are still out of range.
+  EXPECT_FALSE(mapped->ReadCString(11, &string));
+  EXPECT_FALSE(mapped->ReadCString(12, &string));
+}
+
 }  // namespace
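
The MappedMemoryReadCString test pins down the boundary behavior expected of TaskMemory::MappedMemory::ReadCString(): an offset at or beyond the end of the mapping fails, a string that runs to the end of the mapping without a NUL terminator fails, and a terminator on the very last mapped byte still succeeds. A minimal sketch of that check as a free function, under assumed names (ReadCStringFromMapping, |mapping|, and |mapping_size| are hypothetical, introduced only for illustration; the real logic lives in the TaskMemory implementation):

#include <string.h>

#include <string>

// Hypothetical stand-in for MappedMemory::ReadCString(): |mapping| and
// |mapping_size| represent the data and length that the real object owns.
bool ReadCStringFromMapping(const void* mapping,
                            size_t mapping_size,
                            size_t offset,
                            std::string* string) {
  if (offset >= mapping_size) {
    return false;  // The requested offset is entirely outside the mapping.
  }

  const char* string_base = static_cast<const char*>(mapping) + offset;
  size_t max_length = mapping_size - offset;

  // The string is only valid if its NUL terminator lies within the mapping;
  // otherwise the data is unterminated and the read must fail.
  size_t string_length = strnlen(string_base, max_length);
  if (string_length == max_length) {
    return false;
  }

  string->assign(string_base, string_length);
  return true;
}

Under these assumptions, reading at offset 7 of the 10-byte mapping fails because bytes 7 through 9 ("789") have no terminator inside the mapping, while the same read against the 11-byte mapping succeeds, matching the expectations in the test. MappedMemoryDeallocates additionally implies that destroying the MappedMemory object releases the mapping itself, which is why IsAddressMapped() flips to false once the object goes away.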
