Chromium Code Reviews

Side by Side Diff: src/platform/virtual-memory.cc

Issue 23641009: Refactor and cleanup VirtualMemory. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Addressed nits. Created 7 years, 3 months ago
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "platform/virtual-memory.h"

#if V8_OS_POSIX
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/resource.h>

#include <unistd.h>
#endif

#if V8_OS_MACOSX
#include <mach/vm_statistics.h>
#endif

#include <cerrno>

#include "platform/mutex.h"
#include "utils.h"
#include "utils/random-number-generator.h"
#if V8_OS_CYGWIN || V8_OS_WIN
#include "win32-headers.h"
#endif

namespace v8 {
namespace internal {

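// Thread-safe generator of pseudo-random addresses, used below to produce
// placement hints for the platform virtual memory primitives.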
class RandomAddressGenerator V8_FINAL {
 public:
  V8_INLINE(uintptr_t NextAddress()) {
    LockGuard<Mutex> lock_guard(&mutex_);
    uintptr_t address = rng_.NextInt();
#if V8_HOST_ARCH_64_BIT
    address = (address << 32) + static_cast<uintptr_t>(rng_.NextInt());
#endif
    return address;
  }

 private:
  Mutex mutex_;
  RandomNumberGenerator rng_;
};

typedef LazyInstance<RandomAddressGenerator,
                     DefaultConstructTrait<RandomAddressGenerator>,
                     ThreadSafeInitOnceTrait>::type LazyRandomAddressGenerator;

#define LAZY_RANDOM_ADDRESS_GENERATOR_INITIALIZER LAZY_INSTANCE_INITIALIZER


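// Returns a randomized address suitable for use as an allocation hint by the
// platform-specific reservation code below. On Native Client this returns
// NULL, i.e. no randomization.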
static V8_INLINE(void* GenerateRandomAddress()) {
#if V8_OS_NACL
  // TODO(bradchen): Restore randomization once Native Client gets smarter
  // about using mmap address hints.
  // See http://code.google.com/p/nativeclient/issues/3341
  return NULL;
#else  // V8_OS_NACL
  LazyRandomAddressGenerator random_address_generator =
      LAZY_RANDOM_ADDRESS_GENERATOR_INITIALIZER;
  uintptr_t address = random_address_generator.Pointer()->NextAddress();

# if V8_TARGET_ARCH_X64
# if V8_OS_CYGWIN || V8_OS_WIN
  // Try not to map pages into the default range where Windows loads DLLs.
  // Use a multiple of 64KiB to prevent committing unused memory.
  address += V8_UINT64_C(0x00080000000);
  address &= V8_UINT64_C(0x3ffffff0000);
# else  // V8_OS_CYGWIN || V8_OS_WIN
  // Currently available CPUs have 48 bits of virtual addressing. Truncate
  // the hint address to 46 bits to give the kernel a fighting chance of
  // fulfilling our placement request.
  address &= V8_UINT64_C(0x3ffffffff000);
# endif  // V8_OS_CYGWIN || V8_OS_WIN
# else  // V8_TARGET_ARCH_X64
# if V8_OS_CYGWIN || V8_OS_WIN
  // Try not to map pages into the default range where Windows loads DLLs.
  // Use a multiple of 64KiB to prevent committing unused memory.
  address += 0x04000000;
  address &= 0x3fff0000;
# elif V8_OS_SOLARIS
  // For our Solaris/illumos mmap hint, we pick a random address in the bottom
  // half of the top half of the address space (that is, the third quarter).
  // Because we do not MAP_FIXED, this will be treated only as a hint -- the
  // system will not fail to mmap() because something else happens to already
  // be mapped at our random address. We deliberately set the hint high enough
  // to get well above the system's break (that is, the heap); Solaris and
  // illumos will try the hint and if that fails allocate as if there were
  // no hint at all. The high hint prevents the break from getting hemmed in
  // at low values, ceding half of the address space to the system heap.
  address &= 0x3ffff000;
  address += 0x80000000;
# else  // V8_OS_CYGWIN || V8_OS_WIN
  // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
  // variety of ASLR modes (PAE kernel, NX compat mode, etc.) and on Mac OS X
  // 10.6 and 10.7.
  address &= 0x3ffff000;
  address += 0x20000000;
# endif  // V8_OS_CYGWIN || V8_OS_WIN
# endif  // V8_TARGET_ARCH_X64
  return reinterpret_cast<void*>(address);
#endif  // V8_OS_NACL
}


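// Reserves and commits a region of at least |size| bytes. Returns the start
// address on success (storing the actual region size in |size_return|), or
// NULL if either the reservation or the commit fails.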
// static
void* VirtualMemory::AllocateRegion(size_t size,
                                    size_t* size_return,
                                    Executability executability) {
  ASSERT_LT(0, size);
  ASSERT_NE(NULL, size_return);
  void* address = ReserveRegion(size, &size);
  if (address == NULL) return NULL;
  if (!CommitRegion(address, size, executability)) {
    bool result = ReleaseRegion(address, size);
    ASSERT(result);
    USE(result);
    return NULL;
  }
  *size_return = size;
  return address;
}

#if V8_OS_CYGWIN || V8_OS_WIN

// static
void* VirtualMemory::ReserveRegion(size_t size, size_t* size_return) {
  ASSERT_LT(0, size);
  ASSERT_NE(NULL, size_return);
  // The minimum size that can be reserved is 64KiB, see
  // http://msdn.microsoft.com/en-us/library/ms810627.aspx
  if (size < 64 * KB) {
    size = 64 * KB;
  }
  size = RoundUp(size, GetAllocationGranularity());
  LPVOID address = NULL;
  // Try to randomize the allocation address (up to three attempts).
  for (unsigned attempts = 0; address == NULL && attempts < 3; ++attempts) {
    address = VirtualAlloc(GenerateRandomAddress(),
                           size,
                           MEM_RESERVE,
                           PAGE_NOACCESS);
  }
  if (address == NULL) {
    // After three attempts give up and let the kernel find an address.
    address = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
  }
  if (address == NULL) {
    return NULL;
  }
  ASSERT(IsAligned(reinterpret_cast<uintptr_t>(address),
                   GetAllocationGranularity()));
  *size_return = size;
  return address;
}


// static
void* VirtualMemory::ReserveRegion(size_t size,
                                   size_t* size_return,
                                   size_t alignment) {
  ASSERT_LT(0, size);
  ASSERT_NE(NULL, size_return);
  ASSERT(IsAligned(alignment, GetAllocationGranularity()));

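  // Over-reserve by |alignment| bytes so that an aligned sub-range of |size|
  // bytes is guaranteed to exist somewhere within the reservation.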
  size_t reserved_size;
  Address reserved_base = static_cast<Address>(
      ReserveRegion(size + alignment, &reserved_size));
  if (reserved_base == NULL) {
    return NULL;
  }
  ASSERT_LE(size, reserved_size);
  ASSERT(IsAligned(reserved_size, GetPageSize()));

  // Try reducing the size by freeing and then reallocating a specific area.
  bool result = ReleaseRegion(reserved_base, reserved_size);
  USE(result);
  ASSERT(result);
  size_t aligned_size = RoundUp(size, GetPageSize());
  Address aligned_base = static_cast<Address>(
      VirtualAlloc(RoundUp(reserved_base, alignment),
                   aligned_size,
                   MEM_RESERVE,
                   PAGE_NOACCESS));
  if (aligned_base != NULL) {
    ASSERT(aligned_base == RoundUp(reserved_base, alignment));
    ASSERT(IsAligned(reinterpret_cast<uintptr_t>(aligned_base),
                     GetAllocationGranularity()));
    ASSERT(IsAligned(aligned_size, GetPageSize()));
    *size_return = aligned_size;
    return aligned_base;
  }

  // Re-reserving the aligned sub-range failed (another thread may have mapped
  // into the released range in the meantime), so fall back to a larger,
  // unaligned reservation.
  return ReserveRegion(reserved_size, size_return);
}


// static
bool VirtualMemory::CommitRegion(void* address,
                                 size_t size,
                                 Executability executability) {
  ASSERT_NE(NULL, address);
  ASSERT_LT(0, size);
  DWORD protect = 0;
  switch (executability) {
    case NOT_EXECUTABLE:
      protect = PAGE_READWRITE;
      break;

    case EXECUTABLE:
      protect = PAGE_EXECUTE_READWRITE;
      break;
  }
  LPVOID result = VirtualAlloc(address, size, MEM_COMMIT, protect);
  if (result == NULL) {
    ASSERT(GetLastError() != ERROR_INVALID_ADDRESS);
    return false;
  }
  ASSERT_EQ(address, result);
  return true;
}


// static
bool VirtualMemory::UncommitRegion(void* address, size_t size) {
  ASSERT_NE(NULL, address);
  ASSERT_LT(0, size);
  int result = VirtualFree(address, size, MEM_DECOMMIT);
  if (result == 0) {
    return false;
  }
  return true;
}


// static
bool VirtualMemory::WriteProtectRegion(void* address, size_t size) {
  ASSERT_NE(NULL, address);
  ASSERT_LT(0, size);
  DWORD old_protect;
  return VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
}


// static
bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
  ASSERT_NE(NULL, address);
  ASSERT_LT(0, size);
  USE(size);
  int result = VirtualFree(address, 0, MEM_RELEASE);
  if (result == 0) {
    return false;
  }
  return true;
}


// static
size_t VirtualMemory::GetAllocationGranularity() {
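  // Lazy initialization here is racy but benign: concurrent callers all
  // compute the same value, and MemoryBarrier() publishes the write.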
  static size_t allocation_granularity = 0;
  if (allocation_granularity == 0) {
    SYSTEM_INFO system_info;
    GetSystemInfo(&system_info);
    allocation_granularity = system_info.dwAllocationGranularity;
    MemoryBarrier();
  }
  return allocation_granularity;
}


// static
size_t VirtualMemory::GetLimit() {
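  // Windows has no equivalent of the POSIX RLIMIT_DATA limit queried in the
  // POSIX implementation below, so no limit is reported here.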
  return 0;
}


// static
size_t VirtualMemory::GetPageSize() {
  static size_t page_size = 0;
  if (page_size == 0) {
    SYSTEM_INFO system_info;
    GetSystemInfo(&system_info);
    page_size = system_info.dwPageSize;
    MemoryBarrier();
  }
  return page_size;
}


#else  // V8_OS_CYGWIN || V8_OS_WIN


// Constants used for mmap.
#if V8_OS_MACOSX
// kMmapFd is used to pass vm_alloc flags that tag the region with the
// user-defined tag 255. This helps identify V8-allocated regions in memory
// analysis tools like vmmap(1).
static const int kMmapFd = VM_MAKE_TAG(255);
#else
static const int kMmapFd = -1;
#endif  // V8_OS_MACOSX
static const off_t kMmapFdOffset = 0;


// static
void* VirtualMemory::ReserveRegion(size_t size, size_t* size_return) {
  ASSERT_LT(0, size);
  ASSERT_NE(NULL, size_return);

  size = RoundUp(size, GetPageSize());
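  // Reserve address space only: PROT_NONE makes the pages inaccessible, and
  // MAP_NORESERVE avoids charging swap space until the region is committed.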
  void* address = mmap(GenerateRandomAddress(),
                       size,
                       PROT_NONE,
                       MAP_ANON | MAP_NORESERVE | MAP_PRIVATE,
                       kMmapFd,
                       kMmapFdOffset);
  if (address == MAP_FAILED) {
    ASSERT_NE(EINVAL, errno);
    return NULL;
  }
  *size_return = size;
  return address;
}


// static
void* VirtualMemory::ReserveRegion(size_t size,
                                   size_t* size_return,
                                   size_t alignment) {
  ASSERT_LT(0, size);
  ASSERT_NE(NULL, size_return);
  ASSERT(IsAligned(alignment, GetPageSize()));

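  // Over-reserve by |alignment| bytes, then unmap the unaligned prefix and
  // the excess suffix so that exactly the aligned region stays mapped.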
  size_t reserved_size;
  Address reserved_base = static_cast<Address>(
      ReserveRegion(size + alignment, &reserved_size));
  if (reserved_base == NULL) {
    return NULL;
  }

  Address aligned_base = RoundUp(reserved_base, alignment);
  ASSERT_LE(reserved_base, aligned_base);

  // Unmap extra memory reserved before the aligned region.
  if (aligned_base != reserved_base) {
    size_t prefix_size = static_cast<size_t>(aligned_base - reserved_base);
    bool result = ReleaseRegion(reserved_base, prefix_size);
    ASSERT(result);
    USE(result);
    reserved_size -= prefix_size;
  }

  size_t aligned_size = RoundUp(size, GetPageSize());
  ASSERT_LE(aligned_size, reserved_size);

  // Unmap extra memory reserved after the aligned region.
  if (aligned_size != reserved_size) {
    size_t suffix_size = reserved_size - aligned_size;
    bool result = ReleaseRegion(aligned_base + aligned_size, suffix_size);
    ASSERT(result);
    USE(result);
    reserved_size -= suffix_size;
  }

  ASSERT(aligned_size == reserved_size);
  ASSERT_NE(NULL, aligned_base);

  *size_return = aligned_size;
  return aligned_base;
}


// static
bool VirtualMemory::CommitRegion(void* address,
                                 size_t size,
                                 Executability executability) {
  ASSERT_NE(NULL, address);
  ASSERT_LT(0, size);
  int prot = 0;
  // The Native Client port of V8 uses an interpreter,
  // so code pages don't need PROT_EXEC.
#if V8_OS_NACL
  executability = NOT_EXECUTABLE;
#endif
  switch (executability) {
    case NOT_EXECUTABLE:
      prot = PROT_READ | PROT_WRITE;
      break;

    case EXECUTABLE:
      prot = PROT_EXEC | PROT_READ | PROT_WRITE;
      break;
  }
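  // Remap the already reserved range in place: MAP_FIXED replaces the
  // PROT_NONE reservation with accessible anonymous pages.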
  void* result = mmap(address,
                      size,
                      prot,
                      MAP_ANON | MAP_FIXED | MAP_PRIVATE,
                      kMmapFd,
                      kMmapFdOffset);
  if (result == MAP_FAILED) {
    ASSERT_NE(EINVAL, errno);
    return false;
  }
  return true;
}


// static
bool VirtualMemory::UncommitRegion(void* address, size_t size) {
  ASSERT_NE(NULL, address);
  ASSERT_LT(0, size);
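  // Remapping with fresh PROT_NONE, MAP_NORESERVE pages discards the old page
  // contents and lets the OS reclaim the underlying physical memory.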
  void* result = mmap(address,
                      size,
                      PROT_NONE,
                      MAP_ANON | MAP_FIXED | MAP_NORESERVE | MAP_PRIVATE,
                      kMmapFd,
                      kMmapFdOffset);
  if (result == MAP_FAILED) {
    ASSERT_NE(EINVAL, errno);
    return false;
  }
  return true;
}


// static
bool VirtualMemory::WriteProtectRegion(void* address, size_t size) {
  ASSERT_NE(NULL, address);
  ASSERT_LT(0, size);
#if V8_OS_NACL
  // The Native Client port of V8 uses an interpreter,
  // so code pages don't need PROT_EXEC.
  int prot = PROT_READ;
#else
  int prot = PROT_EXEC | PROT_READ;
#endif
  int result = mprotect(address, size, prot);
  if (result < 0) {
    ASSERT_NE(EINVAL, errno);
    return false;
  }
  return true;
}


// static
bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
  ASSERT_NE(NULL, address);
  ASSERT_LT(0, size);
  int result = munmap(address, size);
  if (result < 0) {
    ASSERT_NE(EINVAL, errno);
    return false;
  }
  return true;
}


// static
size_t VirtualMemory::GetAllocationGranularity() {
  return GetPageSize();
}


// static
size_t VirtualMemory::GetLimit() {
  struct rlimit rlim;
  int result = getrlimit(RLIMIT_DATA, &rlim);
  ASSERT_EQ(0, result);
  USE(result);
  return rlim.rlim_cur;
}


// static
size_t VirtualMemory::GetPageSize() {
  static const size_t kPageSize = getpagesize();
  return kPageSize;
}

#endif  // V8_OS_CYGWIN || V8_OS_WIN

} }  // namespace v8::internal