OLD | NEW |
---|---|
1 // Copyright (c) 2005, Google Inc. | 1 // Copyright (c) 2005, Google Inc. |
2 // All rights reserved. | 2 // All rights reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions are | 5 // modification, are permitted provided that the following conditions are |
6 // met: | 6 // met: |
7 // | 7 // |
8 // * Redistributions of source code must retain the above copyright | 8 // * Redistributions of source code must retain the above copyright |
9 // notice, this list of conditions and the following disclaimer. | 9 // notice, this list of conditions and the following disclaimer. |
10 // * Redistributions in binary form must reproduce the above | 10 // * Redistributions in binary form must reproduce the above |
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
// Returns true iff ptr has no bit set at position ADDRESS_BITS or above.
template <int ADDRESS_BITS>
bool CheckAddressBits(uintptr_t ptr) {
  return (ptr >> ADDRESS_BITS) == 0;
}

// Shifting a value by the full bit width of its type is undefined behavior,
// so the pointer-width case is specialized to a constant true.
template <>
bool CheckAddressBits<8 * sizeof(void*)>(uintptr_t ptr) {
  return true;
}
102 | 102 |
// Small fast PRNG from libdieharder, public domain by Bob Jenkins (rngav.c).
// Described at http://burtleburtle.net/bob/rand/smallprng.html.
// Not cryptographically secure, but good enough for what we need.
typedef uint32_t u4;
typedef struct ranctx { u4 a; u4 b; u4 c; u4 d; } ranctx;

// Rotate left. Every macro argument is parenthesized: the original left the
// first use of x bare, which would mis-expand for a non-atomic argument
// (e.g. rot(a ^ b, 5)).
#define rot(x,k) (((x)<<(k))|((x)>>(32-(k))))

// Advances the generator state and returns the next 32-bit value.
static u4 ranval(ranctx* x) {
  u4 e = x->a - rot(x->b, 27);
  x->a = x->b ^ rot(x->c, 17);
  x->b = x->c + x->d;
  x->c = x->d + e;
  x->d = e + x->a;
  return x->d;
}

// Seeds the state and runs 20 warm-up rounds so the state is well mixed.
// Returns the last warm-up output (callers may ignore it).
static u4 raninit(ranctx* x, u4 seed) {
  u4 i, e = 0;  // Initialize e: the original left it uninitialized before the loop.
  x->a = x->b = x->c = 0xf1ea5eed;
  x->d = seed - x->a;
  for (i = 0; i < 20; ++i) {
    e = ranval(x);
  }
  return e;
}

// End PRNG code.
132 | |
// ASLR hints are only supported on x86_64 Linux / Chrome OS, where userland
// has a 47-bit address space and fragmentation is not a concern. (ARM has a
// different canonical-address layout and would need separate handling.)
//
// The macro must be *conditionally defined* rather than defined as an
// expression containing defined(...): using defined() inside a macro that is
// later expanded in an #if is undefined behavior, and an always-defined macro
// would make every "#if defined(ASLR_IS_SUPPORTED)" below unconditionally true.
#if (defined(OS_LINUX) || defined(OS_CHROMEOS)) && defined(__x86_64__)
#define ASLR_IS_SUPPORTED
#endif
135 | |
// Returns a random "hint" suitable for use with mmap(). A hint cannot make
// mmap() fail: the kernel simply ignores it when it cannot be honored.
// However, hints create address space fragmentation. Currently we only
// implement this on x86_64, where userland has a 47-bit address space and
// fragmentation is not an issue.
void* GetRandomAddrHint() {
#if defined(ASLR_IS_SUPPORTED)
  // Note: we are protected by the general TCMalloc_SystemAlloc spinlock.
  // Given the nature of what we're doing, it wouldn't be critical if we
  // weren't. Sharing the state between threads is nice because scheduling
  // adds some randomness to the succession of ranval() calls.
  static ranctx ctx;
  static bool initialized = false;
  if (!initialized) {
    // Pre-seed from the (ASLR'd) stack address in case reading /dev/urandom
    // fails; it should normally be available and overwrite this value.
    volatile int c;
    uint32_t seed = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&c));
    int urandom_fd = open("/dev/urandom", O_RDONLY);
    // ASSERT compiles away in NDEBUG builds, so no #if guard is needed.
    ASSERT(urandom_fd >= 0);
    if (urandom_fd >= 0) {
      ssize_t len = read(urandom_fd, &seed, sizeof(seed));
      ASSERT(len == static_cast<ssize_t>(sizeof(seed)));
      // Fix: the original called close(seed), closing a random descriptor
      // number and leaking urandom_fd.
      close(urandom_fd);
    }
    raninit(&ctx, seed);
    initialized = true;
  }
  // Sequence the two ranval() calls explicitly: both mutate ctx, and the
  // evaluation order of the operands of "|" is unspecified.
  const uint64_t high = ranval(&ctx);
  const uint64_t low = ranval(&ctx);
  uint64_t random_address = (high << 32) | low;
  // If the kernel cannot honor the hint in arch_get_unmapped_area_topdown,
  // it will simply ignore it, so we give a hint that has a good chance of
  // working.
  // The mmap top-down allocator normally allocates below TASK_SIZE - gap,
  // with a gap that depends on the max stack size (see x86/mm/mmap.c), so we
  // make allocations below that area, i.e. below about 0x7ffbf8000000.
  // The mask 0x3ffffffff000 "pollutes" only half of the address space; in
  // the unlikely case fragmentation becomes an issue, the kernel still has
  // the other half to use. A bit-wise "and" does not bias the distribution.
  // UINT64_C guarantees a 64-bit constant on all compilers.
  random_address &= UINT64_C(0x3ffffffff000);
  return reinterpret_cast<void*>(random_address);
#else
  return NULL;
#endif  // ASLR_IS_SUPPORTED
}
187 | |
103 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". | 188 } // Anonymous namespace to avoid name conflicts on "CheckAddressBits". |
104 | 189 |
105 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), | 190 COMPILE_ASSERT(kAddressBits <= 8 * sizeof(void*), |
106 address_bits_larger_than_pointer_size); | 191 address_bits_larger_than_pointer_size); |
107 | 192 |
108 // Structure for discovering alignment | 193 // Structure for discovering alignment |
109 union MemoryAligner { | 194 union MemoryAligner { |
110 void* p; | 195 void* p; |
111 double d; | 196 double d; |
112 size_t s; | 197 size_t s; |
(...skipping 19 matching lines...) Expand all Loading... | |
132 EnvToInt("TCMALLOC_DEVMEM_LIMIT", 0), | 217 EnvToInt("TCMALLOC_DEVMEM_LIMIT", 0), |
133 "Physical memory limit location in MB for /dev/mem allocation." | 218 "Physical memory limit location in MB for /dev/mem allocation." |
134 " Setting this to 0 means no limit."); | 219 " Setting this to 0 means no limit."); |
135 DEFINE_bool(malloc_skip_sbrk, | 220 DEFINE_bool(malloc_skip_sbrk, |
136 EnvToBool("TCMALLOC_SKIP_SBRK", false), | 221 EnvToBool("TCMALLOC_SKIP_SBRK", false), |
137 "Whether sbrk can be used to obtain memory."); | 222 "Whether sbrk can be used to obtain memory."); |
138 DEFINE_bool(malloc_skip_mmap, | 223 DEFINE_bool(malloc_skip_mmap, |
139 EnvToBool("TCMALLOC_SKIP_MMAP", false), | 224 EnvToBool("TCMALLOC_SKIP_MMAP", false), |
140 "Whether mmap can be used to obtain memory."); | 225 "Whether mmap can be used to obtain memory."); |
141 | 226 |
227 DEFINE_bool(malloc_random_allocator, | |
228 EnvToBool("TCMALLOC_ASLR", | |
229 #if defined(ASLR_IS_SUPPORTED) | |
230 true), | |
231 #else | |
232 false), | |
233 #endif | |
234 "Whether to randomize the address space via mmap()."); | |
235 | |
142 // static allocators | 236 // static allocators |
143 class SbrkSysAllocator : public SysAllocator { | 237 class SbrkSysAllocator : public SysAllocator { |
144 public: | 238 public: |
145 SbrkSysAllocator() : SysAllocator() { | 239 SbrkSysAllocator() : SysAllocator() { |
146 } | 240 } |
147 void* Alloc(size_t size, size_t *actual_size, size_t alignment); | 241 void* Alloc(size_t size, size_t *actual_size, size_t alignment); |
148 }; | 242 }; |
149 static char sbrk_space[sizeof(SbrkSysAllocator)]; | 243 static char sbrk_space[sizeof(SbrkSysAllocator)]; |
150 | 244 |
151 class MmapSysAllocator : public SysAllocator { | 245 class MmapSysAllocator : public SysAllocator { |
(...skipping 145 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
297 // Ask for extra memory if alignment > pagesize | 391 // Ask for extra memory if alignment > pagesize |
298 size_t extra = 0; | 392 size_t extra = 0; |
299 if (alignment > pagesize) { | 393 if (alignment > pagesize) { |
300 extra = alignment - pagesize; | 394 extra = alignment - pagesize; |
301 } | 395 } |
302 | 396 |
303 // Note: size + extra does not overflow since: | 397 // Note: size + extra does not overflow since: |
304 // size + alignment < (1<<NBITS). | 398 // size + alignment < (1<<NBITS). |
305 // and extra <= alignment | 399 // and extra <= alignment |
306 // therefore size + extra < (1<<NBITS) | 400 // therefore size + extra < (1<<NBITS) |
307 void* result = mmap(NULL, size + extra, | 401 void* address_hint = NULL; |
402 if (FLAGS_malloc_random_allocator) { | |
403 address_hint = GetRandomAddrHint(); | |
404 } | |
405 void* result = mmap(address_hint, size + extra, | |
308 PROT_READ|PROT_WRITE, | 406 PROT_READ|PROT_WRITE, |
309 MAP_PRIVATE|MAP_ANONYMOUS, | 407 MAP_PRIVATE|MAP_ANONYMOUS, |
310 -1, 0); | 408 -1, 0); |
311 if (result == reinterpret_cast<void*>(MAP_FAILED)) { | 409 if (result == reinterpret_cast<void*>(MAP_FAILED)) { |
312 return NULL; | 410 return NULL; |
313 } | 411 } |
314 | 412 |
315 // Adjust the return memory so it is aligned | 413 // Adjust the return memory so it is aligned |
316 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); | 414 uintptr_t ptr = reinterpret_cast<uintptr_t>(result); |
317 size_t adjust = 0; | 415 size_t adjust = 0; |
(...skipping 120 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
438 failed_[i] = false; | 536 failed_[i] = false; |
439 } | 537 } |
440 return NULL; | 538 return NULL; |
441 } | 539 } |
442 | 540 |
443 static bool system_alloc_inited = false; | 541 static bool system_alloc_inited = false; |
444 void InitSystemAllocators(void) { | 542 void InitSystemAllocators(void) { |
445 MmapSysAllocator *mmap = new (mmap_space) MmapSysAllocator(); | 543 MmapSysAllocator *mmap = new (mmap_space) MmapSysAllocator(); |
446 SbrkSysAllocator *sbrk = new (sbrk_space) SbrkSysAllocator(); | 544 SbrkSysAllocator *sbrk = new (sbrk_space) SbrkSysAllocator(); |
447 | 545 |
546 DefaultSysAllocator *sdef = new (default_space) DefaultSysAllocator(); | |
547 | |
548 // Unfortunately, this code runs before flags are initialized. So | |
549 // we can't use FLAGS_malloc_random_allocator. | |
550 #if defined(ASLR_IS_SUPPORTED) | |
551 // Our only random allocator is mmap. | |
552 sdef->SetChildAllocator(mmap, 0, mmap_name); | |
553 #else | |
448 // In 64-bit debug mode, place the mmap allocator first since it | 554 // In 64-bit debug mode, place the mmap allocator first since it |
449 // allocates pointers that do not fit in 32 bits and therefore gives | 555 // allocates pointers that do not fit in 32 bits and therefore gives |
450 // us better testing of code's 64-bit correctness. It also leads to | 556 // us better testing of code's 64-bit correctness. It also leads to |
451 // less false negatives in heap-checking code. (Numbers are less | 557 // less false negatives in heap-checking code. (Numbers are less |
452 // likely to look like pointers and therefore the conservative gc in | 558 // likely to look like pointers and therefore the conservative gc in |
453 // the heap-checker is less likely to misinterpret a number as a | 559 // the heap-checker is less likely to misinterpret a number as a |
454 // pointer). | 560 // pointer). |
455 DefaultSysAllocator *sdef = new (default_space) DefaultSysAllocator(); | |
456 if (kDebugMode && sizeof(void*) > 4) { | 561 if (kDebugMode && sizeof(void*) > 4) { |
457 sdef->SetChildAllocator(mmap, 0, mmap_name); | 562 sdef->SetChildAllocator(mmap, 0, mmap_name); |
458 sdef->SetChildAllocator(sbrk, 1, sbrk_name); | 563 sdef->SetChildAllocator(sbrk, 1, sbrk_name); |
459 } else { | 564 } else { |
460 sdef->SetChildAllocator(sbrk, 0, sbrk_name); | 565 sdef->SetChildAllocator(sbrk, 0, sbrk_name); |
461 sdef->SetChildAllocator(mmap, 1, mmap_name); | 566 sdef->SetChildAllocator(mmap, 1, mmap_name); |
462 } | 567 } |
568 #endif // ASLR_IS_SUPPORTED | |
463 sys_alloc = sdef; | 569 sys_alloc = sdef; |
464 } | 570 } |
465 | 571 |
466 void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, | 572 void* TCMalloc_SystemAlloc(size_t size, size_t *actual_size, |
467 size_t alignment) { | 573 size_t alignment) { |
468 // Discard requests that overflow | 574 // Discard requests that overflow |
469 if (size + alignment < size) return NULL; | 575 if (size + alignment < size) return NULL; |
470 | 576 |
471 SpinLockHolder lock_holder(&spinlock); | 577 SpinLockHolder lock_holder(&spinlock); |
472 | 578 |
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
540 } | 646 } |
541 } | 647 } |
542 #endif | 648 #endif |
543 } | 649 } |
544 | 650 |
void TCMalloc_SystemCommit(void* start, size_t length) {
  // Intentionally a no-op: TCMalloc_SystemRelease does not leave pages in a
  // state that requires re-committing before the application can reuse them.
}
OLD | NEW |