OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 284 matching lines...)
295 return InitializePagesInChunk(chunk_id, *allocated_pages, owner); | 295 return InitializePagesInChunk(chunk_id, *allocated_pages, owner); |
296 } | 296 } |
297 | 297 |
298 | 298 |
299 Page* MemoryAllocator::CommitPages(Address start, size_t size, | 299 Page* MemoryAllocator::CommitPages(Address start, size_t size, |
300 PagedSpace* owner, int* num_pages) { | 300 PagedSpace* owner, int* num_pages) { |
301 ASSERT(start != NULL); | 301 ASSERT(start != NULL); |
302 *num_pages = PagesInChunk(start, size); | 302 *num_pages = PagesInChunk(start, size); |
303 ASSERT(*num_pages > 0); | 303 ASSERT(*num_pages > 0); |
304 ASSERT(initial_chunk_ != NULL); | 304 ASSERT(initial_chunk_ != NULL); |
305 ASSERT(initial_chunk_->address() <= start); | 305 ASSERT(InInitialChunk(start)); |
306 ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address()) | 306 ASSERT(InInitialChunk(start + size - 1)); |
307 + initial_chunk_->size()); | |
308 if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { | 307 if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { |
309 return Page::FromAddress(NULL); | 308 return Page::FromAddress(NULL); |
310 } | 309 } |
311 Counters::memory_allocated.Increment(size); | 310 Counters::memory_allocated.Increment(size); |
312 | 311 |
313 // So long as we correctly overestimated the number of chunks we should not | 312 // So long as we correctly overestimated the number of chunks we should not |
314 // run out of chunk ids. | 313 // run out of chunk ids. |
315 CHECK(!OutOfChunkIds()); | 314 CHECK(!OutOfChunkIds()); |
316 int chunk_id = Pop(); | 315 int chunk_id = Pop(); |
317 chunks_[chunk_id].init(start, size, owner); | 316 chunks_[chunk_id].init(start, size, owner); |
318 return InitializePagesInChunk(chunk_id, *num_pages, owner); | 317 return InitializePagesInChunk(chunk_id, *num_pages, owner); |
319 } | 318 } |
320 | 319 |
321 | 320 |
322 bool MemoryAllocator::CommitBlock(Address start, | 321 bool MemoryAllocator::CommitBlock(Address start, |
323 size_t size, | 322 size_t size, |
324 Executability executable) { | 323 Executability executable) { |
325 ASSERT(start != NULL); | 324 ASSERT(start != NULL); |
326 ASSERT(size > 0); | 325 ASSERT(size > 0); |
327 ASSERT(initial_chunk_ != NULL); | 326 ASSERT(initial_chunk_ != NULL); |
328 ASSERT(initial_chunk_->address() <= start); | 327 ASSERT(InInitialChunk(start)); |
329 ASSERT(start + size <= reinterpret_cast<Address>(initial_chunk_->address()) | 328 ASSERT(InInitialChunk(start + size - 1)); |
330 + initial_chunk_->size()); | |
331 | 329 |
332 if (!initial_chunk_->Commit(start, size, executable)) return false; | 330 if (!initial_chunk_->Commit(start, size, executable)) return false; |
333 Counters::memory_allocated.Increment(size); | 331 Counters::memory_allocated.Increment(size); |
334 return true; | 332 return true; |
335 } | 333 } |
336 | 334 |
337 | 335 |
338 Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk, | 336 Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk, |
339 PagedSpace* owner) { | 337 PagedSpace* owner) { |
340 ASSERT(IsValidChunk(chunk_id)); | 338 ASSERT(IsValidChunk(chunk_id)); |
(...skipping 59 matching lines...)
400 | 398 |
401 | 399 |
402 void MemoryAllocator::DeleteChunk(int chunk_id) { | 400 void MemoryAllocator::DeleteChunk(int chunk_id) { |
403 ASSERT(IsValidChunk(chunk_id)); | 401 ASSERT(IsValidChunk(chunk_id)); |
404 | 402 |
405 ChunkInfo& c = chunks_[chunk_id]; | 403 ChunkInfo& c = chunks_[chunk_id]; |
406 | 404 |
407 // We cannot free a chunk contained in the initial chunk because it was not | 405 // We cannot free a chunk contained in the initial chunk because it was not |
408 // allocated with AllocateRawMemory. Instead we uncommit the virtual | 406 // allocated with AllocateRawMemory. Instead we uncommit the virtual |
409 // memory. | 407 // memory. |
410 bool in_initial_chunk = false; | 408 if (InInitialChunk(c.address())) { |
411 if (initial_chunk_ != NULL) { | |
412 Address start = static_cast<Address>(initial_chunk_->address()); | |
413 Address end = start + initial_chunk_->size(); | |
414 in_initial_chunk = (start <= c.address()) && (c.address() < end); | |
415 } | |
416 | |
417 if (in_initial_chunk) { | |
418 // TODO(1240712): VirtualMemory::Uncommit has a return value which | 409 // TODO(1240712): VirtualMemory::Uncommit has a return value which |
419 // is ignored here. | 410 // is ignored here. |
420 initial_chunk_->Uncommit(c.address(), c.size()); | 411 initial_chunk_->Uncommit(c.address(), c.size()); |
421 Counters::memory_allocated.Decrement(c.size()); | 412 Counters::memory_allocated.Decrement(c.size()); |
422 } else { | 413 } else { |
423 LOG(DeleteEvent("PagedChunk", c.address())); | 414 LOG(DeleteEvent("PagedChunk", c.address())); |
424 FreeRawMemory(c.address(), c.size()); | 415 FreeRawMemory(c.address(), c.size()); |
425 } | 416 } |
426 c.init(NULL, 0, NULL); | 417 c.init(NULL, 0, NULL); |
427 Push(chunk_id); | 418 Push(chunk_id); |
(...skipping 94 matching lines...)
522 | 513 |
523 | 514 |
524 void PagedSpace::TearDown() { | 515 void PagedSpace::TearDown() { |
525 first_page_ = MemoryAllocator::FreePages(first_page_); | 516 first_page_ = MemoryAllocator::FreePages(first_page_); |
526 ASSERT(!first_page_->is_valid()); | 517 ASSERT(!first_page_->is_valid()); |
527 | 518 |
528 accounting_stats_.Clear(); | 519 accounting_stats_.Clear(); |
529 } | 520 } |
530 | 521 |
531 | 522 |
| 523 #ifdef ENABLE_HEAP_PROTECTION |
| 524 |
| 525 void PagedSpace::Protect() { |
| 526 Page* page = first_page_; |
| 527 while (page->is_valid()) { |
| 528 MemoryAllocator::ProtectChunkFromPage(page); |
| 529 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); |
| 530 } |
| 531 } |
| 532 |
| 533 |
| 534 void PagedSpace::Unprotect() { |
| 535 Page* page = first_page_; |
| 536 while (page->is_valid()) { |
| 537 MemoryAllocator::UnprotectChunkFromPage(page); |
| 538 page = MemoryAllocator::FindLastPageInSameChunk(page)->next_page(); |
| 539 } |
| 540 } |
| 541 |
| 542 #endif |
| 543 |
| 544 |
532 void PagedSpace::ClearRSet() { | 545 void PagedSpace::ClearRSet() { |
533 PageIterator it(this, PageIterator::ALL_PAGES); | 546 PageIterator it(this, PageIterator::ALL_PAGES); |
534 while (it.has_next()) { | 547 while (it.has_next()) { |
535 it.next()->ClearRSet(); | 548 it.next()->ClearRSet(); |
536 } | 549 } |
537 } | 550 } |
538 | 551 |
539 | 552 |
540 Object* PagedSpace::FindObject(Address addr) { | 553 Object* PagedSpace::FindObject(Address addr) { |
541 // Note: this function can only be called before or after mark-compact GC | 554 // Note: this function can only be called before or after mark-compact GC |
(...skipping 285 matching lines...)
827 allocation_info_.top = NULL; | 840 allocation_info_.top = NULL; |
828 allocation_info_.limit = NULL; | 841 allocation_info_.limit = NULL; |
829 mc_forwarding_info_.top = NULL; | 842 mc_forwarding_info_.top = NULL; |
830 mc_forwarding_info_.limit = NULL; | 843 mc_forwarding_info_.limit = NULL; |
831 | 844 |
832 to_space_.TearDown(); | 845 to_space_.TearDown(); |
833 from_space_.TearDown(); | 846 from_space_.TearDown(); |
834 } | 847 } |
835 | 848 |
836 | 849 |
| 850 #ifdef ENABLE_HEAP_PROTECTION |
| 851 |
| 852 void NewSpace::Protect() { |
| 853 MemoryAllocator::Protect(ToSpaceLow(), Capacity()); |
| 854 MemoryAllocator::Protect(FromSpaceLow(), Capacity()); |
| 855 } |
| 856 |
| 857 |
| 858 void NewSpace::Unprotect() { |
| 859 MemoryAllocator::Unprotect(ToSpaceLow(), Capacity(), |
| 860 to_space_.executable()); |
| 861 MemoryAllocator::Unprotect(FromSpaceLow(), Capacity(), |
| 862 from_space_.executable()); |
| 863 } |
| 864 |
| 865 #endif |
| 866 |
| 867 |
837 void NewSpace::Flip() { | 868 void NewSpace::Flip() { |
838 SemiSpace tmp = from_space_; | 869 SemiSpace tmp = from_space_; |
839 from_space_ = to_space_; | 870 from_space_ = to_space_; |
840 to_space_ = tmp; | 871 to_space_ = tmp; |
841 } | 872 } |
842 | 873 |
843 | 874 |
844 bool NewSpace::Double() { | 875 bool NewSpace::Double() { |
845 ASSERT(capacity_ <= maximum_capacity_ / 2); | 876 ASSERT(capacity_ <= maximum_capacity_ / 2); |
846 // TODO(1240712): Failure to double the from space can result in | 877 // TODO(1240712): Failure to double the from space can result in |
(...skipping 1388 matching lines...)
2235 first_chunk_ = first_chunk_->next(); | 2266 first_chunk_ = first_chunk_->next(); |
2236 LOG(DeleteEvent("LargeObjectChunk", chunk->address())); | 2267 LOG(DeleteEvent("LargeObjectChunk", chunk->address())); |
2237 MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size()); | 2268 MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size()); |
2238 } | 2269 } |
2239 | 2270 |
2240 size_ = 0; | 2271 size_ = 0; |
2241 page_count_ = 0; | 2272 page_count_ = 0; |
2242 } | 2273 } |
2243 | 2274 |
2244 | 2275 |
| 2276 #ifdef ENABLE_HEAP_PROTECTION |
| 2277 |
| 2278 void LargeObjectSpace::Protect() { |
| 2279 LargeObjectChunk* chunk = first_chunk_; |
| 2280 while (chunk != NULL) { |
| 2281 MemoryAllocator::Protect(chunk->address(), chunk->size()); |
| 2282 chunk = chunk->next(); |
| 2283 } |
| 2284 } |
| 2285 |
| 2286 |
| 2287 void LargeObjectSpace::Unprotect() { |
| 2288 LargeObjectChunk* chunk = first_chunk_; |
| 2289 while (chunk != NULL) { |
| 2290 bool is_code = chunk->GetObject()->IsCode(); |
| 2291 MemoryAllocator::Unprotect(chunk->address(), chunk->size(), |
| 2292 is_code ? EXECUTABLE : NOT_EXECUTABLE); |
| 2293 chunk = chunk->next(); |
| 2294 } |
| 2295 } |
| 2296 |
| 2297 #endif |
| 2298 |
| 2299 |
2245 Object* LargeObjectSpace::AllocateRawInternal(int requested_size, | 2300 Object* LargeObjectSpace::AllocateRawInternal(int requested_size, |
2246 int object_size, | 2301 int object_size, |
2247 Executability executable) { | 2302 Executability executable) { |
2248 ASSERT(0 < object_size && object_size <= requested_size); | 2303 ASSERT(0 < object_size && object_size <= requested_size); |
2249 | 2304 |
2250 // Check if we want to force a GC before growing the old space further. | 2305 // Check if we want to force a GC before growing the old space further. |
2251 // If so, fail the allocation. | 2306 // If so, fail the allocation. |
2252 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { | 2307 if (!Heap::always_allocate() && Heap::OldGenerationAllocationLimitReached()) { |
2253 return Failure::RetryAfterGC(requested_size, identity()); | 2308 return Failure::RetryAfterGC(requested_size, identity()); |
2254 } | 2309 } |
(...skipping 280 matching lines...)
2535 reinterpret_cast<Object**>(object->address() | 2590 reinterpret_cast<Object**>(object->address() |
2536 + Page::kObjectAreaSize), | 2591 + Page::kObjectAreaSize), |
2537 allocation_top); | 2592 allocation_top); |
2538 PrintF("\n"); | 2593 PrintF("\n"); |
2539 } | 2594 } |
2540 } | 2595 } |
2541 } | 2596 } |
2542 #endif // DEBUG | 2597 #endif // DEBUG |
2543 | 2598 |
2544 } } // namespace v8::internal | 2599 } } // namespace v8::internal |
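Note on the refactoring above: the new ASSERT(InInitialChunk(...)) calls in CommitPages and CommitBlock, and the simplified branch in DeleteChunk, replace the inline bounds checks against initial_chunk_. The helper's definition is not part of this hunk (it presumably lives in spaces.h or spaces-inl.h); the following is a minimal, self-contained sketch of what it likely checks, using stand-in names and types inferred from the inline test it replaces.

    #include <cstddef>

    typedef unsigned char* Address;                   // stand-in for v8::internal::Address

    struct VirtualMemoryStub {                        // stand-in for VirtualMemory
      void* address() const { return base_; }
      size_t size() const { return size_; }
      void* base_;
      size_t size_;
    };

    static VirtualMemoryStub* initial_chunk_ = NULL;  // stand-in for MemoryAllocator's member

    // True when 'address' lies inside the reserved initial chunk, i.e. memory
    // that must be uncommitted rather than released with FreeRawMemory.
    static inline bool InInitialChunk(Address address) {
      if (initial_chunk_ == NULL) return false;
      Address start = static_cast<Address>(initial_chunk_->address());
      return (start <= address) && (address < start + initial_chunk_->size());
    }

Under that reading, the predicate is end-exclusive, which is why the call sites now pass start + size - 1 rather than start + size: checking the last byte of the range is equivalent to the old start + size <= initial_chunk_->address() + initial_chunk_->size() test.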