OLD | NEW |
---|---|
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 339 matching lines...) | |
350 Executability executable) { | 350 Executability executable) { |
351 if (size_ + static_cast<int>(requested) > capacity_) return NULL; | 351 if (size_ + static_cast<int>(requested) > capacity_) return NULL; |
352 void* mem; | 352 void* mem; |
353 if (executable == EXECUTABLE && CodeRange::exists()) { | 353 if (executable == EXECUTABLE && CodeRange::exists()) { |
354 mem = CodeRange::AllocateRawMemory(requested, allocated); | 354 mem = CodeRange::AllocateRawMemory(requested, allocated); |
355 } else { | 355 } else { |
356 mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE)); | 356 mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE)); |
357 } | 357 } |
358 int alloced = static_cast<int>(*allocated); | 358 int alloced = static_cast<int>(*allocated); |
359 size_ += alloced; | 359 size_ += alloced; |
| 360 #ifdef DEBUG |
| 361 // Is this the problematic one? |
Kevin Millikin (Chromium) 2010/01/28 08:38:02: I'm not sure this comment adds anything. | |
| 362 ZapBlock(reinterpret_cast<Address>(mem), alloced); |
| 363 #endif |
360 Counters::memory_allocated.Increment(alloced); | 364 Counters::memory_allocated.Increment(alloced); |
361 return mem; | 365 return mem; |
362 } | 366 } |
363 | 367 |
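The pattern this patch adds: in DEBUG builds the allocator fills raw blocks with kZapValue on both allocation and free, so a read of uninitialized or stale memory yields a recognizable bit pattern instead of plausible data. A minimal standalone sketch of the idea, assuming an illustrative 0xdeadbeef constant and a local Zap helper (neither is V8's actual definition):

  #include <cstdint>
  #include <cstdio>
  #include <cstdlib>
  #include <cstring>

  static const uint32_t kZapSketch = 0xdeadbeef;  // illustrative zap constant

  // Fill a block with the zap pattern, word by word, in the spirit of the
  // DEBUG-only ZapBlock calls this patch adds to the allocate/free paths.
  static void Zap(void* mem, size_t size) {
    unsigned char* p = static_cast<unsigned char*>(mem);
    for (size_t s = 0; s + sizeof(kZapSketch) <= size; s += sizeof(kZapSketch)) {
      std::memcpy(p + s, &kZapSketch, sizeof(kZapSketch));
    }
  }

  int main() {
    void* block = std::malloc(64);
    Zap(block, 64);
    uint32_t first;
    std::memcpy(&first, block, sizeof(first));
    std::printf("%x\n", first);  // prints "deadbeef": stale reads are obvious
    std::free(block);
    return 0;
  }

Compiled and run, this prints "deadbeef", which is how an errant read of zapped memory shows up in a debugger or crash dump.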
364 | 368 |
365 void MemoryAllocator::FreeRawMemory(void* mem, size_t length) { | 369 void MemoryAllocator::FreeRawMemory(void* mem, size_t length) { |
| 370 #ifdef DEBUG |
| 371 ZapBlock(reinterpret_cast<Address>(mem), length); |
| 372 #endif |
366 if (CodeRange::contains(static_cast<Address>(mem))) { | 373 if (CodeRange::contains(static_cast<Address>(mem))) { |
367 CodeRange::FreeRawMemory(mem, length); | 374 CodeRange::FreeRawMemory(mem, length); |
368 } else { | 375 } else { |
369 OS::Free(mem, length); | 376 OS::Free(mem, length); |
370 } | 377 } |
371 Counters::memory_allocated.Decrement(static_cast<int>(length)); | 378 Counters::memory_allocated.Decrement(static_cast<int>(length)); |
372 size_ -= static_cast<int>(length); | 379 size_ -= static_cast<int>(length); |
373 ASSERT(size_ >= 0); | 380 ASSERT(size_ >= 0); |
374 } | 381 } |
375 | 382 |
(...skipping 63 matching lines...) | |
439 PagedSpace* owner, int* num_pages) { | 446 PagedSpace* owner, int* num_pages) { |
440 ASSERT(start != NULL); | 447 ASSERT(start != NULL); |
441 *num_pages = PagesInChunk(start, size); | 448 *num_pages = PagesInChunk(start, size); |
442 ASSERT(*num_pages > 0); | 449 ASSERT(*num_pages > 0); |
443 ASSERT(initial_chunk_ != NULL); | 450 ASSERT(initial_chunk_ != NULL); |
444 ASSERT(InInitialChunk(start)); | 451 ASSERT(InInitialChunk(start)); |
445 ASSERT(InInitialChunk(start + size - 1)); | 452 ASSERT(InInitialChunk(start + size - 1)); |
446 if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { | 453 if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { |
447 return Page::FromAddress(NULL); | 454 return Page::FromAddress(NULL); |
448 } | 455 } |
| 456 #ifdef DEBUG |
| 457 ZapBlock(start, size); |
| 458 #endif |
449 Counters::memory_allocated.Increment(static_cast<int>(size)); | 459 Counters::memory_allocated.Increment(static_cast<int>(size)); |
450 | 460 |
451 // So long as we correctly overestimated the number of chunks we should not | 461 // So long as we correctly overestimated the number of chunks we should not |
452 // run out of chunk ids. | 462 // run out of chunk ids. |
453 CHECK(!OutOfChunkIds()); | 463 CHECK(!OutOfChunkIds()); |
454 int chunk_id = Pop(); | 464 int chunk_id = Pop(); |
455 chunks_[chunk_id].init(start, size, owner); | 465 chunks_[chunk_id].init(start, size, owner); |
456 return InitializePagesInChunk(chunk_id, *num_pages, owner); | 466 return InitializePagesInChunk(chunk_id, *num_pages, owner); |
457 } | 467 } |
458 | 468 |
459 | 469 |
460 bool MemoryAllocator::CommitBlock(Address start, | 470 bool MemoryAllocator::CommitBlock(Address start, |
461 size_t size, | 471 size_t size, |
462 Executability executable) { | 472 Executability executable) { |
463 ASSERT(start != NULL); | 473 ASSERT(start != NULL); |
464 ASSERT(size > 0); | 474 ASSERT(size > 0); |
465 ASSERT(initial_chunk_ != NULL); | 475 ASSERT(initial_chunk_ != NULL); |
466 ASSERT(InInitialChunk(start)); | 476 ASSERT(InInitialChunk(start)); |
467 ASSERT(InInitialChunk(start + size - 1)); | 477 ASSERT(InInitialChunk(start + size - 1)); |
468 | 478 |
469 if (!initial_chunk_->Commit(start, size, executable)) return false; | 479 if (!initial_chunk_->Commit(start, size, executable)) return false; |
| 480 #ifdef DEBUG |
| 481 ZapBlock(start, size); |
| 482 #endif |
470 Counters::memory_allocated.Increment(static_cast<int>(size)); | 483 Counters::memory_allocated.Increment(static_cast<int>(size)); |
471 return true; | 484 return true; |
472 } | 485 } |
473 | 486 |
| 487 |
474 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { | 488 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { |
475 ASSERT(start != NULL); | 489 ASSERT(start != NULL); |
476 ASSERT(size > 0); | 490 ASSERT(size > 0); |
477 ASSERT(initial_chunk_ != NULL); | 491 ASSERT(initial_chunk_ != NULL); |
478 ASSERT(InInitialChunk(start)); | 492 ASSERT(InInitialChunk(start)); |
479 ASSERT(InInitialChunk(start + size - 1)); | 493 ASSERT(InInitialChunk(start + size - 1)); |
480 | 494 |
481 if (!initial_chunk_->Uncommit(start, size)) return false; | 495 if (!initial_chunk_->Uncommit(start, size)) return false; |
482 Counters::memory_allocated.Decrement(static_cast<int>(size)); | 496 Counters::memory_allocated.Decrement(static_cast<int>(size)); |
483 return true; | 497 return true; |
484 } | 498 } |
485 | 499 |
| 500 |
| 501 void MemoryAllocator::ZapBlock(Address start, size_t size) { |
| 502 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { |
| 503 Memory::Address_at(start + s) = kZapValue; |
| 504 } |
| 505 } |
| 506 |
| 507 |
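A note on the loop bound in the new ZapBlock helper: the condition s + kPointerSize <= size stops before any trailing remainder smaller than one pointer, so the helper never writes past the end of a block whose size is not a pointer multiple. A small self-contained check of that bound, with an illustrative buffer and zap constant standing in for V8's types:

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  int main() {
    unsigned char buf[17];
    std::memset(buf, 0xAA, sizeof(buf));
    const uintptr_t zap = 0xdeadbeef;  // illustrative zap constant
    const size_t size = 13;            // deliberately not a pointer multiple
    // Same bound as ZapBlock: stop before a partial trailing word.
    for (size_t s = 0; s + sizeof(uintptr_t) <= size; s += sizeof(uintptr_t)) {
      std::memcpy(buf + s, &zap, sizeof(uintptr_t));
    }
    assert(buf[12] == 0xAA);  // bytes past the last full word are untouched
    assert(buf[16] == 0xAA);  // and nothing was written beyond `size`
    return 0;
  }

The assertions hold on both 32-bit and 64-bit pointer widths, matching the helper's intent of zapping only whole pointer-sized slots.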
486 Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk, | 508 Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk, |
487 PagedSpace* owner) { | 509 PagedSpace* owner) { |
488 ASSERT(IsValidChunk(chunk_id)); | 510 ASSERT(IsValidChunk(chunk_id)); |
489 ASSERT(pages_in_chunk > 0); | 511 ASSERT(pages_in_chunk > 0); |
490 | 512 |
491 Address chunk_start = chunks_[chunk_id].address(); | 513 Address chunk_start = chunks_[chunk_id].address(); |
492 | 514 |
493 Address low = RoundUp(chunk_start, Page::kPageSize); | 515 Address low = RoundUp(chunk_start, Page::kPageSize); |
494 | 516 |
495 #ifdef DEBUG | 517 #ifdef DEBUG |
(...skipping 1096 matching lines...) | |
1592 cur = i; | 1614 cur = i; |
1593 } | 1615 } |
1594 } | 1616 } |
1595 free_[cur].next_size_ = kEnd; | 1617 free_[cur].next_size_ = kEnd; |
1596 needs_rebuild_ = false; | 1618 needs_rebuild_ = false; |
1597 } | 1619 } |
1598 | 1620 |
1599 | 1621 |
1600 int OldSpaceFreeList::Free(Address start, int size_in_bytes) { | 1622 int OldSpaceFreeList::Free(Address start, int size_in_bytes) { |
1601 #ifdef DEBUG | 1623 #ifdef DEBUG |
1602 for (int i = 0; i < size_in_bytes; i += kPointerSize) { | 1624 MemoryAllocator::ZapBlock(start, size_in_bytes); |
1603 Memory::Address_at(start + i) = kZapValue; | |
1604 } | |
1605 #endif | 1625 #endif |
1606 FreeListNode* node = FreeListNode::FromAddress(start); | 1626 FreeListNode* node = FreeListNode::FromAddress(start); |
1607 node->set_size(size_in_bytes); | 1627 node->set_size(size_in_bytes); |
1608 | 1628 |
1609 // We don't use the freelists in compacting mode. This makes it more like a | 1629 // We don't use the freelists in compacting mode. This makes it more like a |
1610 // GC that only has mark-sweep-compact and doesn't have a mark-sweep | 1630 // GC that only has mark-sweep-compact and doesn't have a mark-sweep |
1611 // collector. | 1631 // collector. |
1612 if (FLAG_always_compact) { | 1632 if (FLAG_always_compact) { |
1613 return size_in_bytes; | 1633 return size_in_bytes; |
1614 } | 1634 } |
(...skipping 111 matching lines...) | |
1726 | 1746 |
1727 | 1747 |
1728 void FixedSizeFreeList::Reset() { | 1748 void FixedSizeFreeList::Reset() { |
1729 available_ = 0; | 1749 available_ = 0; |
1730 head_ = NULL; | 1750 head_ = NULL; |
1731 } | 1751 } |
1732 | 1752 |
1733 | 1753 |
1734 void FixedSizeFreeList::Free(Address start) { | 1754 void FixedSizeFreeList::Free(Address start) { |
1735 #ifdef DEBUG | 1755 #ifdef DEBUG |
1736 for (int i = 0; i < object_size_; i += kPointerSize) { | 1756 MemoryAllocator::ZapBlock(start, object_size_); |
1737 Memory::Address_at(start + i) = kZapValue; | |
1738 } | |
1739 #endif | 1757 #endif |
1740 // We only use the freelists with mark-sweep. | 1758 // We only use the freelists with mark-sweep. |
1741 ASSERT(!MarkCompactCollector::IsCompacting()); | 1759 ASSERT(!MarkCompactCollector::IsCompacting()); |
1742 FreeListNode* node = FreeListNode::FromAddress(start); | 1760 FreeListNode* node = FreeListNode::FromAddress(start); |
1743 node->set_size(object_size_); | 1761 node->set_size(object_size_); |
1744 node->set_next(head_); | 1762 node->set_next(head_); |
1745 head_ = node->address(); | 1763 head_ = node->address(); |
1746 available_ += object_size_; | 1764 available_ += object_size_; |
1747 } | 1765 } |
1748 | 1766 |
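In both OldSpaceFreeList::Free and FixedSizeFreeList::Free above, the ordering matters: the block is zapped first, then set_size (and set_next) lay live free-list metadata over its first words, so the node header stays readable while the rest of the block keeps the zap pattern until it is reallocated. A hedged sketch of that ordering; the two-field header below is illustrative and far simpler than V8's real FreeListNode encoding:

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  struct NodeSketch {  // illustrative header, not V8's FreeListNode
    uint32_t size;
    uint32_t next;
  };

  int main() {
    unsigned char block[32];
    const uint32_t zap = 0xdeadbeef;  // illustrative zap constant
    // Step 1: zap the whole freed block.
    for (size_t s = 0; s + sizeof(zap) <= sizeof(block); s += sizeof(zap)) {
      std::memcpy(block + s, &zap, sizeof(zap));
    }
    // Step 2: overwrite the first words with the free-list node header.
    NodeSketch header = {static_cast<uint32_t>(sizeof(block)), 0};
    std::memcpy(block, &header, sizeof(header));
    // Header is live data; the payload past it still carries the zap pattern.
    uint32_t past_header;
    std::memcpy(&past_header, block + sizeof(header), sizeof(past_header));
    assert(past_header == zap);
    return 0;
  }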
(...skipping 1103 matching lines...) | |
2852 reinterpret_cast<Object**>(object->address() | 2870 reinterpret_cast<Object**>(object->address() |
2853 + Page::kObjectAreaSize), | 2871 + Page::kObjectAreaSize), |
2854 allocation_top); | 2872 allocation_top); |
2855 PrintF("\n"); | 2873 PrintF("\n"); |
2856 } | 2874 } |
2857 } | 2875 } |
2858 } | 2876 } |
2859 #endif // DEBUG | 2877 #endif // DEBUG |
2860 | 2878 |
2861 } } // namespace v8::internal | 2879 } } // namespace v8::internal |