OLD | NEW |
1 // Copyright 2006-2010 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 375 matching lines...)
386 size_executable_ += static_cast<int>(*allocated); | 386 size_executable_ += static_cast<int>(*allocated); |
387 } else { | 387 } else { |
388 mem = OS::Allocate(requested, allocated, false); | 388 mem = OS::Allocate(requested, allocated, false); |
389 } | 389 } |
390 int alloced = static_cast<int>(*allocated); | 390 int alloced = static_cast<int>(*allocated); |
391 size_ += alloced; | 391 size_ += alloced; |
392 | 392 |
393 #ifdef DEBUG | 393 #ifdef DEBUG |
394 ZapBlock(reinterpret_cast<Address>(mem), alloced); | 394 ZapBlock(reinterpret_cast<Address>(mem), alloced); |
395 #endif | 395 #endif |
396 COUNTERS->memory_allocated()->Increment(alloced); | 396 isolate_->counters()->memory_allocated()->Increment(alloced); |
397 return mem; | 397 return mem; |
398 } | 398 } |
399 | 399 |
400 | 400 |
401 void MemoryAllocator::FreeRawMemory(void* mem, | 401 void MemoryAllocator::FreeRawMemory(void* mem, |
402 size_t length, | 402 size_t length, |
403 Executability executable) { | 403 Executability executable) { |
404 #ifdef DEBUG | 404 #ifdef DEBUG |
405 ZapBlock(reinterpret_cast<Address>(mem), length); | 405 ZapBlock(reinterpret_cast<Address>(mem), length); |
406 #endif | 406 #endif |
407 if (isolate_->code_range()->contains(static_cast<Address>(mem))) { | 407 if (isolate_->code_range()->contains(static_cast<Address>(mem))) { |
408 isolate_->code_range()->FreeRawMemory(mem, length); | 408 isolate_->code_range()->FreeRawMemory(mem, length); |
409 } else { | 409 } else { |
410 OS::Free(mem, length); | 410 OS::Free(mem, length); |
411 } | 411 } |
412 COUNTERS->memory_allocated()->Decrement(static_cast<int>(length)); | 412 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length)); |
413 size_ -= static_cast<int>(length); | 413 size_ -= static_cast<int>(length); |
414 if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length); | 414 if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length); |
415 | 415 |
416 ASSERT(size_ >= 0); | 416 ASSERT(size_ >= 0); |
417 ASSERT(size_executable_ >= 0); | 417 ASSERT(size_executable_ >= 0); |
418 } | 418 } |
419 | 419 |
420 | 420 |
421 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, | 421 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, |
422 AllocationAction action, | 422 AllocationAction action, |
(...skipping 107 matching lines...)
530 ASSERT(*num_pages > 0); | 530 ASSERT(*num_pages > 0); |
531 ASSERT(initial_chunk_ != NULL); | 531 ASSERT(initial_chunk_ != NULL); |
532 ASSERT(InInitialChunk(start)); | 532 ASSERT(InInitialChunk(start)); |
533 ASSERT(InInitialChunk(start + size - 1)); | 533 ASSERT(InInitialChunk(start + size - 1)); |
534 if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { | 534 if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { |
535 return Page::FromAddress(NULL); | 535 return Page::FromAddress(NULL); |
536 } | 536 } |
537 #ifdef DEBUG | 537 #ifdef DEBUG |
538 ZapBlock(start, size); | 538 ZapBlock(start, size); |
539 #endif | 539 #endif |
540 COUNTERS->memory_allocated()->Increment(static_cast<int>(size)); | 540 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); |
541 | 541 |
542 // So long as we correctly overestimated the number of chunks we should not | 542 // So long as we correctly overestimated the number of chunks we should not |
543 // run out of chunk ids. | 543 // run out of chunk ids. |
544 CHECK(!OutOfChunkIds()); | 544 CHECK(!OutOfChunkIds()); |
545 int chunk_id = Pop(); | 545 int chunk_id = Pop(); |
546 chunks_[chunk_id].init(start, size, owner); | 546 chunks_[chunk_id].init(start, size, owner); |
547 return InitializePagesInChunk(chunk_id, *num_pages, owner); | 547 return InitializePagesInChunk(chunk_id, *num_pages, owner); |
548 } | 548 } |
549 | 549 |
550 | 550 |
551 bool MemoryAllocator::CommitBlock(Address start, | 551 bool MemoryAllocator::CommitBlock(Address start, |
552 size_t size, | 552 size_t size, |
553 Executability executable) { | 553 Executability executable) { |
554 ASSERT(start != NULL); | 554 ASSERT(start != NULL); |
555 ASSERT(size > 0); | 555 ASSERT(size > 0); |
556 ASSERT(initial_chunk_ != NULL); | 556 ASSERT(initial_chunk_ != NULL); |
557 ASSERT(InInitialChunk(start)); | 557 ASSERT(InInitialChunk(start)); |
558 ASSERT(InInitialChunk(start + size - 1)); | 558 ASSERT(InInitialChunk(start + size - 1)); |
559 | 559 |
560 if (!initial_chunk_->Commit(start, size, executable)) return false; | 560 if (!initial_chunk_->Commit(start, size, executable)) return false; |
561 #ifdef DEBUG | 561 #ifdef DEBUG |
562 ZapBlock(start, size); | 562 ZapBlock(start, size); |
563 #endif | 563 #endif |
564 COUNTERS->memory_allocated()->Increment(static_cast<int>(size)); | 564 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); |
565 return true; | 565 return true; |
566 } | 566 } |
567 | 567 |
568 | 568 |
569 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { | 569 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { |
570 ASSERT(start != NULL); | 570 ASSERT(start != NULL); |
571 ASSERT(size > 0); | 571 ASSERT(size > 0); |
572 ASSERT(initial_chunk_ != NULL); | 572 ASSERT(initial_chunk_ != NULL); |
573 ASSERT(InInitialChunk(start)); | 573 ASSERT(InInitialChunk(start)); |
574 ASSERT(InInitialChunk(start + size - 1)); | 574 ASSERT(InInitialChunk(start + size - 1)); |
575 | 575 |
576 if (!initial_chunk_->Uncommit(start, size)) return false; | 576 if (!initial_chunk_->Uncommit(start, size)) return false; |
577 COUNTERS->memory_allocated()->Decrement(static_cast<int>(size)); | 577 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); |
578 return true; | 578 return true; |
579 } | 579 } |
580 | 580 |
581 | 581 |
582 void MemoryAllocator::ZapBlock(Address start, size_t size) { | 582 void MemoryAllocator::ZapBlock(Address start, size_t size) { |
583 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { | 583 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { |
584 Memory::Address_at(start + s) = kZapValue; | 584 Memory::Address_at(start + s) = kZapValue; |
585 } | 585 } |
586 } | 586 } |
587 | 587 |
(...skipping 80 matching lines...)
668 | 668 |
669 ChunkInfo& c = chunks_[chunk_id]; | 669 ChunkInfo& c = chunks_[chunk_id]; |
670 | 670 |
671 // We cannot free a chunk contained in the initial chunk because it was not | 671 // We cannot free a chunk contained in the initial chunk because it was not |
672 // allocated with AllocateRawMemory. Instead we uncommit the virtual | 672 // allocated with AllocateRawMemory. Instead we uncommit the virtual |
673 // memory. | 673 // memory. |
674 if (InInitialChunk(c.address())) { | 674 if (InInitialChunk(c.address())) { |
675 // TODO(1240712): VirtualMemory::Uncommit has a return value which | 675 // TODO(1240712): VirtualMemory::Uncommit has a return value which |
676 // is ignored here. | 676 // is ignored here. |
677 initial_chunk_->Uncommit(c.address(), c.size()); | 677 initial_chunk_->Uncommit(c.address(), c.size()); |
678 COUNTERS->memory_allocated()->Decrement(static_cast<int>(c.size())); | 678 Counters* counters = isolate_->counters(); |
| 679 counters->memory_allocated()->Decrement(static_cast<int>(c.size())); |
679 } else { | 680 } else { |
680 LOG(isolate_, DeleteEvent("PagedChunk", c.address())); | 681 LOG(isolate_, DeleteEvent("PagedChunk", c.address())); |
681 ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity()); | 682 ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity()); |
682 size_t size = c.size(); | 683 size_t size = c.size(); |
683 FreeRawMemory(c.address(), size, c.executable()); | 684 FreeRawMemory(c.address(), size, c.executable()); |
684 PerformAllocationCallback(space, kAllocationActionFree, size); | 685 PerformAllocationCallback(space, kAllocationActionFree, size); |
685 } | 686 } |
686 c.init(NULL, 0, NULL); | 687 c.init(NULL, 0, NULL); |
687 Push(chunk_id); | 688 Push(chunk_id); |
688 } | 689 } |
(...skipping 2057 matching lines...)
2746 | 2747 |
2747 ObjectSpace space = (executable == EXECUTABLE) | 2748 ObjectSpace space = (executable == EXECUTABLE) |
2748 ? kObjectSpaceCodeSpace | 2749 ? kObjectSpaceCodeSpace |
2749 : kObjectSpaceLoSpace; | 2750 : kObjectSpaceLoSpace; |
2750 isolate->memory_allocator()->PerformAllocationCallback( | 2751 isolate->memory_allocator()->PerformAllocationCallback( |
2751 space, kAllocationActionAllocate, size); | 2752 space, kAllocationActionAllocate, size); |
2752 | 2753 |
2753 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem); | 2754 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem); |
2754 chunk->size_ = size; | 2755 chunk->size_ = size; |
2755 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); | 2756 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); |
2756 page->heap_ = Isolate::Current()->heap(); | 2757 page->heap_ = isolate->heap(); |
2757 return chunk; | 2758 return chunk; |
2758 } | 2759 } |
2759 | 2760 |
2760 | 2761 |
2761 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) { | 2762 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) { |
2762 int os_alignment = static_cast<int>(OS::AllocateAlignment()); | 2763 int os_alignment = static_cast<int>(OS::AllocateAlignment()); |
2763 if (os_alignment < Page::kPageSize) { | 2764 if (os_alignment < Page::kPageSize) { |
2764 size_in_bytes += (Page::kPageSize - os_alignment); | 2765 size_in_bytes += (Page::kPageSize - os_alignment); |
2765 } | 2766 } |
2766 return size_in_bytes + Page::kObjectStartOffset; | 2767 return size_in_bytes + Page::kObjectStartOffset; |
(...skipping 369 matching lines...)
3136 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { | 3137 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { |
3137 if (obj->IsCode()) { | 3138 if (obj->IsCode()) { |
3138 Code* code = Code::cast(obj); | 3139 Code* code = Code::cast(obj); |
3139 isolate->code_kind_statistics()[code->kind()] += code->Size(); | 3140 isolate->code_kind_statistics()[code->kind()] += code->Size(); |
3140 } | 3141 } |
3141 } | 3142 } |
3142 } | 3143 } |
3143 #endif // DEBUG | 3144 #endif // DEBUG |
3144 | 3145 |
3145 } } // namespace v8::internal | 3146 } } // namespace v8::internal |
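
Note on the recurring change in the hunks above: the patch replaces the process-global COUNTERS macro with counters reached through the allocator's isolate_ (isolate_->counters()), so memory statistics are kept per isolate rather than per process. Below is a minimal standalone sketch of that pattern, not V8 code; StatsCounter, Counters, Isolate and MemoryAllocator here are simplified stand-ins used only to illustrate the access path the patch introduces.

#include <cstdio>

class StatsCounter {
 public:
  void Increment(int delta) { value_ += delta; }
  void Decrement(int delta) { value_ -= delta; }
  int value() const { return value_; }
 private:
  int value_ = 0;
};

class Counters {
 public:
  StatsCounter* memory_allocated() { return &memory_allocated_; }
 private:
  StatsCounter memory_allocated_;
};

class Isolate {
 public:
  Counters* counters() { return &counters_; }
 private:
  Counters counters_;
};

class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate) : isolate_(isolate) {}
  void RecordAllocation(int bytes) {
    // Old pattern: COUNTERS->memory_allocated()->Increment(bytes);
    // New pattern: the counter belongs to the allocator's isolate, so two
    // isolates in one process no longer share the same counter object.
    isolate_->counters()->memory_allocated()->Increment(bytes);
  }
 private:
  Isolate* isolate_;
};

int main() {
  Isolate isolate_a, isolate_b;
  MemoryAllocator alloc_a(&isolate_a), alloc_b(&isolate_b);
  alloc_a.RecordAllocation(4096);
  alloc_b.RecordAllocation(8192);
  std::printf("a: %d, b: %d\n",
              isolate_a.counters()->memory_allocated()->value(),
              isolate_b.counters()->memory_allocated()->value());
  return 0;
}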