OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 336 matching lines...)
347 void* MemoryAllocator::AllocateRawMemory(const size_t requested, | 347 void* MemoryAllocator::AllocateRawMemory(const size_t requested, |
348 size_t* allocated, | 348 size_t* allocated, |
349 Executability executable) { | 349 Executability executable) { |
350 if (size_ + static_cast<int>(requested) > capacity_) return NULL; | 350 if (size_ + static_cast<int>(requested) > capacity_) return NULL; |
351 void* mem; | 351 void* mem; |
352 if (executable == EXECUTABLE && CodeRange::exists()) { | 352 if (executable == EXECUTABLE && CodeRange::exists()) { |
353 mem = CodeRange::AllocateRawMemory(requested, allocated); | 353 mem = CodeRange::AllocateRawMemory(requested, allocated); |
354 } else { | 354 } else { |
355 mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE)); | 355 mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE)); |
356 } | 356 } |
357 int alloced = *allocated; | 357 int alloced = static_cast<int>(*allocated); |
358 size_ += alloced; | 358 size_ += alloced; |
359 Counters::memory_allocated.Increment(alloced); | 359 Counters::memory_allocated.Increment(alloced); |
360 return mem; | 360 return mem; |
361 } | 361 } |
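
The casts above exist because Counters::memory_allocated counts in int while allocation sizes are size_t; on 64-bit targets size_t is wider than int, so the implicit conversion draws narrowing warnings. A minimal sketch of the pattern with stand-in names (not the real V8 counter API):

#include <cstddef>

static int memory_allocated = 0;  // stand-in for Counters::memory_allocated

void Increment(int bytes) { memory_allocated += bytes; }

void RecordAllocation(size_t allocated) {
  // Without the cast, passing a size_t where an int is expected narrows
  // implicitly and warns on 64-bit builds; the explicit cast documents
  // that chunk sizes here are known to stay well below INT_MAX.
  Increment(static_cast<int>(allocated));
}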
362 | 362 |
363 | 363 |
364 void MemoryAllocator::FreeRawMemory(void* mem, size_t length) { | 364 void MemoryAllocator::FreeRawMemory(void* mem, size_t length) { |
365 if (CodeRange::contains(static_cast<Address>(mem))) { | 365 if (CodeRange::contains(static_cast<Address>(mem))) { |
366 CodeRange::FreeRawMemory(mem, length); | 366 CodeRange::FreeRawMemory(mem, length); |
367 } else { | 367 } else { |
368 OS::Free(mem, length); | 368 OS::Free(mem, length); |
369 } | 369 } |
370 Counters::memory_allocated.Decrement(length); | 370 Counters::memory_allocated.Decrement(static_cast<int>(length)); |
371 size_ -= length; | 371 size_ -= static_cast<int>(length); |
372 ASSERT(size_ >= 0); | 372 ASSERT(size_ >= 0); |
373 } | 373 } |
374 | 374 |
375 | 375 |
376 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) { | 376 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) { |
377 ASSERT(initial_chunk_ == NULL); | 377 ASSERT(initial_chunk_ == NULL); |
378 | 378 |
379 initial_chunk_ = new VirtualMemory(requested); | 379 initial_chunk_ = new VirtualMemory(requested); |
380 CHECK(initial_chunk_ != NULL); | 380 CHECK(initial_chunk_ != NULL); |
381 if (!initial_chunk_->IsReserved()) { | 381 if (!initial_chunk_->IsReserved()) { |
382 delete initial_chunk_; | 382 delete initial_chunk_; |
383 initial_chunk_ = NULL; | 383 initial_chunk_ = NULL; |
384 return NULL; | 384 return NULL; |
385 } | 385 } |
386 | 386 |
387 // We are sure that we have mapped a block of requested addresses. | 387 // We are sure that we have mapped a block of requested addresses. |
388 ASSERT(initial_chunk_->size() == requested); | 388 ASSERT(initial_chunk_->size() == requested); |
389 LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested)); | 389 LOG(NewEvent("InitialChunk", initial_chunk_->address(), requested)); |
390 size_ += requested; | 390 size_ += static_cast<int>(requested); |
391 return initial_chunk_->address(); | 391 return initial_chunk_->address(); |
392 } | 392 } |
393 | 393 |
394 | 394 |
395 static int PagesInChunk(Address start, size_t size) { | 395 static int PagesInChunk(Address start, size_t size) { |
396 // The first page starts on the first page-aligned address from start onward | 396 // The first page starts on the first page-aligned address from start onward |
397 // and the last page ends on the last page-aligned address before | 397 // and the last page ends on the last page-aligned address before |
398 // start+size. Page::kPageSize is a power of two so we can divide by | 398 // start+size. Page::kPageSize is a power of two so we can divide by |
399 // shifting. | 399 // shifting. |
400 return (RoundDown(start + size, Page::kPageSize) | 400 return static_cast<int>((RoundDown(start + size, Page::kPageSize) |
401 - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits; | 401 - RoundUp(start, Page::kPageSize)) >> Page::kPageSizeBits); |
402 } | 402 } |
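
A worked instance of the arithmetic in PagesInChunk, using local stand-ins for V8's RoundUp/RoundDown helpers and an assumed 4 KB page size (the real Page::kPageSizeBits may differ):

#include <cstddef>
#include <cstdint>

const int kPageSizeBits = 12;  // assumed: 4 KB pages, for illustration only
const uintptr_t kPageSize = uintptr_t(1) << kPageSizeBits;

uintptr_t RoundDownToPage(uintptr_t a) { return a & ~(kPageSize - 1); }
uintptr_t RoundUpToPage(uintptr_t a) { return RoundDownToPage(a + kPageSize - 1); }

int PagesIn(uintptr_t start, size_t size) {
  // E.g. start = 0x1800, size = 0x3000: the chunk covers [0x1800, 0x4800),
  // the whole pages inside it are [0x2000, 0x4000), and the count is
  // (0x4000 - 0x2000) >> 12 == 2.
  return static_cast<int>(
      (RoundDownToPage(start + size) - RoundUpToPage(start)) >> kPageSizeBits);
}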
403 | 403 |
404 | 404 |
405 Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages, | 405 Page* MemoryAllocator::AllocatePages(int requested_pages, int* allocated_pages, |
406 PagedSpace* owner) { | 406 PagedSpace* owner) { |
407 if (requested_pages <= 0) return Page::FromAddress(NULL); | 407 if (requested_pages <= 0) return Page::FromAddress(NULL); |
408 size_t chunk_size = requested_pages * Page::kPageSize; | 408 size_t chunk_size = requested_pages * Page::kPageSize; |
409 | 409 |
410 // There is not enough space to guarantee the desired number of pages | 410 // There is not enough space to guarantee the desired number of pages |
411 // can be allocated. | 411 // can be allocated. |
412 if (size_ + static_cast<int>(chunk_size) > capacity_) { | 412 if (size_ + static_cast<int>(chunk_size) > capacity_) { |
413 // Request as many pages as we can. | 413 // Request as many pages as we can. |
414 chunk_size = capacity_ - size_; | 414 chunk_size = capacity_ - size_; |
415 requested_pages = chunk_size >> Page::kPageSizeBits; | 415 requested_pages = static_cast<int>(chunk_size >> Page::kPageSizeBits); |
416 | 416 |
417 if (requested_pages <= 0) return Page::FromAddress(NULL); | 417 if (requested_pages <= 0) return Page::FromAddress(NULL); |
418 } | 418 } |
419 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable()); | 419 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable()); |
420 if (chunk == NULL) return Page::FromAddress(NULL); | 420 if (chunk == NULL) return Page::FromAddress(NULL); |
421 LOG(NewEvent("PagedChunk", chunk, chunk_size)); | 421 LOG(NewEvent("PagedChunk", chunk, chunk_size)); |
422 | 422 |
423 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size); | 423 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size); |
424 if (*allocated_pages == 0) { | 424 if (*allocated_pages == 0) { |
425 FreeRawMemory(chunk, chunk_size); | 425 FreeRawMemory(chunk, chunk_size); |
(...skipping 12 matching lines...)
438 PagedSpace* owner, int* num_pages) { | 438 PagedSpace* owner, int* num_pages) { |
439 ASSERT(start != NULL); | 439 ASSERT(start != NULL); |
440 *num_pages = PagesInChunk(start, size); | 440 *num_pages = PagesInChunk(start, size); |
441 ASSERT(*num_pages > 0); | 441 ASSERT(*num_pages > 0); |
442 ASSERT(initial_chunk_ != NULL); | 442 ASSERT(initial_chunk_ != NULL); |
443 ASSERT(InInitialChunk(start)); | 443 ASSERT(InInitialChunk(start)); |
444 ASSERT(InInitialChunk(start + size - 1)); | 444 ASSERT(InInitialChunk(start + size - 1)); |
445 if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { | 445 if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) { |
446 return Page::FromAddress(NULL); | 446 return Page::FromAddress(NULL); |
447 } | 447 } |
448 Counters::memory_allocated.Increment(size); | 448 Counters::memory_allocated.Increment(static_cast<int>(size)); |
449 | 449 |
450 // So long as we correctly overestimated the number of chunks we should not | 450 // So long as we correctly overestimated the number of chunks we should not |
451 // run out of chunk ids. | 451 // run out of chunk ids. |
452 CHECK(!OutOfChunkIds()); | 452 CHECK(!OutOfChunkIds()); |
453 int chunk_id = Pop(); | 453 int chunk_id = Pop(); |
454 chunks_[chunk_id].init(start, size, owner); | 454 chunks_[chunk_id].init(start, size, owner); |
455 return InitializePagesInChunk(chunk_id, *num_pages, owner); | 455 return InitializePagesInChunk(chunk_id, *num_pages, owner); |
456 } | 456 } |
457 | 457 |
458 | 458 |
459 bool MemoryAllocator::CommitBlock(Address start, | 459 bool MemoryAllocator::CommitBlock(Address start, |
460 size_t size, | 460 size_t size, |
461 Executability executable) { | 461 Executability executable) { |
462 ASSERT(start != NULL); | 462 ASSERT(start != NULL); |
463 ASSERT(size > 0); | 463 ASSERT(size > 0); |
464 ASSERT(initial_chunk_ != NULL); | 464 ASSERT(initial_chunk_ != NULL); |
465 ASSERT(InInitialChunk(start)); | 465 ASSERT(InInitialChunk(start)); |
466 ASSERT(InInitialChunk(start + size - 1)); | 466 ASSERT(InInitialChunk(start + size - 1)); |
467 | 467 |
468 if (!initial_chunk_->Commit(start, size, executable)) return false; | 468 if (!initial_chunk_->Commit(start, size, executable)) return false; |
469 Counters::memory_allocated.Increment(size); | 469 Counters::memory_allocated.Increment(static_cast<int>(size)); |
470 return true; | 470 return true; |
471 } | 471 } |
472 | 472 |
473 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { | 473 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { |
474 ASSERT(start != NULL); | 474 ASSERT(start != NULL); |
475 ASSERT(size > 0); | 475 ASSERT(size > 0); |
476 ASSERT(initial_chunk_ != NULL); | 476 ASSERT(initial_chunk_ != NULL); |
477 ASSERT(InInitialChunk(start)); | 477 ASSERT(InInitialChunk(start)); |
478 ASSERT(InInitialChunk(start + size - 1)); | 478 ASSERT(InInitialChunk(start + size - 1)); |
479 | 479 |
480 if (!initial_chunk_->Uncommit(start, size)) return false; | 480 if (!initial_chunk_->Uncommit(start, size)) return false; |
481 Counters::memory_allocated.Decrement(size); | 481 Counters::memory_allocated.Decrement(static_cast<int>(size)); |
482 return true; | 482 return true; |
483 } | 483 } |
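
ReserveInitialChunk, CommitBlock, and UncommitBlock implement a reserve-then-commit discipline: address space is claimed once up front and backed lazily in blocks. A rough POSIX analogue of what VirtualMemory presumably does underneath (an illustrative sketch, not V8's platform code):

#include <cstddef>
#include <sys/mman.h>

// Reserve address space without backing it: PROT_NONE pages fault on touch.
void* Reserve(size_t size) {
  void* base = mmap(NULL, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  return base == MAP_FAILED ? NULL : base;
}

// Commit a sub-range by granting access; the kernel backs pages on demand.
bool Commit(void* start, size_t size, bool executable) {
  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
  return mprotect(start, size, prot) == 0;
}

// Uncommit by revoking access again (a production implementation might also
// madvise(MADV_DONTNEED) so the kernel can reclaim the physical pages).
bool Uncommit(void* start, size_t size) {
  return mprotect(start, size, PROT_NONE) == 0;
}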
484 | 484 |
485 Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk, | 485 Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk, |
486 PagedSpace* owner) { | 486 PagedSpace* owner) { |
487 ASSERT(IsValidChunk(chunk_id)); | 487 ASSERT(IsValidChunk(chunk_id)); |
488 ASSERT(pages_in_chunk > 0); | 488 ASSERT(pages_in_chunk > 0); |
489 | 489 |
490 Address chunk_start = chunks_[chunk_id].address(); | 490 Address chunk_start = chunks_[chunk_id].address(); |
491 | 491 |
(...skipping 59 matching lines...)
551 | 551 |
552 ChunkInfo& c = chunks_[chunk_id]; | 552 ChunkInfo& c = chunks_[chunk_id]; |
553 | 553 |
554 // We cannot free a chunk contained in the initial chunk because it was not | 554 // We cannot free a chunk contained in the initial chunk because it was not |
555 // allocated with AllocateRawMemory. Instead we uncommit the virtual | 555 // allocated with AllocateRawMemory. Instead we uncommit the virtual |
556 // memory. | 556 // memory. |
557 if (InInitialChunk(c.address())) { | 557 if (InInitialChunk(c.address())) { |
558 // TODO(1240712): VirtualMemory::Uncommit has a return value which | 558 // TODO(1240712): VirtualMemory::Uncommit has a return value which |
559 // is ignored here. | 559 // is ignored here. |
560 initial_chunk_->Uncommit(c.address(), c.size()); | 560 initial_chunk_->Uncommit(c.address(), c.size()); |
561 Counters::memory_allocated.Decrement(c.size()); | 561 Counters::memory_allocated.Decrement(static_cast<int>(c.size())); |
562 } else { | 562 } else { |
563 LOG(DeleteEvent("PagedChunk", c.address())); | 563 LOG(DeleteEvent("PagedChunk", c.address())); |
564 FreeRawMemory(c.address(), c.size()); | 564 FreeRawMemory(c.address(), c.size()); |
565 } | 565 } |
566 c.init(NULL, 0, NULL); | 566 c.init(NULL, 0, NULL); |
567 Push(chunk_id); | 567 Push(chunk_id); |
568 } | 568 } |
569 | 569 |
570 | 570 |
571 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) { | 571 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) { |
(...skipping 517 matching lines...)
1089 } | 1089 } |
1090 } | 1090 } |
1091 } | 1091 } |
1092 allocation_info_.limit = to_space_.high(); | 1092 allocation_info_.limit = to_space_.high(); |
1093 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1093 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
1094 } | 1094 } |
1095 | 1095 |
1096 | 1096 |
1097 void NewSpace::Shrink() { | 1097 void NewSpace::Shrink() { |
1098 int new_capacity = Max(InitialCapacity(), 2 * Size()); | 1098 int new_capacity = Max(InitialCapacity(), 2 * Size()); |
1099 int rounded_new_capacity = RoundUp(new_capacity, OS::AllocateAlignment()); | 1099 int rounded_new_capacity = |
| 1100 RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment())); |
1100 if (rounded_new_capacity < Capacity() && | 1101 if (rounded_new_capacity < Capacity() && |
1101 to_space_.ShrinkTo(rounded_new_capacity)) { | 1102 to_space_.ShrinkTo(rounded_new_capacity)) { |
1102 // Only shrink from space if we managed to shrink to space. | 1103 // Only shrink from space if we managed to shrink to space. |
1103 if (!from_space_.ShrinkTo(rounded_new_capacity)) { | 1104 if (!from_space_.ShrinkTo(rounded_new_capacity)) { |
1104 // If we managed to shrink to space but couldn't shrink from | 1105 // If we managed to shrink to space but couldn't shrink from |
1105 // space, attempt to grow to space again. | 1106 // space, attempt to grow to space again. |
1106 if (!to_space_.GrowTo(from_space_.Capacity())) { | 1107 if (!to_space_.GrowTo(from_space_.Capacity())) { |
1107 // We are in an inconsistent state because we could not | 1108 // We are in an inconsistent state because we could not |
1108 // commit/uncommit memory from new space. | 1109 // commit/uncommit memory from new space. |
1109 V8::FatalProcessOutOfMemory("Failed to shrink new space."); | 1110 V8::FatalProcessOutOfMemory("Failed to shrink new space."); |
(...skipping 117 matching lines...)
1227 | 1228 |
1228 void SemiSpace::TearDown() { | 1229 void SemiSpace::TearDown() { |
1229 start_ = NULL; | 1230 start_ = NULL; |
1230 capacity_ = 0; | 1231 capacity_ = 0; |
1231 } | 1232 } |
1232 | 1233 |
1233 | 1234 |
1234 bool SemiSpace::Grow() { | 1235 bool SemiSpace::Grow() { |
1235 // Double the semispace size but only up to maximum capacity. | 1236 // Double the semispace size but only up to maximum capacity. |
1236 int maximum_extra = maximum_capacity_ - capacity_; | 1237 int maximum_extra = maximum_capacity_ - capacity_; |
1237 int extra = Min(RoundUp(capacity_, OS::AllocateAlignment()), | 1238 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())), |
1238 maximum_extra); | 1239 maximum_extra); |
1239 if (!MemoryAllocator::CommitBlock(high(), extra, executable())) { | 1240 if (!MemoryAllocator::CommitBlock(high(), extra, executable())) { |
1240 return false; | 1241 return false; |
1241 } | 1242 } |
1242 capacity_ += extra; | 1243 capacity_ += extra; |
1243 return true; | 1244 return true; |
1244 } | 1245 } |
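
The growth step commits another capacity_ bytes (doubling the semispace), rounded to the OS allocation granularity and clamped so it never exceeds maximum_capacity_. A small arithmetic sketch with assumed values:

#include <algorithm>

// Assumed illustrative values, not V8's actual configuration.
const int kCapacity = 512 * 1024;          // currently committed bytes
const int kMaximumCapacity = 8 * 1024 * 1024;
const int kAllocateAlignment = 4 * 1024;   // OS::AllocateAlignment() stand-in

int RoundUpTo(int x, int multiple) {
  return (x + multiple - 1) / multiple * multiple;
}

int GrowthAmount() {
  // Doubling: commit kCapacity more bytes, but never past the maximum.
  int maximum_extra = kMaximumCapacity - kCapacity;
  return std::min(RoundUpTo(kCapacity, kAllocateAlignment), maximum_extra);
}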
1245 | 1246 |
1246 | 1247 |
1247 bool SemiSpace::GrowTo(int new_capacity) { | 1248 bool SemiSpace::GrowTo(int new_capacity) { |
(...skipping 542 matching lines...)
1790 // wasted any space. | 1791 // wasted any space. |
1791 ASSERT(Waste() == 0); | 1792 ASSERT(Waste() == 0); |
1792 ASSERT(AvailableFree() == 0); | 1793 ASSERT(AvailableFree() == 0); |
1793 | 1794 |
1794 // Build the free list for the space. | 1795 // Build the free list for the space. |
1795 int computed_size = 0; | 1796 int computed_size = 0; |
1796 PageIterator it(this, PageIterator::PAGES_USED_BY_MC); | 1797 PageIterator it(this, PageIterator::PAGES_USED_BY_MC); |
1797 while (it.has_next()) { | 1798 while (it.has_next()) { |
1798 Page* p = it.next(); | 1799 Page* p = it.next(); |
1799 // Space below the relocation pointer is allocated. | 1800 // Space below the relocation pointer is allocated. |
1800 computed_size += p->mc_relocation_top - p->ObjectAreaStart(); | 1801 computed_size += |
| 1802 static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart()); |
1801 if (it.has_next()) { | 1803 if (it.has_next()) { |
1802 // Free the space at the top of the page. We cannot use | 1804 // Free the space at the top of the page. We cannot use |
1803 // p->mc_relocation_top after the call to Free (because Free will clear | 1805 // p->mc_relocation_top after the call to Free (because Free will clear |
1804 // remembered set bits). | 1806 // remembered set bits). |
1805 int extra_size = p->ObjectAreaEnd() - p->mc_relocation_top; | 1807 int extra_size = |
| 1808 static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top); |
1806 if (extra_size > 0) { | 1809 if (extra_size > 0) { |
1807 int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size); | 1810 int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size); |
1808 // The bytes we have just "freed" to add to the free list were | 1811 // The bytes we have just "freed" to add to the free list were |
1809 // already accounted as available. | 1812 // already accounted as available. |
1810 accounting_stats_.WasteBytes(wasted_bytes); | 1813 accounting_stats_.WasteBytes(wasted_bytes); |
1811 } | 1814 } |
1812 } | 1815 } |
1813 } | 1816 } |
1814 | 1817 |
1815 // Make sure the computed size - based on the used portion of the pages in | 1818 // Make sure the computed size - based on the used portion of the pages in |
(...skipping 45 matching lines...)
1861 } | 1864 } |
1862 | 1865 |
1863 | 1866 |
1864 // Add the block at the top of the page to the space's free list, set the | 1867 // Add the block at the top of the page to the space's free list, set the |
1865 // allocation info to the next page (assumed to exist), and allocate | 1868 // allocation info to the next page (assumed to exist), and allocate |
1866 // linearly there. | 1869 // linearly there. |
1867 HeapObject* OldSpace::AllocateInNextPage(Page* current_page, | 1870 HeapObject* OldSpace::AllocateInNextPage(Page* current_page, |
1868 int size_in_bytes) { | 1871 int size_in_bytes) { |
1869 ASSERT(current_page->next_page()->is_valid()); | 1872 ASSERT(current_page->next_page()->is_valid()); |
1870 // Add the block at the top of this page to the free list. | 1873 // Add the block at the top of this page to the free list. |
1871 int free_size = current_page->ObjectAreaEnd() - allocation_info_.top; | 1874 int free_size = |
| 1875 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top); |
1872 if (free_size > 0) { | 1876 if (free_size > 0) { |
1873 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size); | 1877 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size); |
1874 accounting_stats_.WasteBytes(wasted_bytes); | 1878 accounting_stats_.WasteBytes(wasted_bytes); |
1875 } | 1879 } |
1876 SetAllocationInfo(&allocation_info_, current_page->next_page()); | 1880 SetAllocationInfo(&allocation_info_, current_page->next_page()); |
1877 return AllocateLinearly(&allocation_info_, size_in_bytes); | 1881 return AllocateLinearly(&allocation_info_, size_in_bytes); |
1878 } | 1882 } |
1879 | 1883 |
1880 | 1884 |
1881 #ifdef DEBUG | 1885 #ifdef DEBUG |
(...skipping 79 matching lines...)
1961 const byte* prev_pc = it->rinfo()->pc(); | 1965 const byte* prev_pc = it->rinfo()->pc(); |
1962 int flat_delta = 0; | 1966 int flat_delta = 0; |
1963 it->next(); | 1967 it->next(); |
1964 while (true) { | 1968 while (true) { |
1965 // All nested comments must be terminated properly, and therefore we | 1969 // All nested comments must be terminated properly, and therefore we |
1966 // exit from the loop. | 1970 // exit from the loop. |
1967 ASSERT(!it->done()); | 1971 ASSERT(!it->done()); |
1968 if (it->rinfo()->rmode() == RelocInfo::COMMENT) { | 1972 if (it->rinfo()->rmode() == RelocInfo::COMMENT) { |
1969 const char* const txt = | 1973 const char* const txt = |
1970 reinterpret_cast<const char*>(it->rinfo()->data()); | 1974 reinterpret_cast<const char*>(it->rinfo()->data()); |
1971 flat_delta += it->rinfo()->pc() - prev_pc; | 1975 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc); |
1972 if (txt[0] == ']') break; // End of nested comment | 1976 if (txt[0] == ']') break; // End of nested comment |
1973 // A new comment | 1977 // A new comment |
1974 CollectCommentStatistics(it); | 1978 CollectCommentStatistics(it); |
1975 // Skip code that was covered with previous comment | 1979 // Skip code that was covered with previous comment |
1976 prev_pc = it->rinfo()->pc(); | 1980 prev_pc = it->rinfo()->pc(); |
1977 } | 1981 } |
1978 it->next(); | 1982 it->next(); |
1979 } | 1983 } |
1980 EnterComment(comment_txt, flat_delta); | 1984 EnterComment(comment_txt, flat_delta); |
1981 } | 1985 } |
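
The recursion above attributes each instruction byte to its innermost comment: when a nested '['-comment begins, the recursive call consumes the nested span, so the caller's flat_delta only accumulates bytes outside it. The same idea on a plain token stream (a hypothetical stand-in for the RelocIterator walk):

#include <cstddef>
#include <vector>

struct Token { char kind; int size; };  // '[' opens, ']' closes, 'c' is code

// Returns the flat size of one comment: code bytes directly inside it,
// excluding bytes owned by nested comments.
int FlatSize(const std::vector<Token>& toks, size_t* i) {
  int flat = 0;
  while (*i < toks.size()) {
    Token t = toks[(*i)++];
    if (t.kind == 'c') flat += t.size;          // counted at this level
    else if (t.kind == '[') FlatSize(toks, i);  // nested level owns its bytes
    else break;                                 // ']' terminates this comment
  }
  return flat;
}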
1982 | 1986 |
1983 | 1987 |
1984 // Collects code size statistics: | 1988 // Collects code size statistics: |
1985 // - by code kind | 1989 // - by code kind |
1986 // - by code comment | 1990 // - by code comment |
1987 void PagedSpace::CollectCodeStatistics() { | 1991 void PagedSpace::CollectCodeStatistics() { |
1988 HeapObjectIterator obj_it(this); | 1992 HeapObjectIterator obj_it(this); |
1989 while (obj_it.has_next()) { | 1993 while (obj_it.has_next()) { |
1990 HeapObject* obj = obj_it.next(); | 1994 HeapObject* obj = obj_it.next(); |
1991 if (obj->IsCode()) { | 1995 if (obj->IsCode()) { |
1992 Code* code = Code::cast(obj); | 1996 Code* code = Code::cast(obj); |
1993 code_kind_statistics[code->kind()] += code->Size(); | 1997 code_kind_statistics[code->kind()] += code->Size(); |
1994 RelocIterator it(code); | 1998 RelocIterator it(code); |
1995 int delta = 0; | 1999 int delta = 0; |
1996 const byte* prev_pc = code->instruction_start(); | 2000 const byte* prev_pc = code->instruction_start(); |
1997 while (!it.done()) { | 2001 while (!it.done()) { |
1998 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { | 2002 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { |
1999 delta += it.rinfo()->pc() - prev_pc; | 2003 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); |
2000 CollectCommentStatistics(&it); | 2004 CollectCommentStatistics(&it); |
2001 prev_pc = it.rinfo()->pc(); | 2005 prev_pc = it.rinfo()->pc(); |
2002 } | 2006 } |
2003 it.next(); | 2007 it.next(); |
2004 } | 2008 } |
2005 | 2009 |
2006 ASSERT(code->instruction_start() <= prev_pc && | 2010 ASSERT(code->instruction_start() <= prev_pc && |
2007 prev_pc <= code->relocation_start()); | 2011 prev_pc <= code->relocation_start()); |
2008 delta += code->relocation_start() - prev_pc; | 2012 delta += static_cast<int>(code->relocation_start() - prev_pc); |
2009 EnterComment("NoComment", delta); | 2013 EnterComment("NoComment", delta); |
2010 } | 2014 } |
2011 } | 2015 } |
2012 } | 2016 } |
2013 | 2017 |
2014 | 2018 |
2015 void OldSpace::ReportStatistics() { | 2019 void OldSpace::ReportStatistics() { |
2016 int pct = Available() * 100 / Capacity(); | 2020 int pct = Available() * 100 / Capacity(); |
2017 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n", | 2021 PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n", |
2018 Capacity(), Waste(), Available(), pct); | 2022 Capacity(), Waste(), Available(), pct); |
2019 | 2023 |
2020 // Report remembered set statistics. | 2024 // Report remembered set statistics. |
2021 int rset_marked_pointers = 0; | 2025 int rset_marked_pointers = 0; |
2022 int rset_marked_arrays = 0; | 2026 int rset_marked_arrays = 0; |
2023 int rset_marked_array_elements = 0; | 2027 int rset_marked_array_elements = 0; |
2024 int cross_gen_pointers = 0; | 2028 int cross_gen_pointers = 0; |
2025 int cross_gen_array_elements = 0; | 2029 int cross_gen_array_elements = 0; |
2026 | 2030 |
2027 PageIterator page_it(this, PageIterator::PAGES_IN_USE); | 2031 PageIterator page_it(this, PageIterator::PAGES_IN_USE); |
2028 while (page_it.has_next()) { | 2032 while (page_it.has_next()) { |
2029 Page* p = page_it.next(); | 2033 Page* p = page_it.next(); |
2030 | 2034 |
2031 for (Address rset_addr = p->RSetStart(); | 2035 for (Address rset_addr = p->RSetStart(); |
2032 rset_addr < p->RSetEnd(); | 2036 rset_addr < p->RSetEnd(); |
2033 rset_addr += kIntSize) { | 2037 rset_addr += kIntSize) { |
2034 int rset = Memory::int_at(rset_addr); | 2038 int rset = Memory::int_at(rset_addr); |
2035 if (rset != 0) { | 2039 if (rset != 0) { |
2036 // Bits were set | 2040 // Bits were set |
2037 int intoff = rset_addr - p->address() - Page::kRSetOffset; | 2041 int intoff = |
| 2042 static_cast<int>(rset_addr - p->address() - Page::kRSetOffset); |
2038 int bitoff = 0; | 2043 int bitoff = 0; |
2039 for (; bitoff < kBitsPerInt; ++bitoff) { | 2044 for (; bitoff < kBitsPerInt; ++bitoff) { |
2040 if ((rset & (1 << bitoff)) != 0) { | 2045 if ((rset & (1 << bitoff)) != 0) { |
2041 int bitpos = intoff*kBitsPerByte + bitoff; | 2046 int bitpos = intoff*kBitsPerByte + bitoff; |
2042 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits); | 2047 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits); |
2043 Object** obj = reinterpret_cast<Object**>(slot); | 2048 Object** obj = reinterpret_cast<Object**>(slot); |
2044 if (*obj == Heap::raw_unchecked_fixed_array_map()) { | 2049 if (*obj == Heap::raw_unchecked_fixed_array_map()) { |
2045 rset_marked_arrays++; | 2050 rset_marked_arrays++; |
2046 FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot)); | 2051 FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot)); |
2047 | 2052 |
(...skipping 156 matching lines...)
2204 | 2209 |
2205 // The space is compacted and we haven't yet wasted any space. | 2210 // The space is compacted and we haven't yet wasted any space. |
2206 ASSERT(Waste() == 0); | 2211 ASSERT(Waste() == 0); |
2207 | 2212 |
2208 // Update allocation_top of each page in use and compute waste. | 2213 // Update allocation_top of each page in use and compute waste. |
2209 int computed_size = 0; | 2214 int computed_size = 0; |
2210 PageIterator it(this, PageIterator::PAGES_USED_BY_MC); | 2215 PageIterator it(this, PageIterator::PAGES_USED_BY_MC); |
2211 while (it.has_next()) { | 2216 while (it.has_next()) { |
2212 Page* page = it.next(); | 2217 Page* page = it.next(); |
2213 Address page_top = page->AllocationTop(); | 2218 Address page_top = page->AllocationTop(); |
2214 computed_size += page_top - page->ObjectAreaStart(); | 2219 computed_size += static_cast<int>(page_top - page->ObjectAreaStart()); |
2215 if (it.has_next()) { | 2220 if (it.has_next()) { |
2216 accounting_stats_.WasteBytes(page->ObjectAreaEnd() - page_top); | 2221 accounting_stats_.WasteBytes( |
| 2222 static_cast<int>(page->ObjectAreaEnd() - page_top)); |
2217 } | 2223 } |
2218 } | 2224 } |
2219 | 2225 |
2220 // Make sure the computed size - based on the used portion of the | 2226 // Make sure the computed size - based on the used portion of the |
2221 // pages in use - matches the size we adjust during allocation. | 2227 // pages in use - matches the size we adjust during allocation. |
2222 ASSERT(computed_size == Size()); | 2228 ASSERT(computed_size == Size()); |
2223 } | 2229 } |
2224 | 2230 |
2225 | 2231 |
2226 // Slow case for normal allocation. Try in order: (1) allocate in the next | 2232 // Slow case for normal allocation. Try in order: (1) allocate in the next |
(...skipping 65 matching lines...)
2292 PageIterator page_it(this, PageIterator::PAGES_IN_USE); | 2298 PageIterator page_it(this, PageIterator::PAGES_IN_USE); |
2293 while (page_it.has_next()) { | 2299 while (page_it.has_next()) { |
2294 Page* p = page_it.next(); | 2300 Page* p = page_it.next(); |
2295 | 2301 |
2296 for (Address rset_addr = p->RSetStart(); | 2302 for (Address rset_addr = p->RSetStart(); |
2297 rset_addr < p->RSetEnd(); | 2303 rset_addr < p->RSetEnd(); |
2298 rset_addr += kIntSize) { | 2304 rset_addr += kIntSize) { |
2299 int rset = Memory::int_at(rset_addr); | 2305 int rset = Memory::int_at(rset_addr); |
2300 if (rset != 0) { | 2306 if (rset != 0) { |
2301 // Bits were set | 2307 // Bits were set |
2302 int intoff = rset_addr - p->address() - Page::kRSetOffset; | 2308 int intoff = |
| 2309 static_cast<int>(rset_addr - p->address() - Page::kRSetOffset); |
2303 int bitoff = 0; | 2310 int bitoff = 0; |
2304 for (; bitoff < kBitsPerInt; ++bitoff) { | 2311 for (; bitoff < kBitsPerInt; ++bitoff) { |
2305 if ((rset & (1 << bitoff)) != 0) { | 2312 if ((rset & (1 << bitoff)) != 0) { |
2306 int bitpos = intoff*kBitsPerByte + bitoff; | 2313 int bitpos = intoff*kBitsPerByte + bitoff; |
2307 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits); | 2314 Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits); |
2308 Object** obj = reinterpret_cast<Object**>(slot); | 2315 Object** obj = reinterpret_cast<Object**>(slot); |
2309 rset_marked_pointers++; | 2316 rset_marked_pointers++; |
2310 if (Heap::InNewSpace(*obj)) | 2317 if (Heap::InNewSpace(*obj)) |
2311 cross_gen_pointers++; | 2318 cross_gen_pointers++; |
2312 } | 2319 } |
(...skipping 100 matching lines...)
2413 if (*chunk_size < requested) { | 2420 if (*chunk_size < requested) { |
2414 MemoryAllocator::FreeRawMemory(mem, *chunk_size); | 2421 MemoryAllocator::FreeRawMemory(mem, *chunk_size); |
2415 LOG(DeleteEvent("LargeObjectChunk", mem)); | 2422 LOG(DeleteEvent("LargeObjectChunk", mem)); |
2416 return NULL; | 2423 return NULL; |
2417 } | 2424 } |
2418 return reinterpret_cast<LargeObjectChunk*>(mem); | 2425 return reinterpret_cast<LargeObjectChunk*>(mem); |
2419 } | 2426 } |
2420 | 2427 |
2421 | 2428 |
2422 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) { | 2429 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) { |
2423 int os_alignment = OS::AllocateAlignment(); | 2430 int os_alignment = static_cast<int>(OS::AllocateAlignment()); |
2424 if (os_alignment < Page::kPageSize) | 2431 if (os_alignment < Page::kPageSize) |
2425 size_in_bytes += (Page::kPageSize - os_alignment); | 2432 size_in_bytes += (Page::kPageSize - os_alignment); |
2426 return size_in_bytes + Page::kObjectStartOffset; | 2433 return size_in_bytes + Page::kObjectStartOffset; |
2427 } | 2434 } |
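
The padding covers the worst-case misalignment: when the OS guarantees only os_alignment and that is smaller than a V8 page, a returned chunk can begin up to kPageSize - os_alignment bytes past a page boundary, and that much space at the front is unusable for the page-aligned object area. Worked numbers (assumed, for illustration):

#include <cassert>
#include <cstdint>

const uintptr_t kPageSize = 8 * 1024;     // assumed V8 page size
const uintptr_t kOsAlignment = 4 * 1024;  // assumed OS::AllocateAlignment()

int main() {
  // Worst case: the OS returns a chunk 4 KB past a V8 page boundary.
  uintptr_t chunk = 0x2000 + kOsAlignment;                  // e.g. 0x3000
  uintptr_t first_page = (chunk + kPageSize - 1) & ~(kPageSize - 1);
  // The unusable gap at the front is exactly kPageSize - kOsAlignment bytes,
  // which is the padding ChunkSizeFor adds when os_alignment < kPageSize.
  assert(first_page - chunk == kPageSize - kOsAlignment);
  return 0;
}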
2428 | 2435 |
2429 // ----------------------------------------------------------------------------- | 2436 // ----------------------------------------------------------------------------- |
2430 // LargeObjectSpace | 2437 // LargeObjectSpace |
2431 | 2438 |
2432 LargeObjectSpace::LargeObjectSpace(AllocationSpace id) | 2439 LargeObjectSpace::LargeObjectSpace(AllocationSpace id) |
2433 : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis | 2440 : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis |
(...skipping 58 matching lines...)
2492 return Failure::RetryAfterGC(requested_size, identity()); | 2499 return Failure::RetryAfterGC(requested_size, identity()); |
2493 } | 2500 } |
2494 | 2501 |
2495 size_t chunk_size; | 2502 size_t chunk_size; |
2496 LargeObjectChunk* chunk = | 2503 LargeObjectChunk* chunk = |
2497 LargeObjectChunk::New(requested_size, &chunk_size, executable); | 2504 LargeObjectChunk::New(requested_size, &chunk_size, executable); |
2498 if (chunk == NULL) { | 2505 if (chunk == NULL) { |
2499 return Failure::RetryAfterGC(requested_size, identity()); | 2506 return Failure::RetryAfterGC(requested_size, identity()); |
2500 } | 2507 } |
2501 | 2508 |
2502 size_ += chunk_size; | 2509 size_ += static_cast<int>(chunk_size); |
2503 page_count_++; | 2510 page_count_++; |
2504 chunk->set_next(first_chunk_); | 2511 chunk->set_next(first_chunk_); |
2505 chunk->set_size(chunk_size); | 2512 chunk->set_size(chunk_size); |
2506 first_chunk_ = chunk; | 2513 first_chunk_ = chunk; |
2507 | 2514 |
2508 // Set the object address and size in the page header and clear its | 2515 // Set the object address and size in the page header and clear its |
2509 // remembered set. | 2516 // remembered set. |
2510 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); | 2517 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); |
2511 Address object_address = page->ObjectAreaStart(); | 2518 Address object_address = page->ObjectAreaStart(); |
2512 // Clear the low order bit of the second word in the page to flag it as a | 2519 // Clear the low order bit of the second word in the page to flag it as a |
(...skipping 130 matching lines...)
2643 if (previous == NULL) { | 2650 if (previous == NULL) { |
2644 first_chunk_ = current; | 2651 first_chunk_ = current; |
2645 } else { | 2652 } else { |
2646 previous->set_next(current); | 2653 previous->set_next(current); |
2647 } | 2654 } |
2648 | 2655 |
2649 // Free the chunk. | 2656 // Free the chunk. |
2650 if (object->IsCode()) { | 2657 if (object->IsCode()) { |
2651 LOG(CodeDeleteEvent(object->address())); | 2658 LOG(CodeDeleteEvent(object->address())); |
2652 } | 2659 } |
2653 size_ -= chunk_size; | 2660 size_ -= static_cast<int>(chunk_size); |
2654 page_count_--; | 2661 page_count_--; |
2655 MemoryAllocator::FreeRawMemory(chunk_address, chunk_size); | 2662 MemoryAllocator::FreeRawMemory(chunk_address, chunk_size); |
2656 LOG(DeleteEvent("LargeObjectChunk", chunk_address)); | 2663 LOG(DeleteEvent("LargeObjectChunk", chunk_address)); |
2657 } | 2664 } |
2658 } | 2665 } |
2659 } | 2666 } |
2660 | 2667 |
2661 | 2668 |
2662 bool LargeObjectSpace::Contains(HeapObject* object) { | 2669 bool LargeObjectSpace::Contains(HeapObject* object) { |
2663 Address address = object->address(); | 2670 Address address = object->address(); |
(...skipping 120 matching lines...)
2784 reinterpret_cast<Object**>(object->address() | 2791 reinterpret_cast<Object**>(object->address() |
2785 + Page::kObjectAreaSize), | 2792 + Page::kObjectAreaSize), |
2786 allocation_top); | 2793 allocation_top); |
2787 PrintF("\n"); | 2794 PrintF("\n"); |
2788 } | 2795 } |
2789 } | 2796 } |
2790 } | 2797 } |
2791 #endif // DEBUG | 2798 #endif // DEBUG |
2792 | 2799 |
2793 } } // namespace v8::internal | 2800 } } // namespace v8::internal |