OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 429 matching lines...)
440 | 440 |
441 chunk->heap_ = heap; | 441 chunk->heap_ = heap; |
442 chunk->size_ = size; | 442 chunk->size_ = size; |
443 chunk->area_start_ = area_start; | 443 chunk->area_start_ = area_start; |
444 chunk->area_end_ = area_end; | 444 chunk->area_end_ = area_end; |
445 chunk->flags_ = 0; | 445 chunk->flags_ = 0; |
446 chunk->set_owner(owner); | 446 chunk->set_owner(owner); |
447 chunk->InitializeReservedMemory(); | 447 chunk->InitializeReservedMemory(); |
448 chunk->slots_buffer_ = NULL; | 448 chunk->slots_buffer_ = NULL; |
449 chunk->skip_list_ = NULL; | 449 chunk->skip_list_ = NULL; |
450 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; | |
451 chunk->ResetLiveBytes(); | 450 chunk->ResetLiveBytes(); |
452 Bitmap::Clear(chunk); | 451 Bitmap::Clear(chunk); |
453 chunk->initialize_scan_on_scavenge(false); | 452 chunk->initialize_scan_on_scavenge(false); |
454 chunk->SetFlag(WAS_SWEPT_PRECISELY); | 453 chunk->SetFlag(WAS_SWEPT_PRECISELY); |
455 | 454 |
456 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); | 455 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); |
457 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); | 456 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); |
458 | 457 |
459 if (executable == EXECUTABLE) { | 458 if (executable == EXECUTABLE) { |
460 chunk->SetFlag(IS_EXECUTABLE); | 459 chunk->SetFlag(IS_EXECUTABLE); |
(...skipping 414 matching lines...)
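The OFFSET_OF assertions in MemoryChunk::Initialize above pin the positions of flags_ and live_byte_count_ to the kFlagsOffset and kLiveBytesOffset constants, presumably because other parts of the VM address those fields at fixed offsets. A minimal standalone sketch of the same layout-checking idea, using the standard offsetof macro and a hypothetical ChunkHeader type (not V8's real layout):

#include <cstddef>
#include <cstdint>

// Hypothetical chunk header whose field offsets other code relies on.
struct ChunkHeader {
  uintptr_t size;
  uintptr_t flags;
  intptr_t live_byte_count;
};

// Compile-time equivalents of the runtime ASSERT(OFFSET_OF(...) == ...)
// checks: break the build if the layout drifts.
static_assert(offsetof(ChunkHeader, flags) == sizeof(uintptr_t),
              "flags must follow size");
static_assert(offsetof(ChunkHeader, live_byte_count) == 2 * sizeof(uintptr_t),
              "live_byte_count must follow flags");

int main() { return 0; }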
875 intptr_t PagedSpace::SizeOfFirstPage() { | 874 intptr_t PagedSpace::SizeOfFirstPage() { |
876 int size = 0; | 875 int size = 0; |
877 switch (identity()) { | 876 switch (identity()) { |
878 case OLD_POINTER_SPACE: | 877 case OLD_POINTER_SPACE: |
879 size = 64 * kPointerSize * KB; | 878 size = 64 * kPointerSize * KB; |
880 break; | 879 break; |
881 case OLD_DATA_SPACE: | 880 case OLD_DATA_SPACE: |
882 size = 192 * KB; | 881 size = 192 * KB; |
883 break; | 882 break; |
884 case MAP_SPACE: | 883 case MAP_SPACE: |
885 size = 16 * kPointerSize * KB; | 884 size = 128 * KB; |
886 break; | 885 break; |
887 case CELL_SPACE: | 886 case CELL_SPACE: |
888 size = 16 * kPointerSize * KB; | 887 size = 96 * KB; |
889 break; | 888 break; |
890 case CODE_SPACE: | 889 case CODE_SPACE: |
891 if (kPointerSize == 8) { | 890 if (kPointerSize == 8) { |
892 // On x64 we allocate code pages in a special way (from the reserved | 891 // On x64 we allocate code pages in a special way (from the reserved |
893 // 2GB area). That part of the code is not yet upgraded to handle | 892 // 2GB area). That part of the code is not yet upgraded to handle |
894 // small pages. | 893 // small pages. |
895 size = AreaSize(); | 894 size = AreaSize(); |
896 } else { | 895 } else { |
897 size = 384 * KB; | 896 size = 384 * KB; |
898 } | 897 } |
(...skipping 1353 matching lines...)
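For context on the SizeOfFirstPage hunk above: the old MAP_SPACE and CELL_SPACE values of 16 * kPointerSize * KB come to 64 KB on 32-bit targets and 128 KB on 64-bit targets, which the new code replaces with fixed 128 KB and 96 KB first pages regardless of pointer size. A tiny standalone check of that arithmetic (illustrative only, not part of the patch):

#include <cstdio>

int main() {
  const int KB = 1024;
  // Old first-page size for MAP_SPACE and CELL_SPACE: 16 * kPointerSize * KB.
  for (int kPointerSize = 4; kPointerSize <= 8; kPointerSize *= 2) {
    int old_size = 16 * kPointerSize * KB;
    std::printf("kPointerSize=%d: old = %d KB; new: MAP_SPACE = 128 KB, CELL_SPACE = 96 KB\n",
                kPointerSize, old_size / KB);
  }
  return 0;
}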
2252 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); | 2251 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); |
2253 if (new_area == NULL) return false; | 2252 if (new_area == NULL) return false; |
2254 | 2253 |
2255 int old_linear_size = static_cast<int>(limit() - top()); | 2254 int old_linear_size = static_cast<int>(limit() - top()); |
2256 // Mark the old linear allocation area with a free space so it can be | 2255 // Mark the old linear allocation area with a free space so it can be |
2257 // skipped when scanning the heap. This also puts it back in the free list | 2256 // skipped when scanning the heap. This also puts it back in the free list |
2258 // if it is big enough. | 2257 // if it is big enough. |
2259 Free(top(), old_linear_size); | 2258 Free(top(), old_linear_size); |
2260 | 2259 |
2261 SetTop(new_area->address(), new_area->address() + size_in_bytes); | 2260 SetTop(new_area->address(), new_area->address() + size_in_bytes); |
| 2261 Allocate(size_in_bytes); |
2262 return true; | 2262 return true; |
2263 } | 2263 } |
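The hunk above rotates the linear allocation area: the unused tail [top(), limit()) is handed back via Free() so heap iteration can skip it, and the freshly acquired node becomes the new area through SetTop(), with the newly added Allocate(size_in_bytes) call presumably recording the reserved bytes in the space's bookkeeping. A minimal standalone sketch of the underlying bump-pointer pattern, using hypothetical names (LinearArea, Bump) rather than the real PagedSpace API:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical bump-pointer (linear) allocation area; illustrative only.
struct LinearArea {
  uintptr_t top;    // next free address
  uintptr_t limit;  // one past the end of the usable area

  // Bump-allocate `size` bytes, or return 0 if the area is exhausted.
  uintptr_t Bump(size_t size) {
    if (static_cast<size_t>(limit - top) < size) return 0;
    uintptr_t result = top;
    top += size;
    return result;
  }
};

int main() {
  unsigned char backing[256];
  LinearArea area = {reinterpret_cast<uintptr_t>(backing),
                     reinterpret_cast<uintptr_t>(backing) + sizeof(backing)};
  uintptr_t obj = area.Bump(64);
  // The leftover [top, limit) slice is what V8 turns into free space
  // (cf. Free(top(), old_linear_size)) before repointing the area.
  std::printf("allocated at %p, %u bytes left\n",
              reinterpret_cast<void*>(obj),
              static_cast<unsigned>(area.limit - area.top));
  return 0;
}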
2264 | 2264 |
2265 | 2265 |
2266 static void RepairFreeList(Heap* heap, FreeListNode* n) { | |
2267 while (n != NULL) { | |
2268 Map** map_location = reinterpret_cast<Map**>(n->address()); | |
2269 if (*map_location == NULL) { | |
2270 *map_location = heap->free_space_map(); | |
2271 } else { | |
2272 ASSERT(*map_location == heap->free_space_map()); | |
2273 } | |
2274 n = n->next(); | |
2275 } | |
2276 } | |
2277 | |
2278 | |
2279 void FreeList::RepairLists(Heap* heap) { | |
2280 RepairFreeList(heap, small_list_); | |
2281 RepairFreeList(heap, medium_list_); | |
2282 RepairFreeList(heap, large_list_); | |
2283 RepairFreeList(heap, huge_list_); | |
2284 } | |
2285 | |
2286 | |
2287 // After we have booted, we have created a map which represents free space | |
2288 // on the heap. If there was already a free list then the elements on it | |
2289 // were created with the wrong FreeSpaceMap (normally NULL), so we need to | |
2290 // fix them. | |
2291 void PagedSpace::RepairFreeListsAfterBoot() { | |
2292 free_list_.RepairLists(heap()); | |
2293 } | |
2294 | |
2295 | |
2296 // You have to call this last, since the implementation from PagedSpace | 2266 // You have to call this last, since the implementation from PagedSpace |
2297 // doesn't know that memory was 'promised' to large object space. | 2267 // doesn't know that memory was 'promised' to large object space. |
2298 bool LargeObjectSpace::ReserveSpace(int bytes) { | 2268 bool LargeObjectSpace::ReserveSpace(int bytes) { |
2299 return heap()->OldGenerationCapacityAvailable() >= bytes && | 2269 return heap()->OldGenerationCapacityAvailable() >= bytes && |
2300 (!heap()->incremental_marking()->IsStopped() || | 2270 (!heap()->incremental_marking()->IsStopped() || |
2301 heap()->OldGenerationSpaceAvailable() >= bytes); | 2271 heap()->OldGenerationSpaceAvailable() >= bytes); |
2302 } | 2272 } |
2303 | 2273 |
2304 | 2274 |
2305 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { | 2275 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { |
(...skipping 366 matching lines...)
2672 for (uintptr_t key = base; key <= limit; key++) { | 2642 for (uintptr_t key = base; key <= limit; key++) { |
2673 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), | 2643 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), |
2674 static_cast<uint32_t>(key), | 2644 static_cast<uint32_t>(key), |
2675 true); | 2645 true); |
2676 ASSERT(entry != NULL); | 2646 ASSERT(entry != NULL); |
2677 entry->value = page; | 2647 entry->value = page; |
2678 } | 2648 } |
2679 | 2649 |
2680 HeapObject* object = page->GetObject(); | 2650 HeapObject* object = page->GetObject(); |
2681 | 2651 |
2682 // Make the object consistent so the large object space can be traversed. | 2652 #ifdef DEBUG |
| 2653 // Make the object consistent so the heap can be verified in OldSpaceStep. |
2683 reinterpret_cast<Object**>(object->address())[0] = | 2654 reinterpret_cast<Object**>(object->address())[0] = |
2684 heap()->fixed_array_map(); | 2655 heap()->fixed_array_map(); |
2685 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); | 2656 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); |
| 2657 #endif |
2686 | 2658 |
2687 heap()->incremental_marking()->OldSpaceStep(object_size); | 2659 heap()->incremental_marking()->OldSpaceStep(object_size); |
2688 return object; | 2660 return object; |
2689 } | 2661 } |
2690 | 2662 |
2691 | 2663 |
2692 // GC support | 2664 // GC support |
2693 MaybeObject* LargeObjectSpace::FindObject(Address a) { | 2665 MaybeObject* LargeObjectSpace::FindObject(Address a) { |
2694 LargePage* page = FindPage(a); | 2666 LargePage* page = FindPage(a); |
2695 if (page != NULL) { | 2667 if (page != NULL) { |
(...skipping 190 matching lines...)
2886 object->ShortPrint(); | 2858 object->ShortPrint(); |
2887 PrintF("\n"); | 2859 PrintF("\n"); |
2888 } | 2860 } |
2889 printf(" --------------------------------------\n"); | 2861 printf(" --------------------------------------\n"); |
2890 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 2862 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
2891 } | 2863 } |
2892 | 2864 |
2893 #endif // DEBUG | 2865 #endif // DEBUG |
2894 | 2866 |
2895 } } // namespace v8::internal | 2867 } } // namespace v8::internal |