OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1971 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1982 if (marking_deque->IsFull()) return; | 1982 if (marking_deque->IsFull()) return; |
1983 offset += 2; | 1983 offset += 2; |
1984 grey_objects >>= 2; | 1984 grey_objects >>= 2; |
1985 } | 1985 } |
1986 | 1986 |
1987 grey_objects >>= (Bitmap::kBitsPerCell - 1); | 1987 grey_objects >>= (Bitmap::kBitsPerCell - 1); |
1988 } | 1988 } |
1989 } | 1989 } |
1990 | 1990 |
1991 | 1991 |
// Walks the mark bitmap of a single new-space page and relocates every
// marked (live) object: first trying to promote it to old space via
// TryPromoteObject, and otherwise evacuating it into the to-space
// semispace with MigrateObject. Clears each processed bitmap cell as a
// side effect and returns the total byte size of surviving objects on
// the page. Caller is EvacuateNewSpace, which sums the result into the
// young-survivors counter.
int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
    NewSpace* new_space,
    NewSpacePage* p) {
  // The bit manipulation below relies on this exact encoding of the
  // marking states in adjacent bit pairs.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);

  MarkBit::CellType* cells = p->markbits()->cells();
  int survivors_size = 0;

  // Index of the bitmap cell just past the page's usable area.
  int last_cell_index =
      Bitmap::IndexToCell(
          Bitmap::CellAlignIndex(
              p->AddressToMarkbitIndex(p->area_end())));

  Address cell_base = p->area_start();
  int cell_index = Bitmap::IndexToCell(
      Bitmap::CellAlignIndex(
          p->AddressToMarkbitIndex(cell_base)));

  // Each bitmap cell covers 32 mark bits, one per pointer-sized slot,
  // hence cell_base advances by 32 * kPointerSize per cell.
  for (;
       cell_index < last_cell_index;
       cell_index++, cell_base += 32 * kPointerSize) {
    // cell_index and cell_base must stay in lockstep.
    ASSERT(static_cast<unsigned>(cell_index) ==
           Bitmap::IndexToCell(
               Bitmap::CellAlignIndex(
                   p->AddressToMarkbitIndex(cell_base))));

    MarkBit::CellType current_cell = cells[cell_index];
    if (current_cell == 0) continue;  // No marked objects in this cell.

    // Scan set bits from least significant upward; `offset` tracks how
    // many bit positions of the cell have been consumed so far.
    int offset = 0;
    while (current_cell != 0) {
      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
      current_cell >>= trailing_zeros;
      offset += trailing_zeros;
      // The set bit marks the start address of a live object.
      Address address = cell_base + offset * kPointerSize;
      HeapObject* object = HeapObject::FromAddress(address);

      int size = object->Size();
      survivors_size += size;

      // Consume this object's start bit before relocating it.
      offset++;
      current_cell >>= 1;
      // Aggressively promote young survivors to the old space.
      if (TryPromoteObject(object, size)) {
        continue;
      }

      // Promotion failed. Just migrate object to another semispace.
      MaybeObject* allocation = new_space->AllocateRaw(size);
      if (allocation->IsFailure()) {
        if (!new_space->AddFreshPage()) {
          // Shouldn't happen. We are sweeping linearly, and to-space
          // has the same number of pages as from-space, so there is
          // always room.
          UNREACHABLE();
        }
        allocation = new_space->AllocateRaw(size);
        ASSERT(!allocation->IsFailure());
      }
      Object* target = allocation->ToObjectUnchecked();

      MigrateObject(HeapObject::cast(target)->address(),
                    object->address(),
                    size,
                    NEW_SPACE);
    }
    // All live objects in this cell have been evacuated; clear its marks.
    // (Live-bytes counters are not decremented here — presumably the whole
    // from-space page is discarded afterwards; confirm against caller.)
    cells[cell_index] = 0;
  }
  return survivors_size;
}
| 2065 |
| 2066 |
1992 static void DiscoverGreyObjectsInSpace(Heap* heap, | 2067 static void DiscoverGreyObjectsInSpace(Heap* heap, |
1993 MarkingDeque* marking_deque, | 2068 MarkingDeque* marking_deque, |
1994 PagedSpace* space) { | 2069 PagedSpace* space) { |
1995 if (!space->was_swept_conservatively()) { | 2070 if (!space->was_swept_conservatively()) { |
1996 HeapObjectIterator it(space); | 2071 HeapObjectIterator it(space); |
1997 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it); | 2072 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it); |
1998 } else { | 2073 } else { |
1999 PageIterator it(space); | 2074 PageIterator it(space); |
2000 while (it.has_next()) { | 2075 while (it.has_next()) { |
2001 Page* p = it.next(); | 2076 Page* p = it.next(); |
(...skipping 886 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2888 // live objects. | 2963 // live objects. |
2889 new_space->Flip(); | 2964 new_space->Flip(); |
2890 new_space->ResetAllocationInfo(); | 2965 new_space->ResetAllocationInfo(); |
2891 | 2966 |
2892 int survivors_size = 0; | 2967 int survivors_size = 0; |
2893 | 2968 |
2894 // First pass: traverse all objects in inactive semispace, remove marks, | 2969 // First pass: traverse all objects in inactive semispace, remove marks, |
2895 // migrate live objects and write forwarding addresses. This stage puts | 2970 // migrate live objects and write forwarding addresses. This stage puts |
2896 // new entries in the store buffer and may cause some pages to be marked | 2971 // new entries in the store buffer and may cause some pages to be marked |
2897 // scan-on-scavenge. | 2972 // scan-on-scavenge. |
2898 SemiSpaceIterator from_it(from_bottom, from_top); | 2973 NewSpacePageIterator it(from_bottom, from_top); |
2899 for (HeapObject* object = from_it.Next(); | 2974 while (it.has_next()) { |
2900 object != NULL; | 2975 NewSpacePage* p = it.next(); |
2901 object = from_it.Next()) { | 2976 survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p); |
2902 MarkBit mark_bit = Marking::MarkBitFrom(object); | |
2903 if (mark_bit.Get()) { | |
2904 mark_bit.Clear(); | |
2905 // Don't bother decrementing live bytes count. We'll discard the | |
2906 // entire page at the end. | |
2907 int size = object->Size(); | |
2908 survivors_size += size; | |
2909 | |
2910 // Aggressively promote young survivors to the old space. | |
2911 if (TryPromoteObject(object, size)) { | |
2912 continue; | |
2913 } | |
2914 | |
2915 // Promotion failed. Just migrate object to another semispace. | |
2916 MaybeObject* allocation = new_space->AllocateRaw(size); | |
2917 if (allocation->IsFailure()) { | |
2918 if (!new_space->AddFreshPage()) { | |
2919 // Shouldn't happen. We are sweeping linearly, and to-space | |
2920 // has the same number of pages as from-space, so there is | |
2921 // always room. | |
2922 UNREACHABLE(); | |
2923 } | |
2924 allocation = new_space->AllocateRaw(size); | |
2925 ASSERT(!allocation->IsFailure()); | |
2926 } | |
2927 Object* target = allocation->ToObjectUnchecked(); | |
2928 | |
2929 MigrateObject(HeapObject::cast(target)->address(), | |
2930 object->address(), | |
2931 size, | |
2932 NEW_SPACE); | |
2933 } | |
2934 } | 2977 } |
2935 | 2978 |
2936 heap_->IncrementYoungSurvivorsCounter(survivors_size); | 2979 heap_->IncrementYoungSurvivorsCounter(survivors_size); |
2937 new_space->set_age_mark(new_space->top()); | 2980 new_space->set_age_mark(new_space->top()); |
2938 } | 2981 } |
2939 | 2982 |
2940 | 2983 |
2941 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { | 2984 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { |
2942 AlwaysAllocateScope always_allocate; | 2985 AlwaysAllocateScope always_allocate; |
2943 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 2986 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
(...skipping 1385 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4329 while (buffer != NULL) { | 4372 while (buffer != NULL) { |
4330 SlotsBuffer* next_buffer = buffer->next(); | 4373 SlotsBuffer* next_buffer = buffer->next(); |
4331 DeallocateBuffer(buffer); | 4374 DeallocateBuffer(buffer); |
4332 buffer = next_buffer; | 4375 buffer = next_buffer; |
4333 } | 4376 } |
4334 *buffer_address = NULL; | 4377 *buffer_address = NULL; |
4335 } | 4378 } |
4336 | 4379 |
4337 | 4380 |
4338 } } // namespace v8::internal | 4381 } } // namespace v8::internal |
OLD | NEW |