Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 1971 matching lines...) | |
| 1982 if (marking_deque->IsFull()) return; | 1982 if (marking_deque->IsFull()) return; |
| 1983 offset += 2; | 1983 offset += 2; |
| 1984 grey_objects >>= 2; | 1984 grey_objects >>= 2; |
| 1985 } | 1985 } |
| 1986 | 1986 |
| 1987 grey_objects >>= (Bitmap::kBitsPerCell - 1); | 1987 grey_objects >>= (Bitmap::kBitsPerCell - 1); |
| 1988 } | 1988 } |
| 1989 } | 1989 } |
| 1990 | 1990 |
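
Context for the loop above: the marking bitmap stores two mark bits per pointer-sized word — white = 00, black = 10, grey = 11, with 01 impossible (the ASSERTs in the next hunk spell these patterns out) — which is why the scan advances `offset` and shifts `grey_objects` by two at a time. A minimal standalone sketch of that pairwise scan, using hypothetical cell contents rather than V8's real Bitmap types:

```cpp
#include <cstdint>
#include <cstdio>

// Hypothetical 32-bit mark cell: bits (2w, 2w+1) describe word w of the
// page, with 00 = white, 10 = black (mark bit only), 11 = grey.
int main() {
  uint32_t cell = 0;
  auto set_grey  = [&](int w) { cell |= 3u << (2 * w); };  // "11"
  auto set_black = [&](int w) { cell |= 1u << (2 * w); };  // "10"
  set_grey(0); set_black(1); set_grey(5);

  // A pair is grey iff both of its bits are set, so AND the cell with
  // itself shifted right by one and walk the result two bits at a time.
  uint32_t grey_objects = cell & (cell >> 1);
  for (int offset = 0; grey_objects != 0; offset += 2, grey_objects >>= 2) {
    if (grey_objects & 1) std::printf("grey object at word %d\n", offset / 2);
  }
  return 0;
}
```

The real loop also returns early when the marking deque fills up (`if (marking_deque->IsFull()) return;`); the sketch omits that.
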
| 1991 | 1991 |
| 1992 int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage( | |
| 1993 NewSpace* new_space, | |
| 1994 NewSpacePage* p) { | |
|
titzer (2013/07/11 10:05:33):
Please add a TODO here and in the other places whi…

Hannes Payer (out of office) (2013/07/11 11:14:17):
I am not adding the todo, I am building the iterat…
| 1995 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); | |
| 1996 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | |
| 1997 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); | |
| 1998 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); | |
| 1999 | |
| 2000 MarkBit::CellType* cells = p->markbits()->cells(); | |
| 2001 int survivors_size = 0; | |
| 2002 | |
| 2003 int last_cell_index = | |
| 2004 Bitmap::IndexToCell( | |
| 2005 Bitmap::CellAlignIndex( | |
| 2006 p->AddressToMarkbitIndex(p->area_end()))); | |
| 2007 | |
| 2008 Address cell_base = p->area_start(); | |
| 2009 int cell_index = Bitmap::IndexToCell( | |
| 2010 Bitmap::CellAlignIndex( | |
| 2011 p->AddressToMarkbitIndex(cell_base))); | |
| 2012 | |
| 2013 for (; | |
| 2014 cell_index < last_cell_index; | |
| 2015 cell_index++, cell_base += 32 * kPointerSize) { | |
| 2016 ASSERT(static_cast<unsigned>(cell_index) == | |
| 2017 Bitmap::IndexToCell( | |
| 2018 Bitmap::CellAlignIndex( | |
| 2019 p->AddressToMarkbitIndex(cell_base)))); | |
| 2020 | |
| 2021 const MarkBit::CellType current_cell = cells[cell_index]; | |
| 2022 if (current_cell == 0) continue; | |
| 2023 | |
| 2024 for (unsigned int i = CompilerIntrinsics::CountTrailingZeros(current_cell); | |
|
Hannes Payer (out of office) (2013/07/11 11:14:17):
I changed that part, could you have another look?
| 2025 i < Bitmap::kBitsPerCell; | |
| 2026 i++) { | |
| 2027 MarkBit markbit(&cells[cell_index], 1 << i, false); | |
| 2028 if (markbit.Get()) { | |
| 2029 Address address = cell_base + i * kPointerSize; | |
| 2030 HeapObject* object = HeapObject::FromAddress(address); | |
| 2031 | |
| 2032 int size = object->Size(); | |
| 2033 survivors_size += size; | |
| 2034 | |
| 2035 // Aggressively promote young survivors to the old space. | |
| 2036 if (TryPromoteObject(object, size)) { | |
| 2037 continue; | |
| 2038 } | |
| 2039 | |
| 2040 // Promotion failed. Just migrate object to another semispace. | |
| 2041 MaybeObject* allocation = new_space->AllocateRaw(size); | |
| 2042 if (allocation->IsFailure()) { | |
| 2043 if (!new_space->AddFreshPage()) { | |
| 2044 // Shouldn't happen. We are sweeping linearly, and to-space | |
| 2045 // has the same number of pages as from-space, so there is | |
| 2046 // always room. | |
| 2047 UNREACHABLE(); | |
| 2048 } | |
| 2049 allocation = new_space->AllocateRaw(size); | |
| 2050 ASSERT(!allocation->IsFailure()); | |
| 2051 } | |
| 2052 Object* target = allocation->ToObjectUnchecked(); | |
| 2053 | |
| 2054 MigrateObject(HeapObject::cast(target)->address(), | |
| 2055 object->address(), | |
| 2056 size, | |
| 2057 NEW_SPACE); | |
| 2058 } | |
| 2059 } | |
| 2060 cells[cell_index] = 0; | |
| 2061 } | |
| 2062 return survivors_size; | |
| 2063 } | |
| 2064 | |
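
The inner loop of the new function above jumps to the first marked word with CompilerIntrinsics::CountTrailingZeros, walks the remaining bits of the cell linearly, and finally clears the whole cell once every live object on it has been processed. A minimal sketch of that find-first-set scan, using the GCC/Clang __builtin_ctz intrinsic as a stand-in, with hypothetical addresses and cell contents:

```cpp
#include <cstdint>
#include <cstdio>

// Walk every set bit of a 32-bit mark cell, starting at the first set bit.
// Bit i stands for the word at cell_base + i * kPointerSize.
int main() {
  const int kPointerSize = 8;              // assumed 64-bit target
  const uintptr_t cell_base = 0x10000;     // hypothetical cell base address
  uint32_t cell = (1u << 3) | (1u << 17);  // two live objects on this cell

  for (unsigned i = (cell != 0) ? __builtin_ctz(cell) : 32; i < 32; i++) {
    if (cell & (1u << i)) {
      uintptr_t address = cell_base + i * kPointerSize;
      std::printf("live object at %#lx\n",
                  static_cast<unsigned long>(address));
    }
  }
  cell = 0;  // all marked objects were visited, so clear the cell in one go
  return 0;
}
```

Note that the real code re-checks markbit.Get() for every bit after the initial CountTrailingZeros rather than recomputing trailing zeros on each hit; the sketch mirrors that linear walk.
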
| 2065 | |
| 1992 static void DiscoverGreyObjectsInSpace(Heap* heap, | 2066 static void DiscoverGreyObjectsInSpace(Heap* heap, |
| 1993 MarkingDeque* marking_deque, | 2067 MarkingDeque* marking_deque, |
| 1994 PagedSpace* space) { | 2068 PagedSpace* space) { |
| 1995 if (!space->was_swept_conservatively()) { | 2069 if (!space->was_swept_conservatively()) { |
| 1996 HeapObjectIterator it(space); | 2070 HeapObjectIterator it(space); |
| 1997 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it); | 2071 DiscoverGreyObjectsWithIterator(heap, marking_deque, &it); |
| 1998 } else { | 2072 } else { |
| 1999 PageIterator it(space); | 2073 PageIterator it(space); |
| 2000 while (it.has_next()) { | 2074 while (it.has_next()) { |
| 2001 Page* p = it.next(); | 2075 Page* p = it.next(); |
| (...skipping 886 matching lines...) | |
| 2888 // live objects. | 2962 // live objects. |
| 2889 new_space->Flip(); | 2963 new_space->Flip(); |
| 2890 new_space->ResetAllocationInfo(); | 2964 new_space->ResetAllocationInfo(); |
| 2891 | 2965 |
| 2892 int survivors_size = 0; | 2966 int survivors_size = 0; |
| 2893 | 2967 |
| 2894 // First pass: traverse all objects in inactive semispace, remove marks, | 2968 // First pass: traverse all objects in inactive semispace, remove marks, |
| 2895 // migrate live objects and write forwarding addresses. This stage puts | 2969 // migrate live objects and write forwarding addresses. This stage puts |
| 2896 // new entries in the store buffer and may cause some pages to be marked | 2970 // new entries in the store buffer and may cause some pages to be marked |
| 2897 // scan-on-scavenge. | 2971 // scan-on-scavenge. |
| 2898 SemiSpaceIterator from_it(from_bottom, from_top); | 2972 NewSpacePageIterator it(from_bottom, from_top); |
| 2899 for (HeapObject* object = from_it.Next(); | 2973 while (it.has_next()) { |
| 2900 object != NULL; | 2974 NewSpacePage* p = it.next(); |
| 2901 object = from_it.Next()) { | 2975 survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p); |
| 2902 MarkBit mark_bit = Marking::MarkBitFrom(object); | |
| 2903 if (mark_bit.Get()) { | |
| 2904 mark_bit.Clear(); | |
| 2905 // Don't bother decrementing live bytes count. We'll discard the | |
| 2906 // entire page at the end. | |
| 2907 int size = object->Size(); | |
| 2908 survivors_size += size; | |
| 2909 | |
| 2910 // Aggressively promote young survivors to the old space. | |
| 2911 if (TryPromoteObject(object, size)) { | |
| 2912 continue; | |
| 2913 } | |
| 2914 | |
| 2915 // Promotion failed. Just migrate object to another semispace. | |
| 2916 MaybeObject* allocation = new_space->AllocateRaw(size); | |
| 2917 if (allocation->IsFailure()) { | |
| 2918 if (!new_space->AddFreshPage()) { | |
| 2919 // Shouldn't happen. We are sweeping linearly, and to-space | |
| 2920 // has the same number of pages as from-space, so there is | |
| 2921 // always room. | |
| 2922 UNREACHABLE(); | |
| 2923 } | |
| 2924 allocation = new_space->AllocateRaw(size); | |
| 2925 ASSERT(!allocation->IsFailure()); | |
| 2926 } | |
| 2927 Object* target = allocation->ToObjectUnchecked(); | |
| 2928 | |
| 2929 MigrateObject(HeapObject::cast(target)->address(), | |
| 2930 object->address(), | |
| 2931 size, | |
| 2932 NEW_SPACE); | |
| 2933 } | |
| 2934 } | 2976 } |
| 2935 | 2977 |
| 2936 heap_->IncrementYoungSurvivorsCounter(survivors_size); | 2978 heap_->IncrementYoungSurvivorsCounter(survivors_size); |
| 2937 new_space->set_age_mark(new_space->top()); | 2979 new_space->set_age_mark(new_space->top()); |
| 2938 } | 2980 } |
| 2939 | 2981 |
| 2940 | 2982 |
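
A design note on the hunk above: the old per-object loop cleared each mark bit individually (mark_bit.Clear() after mark_bit.Get()), while the new per-page helper visits all set bits of a cell and then zeroes the whole cell with cells[cell_index] = 0. A rough sketch of the two clearing strategies, over a hypothetical 32-bit cell rather than V8's real types:

```cpp
#include <cstdint>

// Old strategy: clear each object's mark bit as that object is processed.
void ClearPerObject(uint32_t* cell) {
  for (unsigned i = 0; i < 32; i++) {
    if (*cell & (1u << i)) {
      // ... promote or migrate the object at bit i ...
      *cell &= ~(1u << i);  // one read-modify-write per live object
    }
  }
}

// New strategy: visit all set bits first, then zero the cell in one store.
void ClearPerCell(uint32_t* cell) {
  for (unsigned i = 0; i < 32; i++) {
    if (*cell & (1u << i)) {
      // ... promote or migrate the object at bit i ...
    }
  }
  *cell = 0;  // single store once the whole cell has been scanned
}
```

The per-cell strategy resets the bitmap with one store per cell instead of one read-modify-write per surviving object, which is safe here because — as the old code's own comment notes — the from-space page is discarded wholesale after evacuation.
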
| 2941 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { | 2983 void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) { |
| 2942 AlwaysAllocateScope always_allocate; | 2984 AlwaysAllocateScope always_allocate; |
| 2943 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 2985 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| (...skipping 1385 matching lines...) | |
| 4329 while (buffer != NULL) { | 4371 while (buffer != NULL) { |
| 4330 SlotsBuffer* next_buffer = buffer->next(); | 4372 SlotsBuffer* next_buffer = buffer->next(); |
| 4331 DeallocateBuffer(buffer); | 4373 DeallocateBuffer(buffer); |
| 4332 buffer = next_buffer; | 4374 buffer = next_buffer; |
| 4333 } | 4375 } |
| 4334 *buffer_address = NULL; | 4376 *buffer_address = NULL; |
| 4335 } | 4377 } |
| 4336 | 4378 |
| 4337 | 4379 |
| 4338 } } // namespace v8::internal | 4380 } } // namespace v8::internal |