OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 48 matching lines...)
59 return MemoryAllocator::GetNextPage(this); | 59 return MemoryAllocator::GetNextPage(this); |
60 } | 60 } |
61 | 61 |
62 | 62 |
63 Address Page::AllocationTop() { | 63 Address Page::AllocationTop() { |
64 PagedSpace* owner = MemoryAllocator::PageOwner(this); | 64 PagedSpace* owner = MemoryAllocator::PageOwner(this); |
65 return owner->PageAllocationTop(this); | 65 return owner->PageAllocationTop(this); |
66 } | 66 } |
67 | 67 |
68 | 68 |
69 void Page::ClearRSet() { | 69 Address Page::AllocationWatermark() { |
70 // This method can be called in all rset states. | 70 PagedSpace* owner = MemoryAllocator::PageOwner(this); |
71 memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset); | 71 if (this == owner->AllocationTopPage()) { |
| 72 return owner->top(); |
| 73 } |
| 74 return address() + AllocationWatermarkOffset(); |
72 } | 75 } |
73 | 76 |
74 | 77 |
75 // Given a 32-bit address, separate its bits into: | 78 uint32_t Page::AllocationWatermarkOffset() { |
76 // | page address | words (6) | bit offset (5) | pointer alignment (2) | | 79 return (flags_ & kAllocationWatermarkOffsetMask) >> |
77 // The address of the rset word containing the bit for this word is computed as: | 80 kAllocationWatermarkOffsetShift; |
78 // page_address + words * 4 | |
79 // For a 64-bit address, if it is: | |
80 // | page address | words(5) | bit offset(5) | pointer alignment (3) | | |
81 // The address of the rset word containing the bit for this word is computed as: | |
82 // page_address + words * 4 + kRSetOffset. | |
83 // The rset is accessed as 32-bit words, and bit offsets in a 32-bit word, | |
84 // even on the X64 architecture. | |
85 | |
86 Address Page::ComputeRSetBitPosition(Address address, int offset, | |
87 uint32_t* bitmask) { | |
88 ASSERT(Page::is_rset_in_use()); | |
89 | |
90 Page* page = Page::FromAddress(address); | |
91 uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset, | |
92 kPointerSizeLog2); | |
93 *bitmask = 1 << (bit_offset % kBitsPerInt); | |
94 | |
95 Address rset_address = | |
96 page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize; | |
97 // The remembered set address is either in the normal remembered set range | |
98 // of a page or else we have a large object page. | |
99 ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd()) | |
100 || page->IsLargeObjectPage()); | |
101 | |
102 if (rset_address >= page->RSetEnd()) { | |
103 // We have a large object page, and the remembered set address is actually | |
104 // past the end of the object. | |
105 | |
106 // The first part of the remembered set is still located at the start of | |
107 // the page, but anything after kRSetEndOffset must be relocated to after | |
108 // the large object, i.e. after | |
109 // (page->ObjectAreaStart() + object size) | |
110 // We do that by adding the difference between the normal RSet's end and | |
111 // the object's end. | |
112 ASSERT(HeapObject::FromAddress(address)->IsFixedArray()); | |
113 int fixedarray_length = | |
114 FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart() | |
115 + Array::kLengthOffset)); | |
116 rset_address += kObjectStartOffset - kRSetEndOffset + fixedarray_length; | |
117 } | |
118 return rset_address; | |
119 } | 81 } |
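
On the new side, the allocation watermark offset is packed into the upper bits of the page's flags_ word, next to the flag bits, so a single 32-bit field serves double duty. Below is a minimal sketch of that packing scheme; the real kFlagsMask, kAllocationWatermarkOffsetMask and kAllocationWatermarkOffsetShift are declared elsewhere in spaces.h and are not shown in this hunk, so the constants here are illustrative stand-ins (a hypothetical 3-bit flag area):

    #include <cassert>
    #include <cstdint>

    // Stand-in constants; the real values live in spaces.h.
    const uint32_t kDemoFlagBits = 3;
    const uint32_t kDemoFlagsMask = (1u << kDemoFlagBits) - 1;
    const uint32_t kDemoWatermarkShift = kDemoFlagBits;

    struct DemoPage {
      uint32_t flags_ = 0;

      uint32_t AllocationWatermarkOffset() const {
        // The upper bits hold the offset; shift them back down.
        return flags_ >> kDemoWatermarkShift;
      }

      void SetAllocationWatermarkOffset(uint32_t offset) {
        // Keep the flag bits, overwrite the offset bits.
        flags_ = (flags_ & kDemoFlagsMask) | (offset << kDemoWatermarkShift);
      }
    };

    int main() {
      DemoPage p;
      p.flags_ = 0x5;                        // two flags set
      p.SetAllocationWatermarkOffset(4096);  // page-relative watermark offset
      assert(p.AllocationWatermarkOffset() == 4096);
      assert((p.flags_ & kDemoFlagsMask) == 0x5);  // flags survive the update
      return 0;
    }
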
120 | 82 |
121 | 83 |
122 void Page::SetRSet(Address address, int offset) { | 84 void Page::SetAllocationWatermark(Address allocation_watermark) { |
123 uint32_t bitmask = 0; | 85 if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) { |
124 Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask); | 86 // When iterating over intergenerational references during a scavenge |
125 Memory::uint32_at(rset_address) |= bitmask; | 87 // we might decide to promote an encountered young object. |
| 88 // We will allocate space for such an object and put it |
| 89 // into the promotion queue to process it later. |
| 90 // If space for the object was allocated somewhere beyond the allocation |
| 91 // watermark, this might cause garbage pointers to appear below the |
| 92 // allocation watermark. To avoid visiting them during the dirty-region |
| 93 // iteration, which might still be in progress, we store a valid allocation |
| 94 // watermark value and mark this page as having an invalid watermark. |
| 95 SetCachedAllocationWatermark(AllocationWatermark()); |
| 96 InvalidateWatermark(true); |
| 97 } |
126 | 98 |
127 ASSERT(IsRSetSet(address, offset)); | 99 flags_ = (flags_ & kFlagsMask) | |
| 100 Offset(allocation_watermark) << kAllocationWatermarkOffsetShift; |
| 101 ASSERT(AllocationWatermarkOffset() |
| 102 == static_cast<uint32_t>(Offset(allocation_watermark))); |
128 } | 103 } |
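
The scavenge branch above implements a small protocol: before the watermark of a still-valid page moves during a scavenge, the old (safe) value is cached and the page is marked invalid, so the dirty-region iteration that may still be in progress never walks past memory that a promotion might have filled with garbage pointers. A simplified model of that protocol, with the flag machinery reduced to a plain bool (all names here are illustrative, not V8's):

    #include <cassert>
    #include <cstdint>

    struct DemoPage {
      uintptr_t watermark = 0;         // current allocation watermark
      uintptr_t cached_watermark = 0;  // last value known to be safe
      bool watermark_valid = true;     // stands in for the flag comparison

      void SetWatermark(uintptr_t new_watermark, bool in_scavenge) {
        if (in_scavenge && watermark_valid) {
          // A promotion during scavenge may allocate beyond the watermark;
          // remember the last safe value before moving it.
          cached_watermark = watermark;
          watermark_valid = false;
        }
        watermark = new_watermark;
      }

      // Dirty-region iteration stops at the safe limit.
      uintptr_t SafeLimit() const {
        return watermark_valid ? watermark : cached_watermark;
      }
    };

    int main() {
      DemoPage p;
      p.SetWatermark(0x100, /*in_scavenge=*/false);
      assert(p.SafeLimit() == 0x100);
      p.SetWatermark(0x300, /*in_scavenge=*/true);  // promotion past the top
      assert(p.SafeLimit() == 0x100);  // iteration ignores the unsafe tail
      return 0;
    }
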
129 | 104 |
130 | 105 |
131 // Clears the corresponding remembered set bit for a given address. | 106 void Page::SetCachedAllocationWatermark(Address allocation_watermark) { |
132 void Page::UnsetRSet(Address address, int offset) { | 107 mc_first_forwarded = allocation_watermark; |
133 uint32_t bitmask = 0; | |
134 Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask); | |
135 Memory::uint32_at(rset_address) &= ~bitmask; | |
136 | |
137 ASSERT(!IsRSetSet(address, offset)); | |
138 } | 108 } |
139 | 109 |
140 | 110 |
141 bool Page::IsRSetSet(Address address, int offset) { | 111 Address Page::CachedAllocationWatermark() { |
| 112 return mc_first_forwarded; |
| 113 } |
| 114 |
| 115 |
| 116 uint32_t Page::GetRegionMarks() { |
| 117 return dirty_regions_; |
| 118 } |
| 119 |
| 120 |
| 121 void Page::SetRegionMarks(uint32_t marks) { |
| 122 dirty_regions_ = marks; |
| 123 } |
| 124 |
| 125 |
| 126 int Page::GetRegionNumberForAddress(Address addr) { |
| 127 // Each page is divided into 256-byte regions. Each region has a corresponding |
| 128 // dirty mark bit in the page header. A region can contain intergenerational |
| 129 // references iff its dirty mark is set. |
| 130 // A normal 8K page contains exactly 32 regions, so all region marks fit |
| 131 // into a 32-bit integer field. To calculate a region number we just divide |
| 132 // the offset inside the page by the region size. |
| 133 // A large page can contain more than 32 regions. But we want to avoid |
| 134 // additional write barrier code for distinguishing between large and normal |
| 135 // pages, so we just ignore the fact that addr points into a large page and |
| 136 // calculate the region number as if addr pointed into a normal 8K page. This |
| 137 // way we get a region number modulo 32, so for large pages several regions |
| 138 // might be mapped to a single dirty mark. |
| 139 ASSERT_PAGE_ALIGNED(this->address()); |
| 140 STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt); |
| 141 |
| 142 // We are using masking with kPageAlignmentMask instead of Page::Offset() |
| 143 // to get the offset from the beginning of the 8K page containing addr, not |
| 144 // from the beginning of the actual page, which can be bigger than 8K. |
| 145 return (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2; |
| 146 } |
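
With the constants the comment describes (8K pages, 256-byte regions, 32 mark bits), the computation reduces to a mask and a shift. A self-contained sketch with those constants written out; the names mirror V8's but are redefined here for the example:

    #include <cassert>
    #include <cstdint>

    const int kRegionSizeLog2 = 8;                       // 256-byte regions
    const uintptr_t kPageSize = 8 * 1024;                // normal page size
    const uintptr_t kPageAlignmentMask = kPageSize - 1;  // 0x1FFF

    int RegionNumber(uintptr_t addr) {
      // Offset within the enclosing 8K-aligned chunk, divided by region size.
      return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
    }

    int main() {
      uintptr_t page = 0x40000;  // some 8K-aligned page address
      assert(RegionNumber(page) == 0);                   // first region
      assert(RegionNumber(page + 0xFF) == 0);            // still region 0
      assert(RegionNumber(page + 0x100) == 1);           // next region
      assert(RegionNumber(page + kPageSize - 1) == 31);  // last of 32 regions
      // On a large page the number simply wraps modulo 32:
      assert(RegionNumber(page + kPageSize + 0x100) == 1);
      return 0;
    }
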
| 147 |
| 148 |
| 149 uint32_t Page::GetRegionMaskForAddress(Address addr) { |
| 150 return 1 << GetRegionNumberForAddress(addr); |
| 151 } |
| 152 |
| 153 |
| 154 void Page::MarkRegionDirty(Address address) { |
| 155 SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address)); |
| 156 } |
| 157 |
| 158 |
| 159 bool Page::IsRegionDirty(Address address) { |
| 160 return GetRegionMarks() & GetRegionMaskForAddress(address); |
| 161 } |
| 162 |
| 163 |
| 164 void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) { |
| 165 int rstart = GetRegionNumberForAddress(start); |
| 166 int rend = GetRegionNumberForAddress(end); |
| 167 |
| 168 if (reaches_limit) { |
| 169 end += 1; |
| 170 } |
| 171 |
| 172 if ((rend - rstart) == 0) { |
| 173 return; |
| 174 } |
| 175 |
142 uint32_t bitmask = 0; | 176 uint32_t bitmask = 0; |
143 Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask); | 177 |
144 return (Memory::uint32_at(rset_address) & bitmask) != 0; | 178 if ((OffsetFrom(start) & kRegionAlignmentMask) == 0 |
| 179 || (start == ObjectAreaStart())) { |
| 180 // The first region is fully covered. |
| 181 bitmask = 1 << rstart; |
| 182 } |
| 183 |
| 184 while (++rstart < rend) { |
| 185 bitmask |= 1 << rstart; |
| 186 } |
| 187 |
| 188 if (bitmask) { |
| 189 SetRegionMarks(GetRegionMarks() & ~bitmask); |
| 190 } |
| 191 } |
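
The mask built above clears a region's bit only when the region lies fully inside [start, end): the first region is included only when start is region-aligned (or sits at the object area start), and the loop then adds every region strictly between rstart and rend. (Note that the end += 1 adjustment only changes the local copy of end, after rend has already been computed.) A rough standalone mirror of the mask construction, under the same 8K-page and 256-byte-region assumptions as the previous sketch and without the ObjectAreaStart() special case:

    #include <cassert>
    #include <cstdint>

    const int kRegionSizeLog2 = 8;
    const uintptr_t kRegionAlignmentMask = (1u << kRegionSizeLog2) - 1;
    const uintptr_t kPageAlignmentMask = 8 * 1024 - 1;

    int RegionNumber(uintptr_t addr) {
      return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
    }

    // Mask of region bits fully covered by [start, end).
    uint32_t ClearMask(uintptr_t start, uintptr_t end) {
      int rstart = RegionNumber(start);
      int rend = RegionNumber(end);
      uint32_t bitmask = 0;
      if ((start & kRegionAlignmentMask) == 0) bitmask = 1u << rstart;
      while (++rstart < rend) bitmask |= 1u << rstart;
      return bitmask;
    }

    int main() {
      uintptr_t page = 0x40000;
      // Regions 1 and 2 are fully covered; region 3 only partially.
      assert(ClearMask(page + 0x100, page + 0x350) == 0b0110);
      // Unaligned start: region 1 is only partially covered, so it is kept.
      assert(ClearMask(page + 0x180, page + 0x350) == 0b0100);
      return 0;
    }
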
| 192 |
| 193 |
| 194 void Page::FlipMeaningOfInvalidatedWatermarkFlag() { |
| 195 watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED; |
| 196 } |
| 197 |
| 198 |
| 199 bool Page::IsWatermarkValid() { |
| 200 return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_; |
| 201 } |
| 202 |
| 203 |
| 204 void Page::InvalidateWatermark(bool value) { |
| 205 if (value) { |
| 206 flags_ = (flags_ & ~WATERMARK_INVALIDATED) | watermark_invalidated_mark_; |
| 207 } else { |
| 208 flags_ = (flags_ & ~WATERMARK_INVALIDATED) | |
| 209 (watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED); |
| 210 } |
| 211 |
| 212 ASSERT(IsWatermarkValid() == !value); |
145 } | 213 } |
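
IsWatermarkValid compares the page's WATERMARK_INVALIDATED bit against a process-wide expected value instead of treating it as a plain boolean flag. That is what makes FlipMeaningOfInvalidatedWatermarkFlag useful: toggling the single global bit invalidates (or revalidates) the watermarks of every page at once, without touching any page header. A small model of the trick, assuming an arbitrary bit position for the flag:

    #include <cassert>
    #include <cstdint>

    const uint32_t WATERMARK_INVALIDATED = 1u << 3;  // assumed bit position
    uint32_t watermark_invalidated_mark = 0;         // global expected value

    struct DemoPage {
      uint32_t flags_ = 0;
      // Valid iff the page's bit DIFFERS from the global mark.
      bool IsWatermarkValid() const {
        return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark;
      }
    };

    void FlipMeaning() { watermark_invalidated_mark ^= WATERMARK_INVALIDATED; }

    int main() {
      DemoPage a, b;
      a.flags_ = WATERMARK_INVALIDATED;  // differs from the mark: valid
      assert(a.IsWatermarkValid());
      assert(!b.IsWatermarkValid());     // equals the mark: invalid
      FlipMeaning();                     // O(1): every page flips meaning
      assert(!a.IsWatermarkValid());
      assert(b.IsWatermarkValid());
      return 0;
    }
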
146 | 214 |
147 | 215 |
148 bool Page::GetPageFlag(PageFlag flag) { | 216 bool Page::GetPageFlag(PageFlag flag) { |
149 return (flags & flag) != 0; | 217 return (flags_ & flag) != 0; |
150 } | 218 } |
151 | 219 |
152 | 220 |
153 void Page::SetPageFlag(PageFlag flag, bool value) { | 221 void Page::SetPageFlag(PageFlag flag, bool value) { |
154 if (value) { | 222 if (value) { |
155 flags |= flag; | 223 flags_ |= flag; |
156 } else { | 224 } else { |
157 flags &= ~flag; | 225 flags_ &= ~flag; |
158 } | 226 } |
159 } | 227 } |
160 | 228 |
161 | 229 |
| 230 void Page::ClearPageFlags() { |
| 231 flags_ = 0; |
| 232 } |
| 233 |
| 234 |
162 bool Page::WasInUseBeforeMC() { | 235 bool Page::WasInUseBeforeMC() { |
163 return GetPageFlag(WAS_IN_USE_BEFORE_MC); | 236 return GetPageFlag(WAS_IN_USE_BEFORE_MC); |
164 } | 237 } |
165 | 238 |
166 | 239 |
167 void Page::SetWasInUseBeforeMC(bool was_in_use) { | 240 void Page::SetWasInUseBeforeMC(bool was_in_use) { |
168 SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use); | 241 SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use); |
169 } | 242 } |
170 | 243 |
171 | 244 |
(...skipping 164 matching lines...)
336 // Round the chunk address up to the nearest page-aligned address | 409 // Round the chunk address up to the nearest page-aligned address |
337 // and return the heap object in that page. | 410 // and return the heap object in that page. |
338 Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize)); | 411 Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize)); |
339 return HeapObject::FromAddress(page->ObjectAreaStart()); | 412 return HeapObject::FromAddress(page->ObjectAreaStart()); |
340 } | 413 } |
341 | 414 |
342 | 415 |
343 // ----------------------------------------------------------------------------- | 416 // ----------------------------------------------------------------------------- |
344 // LargeObjectSpace | 417 // LargeObjectSpace |
345 | 418 |
346 int LargeObjectSpace::ExtraRSetBytesFor(int object_size) { | |
347 int extra_rset_bits = | |
348 RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize, | |
349 kBitsPerInt); | |
350 return extra_rset_bits / kBitsPerByte; | |
351 } | |
352 | |
353 | |
354 Object* NewSpace::AllocateRawInternal(int size_in_bytes, | 419 Object* NewSpace::AllocateRawInternal(int size_in_bytes, |
355 AllocationInfo* alloc_info) { | 420 AllocationInfo* alloc_info) { |
356 Address new_top = alloc_info->top + size_in_bytes; | 421 Address new_top = alloc_info->top + size_in_bytes; |
357 if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes); | 422 if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes); |
358 | 423 |
359 Object* obj = HeapObject::FromAddress(alloc_info->top); | 424 Object* obj = HeapObject::FromAddress(alloc_info->top); |
360 alloc_info->top = new_top; | 425 alloc_info->top = new_top; |
361 #ifdef DEBUG | 426 #ifdef DEBUG |
362 SemiSpace* space = | 427 SemiSpace* space = |
363 (alloc_info == &allocation_info_) ? &to_space_ : &from_space_; | 428 (alloc_info == &allocation_info_) ? &to_space_ : &from_space_; |
364 ASSERT(space->low() <= alloc_info->top | 429 ASSERT(space->low() <= alloc_info->top |
365 && alloc_info->top <= space->high() | 430 && alloc_info->top <= space->high() |
366 && alloc_info->limit == space->high()); | 431 && alloc_info->limit == space->high()); |
367 #endif | 432 #endif |
368 return obj; | 433 return obj; |
369 } | 434 } |
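
AllocateRawInternal is a classic bump-pointer allocator: advance top by the requested size and fail when the new top would cross the limit, leaving the caller to retry after a GC. A minimal generic sketch of the same pattern, with V8's Failure::RetryAfterGC replaced by a null return:

    #include <cassert>

    struct AllocationInfo {
      char* top;
      char* limit;
    };

    // Bump-pointer allocation: O(1); returns nullptr when the space is
    // exhausted (where V8 instead returns a retry-after-GC failure).
    void* AllocateRaw(AllocationInfo* info, int size_in_bytes) {
      char* new_top = info->top + size_in_bytes;
      if (new_top > info->limit) return nullptr;
      void* result = info->top;
      info->top = new_top;
      return result;
    }

    int main() {
      static char arena[64];
      AllocationInfo info = {arena, arena + sizeof(arena)};
      assert(AllocateRaw(&info, 48) == arena);       // fits; top advances
      assert(AllocateRaw(&info, 32) == nullptr);     // would cross the limit
      assert(AllocateRaw(&info, 16) == arena + 48);  // exactly fills the space
      return 0;
    }
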
370 | 435 |
371 | 436 |
372 bool FreeListNode::IsFreeListNode(HeapObject* object) { | 437 bool FreeListNode::IsFreeListNode(HeapObject* object) { |
373 return object->map() == Heap::raw_unchecked_byte_array_map() | 438 return object->map() == Heap::raw_unchecked_byte_array_map() |
374 || object->map() == Heap::raw_unchecked_one_pointer_filler_map() | 439 || object->map() == Heap::raw_unchecked_one_pointer_filler_map() |
375 || object->map() == Heap::raw_unchecked_two_pointer_filler_map(); | 440 || object->map() == Heap::raw_unchecked_two_pointer_filler_map(); |
376 } | 441 } |
377 | 442 |
378 } } // namespace v8::internal | 443 } } // namespace v8::internal |
379 | 444 |
380 #endif // V8_SPACES_INL_H_ | 445 #endif // V8_SPACES_INL_H_ |