Chromium Code Reviews

Side by Side Diff: src/spaces-inl.h

Issue 2144006: Cardmarking writebarrier. (Closed)
Patch Set: change NewSpace and SemiSpace Contains to match HasHeapObjectTag (created 10 years, 7 months ago)
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 48 matching lines...)
59 return MemoryAllocator::GetNextPage(this); 59 return MemoryAllocator::GetNextPage(this);
60 } 60 }
61 61
62 62
63 Address Page::AllocationTop() { 63 Address Page::AllocationTop() {
64 PagedSpace* owner = MemoryAllocator::PageOwner(this); 64 PagedSpace* owner = MemoryAllocator::PageOwner(this);
65 return owner->PageAllocationTop(this); 65 return owner->PageAllocationTop(this);
66 } 66 }
67 67
68 68
69 void Page::ClearRSet() { 69 Address Page::AllocationWatermark() {
70 // This method can be called in all rset states. 70 PagedSpace* owner = MemoryAllocator::PageOwner(this);
71 memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset); 71 if (this == owner->AllocationTopPage()) {
72 return owner->top();
73 }
74 return address() + AllocationWatermarkOffset();
72 } 75 }
73 76
74 77
75 // Given a 32-bit address, separate its bits into: 78 uint32_t Page::AllocationWatermarkOffset() {
76 // | page address | words (6) | bit offset (5) | pointer alignment (2) | 79 return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
77 // The address of the rset word containing the bit for this word is computed as: 80 kAllocationWatermarkOffsetShift);
78 // page_address + words * 4
79 // For a 64-bit address, if it is:
80 // | page address | words(5) | bit offset(5) | pointer alignment (3) |
81 // The address of the rset word containing the bit for this word is computed as:
82 // page_address + words * 4 + kRSetOffset.
83 // The rset is accessed as 32-bit words, and bit offsets in a 32-bit word,
84 // even on the X64 architecture.
85
86 Address Page::ComputeRSetBitPosition(Address address, int offset,
87 uint32_t* bitmask) {
88 ASSERT(Page::is_rset_in_use());
89
90 Page* page = Page::FromAddress(address);
91 uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
92 kPointerSizeLog2);
93 *bitmask = 1 << (bit_offset % kBitsPerInt);
94
95 Address rset_address =
96 page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize;
97 // The remembered set address is either in the normal remembered set range
98 // of a page or else we have a large object page.
99 ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
100 || page->IsLargeObjectPage());
101
102 if (rset_address >= page->RSetEnd()) {
103 // We have a large object page, and the remembered set address is actually
104 // past the end of the object.
105
106 // The first part of the remembered set is still located at the start of
107 // the page, but anything after kRSetEndOffset must be relocated to after
108 // the large object, i.e. after
109 // (page->ObjectAreaStart() + object size)
110 // We do that by adding the difference between the normal RSet's end and
111 // the object's end.
112 ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
113 int fixedarray_length =
114 FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
115 + Array::kLengthOffset));
116 rset_address += kObjectStartOffset - kRSetEndOffset + fixedarray_length;
117 }
118 return rset_address;
119 } 81 }
120 82
121 83
122 void Page::SetRSet(Address address, int offset) { 84 void Page::SetAllocationWatermark(Address allocation_watermark) {
123 uint32_t bitmask = 0; 85 if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
124 Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask); 86 // When iterating intergenerational references during scavenge
125 Memory::uint32_at(rset_address) |= bitmask; 87 // we might decide to promote an encountered young object.
 88 // We will allocate space for such an object and put it
 89 // into the promotion queue to process it later.
 90 // If space for the object was allocated beyond the allocation
 91 // watermark, this might cause garbage pointers to appear under the
 92 // allocation watermark. To avoid visiting them during the dirty-regions
 93 // iteration, which might still be in progress, we store a valid allocation
 94 // watermark value and mark this page as having an invalid watermark.
95 SetCachedAllocationWatermark(AllocationWatermark());
96 InvalidateWatermark(true);
97 }
126 98
127 ASSERT(IsRSetSet(address, offset)); 99 flags_ = (flags_ & kFlagsMask) |
100 Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
101 ASSERT(AllocationWatermarkOffset()
102 == static_cast<uint32_t>(Offset(allocation_watermark)));
128 } 103 }
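A note on the encoding used above: the watermark is not stored in a separate field, its page offset is packed into the high bits of the page's flags word, above the individual flag bits, which is why SetAllocationWatermark masks with kFlagsMask and shifts by kAllocationWatermarkOffsetShift. Below is a minimal, self-contained sketch of that packing; the constants are illustrative assumptions, not the values V8 actually uses.

```cpp
#include <cassert>
#include <cstdint>

// Illustrative layout: low bits hold boolean page flags, the remaining bits
// hold the allocation watermark offset within the page (assumed widths).
constexpr uint32_t kFlagBits = 3;
constexpr uint32_t kFlagsMask = (1u << kFlagBits) - 1;   // 0b111
constexpr uint32_t kWatermarkShift = kFlagBits;

uint32_t SetWatermarkOffset(uint32_t flags, uint32_t offset) {
  // Keep the flag bits, overwrite the offset bits.
  return (flags & kFlagsMask) | (offset << kWatermarkShift);
}

uint32_t GetWatermarkOffset(uint32_t flags) {
  return flags >> kWatermarkShift;
}

int main() {
  uint32_t flags = 0b101;                     // some flag bits set
  flags = SetWatermarkOffset(flags, 4096);    // watermark at offset 4096
  assert(GetWatermarkOffset(flags) == 4096);  // offset round-trips
  assert((flags & kFlagsMask) == 0b101);      // flag bits untouched
  return 0;
}
```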
129 104
130 105
131 // Clears the corresponding remembered set bit for a given address. 106 void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
132 void Page::UnsetRSet(Address address, int offset) { 107 mc_first_forwarded = allocation_watermark;
133 uint32_t bitmask = 0;
134 Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
135 Memory::uint32_at(rset_address) &= ~bitmask;
136
137 ASSERT(!IsRSetSet(address, offset));
138 } 108 }
139 109
140 110
141 bool Page::IsRSetSet(Address address, int offset) { 111 Address Page::CachedAllocationWatermark() {
112 return mc_first_forwarded;
113 }
114
115
116 uint32_t Page::GetRegionMarks() {
117 return dirty_regions_;
118 }
119
120
121 void Page::SetRegionMarks(uint32_t marks) {
122 dirty_regions_ = marks;
123 }
124
125
126 int Page::GetRegionNumberForAddress(Address addr) {
 127 // Each page is divided into 256-byte regions. Each region has a corresponding
 128 // dirty mark bit in the page header. A region can contain intergenerational
 129 // references iff its dirty mark is set.
 130 // A normal 8K page contains exactly 32 regions, so all region marks fit
 131 // into a 32-bit integer field. To calculate a region number we just divide
 132 // the offset inside the page by the region size.
 133 // A large page can contain more than 32 regions. But we want to avoid
 134 // additional write barrier code for distinguishing between large and normal
 135 // pages, so we just ignore the fact that addr points into a large page and
 136 // calculate the region number as if addr pointed into a normal 8K page. This
 137 // way we get a region number modulo 32, so for large pages several regions
 138 // might be mapped to a single dirty mark.
139 ASSERT_PAGE_ALIGNED(this->address());
140 STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
141
 142 // We are using masking with kPageAlignmentMask instead of Page::Offset()
 143 // to get the offset relative to the beginning of the 8K page containing addr,
 144 // not to the beginning of the actual page, which can be bigger than 8K.
145 intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
146 return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
147 }
148
149
150 uint32_t Page::GetRegionMaskForAddress(Address addr) {
151 return 1 << GetRegionNumberForAddress(addr);
152 }
153
154
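The comment in GetRegionNumberForAddress above pins down the granularity: with 8K pages split into 256-byte regions there are exactly 32 regions, so one 32-bit word of dirty marks per page suffices, and large pages simply alias onto the same 32 bits. A worked sketch of the number/mask computation under those assumed sizes (standalone names, not the real Page API):

```cpp
#include <cassert>
#include <cstdint>

// Assumed sizes matching the comment: 8K pages, 256-byte regions.
constexpr uintptr_t kPageSize = 8 * 1024;
constexpr uintptr_t kPageAlignmentMask = kPageSize - 1;
constexpr int kRegionSizeLog2 = 8;  // 256-byte regions => 32 regions per page

int RegionNumber(uintptr_t addr) {
  // Offset within a normal-sized page, divided by the region size.
  // For large pages this wraps modulo 32, aliasing several regions per bit.
  return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
}

uint32_t RegionMask(uintptr_t addr) {
  return 1u << RegionNumber(addr);
}

int main() {
  uintptr_t page = 0x40000;                 // some 8K-aligned page base
  assert(RegionNumber(page + 0) == 0);      // first region
  assert(RegionNumber(page + 255) == 0);    // still in region 0
  assert(RegionNumber(page + 256) == 1);    // next region
  assert(RegionNumber(page + 8191) == 31);  // last region of an 8K page
  assert(RegionMask(page + 256) == 2u);     // bit 1
  return 0;
}
```

Marking a region dirty is then a single OR of this mask into the page's dirty_regions_ word, which is what MarkRegionDirty below does.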
155 void Page::MarkRegionDirty(Address address) {
156 SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
157 }
158
159
160 bool Page::IsRegionDirty(Address address) {
161 return GetRegionMarks() & GetRegionMaskForAddress(address);
162 }
163
164
165 void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
166 int rstart = GetRegionNumberForAddress(start);
167 int rend = GetRegionNumberForAddress(end);
168
169 if (reaches_limit) {
170 end += 1;
171 }
172
173 if ((rend - rstart) == 0) {
174 return;
175 }
176
142 uint32_t bitmask = 0; 177 uint32_t bitmask = 0;
143 Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask); 178
144 return (Memory::uint32_at(rset_address) & bitmask) != 0; 179 if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
180 || (start == ObjectAreaStart())) {
181 // First region is fully covered
182 bitmask = 1 << rstart;
183 }
184
185 while (++rstart < rend) {
186 bitmask |= 1 << rstart;
187 }
188
189 if (bitmask) {
190 SetRegionMarks(GetRegionMarks() & ~bitmask);
191 }
192 }
193
194
195 void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
196 watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED;
197 }
198
199
200 bool Page::IsWatermarkValid() {
201 return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_;
202 }
203
204
205 void Page::InvalidateWatermark(bool value) {
206 if (value) {
207 flags_ = (flags_ & ~WATERMARK_INVALIDATED) | watermark_invalidated_mark_;
208 } else {
209 flags_ = (flags_ & ~WATERMARK_INVALIDATED) |
210 (watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED);
211 }
212
213 ASSERT(IsWatermarkValid() == !value);
145 } 214 }
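IsWatermarkValid compares the page's WATERMARK_INVALIDATED bit against a global watermark_invalidated_mark_ rather than against a fixed value, so flipping the global mark once per GC cycle re-validates every page's watermark without touching each page header. A small sketch of that inversion trick; the names and single-bit layout are assumptions for illustration only:

```cpp
#include <cassert>
#include <cstdint>

constexpr uint32_t WATERMARK_INVALIDATED = 1u << 0;  // assumed single flag bit

uint32_t global_invalidated_mark = 0;  // current meaning of the bit

bool IsWatermarkValid(uint32_t flags) {
  // A watermark is valid when the page's bit differs from the global mark.
  return (flags & WATERMARK_INVALIDATED) != global_invalidated_mark;
}

void InvalidateWatermark(uint32_t& flags) {
  // Make the page's bit equal to the global mark, i.e. invalid.
  flags = (flags & ~WATERMARK_INVALIDATED) | global_invalidated_mark;
}

void FlipMeaningOfInvalidatedFlag() {
  // One cheap global toggle re-validates all previously invalidated pages.
  global_invalidated_mark ^= WATERMARK_INVALIDATED;
}

int main() {
  uint32_t page_flags = 0;

  FlipMeaningOfInvalidatedFlag();          // start of a GC cycle
  assert(IsWatermarkValid(page_flags));    // page bit (0) differs from mark (1)

  InvalidateWatermark(page_flags);         // invalidate just this page
  assert(!IsWatermarkValid(page_flags));

  FlipMeaningOfInvalidatedFlag();          // next cycle: all pages valid again
  assert(IsWatermarkValid(page_flags));
  return 0;
}
```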
146 215
147 216
148 bool Page::GetPageFlag(PageFlag flag) { 217 bool Page::GetPageFlag(PageFlag flag) {
149 return (flags & flag) != 0; 218 return (flags_ & flag) != 0;
150 } 219 }
151 220
152 221
153 void Page::SetPageFlag(PageFlag flag, bool value) { 222 void Page::SetPageFlag(PageFlag flag, bool value) {
154 if (value) { 223 if (value) {
155 flags |= flag; 224 flags_ |= flag;
156 } else { 225 } else {
157 flags &= ~flag; 226 flags_ &= ~flag;
158 } 227 }
159 } 228 }
160 229
161 230
231 void Page::ClearPageFlags() {
232 flags_ = 0;
233 }
234
235
162 bool Page::WasInUseBeforeMC() { 236 bool Page::WasInUseBeforeMC() {
163 return GetPageFlag(WAS_IN_USE_BEFORE_MC); 237 return GetPageFlag(WAS_IN_USE_BEFORE_MC);
164 } 238 }
165 239
166 240
167 void Page::SetWasInUseBeforeMC(bool was_in_use) { 241 void Page::SetWasInUseBeforeMC(bool was_in_use) {
168 SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use); 242 SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
169 } 243 }
170 244
171 245
(...skipping 164 matching lines...)
336 // Round the chunk address up to the nearest page-aligned address 410 // Round the chunk address up to the nearest page-aligned address
337 // and return the heap object in that page. 411 // and return the heap object in that page.
338 Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize)); 412 Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
339 return HeapObject::FromAddress(page->ObjectAreaStart()); 413 return HeapObject::FromAddress(page->ObjectAreaStart());
340 } 414 }
341 415
342 416
343 // ----------------------------------------------------------------------------- 417 // -----------------------------------------------------------------------------
344 // LargeObjectSpace 418 // LargeObjectSpace
345 419
346 int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
347 int extra_rset_bits =
348 RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
349 kBitsPerInt);
350 return extra_rset_bits / kBitsPerByte;
351 }
352
353
354 Object* NewSpace::AllocateRawInternal(int size_in_bytes, 420 Object* NewSpace::AllocateRawInternal(int size_in_bytes,
355 AllocationInfo* alloc_info) { 421 AllocationInfo* alloc_info) {
356 Address new_top = alloc_info->top + size_in_bytes; 422 Address new_top = alloc_info->top + size_in_bytes;
357 if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes); 423 if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes);
358 424
359 Object* obj = HeapObject::FromAddress(alloc_info->top); 425 Object* obj = HeapObject::FromAddress(alloc_info->top);
360 alloc_info->top = new_top; 426 alloc_info->top = new_top;
361 #ifdef DEBUG 427 #ifdef DEBUG
362 SemiSpace* space = 428 SemiSpace* space =
363 (alloc_info == &allocation_info_) ? &to_space_ : &from_space_; 429 (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
364 ASSERT(space->low() <= alloc_info->top 430 ASSERT(space->low() <= alloc_info->top
365 && alloc_info->top <= space->high() 431 && alloc_info->top <= space->high()
366 && alloc_info->limit == space->high()); 432 && alloc_info->limit == space->high());
367 #endif 433 #endif
368 return obj; 434 return obj;
369 } 435 }
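AllocateRawInternal above is a plain bump-pointer allocator: it advances top by the request size and bails out with a retry-after-GC failure when that would cross limit. A stripped-down sketch of the same fast path; the types and the failure signalling are simplified assumptions, not V8's real Object*/Failure machinery:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct AllocationInfo {
  uintptr_t top;    // next free byte
  uintptr_t limit;  // end of the current allocation area
};

// Returns the address of the new object, or 0 to signal "retry after GC".
uintptr_t AllocateRaw(AllocationInfo* info, size_t size_in_bytes) {
  uintptr_t new_top = info->top + size_in_bytes;
  if (new_top > info->limit) return 0;  // would overflow the space
  uintptr_t result = info->top;         // object starts at the old top
  info->top = new_top;                  // bump the pointer
  return result;
}

int main() {
  alignas(8) static char arena[1024];
  AllocationInfo info = {reinterpret_cast<uintptr_t>(arena),
                         reinterpret_cast<uintptr_t>(arena) + sizeof(arena)};
  uintptr_t a = AllocateRaw(&info, 64);
  uintptr_t b = AllocateRaw(&info, 64);
  std::printf("a=%p b=%p\n", reinterpret_cast<void*>(a),
              reinterpret_cast<void*>(b));  // b is exactly 64 bytes past a
  uintptr_t c = AllocateRaw(&info, 2048);   // too big for what is left
  std::printf("large allocation %s\n", c ? "succeeded" : "needs GC");
  return 0;
}
```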
370 436
371 437
372 bool FreeListNode::IsFreeListNode(HeapObject* object) { 438 bool FreeListNode::IsFreeListNode(HeapObject* object) {
373 return object->map() == Heap::raw_unchecked_byte_array_map() 439 return object->map() == Heap::raw_unchecked_byte_array_map()
374 || object->map() == Heap::raw_unchecked_one_pointer_filler_map() 440 || object->map() == Heap::raw_unchecked_one_pointer_filler_map()
375 || object->map() == Heap::raw_unchecked_two_pointer_filler_map(); 441 || object->map() == Heap::raw_unchecked_two_pointer_filler_map();
376 } 442 }
377 443
378 } } // namespace v8::internal 444 } } // namespace v8::internal
379 445
380 #endif // V8_SPACES_INL_H_ 446 #endif // V8_SPACES_INL_H_
