Chromium Code Reviews

Unified Diff: src/spaces-inl.h

Issue 2071020: Reverting r4685, r4686, r4687 (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 10 years, 7 months ago
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(... 48 matching lines skipped ...)
   return MemoryAllocator::GetNextPage(this);
 }


 Address Page::AllocationTop() {
   PagedSpace* owner = MemoryAllocator::PageOwner(this);
   return owner->PageAllocationTop(this);
 }


-Address Page::AllocationWatermark() {
-  PagedSpace* owner = MemoryAllocator::PageOwner(this);
-  if (this == owner->AllocationTopPage()) {
-    return owner->top();
-  }
-  return address() + AllocationWatermarkOffset();
+void Page::ClearRSet() {
+  // This method can be called in all rset states.
+  memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
 }


-uint32_t Page::AllocationWatermarkOffset() {
-  return (flags_ & kAllocationWatermarkOffsetMask) >>
-      kAllocationWatermarkOffsetShift;
+// Given a 32-bit address, separate its bits into:
+// | page address | words (6) | bit offset (5) | pointer alignment (2) |
+// The address of the rset word containing the bit for this word is computed as:
+//    page_address + words * 4
+// For a 64-bit address, if it is:
+// | page address | words (5) | bit offset (5) | pointer alignment (3) |
+// The address of the rset word containing the bit for this word is computed as:
+//    page_address + words * 4 + kRSetOffset.
+// The rset is accessed as 32-bit words, and bit offsets in a 32-bit word,
+// even on the X64 architecture.
+
+Address Page::ComputeRSetBitPosition(Address address, int offset,
+                                     uint32_t* bitmask) {
+  ASSERT(Page::is_rset_in_use());
+
+  Page* page = Page::FromAddress(address);
+  uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
+                                             kPointerSizeLog2);
+  *bitmask = 1 << (bit_offset % kBitsPerInt);
+
+  Address rset_address =
+      page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize;
+  // The remembered set address is either in the normal remembered set range
+  // of a page or else we have a large object page.
+  ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
+         || page->IsLargeObjectPage());
+
+  if (rset_address >= page->RSetEnd()) {
+    // We have a large object page, and the remembered set address is actually
+    // past the end of the object.
+
+    // The first part of the remembered set is still located at the start of
+    // the page, but anything after kRSetEndOffset must be relocated to after
+    // the large object, i.e. after
+    //   (page->ObjectAreaStart() + object size)
+    // We do that by adding the difference between the normal RSet's end and
+    // the object's end.
+    ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
+    int fixedarray_length =
+        FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
+                                           + Array::kLengthOffset));
+    rset_address += kObjectStartOffset - kRSetEndOffset + fixedarray_length;
+  }
+  return rset_address;
 }


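Note: the bit-twiddling in the restored ComputeRSetBitPosition is easier to follow with concrete numbers. The sketch below reproduces just the arithmetic described in the comment above, under stated assumptions (4-byte pointers, 32-bit rset words, kRSetOffset of zero); the sample slot offset is made up, and this is an illustration rather than the CL's code.

    // Standalone sketch of the rset bit-position arithmetic. Assumptions:
    // 4-byte pointers (kPointerSizeLog2 == 2), 32-bit rset words, and a
    // kRSetOffset of zero; the sample offset is hypothetical.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kPointerSizeLog2 = 2;
      const uint32_t kBitsPerInt = 32;
      const uint32_t kIntSize = 4;

      uint32_t page_offset = 0x103C;  // hypothetical offset of a slot in a page

      // Drop the pointer-alignment bits: which pointer-sized word is this?
      uint32_t bit_offset = page_offset >> kPointerSizeLog2;       // word 1039

      // Split the word number into an rset word index and a bit within it.
      uint32_t rset_byte = (bit_offset / kBitsPerInt) * kIntSize;  // page + 128
      uint32_t bitmask = 1u << (bit_offset % kBitsPerInt);         // bit 15

      printf("rset word at page+%u, bitmask 0x%08x\n", rset_byte, bitmask);
      return 0;
    }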
-void Page::SetAllocationWatermark(Address allocation_watermark) {
-  if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
-    // When iterating intergenerational references during scavenge
-    // we might decide to promote an encountered young object.
-    // We will allocate a space for such an object and put it
-    // into the promotion queue to process it later.
-    // If space for object was allocated somewhere beyond allocation
-    // watermark this might cause garbage pointers to appear under allocation
-    // watermark. To avoid visiting them during dirty regions iteration
-    // which might be still in progress we store a valid allocation watermark
-    // value and mark this page as having an invalid watermark.
-    SetCachedAllocationWatermark(AllocationWatermark());
-    InvalidateWatermark(true);
-  }
-
-  flags_ = (flags_ & kFlagsMask) |
-           Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
-  ASSERT(AllocationWatermarkOffset()
-         == static_cast<uint32_t>(Offset(allocation_watermark)));
+void Page::SetRSet(Address address, int offset) {
+  uint32_t bitmask = 0;
+  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
+  Memory::uint32_at(rset_address) |= bitmask;
+
+  ASSERT(IsRSetSet(address, offset));
 }


-void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
-  mc_first_forwarded = allocation_watermark;
+// Clears the corresponding remembered set bit for a given address.
+void Page::UnsetRSet(Address address, int offset) {
+  uint32_t bitmask = 0;
+  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
+  Memory::uint32_at(rset_address) &= ~bitmask;
+
+  ASSERT(!IsRSetSet(address, offset));
 }


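Note: SetRSet above, UnsetRSet, and the IsRSetSet predicate restored below all reduce to the same word-and-mask pattern. A minimal self-contained sketch of that round-trip (the mask value is arbitrary, and a plain variable stands in for the word Memory::uint32_at(rset_address) addresses):

    // Set/test/clear round-trip on one rset word, mirroring SetRSet,
    // IsRSetSet, and UnsetRSet.
    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t rset_word = 0;
      const uint32_t bitmask = 1u << 15;   // arbitrary bit for some slot

      rset_word |= bitmask;                // SetRSet
      assert((rset_word & bitmask) != 0);  // IsRSetSet -> true

      rset_word &= ~bitmask;               // UnsetRSet
      assert((rset_word & bitmask) == 0);  // IsRSetSet -> false
      return 0;
    }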
-Address Page::CachedAllocationWatermark() {
-  return mc_first_forwarded;
-}
-
-
-uint32_t Page::GetRegionMarks() {
-  return dirty_regions_;
-}
-
-
-void Page::SetRegionMarks(uint32_t marks) {
-  dirty_regions_ = marks;
-}
-
-
-int Page::GetRegionNumberForAddress(Address addr) {
-  // Each page is divided into 256 byte regions. Each region has a corresponding
-  // dirty mark bit in the page header. Region can contain intergenerational
-  // references iff its dirty mark is set.
-  // A normal 8K page contains exactly 32 regions so all region marks fit
-  // into 32-bit integer field. To calculate a region number we just divide
-  // offset inside page by region size.
-  // A large page can contain more then 32 regions. But we want to avoid
-  // additional write barrier code for distinguishing between large and normal
-  // pages so we just ignore the fact that addr points into a large page and
-  // calculate region number as if addr pointed into a normal 8K page. This way
-  // we get a region number modulo 32 so for large pages several regions might
-  // be mapped to a single dirty mark.
-  ASSERT_PAGE_ALIGNED(this->address());
-  STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
-
-  // We are using masking with kPageAlignmentMask instead of Page::Offset()
-  // to get an offset to the beginning of 8K page containing addr not to the
-  // beginning of actual page which can be bigger then 8K.
-  return (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;
-}
-
-
-uint32_t Page::GetRegionMaskForAddress(Address addr) {
-  return 1 << GetRegionNumberForAddress(addr);
-}
-
-
-void Page::MarkRegionDirty(Address address) {
-  SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
-}
-
-
-bool Page::IsRegionDirty(Address address) {
-  return GetRegionMarks() & GetRegionMaskForAddress(address);
-}
-
-
-void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
-  int rstart = GetRegionNumberForAddress(start);
-  int rend = GetRegionNumberForAddress(end);
-
-  if (reaches_limit) {
-    end += 1;
-  }
-
-  if ((rend - rstart) == 0) {
-    return;
-  }
-
-  uint32_t bitmask = 0;
-
-  if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
-      || (start == ObjectAreaStart())) {
-    // First region is fully covered
-    bitmask = 1 << rstart;
-  }
-
-  while (++rstart < rend) {
-    bitmask |= 1 << rstart;
-  }
-
-  if (bitmask) {
-    SetRegionMarks(GetRegionMarks() & ~bitmask);
-  }
-}
-
-
-void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
-  watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED;
-}
-
-
-bool Page::IsWatermarkValid() {
-  return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_;
-}
-
-
-void Page::InvalidateWatermark(bool value) {
-  if (value) {
-    flags_ = (flags_ & ~WATERMARK_INVALIDATED) | watermark_invalidated_mark_;
-  } else {
-    flags_ = (flags_ & ~WATERMARK_INVALIDATED) |
-             (watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED);
-  }
-
-  ASSERT(IsWatermarkValid() == !value);
+bool Page::IsRSetSet(Address address, int offset) {
+  uint32_t bitmask = 0;
+  Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
+  return (Memory::uint32_at(rset_address) & bitmask) != 0;
 }


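Note: the long comment in the removed GetRegionNumberForAddress explains the dirty-region scheme this CL reverts away from. A worked example of that computation, with the 8K page size and 256-byte region size the comment states (the address itself is made up):

    // Worked example of the removed region-number computation: 256-byte
    // regions within an 8K-aligned page, one dirty-mark bit per region.
    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kRegionSizeLog2 = 8;                // 256-byte regions
      const uintptr_t kPageAlignmentMask = 0x1FFF;  // 8K pages

      uintptr_t addr = 0x2A5B40;  // hypothetical heap address

      // Offset within the enclosing 8K page, divided by the region size.
      int region = (addr & kPageAlignmentMask) >> kRegionSizeLog2;  // 27
      uint32_t mask = 1u << region;  // dirty mark 0x08000000

      // A 40K large page would have 160 such regions but only 32 mark bits,
      // so regions alias modulo 32, exactly as the removed comment describes.
      printf("region %d, mask 0x%08x\n", region, mask);
      return 0;
    }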
 bool Page::GetPageFlag(PageFlag flag) {
-  return (flags_ & flag) != 0;
+  return (flags & flag) != 0;
 }


 void Page::SetPageFlag(PageFlag flag, bool value) {
   if (value) {
-    flags_ |= flag;
+    flags |= flag;
   } else {
-    flags_ &= ~flag;
+    flags &= ~flag;
   }
 }


-void Page::ClearPageFlags() {
-  flags_ = 0;
-}
-
-
 bool Page::WasInUseBeforeMC() {
   return GetPageFlag(WAS_IN_USE_BEFORE_MC);
 }


 void Page::SetWasInUseBeforeMC(bool was_in_use) {
   SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
 }


(... 164 matching lines skipped ...)
   // Round the chunk address up to the nearest page-aligned address
   // and return the heap object in that page.
   Page* page = Page::FromAddress(RoundUp(address(), Page::kPageSize));
   return HeapObject::FromAddress(page->ObjectAreaStart());
 }


 // -----------------------------------------------------------------------------
 // LargeObjectSpace

+int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
+  int extra_rset_bits =
+      RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
+              kBitsPerInt);
+  return extra_rset_bits / kBitsPerByte;
+}
+
+
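Note: the restored ExtraRSetBytesFor sizes the relocated tail of a large object's remembered set: one bit per pointer-sized word past the normal object area, rounded up to whole 32-bit rset words, then converted to bytes. A worked example under assumed constants (4-byte pointers; the object-area size is a round number picked for illustration, not V8's actual value, and RoundUpTo stands in for V8's RoundUp):

    // Worked example of the ExtraRSetBytesFor arithmetic.
    #include <cstdio>

    int RoundUpTo(int value, int multiple) {
      return ((value + multiple - 1) / multiple) * multiple;
    }

    int main() {
      const int kPointerSize = 4;        // assumed 32-bit pointers
      const int kBitsPerInt = 32;
      const int kBitsPerByte = 8;
      const int kObjectAreaSize = 8000;  // illustrative, not V8's real value

      int object_size = 40000;  // a large object spilling past one page

      // One rset bit per pointer beyond the object area, rounded up to
      // whole 32-bit words, then converted from bits to bytes.
      int extra_bits = RoundUpTo((object_size - kObjectAreaSize) / kPointerSize,
                                 kBitsPerInt);
      printf("%d extra rset bytes\n", extra_bits / kBitsPerByte);  // 1000
      return 0;
    }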
 Object* NewSpace::AllocateRawInternal(int size_in_bytes,
                                       AllocationInfo* alloc_info) {
   Address new_top = alloc_info->top + size_in_bytes;
   if (new_top > alloc_info->limit) return Failure::RetryAfterGC(size_in_bytes);

   Object* obj = HeapObject::FromAddress(alloc_info->top);
   alloc_info->top = new_top;
 #ifdef DEBUG
   SemiSpace* space =
       (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
   ASSERT(space->low() <= alloc_info->top
          && alloc_info->top <= space->high()
          && alloc_info->limit == space->high());
 #endif
   return obj;
 }


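Note: AllocateRawInternal above is the classic bump-pointer fast path: an allocation succeeds by sliding top forward, and only when top would pass limit does the caller fall back to a GC and retry. A minimal standalone sketch of the same technique (all names and sizes are illustrative, not V8's):

    // Minimal bump-pointer allocator mirroring AllocateRawInternal's fast
    // path; a real collector would reclaim space instead of just failing.
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    struct AllocationInfo {
      uintptr_t top;    // next free address
      uintptr_t limit;  // end of the allocatable area
    };

    // Returns the old top on success, or 0 to signal "retry after GC".
    uintptr_t AllocateRaw(AllocationInfo* info, size_t size_in_bytes) {
      uintptr_t new_top = info->top + size_in_bytes;
      if (new_top > info->limit) return 0;  // would overflow the space
      uintptr_t result = info->top;
      info->top = new_top;  // bump the pointer past the new object
      return result;
    }

    int main() {
      static uint8_t arena[1024];
      AllocationInfo info = {reinterpret_cast<uintptr_t>(arena),
                             reinterpret_cast<uintptr_t>(arena) + sizeof(arena)};
      uintptr_t a = AllocateRaw(&info, 64);
      uintptr_t b = AllocateRaw(&info, 64);
      printf("a=%p b=%p\n", (void*)a, (void*)b);  // b == a + 64
      return 0;
    }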
 bool FreeListNode::IsFreeListNode(HeapObject* object) {
   return object->map() == Heap::raw_unchecked_byte_array_map()
       || object->map() == Heap::raw_unchecked_one_pointer_filler_map()
       || object->map() == Heap::raw_unchecked_two_pointer_filler_map();
 }

 } }  // namespace v8::internal

 #endif  // V8_SPACES_INL_H_
