OLD | NEW |
1 // Copyright 2006-2010 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 65 matching lines...)
76 | 76 |
77 | 77 |
78 void Page::SetAllocationWatermark(Address allocation_watermark) { | 78 void Page::SetAllocationWatermark(Address allocation_watermark) { |
79 if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) { | 79 if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) { |
80 // When iterating intergenerational references during scavenge | 80 // When iterating intergenerational references during scavenge |
81 // we might decide to promote an encountered young object. | 81 // we might decide to promote an encountered young object. |
82 // We will allocate space for such an object and put it | 82 // We will allocate space for such an object and put it |
83 // into the promotion queue to process it later. | 83 // into the promotion queue to process it later. |
84 // If space for the object was allocated somewhere beyond the allocation | 84 // If space for the object was allocated somewhere beyond the allocation |
85 // watermark, this might cause garbage pointers to appear below the allocation | 85 // watermark, this might cause garbage pointers to appear below the allocation |
86 // watermark. To avoid visiting them during dirty regions iteration | 86 // watermark. To avoid visiting them during pointer-to-newspace iteration |
87 // which might still be in progress, we store a valid allocation watermark | 87 // which might still be in progress, we store a valid allocation watermark |
88 // value and mark this page as having an invalid watermark. | 88 // value and mark this page as having an invalid watermark. |
89 SetCachedAllocationWatermark(AllocationWatermark()); | 89 SetCachedAllocationWatermark(AllocationWatermark()); |
90 InvalidateWatermark(true); | 90 InvalidateWatermark(true); |
91 } | 91 } |
92 | 92 |
93 flags_ = (flags_ & kFlagsMask) | | 93 flags_ = (flags_ & kFlagsMask) | |
94 Offset(allocation_watermark) << kAllocationWatermarkOffsetShift; | 94 Offset(allocation_watermark) << kAllocationWatermarkOffsetShift; |
95 ASSERT(AllocationWatermarkOffset() | 95 ASSERT(AllocationWatermarkOffset() |
96 == static_cast<uint32_t>(Offset(allocation_watermark))); | 96 == static_cast<uint32_t>(Offset(allocation_watermark))); |
97 } | 97 } |
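For reference, the flags_ update above packs the watermark offset into the bits above the per-page flag bits: kFlagsMask keeps the flag bits, the shifted offset occupies the rest. A minimal standalone sketch of that packing; the shift value and helper names here are assumptions for illustration, not the real constants:

#include <cassert>
#include <cstdint>

// Illustrative layout: low bits hold page flags, bits from kWatermarkShift
// upward hold the allocation watermark offset within the page.
const uint32_t kWatermarkShift = 3;                         // assumed value
const uint32_t kFlagBitsMask = (1u << kWatermarkShift) - 1;

uint32_t PackWatermark(uint32_t flags, uint32_t offset) {
  // Keep the flag bits, overwrite the offset bits (mirrors the update above).
  return (flags & kFlagBitsMask) | (offset << kWatermarkShift);
}

uint32_t WatermarkOffset(uint32_t flags) {
  return flags >> kWatermarkShift;
}

int main() {
  uint32_t flags = PackWatermark(/*flags=*/0x5, /*offset=*/1024);
  assert(WatermarkOffset(flags) == 1024);   // the offset round-trips
  assert((flags & kFlagBitsMask) == 0x5);   // the flag bits are preserved
  return 0;
}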
98 | 98 |
99 | 99 |
100 void Page::SetCachedAllocationWatermark(Address allocation_watermark) { | 100 void Page::SetCachedAllocationWatermark(Address allocation_watermark) { |
101 allocation_watermark_ = allocation_watermark; | 101 allocation_watermark_ = allocation_watermark; |
102 } | 102 } |
103 | 103 |
104 | 104 |
105 Address Page::CachedAllocationWatermark() { | 105 Address Page::CachedAllocationWatermark() { |
106 return allocation_watermark_; | 106 return allocation_watermark_; |
107 } | 107 } |
108 | 108 |
109 | 109 |
110 uint32_t Page::GetRegionMarks() { | |
111 #ifdef ENABLE_CARDMARKING_WRITE_BARRIER | |
112 return dirty_regions_; | |
113 #else | |
114 return kAllRegionsDirtyMarks; | |
115 #endif | |
116 } | |
117 | |
118 | |
119 void Page::SetRegionMarks(uint32_t marks) { | |
120 #ifdef ENABLE_CARDMARKING_WRITE_BARRIER | |
121 dirty_regions_ = marks; | |
122 #endif | |
123 } | |
124 | |
125 | |
126 int Page::GetRegionNumberForAddress(Address addr) { | |
127 #ifdef ENABLE_CARDMARKING_WRITE_BARRIER | |
128 // Each page is divided into 256-byte regions. Each region has a corresponding | 
129 // dirty mark bit in the page header. A region can contain intergenerational | 
130 // references iff its dirty mark is set. | 
131 // A normal 8K page contains exactly 32 regions, so all region marks fit | 
132 // into a 32-bit integer field. To calculate a region number we just divide | 
133 // the offset inside the page by the region size. | 
134 // A large page can contain more than 32 regions. But we want to avoid | 
135 // additional write barrier code for distinguishing between large and normal | 
136 // pages, so we just ignore the fact that addr points into a large page and | 
137 // calculate the region number as if addr pointed into a normal 8K page. This way | 
138 // we get a region number modulo 32, so for large pages several regions might | 
139 // be mapped to a single dirty mark. | 
140 ASSERT_PAGE_ALIGNED(this->address()); | |
141 STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt); | |
142 | |
143 // We use masking with kPageAlignmentMask instead of Page::Offset() | 
144 // to get the offset from the beginning of the 8K page containing addr, not | 
145 // from the beginning of the actual page, which can be bigger than 8K. | 
146 intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask; | |
147 return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2); | |
148 #else | |
149 return 0; | |
150 #endif | |
151 } | |
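The deleted function derives a region number purely from the low bits of the address, as its comment explains. A small self-contained sketch of that arithmetic, using the 8K page / 256-byte region figures from the comment (constant and function names here are made up for the example):

#include <cassert>
#include <cstdint>

// Illustrative constants from the comment above: 8K pages split into
// 256-byte regions, i.e. 8192 / 256 = 32 regions per normal page.
const uintptr_t kExamplePageSize = 8 * 1024;
const uintptr_t kExampleAlignmentMask = kExamplePageSize - 1;
const int kExampleRegionSizeLog2 = 8;  // 256-byte regions

int RegionNumberFor(uintptr_t addr) {
  // Offset inside the (possibly notional) 8K page containing addr...
  uintptr_t offset_inside_normal_page = addr & kExampleAlignmentMask;
  // ...divided by the region size gives a region number, always in [0, 32).
  return static_cast<int>(offset_inside_normal_page >> kExampleRegionSizeLog2);
}

int main() {
  // An address 256 bytes into its 8K page falls into region 1.
  assert(RegionNumberFor(0x2100) == 1);
  // On a large page, an address one full 8K stride further maps to the same
  // region number -- region numbers are effectively taken modulo 32.
  assert(RegionNumberFor(0x2100 + kExamplePageSize) == 1);
  return 0;
}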
152 | |
153 | |
154 uint32_t Page::GetRegionMaskForAddress(Address addr) { | |
155 return 1 << GetRegionNumberForAddress(addr); | |
156 } | |
157 | |
158 | |
159 uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) { | |
160 #ifdef ENABLE_CARDMARKING_WRITE_BARRIER | |
161 uint32_t result = 0; | |
162 if (length_in_bytes >= kPageSize) { | |
163 result = kAllRegionsDirtyMarks; | |
164 } else if (length_in_bytes > 0) { | |
165 int start_region = GetRegionNumberForAddress(start); | |
166 int end_region = | |
167 GetRegionNumberForAddress(start + length_in_bytes - kPointerSize); | |
168 uint32_t start_mask = (~0) << start_region; | |
169 uint32_t end_mask = ~((~1) << end_region); | |
170 result = start_mask & end_mask; | |
171 // If end_region < start_region, the span wraps around and the masks are OR-ed. | 
172 if (result == 0) result = start_mask | end_mask; | |
173 } | |
174 #ifdef DEBUG | |
175 if (FLAG_enable_slow_asserts) { | |
176 uint32_t expected = 0; | |
177 for (Address a = start; a < start + length_in_bytes; a += kPointerSize) { | |
178 expected |= GetRegionMaskForAddress(a); | |
179 } | |
180 ASSERT(expected == result); | |
181 } | |
182 #endif | |
183 return result; | |
184 #else | |
185 return Page::kAllRegionsDirtyMarks; | |
186 #endif | |
187 } | |
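A worked sketch of the start_mask / end_mask arithmetic in the deleted function, including the wrap-around case noted in the comment where end_region < start_region (region numbers are passed in directly and the helper name is invented, to keep the example self-contained):

#include <cassert>
#include <cstdint>

// Build a dirty-region mask covering regions [start_region, end_region],
// with the wrap-around handling used above when end_region < start_region.
uint32_t MaskForRegions(int start_region, int end_region) {
  uint32_t start_mask = (~0u) << start_region;   // bits start_region..31
  uint32_t end_mask = ~((~1u) << end_region);    // bits 0..end_region
  uint32_t result = start_mask & end_mask;
  // If the span wrapped past region 31, the intersection is empty and the
  // two half-masks are OR-ed together instead.
  if (result == 0) result = start_mask | end_mask;
  return result;
}

int main() {
  // Regions 3..5 dirty: bits 3, 4 and 5 set.
  assert(MaskForRegions(3, 5) == 0x38u);
  // Wrapped span: regions 30..31 plus 0..1.
  assert(MaskForRegions(30, 1) == 0xC0000003u);
  return 0;
}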
188 | |
189 | |
190 void Page::MarkRegionDirty(Address address) { | |
191 SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address)); | |
192 } | |
193 | |
194 | |
195 bool Page::IsRegionDirty(Address address) { | |
196 return GetRegionMarks() & GetRegionMaskForAddress(address); | |
197 } | |
198 | |
199 | |
200 void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) { | |
201 #ifdef ENABLE_CARDMARKING_WRITE_BARRIER | |
202 int rstart = GetRegionNumberForAddress(start); | |
203 int rend = GetRegionNumberForAddress(end); | |
204 | |
205 if (reaches_limit) { | |
206 end += 1; | |
207 } | |
208 | |
209 if ((rend - rstart) == 0) { | |
210 return; | |
211 } | |
212 | |
213 uint32_t bitmask = 0; | |
214 | |
215 if ((OffsetFrom(start) & kRegionAlignmentMask) == 0 | |
216 || (start == ObjectAreaStart())) { | |
217 // The first region is fully covered. | 
218 bitmask = 1 << rstart; | |
219 } | |
220 | |
221 while (++rstart < rend) { | |
222 bitmask |= 1 << rstart; | |
223 } | |
224 | |
225 if (bitmask) { | |
226 SetRegionMarks(GetRegionMarks() & ~bitmask); | |
227 } | |
228 #endif | |
229 } | |
230 | |
231 | |
232 void Page::FlipMeaningOfInvalidatedWatermarkFlag() { | 110 void Page::FlipMeaningOfInvalidatedWatermarkFlag() { |
233 watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED; | 111 watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED; |
234 } | 112 } |
235 | 113 |
236 | 114 |
237 bool Page::IsWatermarkValid() { | 115 bool Page::IsWatermarkValid() { |
238 return (flags_ & (1 << WATERMARK_INVALIDATED)) != watermark_invalidated_mark_; | 116 return (flags_ & (1 << WATERMARK_INVALIDATED)) != watermark_invalidated_mark_; |
239 } | 117 } |
240 | 118 |
241 | 119 |
242 void Page::InvalidateWatermark(bool value) { | 120 void Page::InvalidateWatermark(bool value) { |
243 if (value) { | 121 if (value) { |
244 flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) | | 122 flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) | |
245 watermark_invalidated_mark_; | 123 watermark_invalidated_mark_; |
246 } else { | 124 } else { |
247 flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) | | 125 flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) | |
248 (watermark_invalidated_mark_ ^ (1 << WATERMARK_INVALIDATED)); | 126 (watermark_invalidated_mark_ ^ (1 << WATERMARK_INVALIDATED)); |
249 } | 127 } |
250 | 128 |
251 ASSERT(IsWatermarkValid() == !value); | 129 ASSERT(IsWatermarkValid() == !value); |
252 } | 130 } |
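The three functions above rely on a constant-time global flip: a page's watermark counts as invalid exactly when its WATERMARK_INVALIDATED bit equals the global watermark_invalidated_mark_, so XOR-ing that single global value inverts which pages read as valid without visiting any page. A minimal standalone sketch of the same idea; the bit position, initial mark value, and names are assumptions for illustration:

#include <cassert>
#include <cstdint>

// Sketch of the flag-flip trick: a page's watermark reads as invalid when its
// invalidated bit equals the global mark, so XOR-ing the global mark once
// inverts the validity reading of every page without touching any page.
const uint32_t kInvalidatedBit = 1u << 0;
uint32_t global_invalidated_mark = kInvalidatedBit;  // stands in for watermark_invalidated_mark_

struct ExamplePage {
  uint32_t flags = 0;

  bool IsWatermarkValid() const {
    return (flags & kInvalidatedBit) != global_invalidated_mark;
  }
  void InvalidateWatermark() {
    flags = (flags & ~kInvalidatedBit) | global_invalidated_mark;
  }
};

void FlipMeaningOfInvalidatedFlag() {
  global_invalidated_mark ^= kInvalidatedBit;
}

int main() {
  ExamplePage a, b;
  a.InvalidateWatermark();
  assert(!a.IsWatermarkValid() && b.IsWatermarkValid());

  // One O(1) global flip inverts the reading for both pages at once:
  // the previously invalidated page now reads as valid, and vice versa.
  FlipMeaningOfInvalidatedFlag();
  assert(a.IsWatermarkValid() && !b.IsWatermarkValid());
  return 0;
}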
253 | 131 |
254 | 132 |
255 void Page::ClearGCFields() { | 133 void Page::ClearGCFields() { |
256 InvalidateWatermark(true); | 134 InvalidateWatermark(true); |
257 SetAllocationWatermark(ObjectAreaStart()); | 135 SetAllocationWatermark(ObjectAreaStart()); |
258 if (Heap::gc_state() == Heap::SCAVENGE) { | 136 if (Heap::gc_state() == Heap::SCAVENGE) { |
259 SetCachedAllocationWatermark(ObjectAreaStart()); | 137 SetCachedAllocationWatermark(ObjectAreaStart()); |
260 } | 138 } |
261 SetRegionMarks(kAllRegionsCleanMarks); | |
262 } | 139 } |
263 | 140 |
264 | 141 |
265 // ----------------------------------------------------------------------------- | 142 // ----------------------------------------------------------------------------- |
266 // MemoryAllocator | 143 // MemoryAllocator |
267 | 144 |
268 #ifdef ENABLE_HEAP_PROTECTION | 145 #ifdef ENABLE_HEAP_PROTECTION |
269 | 146 |
270 void MemoryAllocator::Protect(Address start, size_t size) { | 147 void MemoryAllocator::Protect(Address start, size_t size) { |
271 OS::Protect(start, size); | 148 OS::Protect(start, size); |
(...skipping 98 matching lines...)
370 | 247 |
371 bool FreeListNode::IsFreeListNode(HeapObject* object) { | 248 bool FreeListNode::IsFreeListNode(HeapObject* object) { |
372 return object->map() == Heap::raw_unchecked_byte_array_map() | 249 return object->map() == Heap::raw_unchecked_byte_array_map() |
373 || object->map() == Heap::raw_unchecked_one_pointer_filler_map() | 250 || object->map() == Heap::raw_unchecked_one_pointer_filler_map() |
374 || object->map() == Heap::raw_unchecked_two_pointer_filler_map(); | 251 || object->map() == Heap::raw_unchecked_two_pointer_filler_map(); |
375 } | 252 } |
376 | 253 |
377 } } // namespace v8::internal | 254 } } // namespace v8::internal |
378 | 255 |
379 #endif // V8_SPACES_INL_H_ | 256 #endif // V8_SPACES_INL_H_ |