
Side by Side Diff: src/spaces-inl.h

Issue 8139027: Version 3.6.5 (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: '' Created 9 years, 2 months ago
1 // Copyright 2006-2010 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
(...skipping 18 matching lines...)
30 30
31 #include "isolate.h" 31 #include "isolate.h"
32 #include "spaces.h" 32 #include "spaces.h"
33 #include "v8memory.h" 33 #include "v8memory.h"
34 34
35 namespace v8 { 35 namespace v8 {
36 namespace internal { 36 namespace internal {
37 37
38 38
39 // ----------------------------------------------------------------------------- 39 // -----------------------------------------------------------------------------
40 // Bitmap
41
42 void Bitmap::Clear(MemoryChunk* chunk) {
43 Bitmap* bitmap = chunk->markbits();
44 for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
45 chunk->ResetLiveBytes();
46 }
47
48
49 // -----------------------------------------------------------------------------
40 // PageIterator 50 // PageIterator
41 51
52
53 PageIterator::PageIterator(PagedSpace* space)
54 : space_(space),
55 prev_page_(&space->anchor_),
56 next_page_(prev_page_->next_page()) { }
57
58
42 bool PageIterator::has_next() {
43 return prev_page_ != stop_page_;
44 }
45
46
47 Page* PageIterator::next() {
48 ASSERT(has_next());
49 prev_page_ = (prev_page_ == NULL)
50 ? space_->first_page_
51 : prev_page_->next_page();
52 return prev_page_;
53 }
59 bool PageIterator::has_next() {
60 return next_page_ != &space_->anchor_;
61 }
62
63
64 Page* PageIterator::next() {
65 ASSERT(has_next());
66 prev_page_ = next_page_;
67 next_page_ = next_page_->next_page();
68 return prev_page_;
69 }
54 70
55 71
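The rewritten iterator above no longer asks the memory allocator for the next page: pages of a space are now kept on an intrusive, circular doubly-linked list whose sentinel is the space's anchor_ page, so has_next() simply stops when the cursor comes back to &space_->anchor_. A minimal standalone sketch of that sentinel pattern (hypothetical Node/List/Iterator names, not V8 classes):

#include <cassert>

// Sketch only: a circular, doubly-linked list with a sentinel node, the same
// shape as the anchor-page list walked by the new PageIterator.
struct Node {
  Node* next;
  Node* prev;
};

struct List {
  Node anchor;  // Sentinel: an empty list points the anchor at itself.
  List() { anchor.next = &anchor; anchor.prev = &anchor; }
};

class Iterator {
 public:
  explicit Iterator(List* list)
      : anchor_(&list->anchor), next_(list->anchor.next) {}
  bool has_next() const { return next_ != anchor_; }  // Back at the sentinel?
  Node* next() {
    assert(has_next());
    Node* current = next_;
    next_ = next_->next;
    return current;
  }
 private:
  Node* anchor_;
  Node* next_;
};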
56 // ----------------------------------------------------------------------------- 72 // -----------------------------------------------------------------------------
57 // Page
58
59 Page* Page::next_page() {
60 return heap_->isolate()->memory_allocator()->GetNextPage(this);
61 }
62
63
64 Address Page::AllocationTop() {
65 PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
66 return owner->PageAllocationTop(this);
67 }
68
69
70 Address Page::AllocationWatermark() {
71 PagedSpace* owner = heap_->isolate()->memory_allocator()->PageOwner(this);
72 if (this == owner->AllocationTopPage()) {
73 return owner->top();
73 // NewSpacePageIterator
74
75
76 NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
77 : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
78 next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
79 last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
80
81 NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
82 : prev_page_(space->anchor()),
83 next_page_(prev_page_->next_page()),
84 last_page_(prev_page_->prev_page()) { }
85
86 NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
87 : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
88 next_page_(NewSpacePage::FromAddress(start)),
89 last_page_(NewSpacePage::FromLimit(limit)) {
90 SemiSpace::AssertValidRange(start, limit);
91 }
92
93
94 bool NewSpacePageIterator::has_next() {
95 return prev_page_ != last_page_;
96 }
97
98
99 NewSpacePage* NewSpacePageIterator::next() {
100 ASSERT(has_next());
101 prev_page_ = next_page_;
102 next_page_ = next_page_->next_page();
103 return prev_page_;
104 }
105
106
107 // -----------------------------------------------------------------------------
108 // HeapObjectIterator
109 HeapObject* HeapObjectIterator::FromCurrentPage() {
110 while (cur_addr_ != cur_end_) {
111 if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
112 cur_addr_ = space_->limit();
113 continue;
114 }
115 HeapObject* obj = HeapObject::FromAddress(cur_addr_);
116 int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
117 cur_addr_ += obj_size;
118 ASSERT(cur_addr_ <= cur_end_);
119 if (!obj->IsFiller()) {
120 ASSERT_OBJECT_SIZE(obj_size);
121 return obj;
122 }
123 }
124 return NULL;
125 }
126
127
128 // -----------------------------------------------------------------------------
129 // MemoryAllocator
130
131 #ifdef ENABLE_HEAP_PROTECTION
132
133 void MemoryAllocator::Protect(Address start, size_t size) {
134 OS::Protect(start, size);
135 }
136
137
138 void MemoryAllocator::Unprotect(Address start,
139 size_t size,
140 Executability executable) {
141 OS::Unprotect(start, size, executable);
142 }
143
144
145 void MemoryAllocator::ProtectChunkFromPage(Page* page) {
146 int id = GetChunkId(page);
147 OS::Protect(chunks_[id].address(), chunks_[id].size());
148 }
149
150
151 void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
152 int id = GetChunkId(page);
153 OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
154 chunks_[id].owner()->executable() == EXECUTABLE);
155 }
156
74 }
75 return address() + AllocationWatermarkOffset();
76 }
77
78
79 uint32_t Page::AllocationWatermarkOffset() {
80 return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
81 kAllocationWatermarkOffsetShift);
82 }
83
84
85 void Page::SetAllocationWatermark(Address allocation_watermark) {
86 if ((heap_->gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
87 // When iterating intergenerational references during scavenge
88 // we might decide to promote an encountered young object.
89 // We will allocate a space for such an object and put it
90 // into the promotion queue to process it later.
91 // If space for object was allocated somewhere beyond allocation
92 // watermark this might cause garbage pointers to appear under allocation
93 // watermark. To avoid visiting them during dirty regions iteration
94 // which might be still in progress we store a valid allocation watermark
95 // value and mark this page as having an invalid watermark.
96 SetCachedAllocationWatermark(AllocationWatermark());
97 InvalidateWatermark(true);
98 }
99
100 flags_ = (flags_ & kFlagsMask) |
101 Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
102 ASSERT(AllocationWatermarkOffset()
103 == static_cast<uint32_t>(Offset(allocation_watermark)));
104 }
105
106
107 void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
108 mc_first_forwarded = allocation_watermark;
109 }
110
111
112 Address Page::CachedAllocationWatermark() {
113 return mc_first_forwarded;
114 }
115
116
117 uint32_t Page::GetRegionMarks() {
118 return dirty_regions_;
119 }
120
121
122 void Page::SetRegionMarks(uint32_t marks) {
123 dirty_regions_ = marks;
124 }
125
126
127 int Page::GetRegionNumberForAddress(Address addr) {
128 // Each page is divided into 256 byte regions. Each region has a corresponding
129 // dirty mark bit in the page header. Region can contain intergenerational
130 // references iff its dirty mark is set.
131 // A normal 8K page contains exactly 32 regions so all region marks fit
132 // into 32-bit integer field. To calculate a region number we just divide
133 // offset inside page by region size.
134 // A large page can contain more then 32 regions. But we want to avoid
135 // additional write barrier code for distinguishing between large and normal
136 // pages so we just ignore the fact that addr points into a large page and
137 // calculate region number as if addr pointed into a normal 8K page. This way
138 // we get a region number modulo 32 so for large pages several regions might
139 // be mapped to a single dirty mark.
140 ASSERT_PAGE_ALIGNED(this->address());
141 STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
142
143 // We are using masking with kPageAlignmentMask instead of Page::Offset()
144 // to get an offset to the beginning of 8K page containing addr not to the
145 // beginning of actual page which can be bigger then 8K.
146 intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
147 return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
148 }
149
150
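For the record, the region bookkeeping that the comment above describes works out as follows. The constants are assumptions recovered from the comment (256-byte regions imply kRegionSizeLog2 == 8, and a normal page is 8K, so kPageAlignmentMask == 0x1FFF); they are not part of this diff.

#include <cstdint>
#include <cstdio>

int main() {
  const int kRegionSizeLog2 = 8;               // 256-byte regions.
  const intptr_t kPageAlignmentMask = 0x1FFF;  // Normal 8K page.

  // An address whose offset inside its (normal-sized) page is 0x1A40.
  intptr_t addr = 0x40001A40;
  intptr_t offset_inside_normal_page = addr & kPageAlignmentMask;  // 0x1A40
  int region = static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
  uint32_t mask = 1u << region;

  // 0x1A40 / 256 = 26, so the address falls into region 26 of 32 and its
  // dirty mark is bit 26 (mask 0x04000000).
  std::printf("region %d, mask 0x%08x\n", region, mask);
  return 0;
}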
151 uint32_t Page::GetRegionMaskForAddress(Address addr) {
152 return 1 << GetRegionNumberForAddress(addr);
153 }
154
155
156 uint32_t Page::GetRegionMaskForSpan(Address start, int length_in_bytes) {
157 uint32_t result = 0;
158 static const intptr_t kRegionMask = (1 << kRegionSizeLog2) - 1;
159 if (length_in_bytes + (OffsetFrom(start) & kRegionMask) >= kPageSize) {
160 result = kAllRegionsDirtyMarks;
161 } else if (length_in_bytes > 0) {
162 int start_region = GetRegionNumberForAddress(start);
163 int end_region =
164 GetRegionNumberForAddress(start + length_in_bytes - kPointerSize);
165 uint32_t start_mask = (~0) << start_region;
166 uint32_t end_mask = ~((~1) << end_region);
167 result = start_mask & end_mask;
168 // if end_region < start_region, the mask is ored.
169 if (result == 0) result = start_mask | end_mask;
170 }
171 #ifdef DEBUG
172 if (FLAG_enable_slow_asserts) {
173 uint32_t expected = 0;
174 for (Address a = start; a < start + length_in_bytes; a += kPointerSize) {
175 expected |= GetRegionMaskForAddress(a);
176 }
177 ASSERT(expected == result);
178 }
179 #endif
180 return result;
181 }
157 #endif
182
183
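A worked example of the span mask built by GetRegionMaskForSpan above, including the wrap-around case that the "mask is ored" comment refers to (small standalone sketch, not part of the patch):

#include <cstdint>
#include <cstdio>

// Mirrors the mask arithmetic from Page::GetRegionMaskForSpan.
static uint32_t SpanMask(int start_region, int end_region) {
  uint32_t start_mask = (~0u) << start_region;
  uint32_t end_mask = ~((~1u) << end_region);
  uint32_t result = start_mask & end_mask;
  // On a large page the region numbers wrap modulo 32, so end_region can be
  // smaller than start_region; the intersection is then empty and the two
  // masks are OR-ed instead.
  if (result == 0) result = start_mask | end_mask;
  return result;
}

int main() {
  std::printf("0x%08x\n", SpanMask(3, 5));   // 0x00000038: regions 3..5.
  std::printf("0x%08x\n", SpanMask(30, 2));  // 0xc0000007: wrapped span.
  return 0;
}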
184 void Page::MarkRegionDirty(Address address) {
185 SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
186 }
187
188
189 bool Page::IsRegionDirty(Address address) {
190 return GetRegionMarks() & GetRegionMaskForAddress(address);
191 }
192
193
194 void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
195 int rstart = GetRegionNumberForAddress(start);
196 int rend = GetRegionNumberForAddress(end);
197
198 if (reaches_limit) {
199 end += 1;
200 }
201
202 if ((rend - rstart) == 0) {
203 return;
204 }
205
206 uint32_t bitmask = 0;
207
208 if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
209 || (start == ObjectAreaStart())) {
210 // First region is fully covered
211 bitmask = 1 << rstart;
212 }
213
214 while (++rstart < rend) {
215 bitmask |= 1 << rstart;
216 }
217
218 if (bitmask) {
219 SetRegionMarks(GetRegionMarks() & ~bitmask);
220 }
221 }
222
223
224 void Page::FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap) {
225 heap->page_watermark_invalidated_mark_ ^= 1 << WATERMARK_INVALIDATED;
226 }
227
228
229 bool Page::IsWatermarkValid() {
230 return (flags_ & (1 << WATERMARK_INVALIDATED)) !=
231 heap_->page_watermark_invalidated_mark_;
232 }
233
234
235 void Page::InvalidateWatermark(bool value) {
236 if (value) {
237 flags_ = (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
238 heap_->page_watermark_invalidated_mark_;
239 } else {
240 flags_ =
241 (flags_ & ~(1 << WATERMARK_INVALIDATED)) |
242 (heap_->page_watermark_invalidated_mark_ ^
243 (1 << WATERMARK_INVALIDATED));
244 }
245
246 ASSERT(IsWatermarkValid() == !value);
247 }
248
249
250 bool Page::GetPageFlag(PageFlag flag) {
251 return (flags_ & static_cast<intptr_t>(1 << flag)) != 0;
252 }
253
254
255 void Page::SetPageFlag(PageFlag flag, bool value) {
256 if (value) {
257 flags_ |= static_cast<intptr_t>(1 << flag);
258 } else {
259 flags_ &= ~static_cast<intptr_t>(1 << flag);
260 }
261 }
262
263
264 void Page::ClearPageFlags() {
265 flags_ = 0;
266 }
267
268
269 void Page::ClearGCFields() {
270 InvalidateWatermark(true);
271 SetAllocationWatermark(ObjectAreaStart());
272 if (heap_->gc_state() == Heap::SCAVENGE) {
273 SetCachedAllocationWatermark(ObjectAreaStart());
274 }
275 SetRegionMarks(kAllRegionsCleanMarks);
276 }
277
278
279 bool Page::WasInUseBeforeMC() {
280 return GetPageFlag(WAS_IN_USE_BEFORE_MC);
281 }
282
283
284 void Page::SetWasInUseBeforeMC(bool was_in_use) {
285 SetPageFlag(WAS_IN_USE_BEFORE_MC, was_in_use);
286 }
287
288
289 bool Page::IsLargeObjectPage() {
290 return !GetPageFlag(IS_NORMAL_PAGE);
291 }
292
293
294 void Page::SetIsLargeObjectPage(bool is_large_object_page) {
295 SetPageFlag(IS_NORMAL_PAGE, !is_large_object_page);
296 }
297
298 Executability Page::PageExecutability() {
299 return GetPageFlag(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
300 }
301
302
303 void Page::SetPageExecutability(Executability executable) {
304 SetPageFlag(IS_EXECUTABLE, executable == EXECUTABLE);
305 }
306
307
308 // -----------------------------------------------------------------------------
309 // MemoryAllocator
310
311 void MemoryAllocator::ChunkInfo::init(Address a, size_t s, PagedSpace* o) {
312 address_ = a;
313 size_ = s;
314 owner_ = o;
315 executable_ = (o == NULL) ? NOT_EXECUTABLE : o->executable();
316 owner_identity_ = (o == NULL) ? FIRST_SPACE : o->identity();
317 }
318
319
320 bool MemoryAllocator::IsValidChunk(int chunk_id) {
321 if (!IsValidChunkId(chunk_id)) return false;
322
323 ChunkInfo& c = chunks_[chunk_id];
324 return (c.address() != NULL) && (c.size() != 0) && (c.owner() != NULL);
325 }
326
327
328 bool MemoryAllocator::IsValidChunkId(int chunk_id) {
329 return (0 <= chunk_id) && (chunk_id < max_nof_chunks_);
330 }
331
332
333 bool MemoryAllocator::IsPageInSpace(Page* p, PagedSpace* space) {
334 ASSERT(p->is_valid());
335
336 int chunk_id = GetChunkId(p);
337 if (!IsValidChunkId(chunk_id)) return false;
338
339 ChunkInfo& c = chunks_[chunk_id];
340 return (c.address() <= p->address()) &&
341 (p->address() < c.address() + c.size()) &&
342 (space == c.owner());
343 }
344
345
346 Page* MemoryAllocator::GetNextPage(Page* p) {
347 ASSERT(p->is_valid());
348 intptr_t raw_addr = p->opaque_header & ~Page::kPageAlignmentMask;
349 return Page::FromAddress(AddressFrom<Address>(raw_addr));
350 }
351
352
353 int MemoryAllocator::GetChunkId(Page* p) {
354 ASSERT(p->is_valid());
355 return static_cast<int>(p->opaque_header & Page::kPageAlignmentMask);
356 }
357
358
359 void MemoryAllocator::SetNextPage(Page* prev, Page* next) {
360 ASSERT(prev->is_valid());
361 int chunk_id = GetChunkId(prev);
362 ASSERT_PAGE_ALIGNED(next->address());
363 prev->opaque_header = OffsetFrom(next->address()) | chunk_id;
364 }
365
366
367 PagedSpace* MemoryAllocator::PageOwner(Page* page) {
368 int chunk_id = GetChunkId(page);
369 ASSERT(IsValidChunk(chunk_id));
370 return chunks_[chunk_id].owner();
371 }
372
373
374 bool MemoryAllocator::InInitialChunk(Address address) {
375 if (initial_chunk_ == NULL) return false;
376
377 Address start = static_cast<Address>(initial_chunk_->address());
378 return (start <= address) && (address < start + initial_chunk_->size());
379 }
380 158
381 159
382 // -------------------------------------------------------------------------- 160 // --------------------------------------------------------------------------
383 // PagedSpace 161 // PagedSpace
162 Page* Page::Initialize(Heap* heap,
163 MemoryChunk* chunk,
164 Executability executable,
165 PagedSpace* owner) {
166 Page* page = reinterpret_cast<Page*>(chunk);
167 ASSERT(chunk->size() == static_cast<size_t>(kPageSize));
168 ASSERT(chunk->owner() == owner);
169 owner->IncreaseCapacity(Page::kObjectAreaSize);
170 owner->Free(page->ObjectAreaStart(),
171 static_cast<int>(page->ObjectAreaEnd() -
172 page->ObjectAreaStart()));
173
174 heap->incremental_marking()->SetOldSpacePageFlags(chunk);
175
176 return page;
177 }
178
384 179
385 bool PagedSpace::Contains(Address addr) { 180 bool PagedSpace::Contains(Address addr) {
386 Page* p = Page::FromAddress(addr); 181 Page* p = Page::FromAddress(addr);
387 if (!p->is_valid()) return false; 182 if (!p->is_valid()) return false;
388 return heap()->isolate()->memory_allocator()->IsPageInSpace(p, this);
183 return p->owner() == this;
184 }
185
186
187 void MemoryChunk::set_scan_on_scavenge(bool scan) {
188 if (scan) {
189 if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
190 SetFlag(SCAN_ON_SCAVENGE);
191 } else {
192 if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
193 ClearFlag(SCAN_ON_SCAVENGE);
194 }
195 heap_->incremental_marking()->SetOldSpacePageFlags(this);
196 }
197
198
199 MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
200 MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
201 OffsetFrom(addr) & ~Page::kPageAlignmentMask);
202 if (maybe->owner() != NULL) return maybe;
203 LargeObjectIterator iterator(HEAP->lo_space());
204 for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
205 // Fixed arrays are the only pointer-containing objects in large object
206 // space.
207 if (o->IsFixedArray()) {
208 MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
209 if (chunk->Contains(addr)) {
210 return chunk;
211 }
212 }
213 }
214 UNREACHABLE();
215 return NULL;
216 }
217
218
219 PointerChunkIterator::PointerChunkIterator(Heap* heap)
220 : state_(kOldPointerState),
221 old_pointer_iterator_(heap->old_pointer_space()),
222 map_iterator_(heap->map_space()),
223 lo_iterator_(heap->lo_space()) { }
224
225
226 Page* Page::next_page() {
227 ASSERT(next_chunk()->owner() == owner());
228 return static_cast<Page*>(next_chunk());
229 }
230
231
232 Page* Page::prev_page() {
233 ASSERT(prev_chunk()->owner() == owner());
234 return static_cast<Page*>(prev_chunk());
235 }
236
237
238 void Page::set_next_page(Page* page) {
239 ASSERT(page->owner() == owner());
240 set_next_chunk(page);
241 }
242
243
244 void Page::set_prev_page(Page* page) {
245 ASSERT(page->owner() == owner());
246 set_prev_chunk(page);
389 } 247 }
390 248
391 249
392 // Try linear allocation in the page of alloc_info's allocation top. Does 250 // Try linear allocation in the page of alloc_info's allocation top. Does
393 // not contain slow case logic (eg, move to the next page or try free list 251 // not contain slow case logic (eg, move to the next page or try free list
394 // allocation) so it can be used by all the allocation functions and for all 252 // allocation) so it can be used by all the allocation functions and for all
395 // the paged spaces. 253 // the paged spaces.
396 HeapObject* PagedSpace::AllocateLinearly(AllocationInfo* alloc_info,
397 int size_in_bytes) {
398 Address current_top = alloc_info->top;
399 Address new_top = current_top + size_in_bytes;
400 if (new_top > alloc_info->limit) return NULL;
401
402 alloc_info->top = new_top;
403 ASSERT(alloc_info->VerifyPagedAllocation());
404 accounting_stats_.AllocateBytes(size_in_bytes);
405 return HeapObject::FromAddress(current_top);
406 }
254 HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
255 Address current_top = allocation_info_.top;
256 Address new_top = current_top + size_in_bytes;
257 if (new_top > allocation_info_.limit) return NULL;
258
259 allocation_info_.top = new_top;
260 ASSERT(allocation_info_.VerifyPagedAllocation());
261 ASSERT(current_top != NULL);
262 return HeapObject::FromAddress(current_top);
263 }
407 264
408 265
409 // Raw allocation.
410 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
411 ASSERT(HasBeenSetup());
412 ASSERT_OBJECT_SIZE(size_in_bytes);
413 HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
414 if (object != NULL) return object;
415
416 object = SlowAllocateRaw(size_in_bytes);
417 if (object != NULL) return object;
418
419 return Failure::RetryAfterGC(identity());
420 }
421
422
423 // Reallocating (and promoting) objects during a compacting collection.
424 MaybeObject* PagedSpace::MCAllocateRaw(int size_in_bytes) {
425 ASSERT(HasBeenSetup());
426 ASSERT_OBJECT_SIZE(size_in_bytes);
427 HeapObject* object = AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
428 if (object != NULL) return object;
429
430 object = SlowMCAllocateRaw(size_in_bytes);
431 if (object != NULL) return object;
432
433 return Failure::RetryAfterGC(identity());
434 }
266 // Raw allocation.
267 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
268 ASSERT(HasBeenSetup());
269 ASSERT_OBJECT_SIZE(size_in_bytes);
270 HeapObject* object = AllocateLinearly(size_in_bytes);
271 if (object != NULL) {
272 if (identity() == CODE_SPACE) {
273 SkipList::Update(object->address(), size_in_bytes);
274 }
275 return object;
276 }
277
278 object = free_list_.Allocate(size_in_bytes);
279 if (object != NULL) {
280 if (identity() == CODE_SPACE) {
281 SkipList::Update(object->address(), size_in_bytes);
282 }
283 return object;
284 }
285
286 object = SlowAllocateRaw(size_in_bytes);
287 if (object != NULL) {
288 if (identity() == CODE_SPACE) {
289 SkipList::Update(object->address(), size_in_bytes);
290 }
291 return object;
292 }
293
294 return Failure::RetryAfterGC(identity());
295 }
435 296
436 297
437 // ----------------------------------------------------------------------------- 298 // -----------------------------------------------------------------------------
438 // NewSpace 299 // NewSpace
300 MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes) {
301 Address old_top = allocation_info_.top;
302 if (allocation_info_.limit - old_top < size_in_bytes) {
303 Address new_top = old_top + size_in_bytes;
304 Address high = to_space_.page_high();
305 if (allocation_info_.limit < high) {
306 // Incremental marking has lowered the limit to get a
307 // chance to do a step.
308 allocation_info_.limit = Min(
309 allocation_info_.limit + inline_allocation_limit_step_,
310 high);
311 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
312 heap()->incremental_marking()->Step(bytes_allocated);
313 top_on_previous_step_ = new_top;
314 return AllocateRawInternal(size_in_bytes);
315 } else if (AddFreshPage()) {
316 // Switched to new page. Try allocating again.
317 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
318 heap()->incremental_marking()->Step(bytes_allocated);
319 top_on_previous_step_ = to_space_.page_low();
320 return AllocateRawInternal(size_in_bytes);
321 } else {
322 return Failure::RetryAfterGC();
323 }
324 }
439 325
326 Object* obj = HeapObject::FromAddress(allocation_info_.top);
327 allocation_info_.top += size_in_bytes;
328 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
329
330 return obj;
331 }
440 MaybeObject* NewSpace::AllocateRawInternal(int size_in_bytes,
441 AllocationInfo* alloc_info) {
442 Address new_top = alloc_info->top + size_in_bytes;
443 if (new_top > alloc_info->limit) return Failure::RetryAfterGC();
444
445 Object* obj = HeapObject::FromAddress(alloc_info->top);
446 alloc_info->top = new_top;
447 #ifdef DEBUG
448 SemiSpace* space =
449 (alloc_info == &allocation_info_) ? &to_space_ : &from_space_;
450 ASSERT(space->low() <= alloc_info->top
451 && alloc_info->top <= space->high()
452 && alloc_info->limit == space->high());
453 #endif
454 return obj;
455 }
456 332
457 333
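The retry logic in the new NewSpace::AllocateRawInternal above depends on allocation_info_.limit being kept below the real end of the to-space page, so ordinary allocation periodically falls into the slow path, reports the bytes allocated since the last step to the incremental marker, raises the limit by inline_allocation_limit_step_, and tries again. A standalone sketch of that soft-limit pattern (illustrative names and callback, not V8's API):

#include <algorithm>
#include <cstddef>

// Sketch only: a bump-pointer allocator whose limit is raised in steps so a
// callback (e.g. an incremental-marking step) runs every `step` bytes.
// Assumes step > 0 and top_on_prev_step is initialized to the buffer start.
typedef void (*StepCallback)(size_t bytes_allocated_since_last_step);

struct SoftLimitAllocator {
  char* top;
  char* soft_limit;        // Kept at or below hard_limit.
  char* hard_limit;        // Real end of the current buffer.
  char* top_on_prev_step;
  size_t step;
  StepCallback on_step;

  void* Allocate(size_t size) {
    if (static_cast<size_t>(soft_limit - top) < size) {
      if (soft_limit == hard_limit) return NULL;  // Genuinely out of space.
      soft_limit = std::min(soft_limit + step, hard_limit);
      on_step(static_cast<size_t>(top - top_on_prev_step));
      top_on_prev_step = top;
      return Allocate(size);  // Retry with the raised limit.
    }
    void* result = top;
    top += size;
    return result;
  }
};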
458 intptr_t LargeObjectSpace::Available() {
459 return LargeObjectChunk::ObjectSizeFor(
460 heap()->isolate()->memory_allocator()->Available());
461 }
334 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
335 heap->incremental_marking()->SetOldSpacePageFlags(chunk);
336 return static_cast<LargePage*>(chunk);
337 }
462 338
463 339
340 intptr_t LargeObjectSpace::Available() {
341 return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
342 }
343
344
464 template <typename StringType> 345 template <typename StringType>
465 void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) { 346 void NewSpace::ShrinkStringAtAllocationBoundary(String* string, int length) {
466 ASSERT(length <= string->length()); 347 ASSERT(length <= string->length());
467 ASSERT(string->IsSeqString()); 348 ASSERT(string->IsSeqString());
468 ASSERT(string->address() + StringType::SizeFor(string->length()) == 349 ASSERT(string->address() + StringType::SizeFor(string->length()) ==
469 allocation_info_.top); 350 allocation_info_.top);
351 Address old_top = allocation_info_.top;
470 allocation_info_.top = 352 allocation_info_.top =
471 string->address() + StringType::SizeFor(length); 353 string->address() + StringType::SizeFor(length);
472 string->set_length(length); 354 string->set_length(length);
355 if (Marking::IsBlack(Marking::MarkBitFrom(string))) {
356 int delta = static_cast<int>(old_top - allocation_info_.top);
357 MemoryChunk::IncrementLiveBytes(string->address(), -delta);
358 }
473 } 359 }
474 360
475 361
476 bool FreeListNode::IsFreeListNode(HeapObject* object) { 362 bool FreeListNode::IsFreeListNode(HeapObject* object) {
477 return object->map() == HEAP->raw_unchecked_byte_array_map()
478 || object->map() == HEAP->raw_unchecked_one_pointer_filler_map()
479 || object->map() == HEAP->raw_unchecked_two_pointer_filler_map();
363 Map* map = object->map();
364 Heap* heap = object->GetHeap();
365 return map == heap->raw_unchecked_free_space_map()
366 || map == heap->raw_unchecked_one_pointer_filler_map()
367 || map == heap->raw_unchecked_two_pointer_filler_map();
480 } 368 }
481 369
482 } } // namespace v8::internal 370 } } // namespace v8::internal
483 371
484 #endif // V8_SPACES_INL_H_ 372 #endif // V8_SPACES_INL_H_