Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 7945009: Merge experimental/gc branch to the bleeding_edge. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 3 months ago
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 17 matching lines...)
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #include "liveobjectlist-inl.h" 30 #include "liveobjectlist-inl.h"
31 #include "macro-assembler.h" 31 #include "macro-assembler.h"
32 #include "mark-compact.h" 32 #include "mark-compact.h"
33 #include "platform.h" 33 #include "platform.h"
34 34
35 namespace v8 { 35 namespace v8 {
36 namespace internal { 36 namespace internal {
37 37
38 // For contiguous spaces, top should be in the space (or at the end) and limit
39 // should be the end of the space.
40 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
41 ASSERT((space).low() <= (info).top \
42 && (info).top <= (space).high() \
43 && (info).limit == (space).high())
44 38
45 // ---------------------------------------------------------------------------- 39 // ----------------------------------------------------------------------------
46 // HeapObjectIterator 40 // HeapObjectIterator
47 41
48 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { 42 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
49 Initialize(space->bottom(), space->top(), NULL); 43 // You can't actually iterate over the anchor page. It is not a real page,
44 // just an anchor for the doubly linked page list. Initialize as if we have
45 // reached the end of the anchor page, then the first iteration will move on
46 // to the first page.
47 Initialize(space,
48 NULL,
49 NULL,
50 kAllPagesInSpace,
51 NULL);
50 } 52 }
51 53
52 54
53 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, 55 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
54 HeapObjectCallback size_func) { 56 HeapObjectCallback size_func) {
55 Initialize(space->bottom(), space->top(), size_func); 57 // You can't actually iterate over the anchor page. It is not a real page,
56 } 58 // just an anchor for the doubly linked page list. Initialize the current
57 59 // address and end as NULL, then the first iteration will move on
58 60 // to the first page.
59 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start) { 61 Initialize(space,
60 Initialize(start, space->top(), NULL); 62 NULL,
61 } 63 NULL,
62 64 kAllPagesInSpace,
63 65 size_func);
64 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, Address start,
65 HeapObjectCallback size_func) {
66 Initialize(start, space->top(), size_func);
67 } 66 }
68 67
69 68
70 HeapObjectIterator::HeapObjectIterator(Page* page, 69 HeapObjectIterator::HeapObjectIterator(Page* page,
71 HeapObjectCallback size_func) { 70 HeapObjectCallback size_func) {
72 Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func); 71 Space* owner = page->owner();
72 ASSERT(owner == HEAP->old_pointer_space() ||
73 owner == HEAP->old_data_space() ||
74 owner == HEAP->map_space() ||
75 owner == HEAP->cell_space() ||
76 owner == HEAP->code_space());
77 Initialize(reinterpret_cast<PagedSpace*>(owner),
78 page->ObjectAreaStart(),
79 page->ObjectAreaEnd(),
80 kOnePageOnly,
81 size_func);
82 ASSERT(page->WasSweptPrecisely());
73 } 83 }
74 84
75 85
76 void HeapObjectIterator::Initialize(Address cur, Address end, 86 void HeapObjectIterator::Initialize(PagedSpace* space,
87 Address cur, Address end,
88 HeapObjectIterator::PageMode mode,
77 HeapObjectCallback size_f) { 89 HeapObjectCallback size_f) {
90 // Check that we actually can iterate this space.
91 ASSERT(!space->was_swept_conservatively());
92
93 space_ = space;
78 cur_addr_ = cur; 94 cur_addr_ = cur;
79 end_addr_ = end; 95 cur_end_ = end;
80 end_page_ = Page::FromAllocationTop(end); 96 page_mode_ = mode;
81 size_func_ = size_f; 97 size_func_ = size_f;
82 Page* p = Page::FromAllocationTop(cur_addr_);
83 cur_limit_ = (p == end_page_) ? end_addr_ : p->AllocationTop();
84 98
85 #ifdef DEBUG 99 #ifdef DEBUG
86 Verify(); 100 Verify();
87 #endif 101 #endif
88 } 102 }
89 103
90 104
91 HeapObject* HeapObjectIterator::FromNextPage() { 105 // We have hit the end of the page and should advance to the next block of
92 if (cur_addr_ == end_addr_) return NULL; 106 // objects. This happens at the end of the page.
93 107 bool HeapObjectIterator::AdvanceToNextPage() {
94 Page* cur_page = Page::FromAllocationTop(cur_addr_); 108 ASSERT(cur_addr_ == cur_end_);
109 if (page_mode_ == kOnePageOnly) return false;
110 Page* cur_page;
111 if (cur_addr_ == NULL) {
112 cur_page = space_->anchor();
113 } else {
114 cur_page = Page::FromAddress(cur_addr_ - 1);
115 ASSERT(cur_addr_ == cur_page->ObjectAreaEnd());
116 }
95 cur_page = cur_page->next_page(); 117 cur_page = cur_page->next_page();
96 ASSERT(cur_page->is_valid()); 118 if (cur_page == space_->anchor()) return false;
97
98 cur_addr_ = cur_page->ObjectAreaStart(); 119 cur_addr_ = cur_page->ObjectAreaStart();
99 cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop(); 120 cur_end_ = cur_page->ObjectAreaEnd();
100 121 ASSERT(cur_page->WasSweptPrecisely());
101 if (cur_addr_ == end_addr_) return NULL; 122 return true;
102 ASSERT(cur_addr_ < cur_limit_);
103 #ifdef DEBUG
104 Verify();
105 #endif
106 return FromCurrentPage();
107 } 123 }
108 124
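The two constructors and AdvanceToNextPage above all lean on the anchor being a sentinel node in a circular, doubly linked list of pages: iteration starts "as if at the end of the anchor page" and stops as soon as the walk returns to the anchor. Below is a minimal standalone sketch of that pattern, assuming invented types (PageNode, PageList, PageIteratorSketch) rather than V8's real Page and HeapObjectIterator classes.

#include <cstdio>

// Hypothetical stand-ins for Page and the anchor-based page list.
struct PageNode {
  PageNode* next;
  PageNode* prev;
  int id;  // payload standing in for the page's object area
};

struct PageList {
  PageNode anchor;  // sentinel: not a real page, links to itself when empty
  PageList() {
    anchor.next = &anchor;
    anchor.prev = &anchor;
    anchor.id = -1;
  }
  void Append(PageNode* p) {  // same shape as MemoryChunk::InsertAfter
    p->next = &anchor;
    p->prev = anchor.prev;
    anchor.prev->next = p;
    anchor.prev = p;
  }
};

// Mirrors HeapObjectIterator: start at the anchor, then every advance moves
// to the next page until the walk wraps around to the anchor again.
struct PageIteratorSketch {
  explicit PageIteratorSketch(PageList* list)
      : list_(list), current_(&list->anchor) {}
  PageNode* Next() {
    current_ = current_->next;
    if (current_ == &list_->anchor) return nullptr;  // wrapped around: done
    return current_;
  }
  PageList* list_;
  PageNode* current_;
};

int main() {
  PageList list;
  PageNode a{nullptr, nullptr, 0}, b{nullptr, nullptr, 1}, c{nullptr, nullptr, 2};
  list.Append(&a);
  list.Append(&b);
  list.Append(&c);
  PageIteratorSketch it(&list);
  for (PageNode* p = it.Next(); p != nullptr; p = it.Next()) {
    std::printf("visiting page %d\n", p->id);
  }
  return 0;
}

Using a sentinel this way removes the empty-list and end-of-list special cases: the anchor is always there to link to, so Append and Next never have to test for null.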
109 125
110 #ifdef DEBUG 126 #ifdef DEBUG
111 void HeapObjectIterator::Verify() { 127 void HeapObjectIterator::Verify() {
112 Page* p = Page::FromAllocationTop(cur_addr_); 128 // TODO(gc): We should do something here.
113 ASSERT(p == Page::FromAllocationTop(cur_limit_));
114 ASSERT(p->Offset(cur_addr_) <= p->Offset(cur_limit_));
115 } 129 }
116 #endif 130 #endif
117 131
118 132
119 // ----------------------------------------------------------------------------- 133 // -----------------------------------------------------------------------------
120 // PageIterator
121
122 PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
123 prev_page_ = NULL;
124 switch (mode) {
125 case PAGES_IN_USE:
126 stop_page_ = space->AllocationTopPage();
127 break;
128 case PAGES_USED_BY_MC:
129 stop_page_ = space->MCRelocationTopPage();
130 break;
131 case ALL_PAGES:
132 #ifdef DEBUG
133 // Verify that the cached last page in the space is actually the
134 // last page.
135 for (Page* p = space->first_page_; p->is_valid(); p = p->next_page()) {
136 if (!p->next_page()->is_valid()) {
137 ASSERT(space->last_page_ == p);
138 }
139 }
140 #endif
141 stop_page_ = space->last_page_;
142 break;
143 }
144 }
145
146
147 // -----------------------------------------------------------------------------
148 // CodeRange 134 // CodeRange
149 135
150 136
151 CodeRange::CodeRange(Isolate* isolate) 137 CodeRange::CodeRange(Isolate* isolate)
152 : isolate_(isolate), 138 : isolate_(isolate),
153 code_range_(NULL), 139 code_range_(NULL),
154 free_list_(0), 140 free_list_(0),
155 allocation_list_(0), 141 allocation_list_(0),
156 current_allocation_block_index_(0) { 142 current_allocation_block_index_(0) {
157 } 143 }
158 144
159 145
160 bool CodeRange::Setup(const size_t requested) { 146 bool CodeRange::Setup(const size_t requested) {
161 ASSERT(code_range_ == NULL); 147 ASSERT(code_range_ == NULL);
162 148
163 code_range_ = new VirtualMemory(requested); 149 code_range_ = new VirtualMemory(requested);
164 CHECK(code_range_ != NULL); 150 CHECK(code_range_ != NULL);
165 if (!code_range_->IsReserved()) { 151 if (!code_range_->IsReserved()) {
166 delete code_range_; 152 delete code_range_;
167 code_range_ = NULL; 153 code_range_ = NULL;
168 return false; 154 return false;
169 } 155 }
170 156
171 // We are sure that we have mapped a block of requested addresses. 157 // We are sure that we have mapped a block of requested addresses.
172 ASSERT(code_range_->size() == requested); 158 ASSERT(code_range_->size() == requested);
173 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested)); 159 LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
174 allocation_list_.Add(FreeBlock(code_range_->address(), code_range_->size())); 160 Address base = reinterpret_cast<Address>(code_range_->address());
161 Address aligned_base =
162 RoundUp(reinterpret_cast<Address>(code_range_->address()),
163 MemoryChunk::kAlignment);
164 size_t size = code_range_->size() - (aligned_base - base);
165 allocation_list_.Add(FreeBlock(aligned_base, size));
175 current_allocation_block_index_ = 0; 166 current_allocation_block_index_ = 0;
176 return true; 167 return true;
177 } 168 }
178 169
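CodeRange::Setup rounds the start of the reservation up to MemoryChunk::kAlignment and charges the wasted prefix against the usable size. A small sketch of just that arithmetic, assuming an illustrative kAlignmentSketch constant (the real value comes from MemoryChunk::kAlignment):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Assumed alignment for illustration; must be a power of two.
const uintptr_t kAlignmentSketch = 1 << 20;  // 1 MB

uintptr_t RoundUpToAlignment(uintptr_t value, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0);  // power of two
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  // Pretend the OS handed back a reservation at an unaligned address.
  uintptr_t base = 0x12345678;
  size_t reserved_size = 64 * 1024 * 1024;

  uintptr_t aligned_base = RoundUpToAlignment(base, kAlignmentSketch);
  // The bytes between base and aligned_base are never handed out, so the
  // usable size shrinks by exactly that amount, as in CodeRange::Setup.
  size_t usable_size = reserved_size - (aligned_base - base);

  std::printf("base=%#zx aligned=%#zx usable=%zu\n",
              static_cast<size_t>(base), static_cast<size_t>(aligned_base),
              usable_size);
  return 0;
}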
179 170
180 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, 171 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
181 const FreeBlock* right) { 172 const FreeBlock* right) {
182 // The entire point of CodeRange is that the difference between two 173 // The entire point of CodeRange is that the difference between two
183 // addresses in the range can be represented as a signed 32-bit int, 174 // addresses in the range can be represented as a signed 32-bit int,
184 // so the cast is semantically correct. 175 // so the cast is semantically correct.
(...skipping 36 matching lines...)
221 return; // Found a large enough allocation block. 212 return; // Found a large enough allocation block.
222 } 213 }
223 } 214 }
224 215
225 // Code range is full or too fragmented. 216 // Code range is full or too fragmented.
226 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); 217 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
227 } 218 }
228 219
229 220
230 221
231 void* CodeRange::AllocateRawMemory(const size_t requested, size_t* allocated) { 222 Address CodeRange::AllocateRawMemory(const size_t requested,
223 size_t* allocated) {
232 ASSERT(current_allocation_block_index_ < allocation_list_.length()); 224 ASSERT(current_allocation_block_index_ < allocation_list_.length());
233 if (requested > allocation_list_[current_allocation_block_index_].size) { 225 if (requested > allocation_list_[current_allocation_block_index_].size) {
234 // Find an allocation block large enough. This function call may 226 // Find an allocation block large enough. This function call may
235 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. 227 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
236 GetNextAllocationBlock(requested); 228 GetNextAllocationBlock(requested);
237 } 229 }
238 // Commit the requested memory at the start of the current allocation block. 230 // Commit the requested memory at the start of the current allocation block.
239 *allocated = RoundUp(requested, Page::kPageSize); 231 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
240 FreeBlock current = allocation_list_[current_allocation_block_index_]; 232 FreeBlock current = allocation_list_[current_allocation_block_index_];
241 if (*allocated >= current.size - Page::kPageSize) { 233 if (aligned_requested >= (current.size - Page::kPageSize)) {
242 // Don't leave a small free block, useless for a large object or chunk. 234 // Don't leave a small free block, useless for a large object or chunk.
243 *allocated = current.size; 235 *allocated = current.size;
236 } else {
237 *allocated = aligned_requested;
244 } 238 }
245 ASSERT(*allocated <= current.size); 239 ASSERT(*allocated <= current.size);
240 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
246 if (!code_range_->Commit(current.start, *allocated, true)) { 241 if (!code_range_->Commit(current.start, *allocated, true)) {
247 *allocated = 0; 242 *allocated = 0;
248 return NULL; 243 return NULL;
249 } 244 }
250 allocation_list_[current_allocation_block_index_].start += *allocated; 245 allocation_list_[current_allocation_block_index_].start += *allocated;
251 allocation_list_[current_allocation_block_index_].size -= *allocated; 246 allocation_list_[current_allocation_block_index_].size -= *allocated;
252 if (*allocated == current.size) { 247 if (*allocated == current.size) {
253 GetNextAllocationBlock(0); // This block is used up, get the next one. 248 GetNextAllocationBlock(0); // This block is used up, get the next one.
254 } 249 }
255 return current.start; 250 return current.start;
256 } 251 }
257 252
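AllocateRawMemory's sizing rule is: round the request up to the chunk alignment, but if the tail that would remain in the free block is no larger than a page, hand out the entire block instead. The sketch below isolates that decision, with made-up constants in place of MemoryChunk::kAlignment and Page::kPageSize:

#include <cstddef>
#include <cstdio>

// Illustrative constants only.
const size_t kAlignmentSketch = 64 * 1024;
const size_t kPageSizeSketch = 4 * 1024;

size_t RoundUpSize(size_t value, size_t granularity) {
  return (value + granularity - 1) & ~(granularity - 1);
}

// Given a free block of block_size bytes, decide how much to hand out for a
// request of 'requested' bytes, mirroring CodeRange::AllocateRawMemory.
size_t BytesToAllocate(size_t requested, size_t block_size) {
  size_t aligned_requested = RoundUpSize(requested, kAlignmentSketch);
  if (aligned_requested >= block_size - kPageSizeSketch) {
    // Don't leave a tail smaller than a page: useless for further chunks.
    return block_size;
  }
  return aligned_requested;
}

int main() {
  std::printf("%zu\n", BytesToAllocate(100 * 1024, 1024 * 1024));   // aligned request
  std::printf("%zu\n", BytesToAllocate(1000 * 1024, 1024 * 1024));  // whole block
  return 0;
}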
258 253
259 void CodeRange::FreeRawMemory(void* address, size_t length) { 254 void CodeRange::FreeRawMemory(Address address, size_t length) {
255 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
260 free_list_.Add(FreeBlock(address, length)); 256 free_list_.Add(FreeBlock(address, length));
261 code_range_->Uncommit(address, length); 257 code_range_->Uncommit(address, length);
262 } 258 }
263 259
264 260
265 void CodeRange::TearDown() { 261 void CodeRange::TearDown() {
266 delete code_range_; // Frees all memory in the virtual memory range. 262 delete code_range_; // Frees all memory in the virtual memory range.
267 code_range_ = NULL; 263 code_range_ = NULL;
268 free_list_.Free(); 264 free_list_.Free();
269 allocation_list_.Free(); 265 allocation_list_.Free();
270 } 266 }
271 267
272 268
273 // ----------------------------------------------------------------------------- 269 // -----------------------------------------------------------------------------
274 // MemoryAllocator 270 // MemoryAllocator
275 // 271 //
276 272
277 // 270 is an estimate based on the static default heap size of a pair of 256K
278 // semispaces and a 64M old generation.
279 const int kEstimatedNumberOfChunks = 270;
280
281
282 MemoryAllocator::MemoryAllocator(Isolate* isolate) 273 MemoryAllocator::MemoryAllocator(Isolate* isolate)
283 : isolate_(isolate), 274 : isolate_(isolate),
284 capacity_(0), 275 capacity_(0),
285 capacity_executable_(0), 276 capacity_executable_(0),
286 size_(0), 277 size_(0),
287 size_executable_(0), 278 size_executable_(0) {
288 initial_chunk_(NULL),
289 chunks_(kEstimatedNumberOfChunks),
290 free_chunk_ids_(kEstimatedNumberOfChunks),
291 max_nof_chunks_(0),
292 top_(0) {
293 }
294
295
296 void MemoryAllocator::Push(int free_chunk_id) {
297 ASSERT(max_nof_chunks_ > 0);
298 ASSERT(top_ < max_nof_chunks_);
299 free_chunk_ids_[top_++] = free_chunk_id;
300 }
301
302
303 int MemoryAllocator::Pop() {
304 ASSERT(top_ > 0);
305 return free_chunk_ids_[--top_];
306 } 279 }
307 280
308 281
309 bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) { 282 bool MemoryAllocator::Setup(intptr_t capacity, intptr_t capacity_executable) {
310 capacity_ = RoundUp(capacity, Page::kPageSize); 283 capacity_ = RoundUp(capacity, Page::kPageSize);
311 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); 284 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
312 ASSERT_GE(capacity_, capacity_executable_); 285 ASSERT_GE(capacity_, capacity_executable_);
313 286
314 // Over-estimate the size of chunks_ array. It assumes the expansion of old
315 // space is always in the unit of a chunk (kChunkSize) except the last
316 // expansion.
317 //
318 // Due to alignment, allocated space might be one page less than required
319 // number (kPagesPerChunk) of pages for old spaces.
320 //
321 // Reserve two chunk ids for semispaces, one for map space, one for old
322 // space, and one for code space.
323 max_nof_chunks_ =
324 static_cast<int>((capacity_ / (kChunkSize - Page::kPageSize))) + 5;
325 if (max_nof_chunks_ > kMaxNofChunks) return false;
326
327 size_ = 0; 287 size_ = 0;
328 size_executable_ = 0; 288 size_executable_ = 0;
329 ChunkInfo info; // uninitialized element. 289
330 for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
331 chunks_.Add(info);
332 free_chunk_ids_.Add(i);
333 }
334 top_ = max_nof_chunks_;
335 return true; 290 return true;
336 } 291 }
337 292
338 293
339 void MemoryAllocator::TearDown() { 294 void MemoryAllocator::TearDown() {
340 for (int i = 0; i < max_nof_chunks_; i++) { 295 // Check that spaces were torn down before MemoryAllocator.
341 if (chunks_[i].address() != NULL) DeleteChunk(i); 296 ASSERT(size_ == 0);
342 } 297 // TODO(gc) this will be true again when we fix FreeMemory.
343 chunks_.Clear(); 298 // ASSERT(size_executable_ == 0);
344 free_chunk_ids_.Clear();
345
346 if (initial_chunk_ != NULL) {
347 LOG(isolate_, DeleteEvent("InitialChunk", initial_chunk_->address()));
348 delete initial_chunk_;
349 initial_chunk_ = NULL;
350 }
351
352 ASSERT(top_ == max_nof_chunks_); // all chunks are free
353 top_ = 0;
354 capacity_ = 0; 299 capacity_ = 0;
355 capacity_executable_ = 0; 300 capacity_executable_ = 0;
356 size_ = 0; 301 }
357 max_nof_chunks_ = 0; 302
358 } 303
359 304 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
360 305 Executability executable) {
361 void* MemoryAllocator::AllocateRawMemory(const size_t requested, 306 // TODO(gc) make code_range part of memory allocator?
362 size_t* allocated, 307 ASSERT(reservation->IsReserved());
363 Executability executable) { 308 size_t size = reservation->size();
364 if (size_ + static_cast<size_t>(requested) > static_cast<size_t>(capacity_)) { 309 ASSERT(size_ >= size);
310 size_ -= size;
311
312 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
313
314 if (executable == EXECUTABLE) {
315 ASSERT(size_executable_ >= size);
316 size_executable_ -= size;
317 }
318 // Code which is part of the code-range does not have its own VirtualMemory.
319 ASSERT(!isolate_->code_range()->contains(
320 static_cast<Address>(reservation->address())));
321 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
322 reservation->Release();
323 }
324
325
326 void MemoryAllocator::FreeMemory(Address base,
327 size_t size,
328 Executability executable) {
329 // TODO(gc) make code_range part of memory allocator?
330 ASSERT(size_ >= size);
331 size_ -= size;
332
333 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
334
335 if (executable == EXECUTABLE) {
336 ASSERT(size_executable_ >= size);
337 size_executable_ -= size;
338 }
339 if (isolate_->code_range()->contains(static_cast<Address>(base))) {
340 ASSERT(executable == EXECUTABLE);
341 isolate_->code_range()->FreeRawMemory(base, size);
342 } else {
343 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
344 VirtualMemory::ReleaseRegion(base, size);
345 }
346 }
347
348
349 Address MemoryAllocator::ReserveAlignedMemory(size_t size,
350 size_t alignment,
351 VirtualMemory* controller) {
352 VirtualMemory reservation(size, alignment);
353
354 if (!reservation.IsReserved()) return NULL;
355 size_ += reservation.size();
356 Address base = RoundUp(static_cast<Address>(reservation.address()),
357 alignment);
358 controller->TakeControl(&reservation);
359 return base;
360 }
361
362
363 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
364 size_t alignment,
365 Executability executable,
366 VirtualMemory* controller) {
367 VirtualMemory reservation;
368 Address base = ReserveAlignedMemory(size, alignment, &reservation);
369 if (base == NULL) return NULL;
370 if (!reservation.Commit(base,
371 size,
372 executable == EXECUTABLE)) {
365 return NULL; 373 return NULL;
366 } 374 }
367 375 controller->TakeControl(&reservation);
368 void* mem; 376 return base;
377 }
378
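ReserveAlignedMemory and AllocateAlignedMemory both end with controller->TakeControl(&reservation): the stack-local VirtualMemory would otherwise release the mapping when it goes out of scope, so ownership has to be handed to the caller explicitly. A standalone sketch of that ownership-transfer idiom, using a hypothetical RegionSketch RAII wrapper with malloc/free standing in for reserve/release:

#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Hypothetical RAII wrapper standing in for VirtualMemory.
class RegionSketch {
 public:
  RegionSketch() : address_(nullptr), size_(0) {}
  explicit RegionSketch(size_t size)
      : address_(std::malloc(size)), size_(size) {}
  ~RegionSketch() {
    // If nobody took control, the region is released here.
    if (address_ != nullptr) std::free(address_);
  }
  bool IsReserved() const { return address_ != nullptr; }
  void* address() const { return address_; }
  // Mirrors VirtualMemory::TakeControl: steal the other region's mapping so
  // its destructor becomes a no-op.
  void TakeControl(RegionSketch* other) {
    if (address_ != nullptr) std::free(address_);
    address_ = other->address_;
    size_ = other->size_;
    other->address_ = nullptr;
    other->size_ = 0;
  }
 private:
  void* address_;
  size_t size_;
};

void* AllocateWithController(size_t size, RegionSketch* controller) {
  RegionSketch reservation(size);          // local owner for now
  if (!reservation.IsReserved()) return nullptr;
  controller->TakeControl(&reservation);   // transfer ownership to the caller
  return controller->address();            // reservation's dtor is now a no-op
}

int main() {
  RegionSketch controller;
  void* p = AllocateWithController(4096, &controller);
  std::printf("got %p\n", p);
  return 0;  // controller frees the region when it goes out of scope
}

The same shape appears in the patch whenever an allocation can fail midway: the local reservation cleans up automatically on the early-return paths, and TakeControl disarms it only once the whole operation has succeeded.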
379
380 void Page::InitializeAsAnchor(PagedSpace* owner) {
381 set_owner(owner);
382 set_prev_page(this);
383 set_next_page(this);
384 }
385
386
387 NewSpacePage* NewSpacePage::Initialize(Heap* heap,
388 Address start,
389 SemiSpace* semi_space) {
390 MemoryChunk* chunk = MemoryChunk::Initialize(heap,
391 start,
392 Page::kPageSize,
393 NOT_EXECUTABLE,
394 semi_space);
395 chunk->set_next_chunk(NULL);
396 chunk->set_prev_chunk(NULL);
397 chunk->initialize_scan_on_scavenge(true);
398 bool in_to_space = (semi_space->id() != kFromSpace);
399 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
400 : MemoryChunk::IN_FROM_SPACE);
401 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
402 : MemoryChunk::IN_TO_SPACE));
403 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
404 heap->incremental_marking()->SetNewSpacePageFlags(page);
405 return page;
406 }
407
408
409 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
410 set_owner(semi_space);
411 set_next_chunk(this);
412 set_prev_chunk(this);
413 // The flags mark this invalid page as not being in new-space.
414 // All real new-space pages will be in new-space.
415 SetFlags(0, ~0);
416 }
417
418
419 MemoryChunk* MemoryChunk::Initialize(Heap* heap,
420 Address base,
421 size_t size,
422 Executability executable,
423 Space* owner) {
424 MemoryChunk* chunk = FromAddress(base);
425
426 ASSERT(base == chunk->address());
427
428 chunk->heap_ = heap;
429 chunk->size_ = size;
430 chunk->flags_ = 0;
431 chunk->set_owner(owner);
432 chunk->InitializeReservedMemory();
433 chunk->slots_buffer_ = NULL;
434 chunk->skip_list_ = NULL;
435 Bitmap::Clear(chunk);
436 chunk->initialize_scan_on_scavenge(false);
437 chunk->SetFlag(WAS_SWEPT_PRECISELY);
438
439 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
440
441 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE);
442
443 if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA);
444
445 return chunk;
446 }
447
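MemoryChunk::Initialize writes the chunk header at a base address that is always kAlignment-aligned, which is what lets FromAddress recover the owning chunk of any interior pointer by masking off the low bits. A compact sketch of that mapping, with an invented ChunkHeaderSketch struct and a small illustrative alignment instead of the real MemoryChunk layout:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <new>

// Illustrative chunk alignment (a power of two); each sketch chunk is exactly
// this large, so any interior address masks back to the chunk's base.
const uintptr_t kChunkAlignmentSketch = 4096;

struct ChunkHeaderSketch {
  size_t size;
  int flags;
  // ... the object area follows the header in the real layout.
};

// Recover the chunk header from an address inside the chunk by clearing the
// low alignment bits, as MemoryChunk::FromAddress does.
ChunkHeaderSketch* ChunkFromAddress(uintptr_t addr) {
  return reinterpret_cast<ChunkHeaderSketch*>(addr & ~(kChunkAlignmentSketch - 1));
}

int main() {
  // One chunk-sized, chunk-aligned block standing in for a committed page.
  alignas(4096) static char backing[4096];
  uintptr_t base = reinterpret_cast<uintptr_t>(backing);
  assert(base % kChunkAlignmentSketch == 0);

  ChunkHeaderSketch* chunk = new (backing) ChunkHeaderSketch();
  chunk->size = sizeof(backing);
  chunk->flags = 0;

  // Any address inside the chunk maps back to the same header.
  uintptr_t interior = base + 123;
  assert(ChunkFromAddress(interior) == chunk);
  std::printf("chunk size: %zu\n", ChunkFromAddress(interior)->size);
  return 0;
}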
448
449 void MemoryChunk::InsertAfter(MemoryChunk* other) {
450 next_chunk_ = other->next_chunk_;
451 prev_chunk_ = other;
452 other->next_chunk_->prev_chunk_ = this;
453 other->next_chunk_ = this;
454 }
455
456
457 void MemoryChunk::Unlink() {
458 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
459 heap_->decrement_scan_on_scavenge_pages();
460 ClearFlag(SCAN_ON_SCAVENGE);
461 }
462 next_chunk_->prev_chunk_ = prev_chunk_;
463 prev_chunk_->next_chunk_ = next_chunk_;
464 prev_chunk_ = NULL;
465 next_chunk_ = NULL;
466 }
467
468
469 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
470 Executability executable,
471 Space* owner) {
472 size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
473 Heap* heap = isolate_->heap();
474 Address base = NULL;
475 VirtualMemory reservation;
369 if (executable == EXECUTABLE) { 476 if (executable == EXECUTABLE) {
370 // Check executable memory limit. 477 // Check executable memory limit.
371 if (size_executable_ + requested > 478 if (size_executable_ + chunk_size > capacity_executable_) {
372 static_cast<size_t>(capacity_executable_)) {
373 LOG(isolate_, 479 LOG(isolate_,
374 StringEvent("MemoryAllocator::AllocateRawMemory", 480 StringEvent("MemoryAllocator::AllocateRawMemory",
375 "V8 Executable Allocation capacity exceeded")); 481 "V8 Executable Allocation capacity exceeded"));
376 return NULL; 482 return NULL;
377 } 483 }
484
378 // Allocate executable memory either from code range or from the 485 // Allocate executable memory either from code range or from the
379 // OS. 486 // OS.
380 if (isolate_->code_range()->exists()) { 487 if (isolate_->code_range()->exists()) {
381 mem = isolate_->code_range()->AllocateRawMemory(requested, allocated); 488 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
489 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
490 MemoryChunk::kAlignment));
491 if (base == NULL) return NULL;
492 size_ += chunk_size;
493 // Update executable memory size.
494 size_executable_ += chunk_size;
382 } else { 495 } else {
383 mem = OS::Allocate(requested, allocated, true); 496 base = AllocateAlignedMemory(chunk_size,
497 MemoryChunk::kAlignment,
498 executable,
499 &reservation);
500 if (base == NULL) return NULL;
501 // Update executable memory size.
502 size_executable_ += reservation.size();
384 } 503 }
385 // Update executable memory size.
386 size_executable_ += static_cast<int>(*allocated);
387 } else { 504 } else {
388 mem = OS::Allocate(requested, allocated, false); 505 base = AllocateAlignedMemory(chunk_size,
389 } 506 MemoryChunk::kAlignment,
390 int alloced = static_cast<int>(*allocated); 507 executable,
391 size_ += alloced; 508 &reservation);
509
510 if (base == NULL) return NULL;
511 }
392 512
393 #ifdef DEBUG 513 #ifdef DEBUG
394 ZapBlock(reinterpret_cast<Address>(mem), alloced); 514 ZapBlock(base, chunk_size);
395 #endif 515 #endif
396 isolate_->counters()->memory_allocated()->Increment(alloced); 516 isolate_->counters()->memory_allocated()->
397 return mem; 517 Increment(static_cast<int>(chunk_size));
398 } 518
399 519 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
400 520 if (owner != NULL) {
401 void MemoryAllocator::FreeRawMemory(void* mem, 521 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
402 size_t length, 522 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
523 }
524
525 MemoryChunk* result = MemoryChunk::Initialize(heap,
526 base,
527 chunk_size,
528 executable,
529 owner);
530 result->set_reserved_memory(&reservation);
531 return result;
532 }
533
534
535 Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
403 Executability executable) { 536 Executability executable) {
404 #ifdef DEBUG 537 MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
405 // Do not try to zap the guard page. 538
406 size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0; 539 if (chunk == NULL) return NULL;
407 ZapBlock(reinterpret_cast<Address>(mem) + guard_size, length - guard_size); 540
408 #endif 541 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
409 if (isolate_->code_range()->contains(static_cast<Address>(mem))) { 542 }
410 isolate_->code_range()->FreeRawMemory(mem, length); 543
544
545 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
546 Executability executable,
547 Space* owner) {
548 MemoryChunk* chunk = AllocateChunk(object_size, executable, owner);
549 if (chunk == NULL) return NULL;
550 return LargePage::Initialize(isolate_->heap(), chunk);
551 }
552
553
554 void MemoryAllocator::Free(MemoryChunk* chunk) {
555 LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
556 if (chunk->owner() != NULL) {
557 ObjectSpace space =
558 static_cast<ObjectSpace>(1 << chunk->owner()->identity());
559 PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
560 }
561
562 delete chunk->slots_buffer();
563 delete chunk->skip_list();
564
565 VirtualMemory* reservation = chunk->reserved_memory();
566 if (reservation->IsReserved()) {
567 FreeMemory(reservation, chunk->executable());
411 } else { 568 } else {
412 OS::Free(mem, length); 569 FreeMemory(chunk->address(),
413 } 570 chunk->size(),
414 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length)); 571 chunk->executable());
415 size_ -= static_cast<int>(length); 572 }
416 if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length); 573 }
417 574
418 ASSERT(size_ >= 0); 575
419 ASSERT(size_executable_ >= 0); 576 bool MemoryAllocator::CommitBlock(Address start,
420 } 577 size_t size,
421 578 Executability executable) {
422 579 if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
423 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
424 AllocationAction action,
425 size_t size) {
426 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
427 MemoryAllocationCallbackRegistration registration =
428 memory_allocation_callbacks_[i];
429 if ((registration.space & space) == space &&
430 (registration.action & action) == action)
431 registration.callback(space, action, static_cast<int>(size));
432 }
433 }
434
435
436 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
437 MemoryAllocationCallback callback) {
438 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
439 if (memory_allocation_callbacks_[i].callback == callback) return true;
440 }
441 return false;
442 }
443
444
445 void MemoryAllocator::AddMemoryAllocationCallback(
446 MemoryAllocationCallback callback,
447 ObjectSpace space,
448 AllocationAction action) {
449 ASSERT(callback != NULL);
450 MemoryAllocationCallbackRegistration registration(callback, space, action);
451 ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
452 return memory_allocation_callbacks_.Add(registration);
453 }
454
455
456 void MemoryAllocator::RemoveMemoryAllocationCallback(
457 MemoryAllocationCallback callback) {
458 ASSERT(callback != NULL);
459 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
460 if (memory_allocation_callbacks_[i].callback == callback) {
461 memory_allocation_callbacks_.Remove(i);
462 return;
463 }
464 }
465 UNREACHABLE();
466 }
467
468 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
469 ASSERT(initial_chunk_ == NULL);
470
471 initial_chunk_ = new VirtualMemory(requested);
472 CHECK(initial_chunk_ != NULL);
473 if (!initial_chunk_->IsReserved()) {
474 delete initial_chunk_;
475 initial_chunk_ = NULL;
476 return NULL;
477 }
478
479 // We are sure that we have mapped a block of requested addresses.
480 ASSERT(initial_chunk_->size() == requested);
481 LOG(isolate_,
482 NewEvent("InitialChunk", initial_chunk_->address(), requested));
483 size_ += static_cast<int>(requested);
484 return initial_chunk_->address();
485 }
486
487
488 static int PagesInChunk(Address start, size_t size) {
489 // The first page starts on the first page-aligned address from start onward
490 // and the last page ends on the last page-aligned address before
491 // start+size. Page::kPageSize is a power of two so we can divide by
492 // shifting.
493 return static_cast<int>((RoundDown(start + size, Page::kPageSize)
494 - RoundUp(start, Page::kPageSize)) >> kPageSizeBits);
495 }
496
497
498 Page* MemoryAllocator::AllocatePages(int requested_pages,
499 int* allocated_pages,
500 PagedSpace* owner) {
501 if (requested_pages <= 0) return Page::FromAddress(NULL);
502 size_t chunk_size = requested_pages * Page::kPageSize;
503
504 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
505 if (chunk == NULL) return Page::FromAddress(NULL);
506 LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
507
508 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
509
510 // We may 'lose' a page due to alignment.
511 ASSERT(*allocated_pages >= kPagesPerChunk - 1);
512
513 size_t guard_size = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;
514
515 // Check that we got at least one page that we can use.
516 if (*allocated_pages <= ((guard_size != 0) ? 1 : 0)) {
517 FreeRawMemory(chunk,
518 chunk_size,
519 owner->executable());
520 LOG(isolate_, DeleteEvent("PagedChunk", chunk));
521 return Page::FromAddress(NULL);
522 }
523
524 if (guard_size != 0) {
525 OS::Guard(chunk, guard_size);
526 chunk_size -= guard_size;
527 chunk = static_cast<Address>(chunk) + guard_size;
528 --*allocated_pages;
529 }
530
531 int chunk_id = Pop();
532 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
533
534 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
535 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
536 Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
537
538 return new_pages;
539 }
540
541
542 Page* MemoryAllocator::CommitPages(Address start, size_t size,
543 PagedSpace* owner, int* num_pages) {
544 ASSERT(start != NULL);
545 *num_pages = PagesInChunk(start, size);
546 ASSERT(*num_pages > 0);
547 ASSERT(initial_chunk_ != NULL);
548 ASSERT(InInitialChunk(start));
549 ASSERT(InInitialChunk(start + size - 1));
550 if (!initial_chunk_->Commit(start, size, owner->executable() == EXECUTABLE)) {
551 return Page::FromAddress(NULL);
552 }
553 #ifdef DEBUG 580 #ifdef DEBUG
554 ZapBlock(start, size); 581 ZapBlock(start, size);
555 #endif 582 #endif
556 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
557
558 // So long as we correctly overestimated the number of chunks we should not
559 // run out of chunk ids.
560 CHECK(!OutOfChunkIds());
561 int chunk_id = Pop();
562 chunks_[chunk_id].init(start, size, owner);
563 return InitializePagesInChunk(chunk_id, *num_pages, owner);
564 }
565
566
567 bool MemoryAllocator::CommitBlock(Address start,
568 size_t size,
569 Executability executable) {
570 ASSERT(start != NULL);
571 ASSERT(size > 0);
572 ASSERT(initial_chunk_ != NULL);
573 ASSERT(InInitialChunk(start));
574 ASSERT(InInitialChunk(start + size - 1));
575
576 if (!initial_chunk_->Commit(start, size, executable)) return false;
577 #ifdef DEBUG
578 ZapBlock(start, size);
579 #endif
580 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); 583 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
581 return true; 584 return true;
582 } 585 }
583 586
584 587
585 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { 588 bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
586 ASSERT(start != NULL); 589 if (!VirtualMemory::UncommitRegion(start, size)) return false;
587 ASSERT(size > 0);
588 ASSERT(initial_chunk_ != NULL);
589 ASSERT(InInitialChunk(start));
590 ASSERT(InInitialChunk(start + size - 1));
591
592 if (!initial_chunk_->Uncommit(start, size)) return false;
593 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); 590 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
594 return true; 591 return true;
595 } 592 }
596 593
597 594
598 void MemoryAllocator::ZapBlock(Address start, size_t size) { 595 void MemoryAllocator::ZapBlock(Address start, size_t size) {
599 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { 596 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
600 Memory::Address_at(start + s) = kZapValue; 597 Memory::Address_at(start + s) = kZapValue;
601 } 598 }
602 } 599 }
603 600
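ZapBlock exists so that freshly committed memory is filled with a recognizable pattern in debug builds; dangling pointers into it then stand out immediately in a debugger. The sketch below mirrors the word-by-word loop, with an arbitrary pattern in place of V8's kZapValue:

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Arbitrary, easy-to-spot pattern; V8 uses its own kZapValue.
const uintptr_t kZapPatternSketch = 0xdeadbeef;

void ZapBlockSketch(char* start, size_t size) {
  // Write the pattern one pointer-sized word at a time, as in
  // MemoryAllocator::ZapBlock; a trailing partial word is left untouched.
  for (size_t s = 0; s + sizeof(uintptr_t) <= size; s += sizeof(uintptr_t)) {
    uintptr_t value = kZapPatternSketch;
    std::memcpy(start + s, &value, sizeof(value));
  }
}

int main() {
  char block[64];
  ZapBlockSketch(block, sizeof(block));
  uintptr_t first;
  std::memcpy(&first, block, sizeof(first));
  std::printf("first word: %#zx\n", static_cast<size_t>(first));
  return 0;
}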
604 601
605 Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk, 602 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
606 PagedSpace* owner) { 603 AllocationAction action,
607 ASSERT(IsValidChunk(chunk_id)); 604 size_t size) {
608 ASSERT(pages_in_chunk > 0); 605 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
609 606 MemoryAllocationCallbackRegistration registration =
610 Address chunk_start = chunks_[chunk_id].address(); 607 memory_allocation_callbacks_[i];
611 608 if ((registration.space & space) == space &&
612 Address low = RoundUp(chunk_start, Page::kPageSize); 609 (registration.action & action) == action)
613 610 registration.callback(space, action, static_cast<int>(size));
614 #ifdef DEBUG
615 size_t chunk_size = chunks_[chunk_id].size();
616 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize);
617 ASSERT(pages_in_chunk <=
618 ((OffsetFrom(high) - OffsetFrom(low)) / Page::kPageSize));
619 #endif
620
621 Address page_addr = low;
622 for (int i = 0; i < pages_in_chunk; i++) {
623 Page* p = Page::FromAddress(page_addr);
624 p->heap_ = owner->heap();
625 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
626 p->InvalidateWatermark(true);
627 p->SetIsLargeObjectPage(false);
628 p->SetAllocationWatermark(p->ObjectAreaStart());
629 p->SetCachedAllocationWatermark(p->ObjectAreaStart());
630 page_addr += Page::kPageSize;
631 }
632
633 // Set the next page of the last page to 0.
634 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
635 last_page->opaque_header = OffsetFrom(0) | chunk_id;
636
637 return Page::FromAddress(low);
638 }
639
640
641 Page* MemoryAllocator::FreePages(Page* p) {
642 if (!p->is_valid()) return p;
643
644 // Find the first page in the same chunk as 'p'
645 Page* first_page = FindFirstPageInSameChunk(p);
646 Page* page_to_return = Page::FromAddress(NULL);
647
648 if (p != first_page) {
649 // Find the last page in the same chunk as 'prev'.
650 Page* last_page = FindLastPageInSameChunk(p);
651 first_page = GetNextPage(last_page); // first page in next chunk
652
653 // set the next_page of last_page to NULL
654 SetNextPage(last_page, Page::FromAddress(NULL));
655 page_to_return = p; // return 'p' when exiting
656 }
657
658 while (first_page->is_valid()) {
659 int chunk_id = GetChunkId(first_page);
660 ASSERT(IsValidChunk(chunk_id));
661
662 // Find the first page of the next chunk before deleting this chunk.
663 first_page = GetNextPage(FindLastPageInSameChunk(first_page));
664
665 // Free the current chunk.
666 DeleteChunk(chunk_id);
667 }
668
669 return page_to_return;
670 }
671
672
673 void MemoryAllocator::FreeAllPages(PagedSpace* space) {
674 for (int i = 0, length = chunks_.length(); i < length; i++) {
675 if (chunks_[i].owner() == space) {
676 DeleteChunk(i);
677 }
678 } 611 }
679 } 612 }
680 613
681 614
682 void MemoryAllocator::DeleteChunk(int chunk_id) { 615 bool MemoryAllocator::MemoryAllocationCallbackRegistered(
683 ASSERT(IsValidChunk(chunk_id)); 616 MemoryAllocationCallback callback) {
684 617 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
685 ChunkInfo& c = chunks_[chunk_id]; 618 if (memory_allocation_callbacks_[i].callback == callback) return true;
686
687 // We cannot free a chunk contained in the initial chunk because it was not
688 // allocated with AllocateRawMemory. Instead we uncommit the virtual
689 // memory.
690 if (InInitialChunk(c.address())) {
691 // TODO(1240712): VirtualMemory::Uncommit has a return value which
692 // is ignored here.
693 initial_chunk_->Uncommit(c.address(), c.size());
694 Counters* counters = isolate_->counters();
695 counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
696 } else {
697 LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
698 ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
699 size_t size = c.size();
700 size_t guard_size = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
701 FreeRawMemory(c.address() - guard_size, size + guard_size, c.executable());
702 PerformAllocationCallback(space, kAllocationActionFree, size);
703 } 619 }
704 c.init(NULL, 0, NULL); 620 return false;
705 Push(chunk_id);
706 } 621 }
707 622
708 623
709 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) { 624 void MemoryAllocator::AddMemoryAllocationCallback(
710 int chunk_id = GetChunkId(p); 625 MemoryAllocationCallback callback,
711 ASSERT(IsValidChunk(chunk_id)); 626 ObjectSpace space,
712 627 AllocationAction action) {
713 Address low = RoundUp(chunks_[chunk_id].address(), Page::kPageSize); 628 ASSERT(callback != NULL);
714 return Page::FromAddress(low); 629 MemoryAllocationCallbackRegistration registration(callback, space, action);
630 ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
631 return memory_allocation_callbacks_.Add(registration);
715 } 632 }
716 633
717 634
718 Page* MemoryAllocator::FindLastPageInSameChunk(Page* p) { 635 void MemoryAllocator::RemoveMemoryAllocationCallback(
719 int chunk_id = GetChunkId(p); 636 MemoryAllocationCallback callback) {
720 ASSERT(IsValidChunk(chunk_id)); 637 ASSERT(callback != NULL);
721 638 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
722 Address chunk_start = chunks_[chunk_id].address(); 639 if (memory_allocation_callbacks_[i].callback == callback) {
723 size_t chunk_size = chunks_[chunk_id].size(); 640 memory_allocation_callbacks_.Remove(i);
724 641 return;
725 Address high = RoundDown(chunk_start + chunk_size, Page::kPageSize); 642 }
726 ASSERT(chunk_start <= p->address() && p->address() < high); 643 }
727 644 UNREACHABLE();
728 return Page::FromAddress(high - Page::kPageSize);
729 } 645 }
730 646
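PerformAllocationCallback dispatches on bitmasks: a registered callback fires only when its space mask and action mask both cover the event being reported. A self-contained sketch of that registry and dispatch, with invented flag enums and callback types:

#include <cstddef>
#include <cstdio>
#include <vector>

// Invented bit flags standing in for ObjectSpace / AllocationAction.
enum SpaceBits  { kOldSpaceBit = 1 << 0, kCodeSpaceBit = 1 << 1 };
enum ActionBits { kActionAllocate = 1 << 0, kActionFree = 1 << 1 };

typedef void (*AllocationCallbackSketch)(int space, int action, int size);

struct RegistrationSketch {
  AllocationCallbackSketch callback;
  int space_mask;
  int action_mask;
};

std::vector<RegistrationSketch> g_registry;

// Mirrors MemoryAllocator::PerformAllocationCallback: a callback runs only
// when both of its masks cover the event.
void PerformCallbackSketch(int space, int action, int size) {
  for (size_t i = 0; i < g_registry.size(); ++i) {
    const RegistrationSketch& r = g_registry[i];
    if ((r.space_mask & space) == space && (r.action_mask & action) == action) {
      r.callback(space, action, size);
    }
  }
}

void LogCodeAllocations(int space, int action, int size) {
  std::printf("code space event: action=%d size=%d\n", action, size);
}

int main() {
  RegistrationSketch r = { LogCodeAllocations, kCodeSpaceBit, kActionAllocate };
  g_registry.push_back(r);
  PerformCallbackSketch(kCodeSpaceBit, kActionAllocate, 4096);  // fires
  PerformCallbackSketch(kOldSpaceBit, kActionAllocate, 4096);   // filtered out
  return 0;
}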
731 647
732 #ifdef DEBUG 648 #ifdef DEBUG
733 void MemoryAllocator::ReportStatistics() { 649 void MemoryAllocator::ReportStatistics() {
734 float pct = static_cast<float>(capacity_ - size_) / capacity_; 650 float pct = static_cast<float>(capacity_ - size_) / capacity_;
735 PrintF(" capacity: %" V8_PTR_PREFIX "d" 651 PrintF(" capacity: %" V8_PTR_PREFIX "d"
736 ", used: %" V8_PTR_PREFIX "d" 652 ", used: %" V8_PTR_PREFIX "d"
737 ", available: %%%d\n\n", 653 ", available: %%%d\n\n",
738 capacity_, size_, static_cast<int>(pct*100)); 654 capacity_, size_, static_cast<int>(pct*100));
739 } 655 }
740 #endif 656 #endif
741 657
742
743 void MemoryAllocator::RelinkPageListInChunkOrder(PagedSpace* space,
744 Page** first_page,
745 Page** last_page,
746 Page** last_page_in_use) {
747 Page* first = NULL;
748 Page* last = NULL;
749
750 for (int i = 0, length = chunks_.length(); i < length; i++) {
751 ChunkInfo& chunk = chunks_[i];
752
753 if (chunk.owner() == space) {
754 if (first == NULL) {
755 Address low = RoundUp(chunk.address(), Page::kPageSize);
756 first = Page::FromAddress(low);
757 }
758 last = RelinkPagesInChunk(i,
759 chunk.address(),
760 chunk.size(),
761 last,
762 last_page_in_use);
763 }
764 }
765
766 if (first_page != NULL) {
767 *first_page = first;
768 }
769
770 if (last_page != NULL) {
771 *last_page = last;
772 }
773 }
774
775
776 Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
777 Address chunk_start,
778 size_t chunk_size,
779 Page* prev,
780 Page** last_page_in_use) {
781 Address page_addr = RoundUp(chunk_start, Page::kPageSize);
782 int pages_in_chunk = PagesInChunk(chunk_start, chunk_size);
783
784 if (prev->is_valid()) {
785 SetNextPage(prev, Page::FromAddress(page_addr));
786 }
787
788 for (int i = 0; i < pages_in_chunk; i++) {
789 Page* p = Page::FromAddress(page_addr);
790 p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
791 page_addr += Page::kPageSize;
792
793 p->InvalidateWatermark(true);
794 if (p->WasInUseBeforeMC()) {
795 *last_page_in_use = p;
796 }
797 }
798
799 // Set the next page of the last page to 0.
800 Page* last_page = Page::FromAddress(page_addr - Page::kPageSize);
801 last_page->opaque_header = OffsetFrom(0) | chunk_id;
802
803 if (last_page->WasInUseBeforeMC()) {
804 *last_page_in_use = last_page;
805 }
806
807 return last_page;
808 }
809
810
811 // ----------------------------------------------------------------------------- 658 // -----------------------------------------------------------------------------
812 // PagedSpace implementation 659 // PagedSpace implementation
813 660
814 PagedSpace::PagedSpace(Heap* heap, 661 PagedSpace::PagedSpace(Heap* heap,
815 intptr_t max_capacity, 662 intptr_t max_capacity,
816 AllocationSpace id, 663 AllocationSpace id,
817 Executability executable) 664 Executability executable)
818 : Space(heap, id, executable) { 665 : Space(heap, id, executable),
666 free_list_(this),
667 was_swept_conservatively_(false),
668 first_unswept_page_(Page::FromAddress(NULL)),
669 last_unswept_page_(Page::FromAddress(NULL)) {
819 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) 670 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
820 * Page::kObjectAreaSize; 671 * Page::kObjectAreaSize;
821 accounting_stats_.Clear(); 672 accounting_stats_.Clear();
822 673
823 allocation_info_.top = NULL; 674 allocation_info_.top = NULL;
824 allocation_info_.limit = NULL; 675 allocation_info_.limit = NULL;
825 676
826 mc_forwarding_info_.top = NULL; 677 anchor_.InitializeAsAnchor(this);
827 mc_forwarding_info_.limit = NULL;
828 } 678 }
829 679
830 680
831 bool PagedSpace::Setup(Address start, size_t size) { 681 bool PagedSpace::Setup() {
832 if (HasBeenSetup()) return false;
833
834 int num_pages = 0;
835 // Try to use the virtual memory range passed to us. If it is too small to
836 // contain at least one page, ignore it and allocate instead.
837 int pages_in_chunk = PagesInChunk(start, size);
838 if (pages_in_chunk > 0) {
839 first_page_ = Isolate::Current()->memory_allocator()->CommitPages(
840 RoundUp(start, Page::kPageSize),
841 Page::kPageSize * pages_in_chunk,
842 this, &num_pages);
843 } else {
844 int requested_pages =
845 Min(MemoryAllocator::kPagesPerChunk,
846 static_cast<int>(max_capacity_ / Page::kObjectAreaSize));
847 first_page_ =
848 Isolate::Current()->memory_allocator()->AllocatePages(
849 requested_pages, &num_pages, this);
850 if (!first_page_->is_valid()) return false;
851 }
852
853 // We are sure that the first page is valid and that we have at least one
854 // page.
855 ASSERT(first_page_->is_valid());
856 ASSERT(num_pages > 0);
857 accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
858 ASSERT(Capacity() <= max_capacity_);
859
860 // Sequentially clear region marks in the newly allocated
861 // pages and cache the current last page in the space.
862 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
863 p->SetRegionMarks(Page::kAllRegionsCleanMarks);
864 last_page_ = p;
865 }
866
867 // Use first_page_ for allocation.
868 SetAllocationInfo(&allocation_info_, first_page_);
869
870 page_list_is_chunk_ordered_ = true;
871
872 return true; 682 return true;
873 } 683 }
874 684
875 685
876 bool PagedSpace::HasBeenSetup() { 686 bool PagedSpace::HasBeenSetup() {
877 return (Capacity() > 0); 687 return true;
878 } 688 }
879 689
880 690
881 void PagedSpace::TearDown() { 691 void PagedSpace::TearDown() {
882 Isolate::Current()->memory_allocator()->FreeAllPages(this); 692 PageIterator iterator(this);
883 first_page_ = NULL; 693 while (iterator.has_next()) {
694 heap()->isolate()->memory_allocator()->Free(iterator.next());
695 }
696 anchor_.set_next_page(&anchor_);
697 anchor_.set_prev_page(&anchor_);
884 accounting_stats_.Clear(); 698 accounting_stats_.Clear();
885 } 699 }
886 700
887 701
888 void PagedSpace::MarkAllPagesClean() {
889 PageIterator it(this, PageIterator::ALL_PAGES);
890 while (it.has_next()) {
891 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
892 }
893 }
894
895
896 MaybeObject* PagedSpace::FindObject(Address addr) { 702 MaybeObject* PagedSpace::FindObject(Address addr) {
897 // Note: this function can only be called before or after mark-compact GC 703 // Note: this function can only be called on precisely swept spaces.
898 // because it accesses map pointers.
899 ASSERT(!heap()->mark_compact_collector()->in_use()); 704 ASSERT(!heap()->mark_compact_collector()->in_use());
900 705
901 if (!Contains(addr)) return Failure::Exception(); 706 if (!Contains(addr)) return Failure::Exception();
902 707
903 Page* p = Page::FromAddress(addr); 708 Page* p = Page::FromAddress(addr);
904 ASSERT(IsUsed(p)); 709 HeapObjectIterator it(p, NULL);
905 Address cur = p->ObjectAreaStart(); 710 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
906 Address end = p->AllocationTop(); 711 Address cur = obj->address();
907 while (cur < end) {
908 HeapObject* obj = HeapObject::FromAddress(cur);
909 Address next = cur + obj->Size(); 712 Address next = cur + obj->Size();
910 if ((cur <= addr) && (addr < next)) return obj; 713 if ((cur <= addr) && (addr < next)) return obj;
911 cur = next;
912 } 714 }
913 715
914 UNREACHABLE(); 716 UNREACHABLE();
915 return Failure::Exception(); 717 return Failure::Exception();
916 } 718 }
917 719
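The rewritten FindObject walks the page's objects in address order and returns the one whose [address, address + Size()) interval contains addr. The same containment scan in miniature, over a flat array of variable-sized records whose first slot stores their own length (a stand-in for HeapObject::Size()):

#include <cstdio>

// Return the index of the record that contains slot index 'target',
// mirroring FindObject's (cur <= addr) && (addr < next) test.
int FindRecordContaining(const int* slots, int total_slots, int target) {
  int cur = 0;
  while (cur < total_slots) {
    int next = cur + slots[cur];  // slots[cur] holds the record's size
    if (cur <= target && target < next) return cur;
    cur = next;
  }
  return -1;  // not found (the original hits UNREACHABLE() here)
}

int main() {
  // Three records of sizes 3, 2 and 4 slots laid out back to back.
  int slots[9] = {3, 10, 11, 2, 20, 4, 30, 31, 32};
  std::printf("slot 4 is inside the record starting at %d\n",
              FindRecordContaining(slots, 9, 4));  // prints 3
  std::printf("slot 7 is inside the record starting at %d\n",
              FindRecordContaining(slots, 9, 7));  // prints 5
  return 0;
}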
918 720 bool PagedSpace::CanExpand() {
919 bool PagedSpace::IsUsed(Page* page) {
920 PageIterator it(this, PageIterator::PAGES_IN_USE);
921 while (it.has_next()) {
922 if (page == it.next()) return true;
923 }
924 return false;
925 }
926
927
928 void PagedSpace::SetAllocationInfo(AllocationInfo* alloc_info, Page* p) {
929 alloc_info->top = p->ObjectAreaStart();
930 alloc_info->limit = p->ObjectAreaEnd();
931 ASSERT(alloc_info->VerifyPagedAllocation());
932 }
933
934
935 void PagedSpace::MCResetRelocationInfo() {
936 // Set page indexes.
937 int i = 0;
938 PageIterator it(this, PageIterator::ALL_PAGES);
939 while (it.has_next()) {
940 Page* p = it.next();
941 p->mc_page_index = i++;
942 }
943
944 // Set mc_forwarding_info_ to the first page in the space.
945 SetAllocationInfo(&mc_forwarding_info_, first_page_);
946 // All the bytes in the space are 'available'. We will rediscover
947 // allocated and wasted bytes during GC.
948 accounting_stats_.Reset();
949 }
950
951
952 int PagedSpace::MCSpaceOffsetForAddress(Address addr) {
953 #ifdef DEBUG
954 // The Contains function considers the address at the beginning of a
955 // page in the page, MCSpaceOffsetForAddress considers it is in the
956 // previous page.
957 if (Page::IsAlignedToPageSize(addr)) {
958 ASSERT(Contains(addr - kPointerSize));
959 } else {
960 ASSERT(Contains(addr));
961 }
962 #endif
963
964 // If addr is at the end of a page, it belongs to previous page
965 Page* p = Page::IsAlignedToPageSize(addr)
966 ? Page::FromAllocationTop(addr)
967 : Page::FromAddress(addr);
968 int index = p->mc_page_index;
969 return (index * Page::kPageSize) + p->Offset(addr);
970 }
971
972
973 // Slow case for reallocating and promoting objects during a compacting
974 // collection. This function is not space-specific.
975 HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
976 Page* current_page = TopPageOf(mc_forwarding_info_);
977 if (!current_page->next_page()->is_valid()) {
978 if (!Expand(current_page)) {
979 return NULL;
980 }
981 }
982
983 // There are surely more pages in the space now.
984 ASSERT(current_page->next_page()->is_valid());
985 // We do not add the top of page block for current page to the space's
986 // free list---the block may contain live objects so we cannot write
987 // bookkeeping information to it. Instead, we will recover top of page
988 // blocks when we move objects to their new locations.
989 //
990 // We do however write the allocation pointer to the page. The encoding
991 // of forwarding addresses is as an offset in terms of live bytes, so we
992 // need quick access to the allocation top of each page to decode
993 // forwarding addresses.
994 current_page->SetAllocationWatermark(mc_forwarding_info_.top);
995 current_page->next_page()->InvalidateWatermark(true);
996 SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
997 return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
998 }
999
1000
1001 bool PagedSpace::Expand(Page* last_page) {
1002 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0); 721 ASSERT(max_capacity_ % Page::kObjectAreaSize == 0);
1003 ASSERT(Capacity() % Page::kObjectAreaSize == 0); 722 ASSERT(Capacity() % Page::kObjectAreaSize == 0);
1004 723
1005 if (Capacity() == max_capacity_) return false; 724 if (Capacity() == max_capacity_) return false;
1006 725
1007 ASSERT(Capacity() < max_capacity_); 726 ASSERT(Capacity() < max_capacity_);
1008 // Last page must be valid and its next page is invalid.
1009 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
1010 727
1011 int available_pages = 728 // Are we going to exceed capacity for this space?
1012 static_cast<int>((max_capacity_ - Capacity()) / Page::kObjectAreaSize); 729 if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
1013 // We don't want to have to handle small chunks near the end so if there are
1014 // not kPagesPerChunk pages available without exceeding the max capacity then
1015 // act as if memory has run out.
1016 if (available_pages < MemoryAllocator::kPagesPerChunk) return false;
1017 730
1018 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk); 731 return true;
1019 Page* p = heap()->isolate()->memory_allocator()->AllocatePages( 732 }
1020 desired_pages, &desired_pages, this);
1021 if (!p->is_valid()) return false;
1022 733
1023 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize); 734 bool PagedSpace::Expand() {
735 if (!CanExpand()) return false;
736
737 Page* p = heap()->isolate()->memory_allocator()->
738 AllocatePage(this, executable());
739 if (p == NULL) return false;
740
1024 ASSERT(Capacity() <= max_capacity_); 741 ASSERT(Capacity() <= max_capacity_);
1025 742
1026 heap()->isolate()->memory_allocator()->SetNextPage(last_page, p); 743 p->InsertAfter(anchor_.prev_page());
1027
1028 // Sequentially clear region marks of new pages and and cache the
1029 // new last page in the space.
1030 while (p->is_valid()) {
1031 p->SetRegionMarks(Page::kAllRegionsCleanMarks);
1032 last_page_ = p;
1033 p = p->next_page();
1034 }
1035 744
1036 return true; 745 return true;
1037 } 746 }
1038 747
1039 748
1040 #ifdef DEBUG 749 #ifdef DEBUG
1041 int PagedSpace::CountTotalPages() { 750 int PagedSpace::CountTotalPages() {
751 PageIterator it(this);
1042 int count = 0; 752 int count = 0;
1043 for (Page* p = first_page_; p->is_valid(); p = p->next_page()) { 753 while (it.has_next()) {
754 it.next();
1044 count++; 755 count++;
1045 } 756 }
1046 return count; 757 return count;
1047 } 758 }
1048 #endif 759 #endif
1049 760
1050 761
1051 void PagedSpace::Shrink() { 762 void PagedSpace::Shrink() {
1052 if (!page_list_is_chunk_ordered_) { 763 // TODO(1614) Not implemented.
1053 // We can't shrink space if pages is not chunk-ordered
1054 // (see comment for class MemoryAllocator for definition).
1055 return;
1056 }
1057
1058 // Release half of free pages.
1059 Page* top_page = AllocationTopPage();
1060 ASSERT(top_page->is_valid());
1061
1062 // Count the number of pages we would like to free.
1063 int pages_to_free = 0;
1064 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
1065 pages_to_free++;
1066 }
1067
1068 // Free pages after top_page.
1069 Page* p = heap()->isolate()->memory_allocator()->
1070 FreePages(top_page->next_page());
1071 heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
1072
1073 // Find out how many pages we failed to free and update last_page_.
1074 // Please note pages can only be freed in whole chunks.
1075 last_page_ = top_page;
1076 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
1077 pages_to_free--;
1078 last_page_ = p;
1079 }
1080
1081 accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
1082 ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
1083 } 764 }
1084 765
1085 766
1086 bool PagedSpace::EnsureCapacity(int capacity) { 767 bool PagedSpace::EnsureCapacity(int capacity) {
1087 if (Capacity() >= capacity) return true; 768 while (Capacity() < capacity) {
1088 769 // Expand the space until it has the required capacity or expansion fails.
1089 // Start from the allocation top and loop to the last page in the space. 770 if (!Expand()) return false;
1090 Page* last_page = AllocationTopPage();
1091 Page* next_page = last_page->next_page();
1092 while (next_page->is_valid()) {
1093 last_page = heap()->isolate()->memory_allocator()->
1094 FindLastPageInSameChunk(next_page);
1095 next_page = last_page->next_page();
1096 } 771 }
1097
1098 // Expand the space until it has the required capacity or expansion fails.
1099 do {
1100 if (!Expand(last_page)) return false;
1101 ASSERT(last_page->next_page()->is_valid());
1102 last_page =
1103 heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
1104 last_page->next_page());
1105 } while (Capacity() < capacity);
1106
1107 return true; 772 return true;
1108 } 773 }
1109 774
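CanExpand, Expand and EnsureCapacity implement a grow-one-page-at-a-time policy under a hard cap. A compact sketch of that policy, with arbitrary sizes in place of Page::kObjectAreaSize and max_capacity_:

#include <cstdio>

const int kPageSizeSketch = 4096;

struct SpaceSketch {
  int capacity;      // bytes currently backed by pages
  int max_capacity;  // hard cap, like max_capacity_ in PagedSpace
};

// Mirrors PagedSpace::CanExpand / Expand: refuse to grow past the cap,
// otherwise add exactly one page worth of capacity.
bool ExpandSketch(SpaceSketch* space) {
  if (space->capacity + kPageSizeSketch > space->max_capacity) return false;
  space->capacity += kPageSizeSketch;  // stands in for AllocatePage + InsertAfter
  return true;
}

// Mirrors PagedSpace::EnsureCapacity: keep expanding until the requested
// capacity is reached or expansion fails.
bool EnsureCapacitySketch(SpaceSketch* space, int capacity) {
  while (space->capacity < capacity) {
    if (!ExpandSketch(space)) return false;
  }
  return true;
}

int main() {
  SpaceSketch space = {0, 8 * kPageSizeSketch};
  std::printf("%d\n", EnsureCapacitySketch(&space, 5 * kPageSizeSketch));   // 1
  std::printf("%d\n", EnsureCapacitySketch(&space, 16 * kPageSizeSketch));  // 0
  std::printf("capacity now %d\n", space.capacity);
  return 0;
}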
1110 775
1111 #ifdef DEBUG 776 #ifdef DEBUG
1112 void PagedSpace::Print() { } 777 void PagedSpace::Print() { }
1113 #endif 778 #endif
1114 779
1115 780
1116 #ifdef DEBUG 781 #ifdef DEBUG
1117 // We do not assume that the PageIterator works, because it depends on the
1118 // invariants we are checking during verification.
1119 void PagedSpace::Verify(ObjectVisitor* visitor) { 782 void PagedSpace::Verify(ObjectVisitor* visitor) {
1120 // The allocation pointer should be valid, and it should be in a page in the 783 // We can only iterate over the pages if they were swept precisely.
1121 // space. 784 if (was_swept_conservatively_) return;
1122 ASSERT(allocation_info_.VerifyPagedAllocation());
1123 Page* top_page = Page::FromAllocationTop(allocation_info_.top);
1124 ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
1125 785
1126 // Loop over all the pages. 786 bool allocation_pointer_found_in_space =
1127 bool above_allocation_top = false; 787 (allocation_info_.top != allocation_info_.limit);
1128 Page* current_page = first_page_; 788 PageIterator page_iterator(this);
1129 while (current_page->is_valid()) { 789 while (page_iterator.has_next()) {
1130 if (above_allocation_top) { 790 Page* page = page_iterator.next();
1131 // We don't care what's above the allocation top. 791 ASSERT(page->owner() == this);
1132 } else { 792 if (page == Page::FromAllocationTop(allocation_info_.top)) {
1133 Address top = current_page->AllocationTop(); 793 allocation_pointer_found_in_space = true;
1134 if (current_page == top_page) { 794 }
1135 ASSERT(top == allocation_info_.top); 795 ASSERT(page->WasSweptPrecisely());
1136 // The next page will be above the allocation top. 796 HeapObjectIterator it(page, NULL);
1137 above_allocation_top = true; 797 Address end_of_previous_object = page->ObjectAreaStart();
798 Address top = page->ObjectAreaEnd();
799 int black_size = 0;
800 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
801 ASSERT(end_of_previous_object <= object->address());
802
803 // The first word should be a map, and we expect all map pointers to
804 // be in map space.
805 Map* map = object->map();
806 ASSERT(map->IsMap());
807 ASSERT(heap()->map_space()->Contains(map));
808
809 // Perform space-specific object verification.
810 VerifyObject(object);
811
812 // The object itself should look OK.
813 object->Verify();
814
815 // All the interior pointers should be contained in the heap.
816 int size = object->Size();
817 object->IterateBody(map->instance_type(), size, visitor);
818 if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
819 black_size += size;
1138 } 820 }
1139 821
1140 // It should be packed with objects from the bottom to the top. 822 ASSERT(object->address() + size <= top);
1141 Address current = current_page->ObjectAreaStart(); 823 end_of_previous_object = object->address() + size;
1142 while (current < top) {
1143 HeapObject* object = HeapObject::FromAddress(current);
1144
1145 // The first word should be a map, and we expect all map pointers to
1146 // be in map space.
1147 Map* map = object->map();
1148 ASSERT(map->IsMap());
1149 ASSERT(heap()->map_space()->Contains(map));
1150
1151 // Perform space-specific object verification.
1152 VerifyObject(object);
1153
1154 // The object itself should look OK.
1155 object->Verify();
1156
1157 // All the interior pointers should be contained in the heap, and page
1158 // regions covering intergenerational references should be marked
1159 // dirty.
1160 int size = object->Size();
1161 object->IterateBody(map->instance_type(), size, visitor);
1162
1163 current += size;
1164 }
1165
1166 // The allocation pointer should not be in the middle of an object.
1167 ASSERT(current == top);
1168 } 824 }
1169 825 // TODO(1672): Page live bytes are off for some tests.
1170 current_page = current_page->next_page(); 826 // CHECK_LE(black_size, page->LiveBytes());
1171 } 827 }
1172 } 828 }
1173 #endif 829 #endif
1174 830
1175 831
1176 // ----------------------------------------------------------------------------- 832 // -----------------------------------------------------------------------------
1177 // NewSpace implementation 833 // NewSpace implementation
1178 834
1179 835
1180 bool NewSpace::Setup(Address start, int size) { 836 bool NewSpace::Setup(int reserved_semispace_capacity,
837 int maximum_semispace_capacity) {
1181 // Setup new space based on the preallocated memory block defined by 838 // Setup new space based on the preallocated memory block defined by
1182 // start and size. The provided space is divided into two semi-spaces. 839 // start and size. The provided space is divided into two semi-spaces.
1183 // To support fast containment testing in the new space, the size of 840 // To support fast containment testing in the new space, the size of
1184 // this chunk must be a power of two and it must be aligned to its size. 841 // this chunk must be a power of two and it must be aligned to its size.
1185 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); 842 int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1186 int maximum_semispace_capacity = heap()->MaxSemiSpaceSize(); 843
844 size_t size = 2 * reserved_semispace_capacity;
845 Address base =
846 heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
847 size, size, &reservation_);
848 if (base == NULL) return false;
849
850 chunk_base_ = base;
851 chunk_size_ = static_cast<uintptr_t>(size);
852 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1187 853
1188 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); 854 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1189 ASSERT(IsPowerOf2(maximum_semispace_capacity)); 855 ASSERT(IsPowerOf2(maximum_semispace_capacity));
1190 856
1191 // Allocate and setup the histogram arrays if necessary. 857 // Allocate and setup the histogram arrays if necessary.
1192 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); 858 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1193 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); 859 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1194 860
1195 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ 861 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1196 promoted_histogram_[name].set_name(#name); 862 promoted_histogram_[name].set_name(#name);
1197 INSTANCE_TYPE_LIST(SET_NAME) 863 INSTANCE_TYPE_LIST(SET_NAME)
1198 #undef SET_NAME 864 #undef SET_NAME
1199 865
1200 ASSERT(size == 2 * heap()->ReservedSemiSpaceSize()); 866 ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
1201 ASSERT(IsAddressAligned(start, size, 0)); 867 ASSERT(static_cast<intptr_t>(chunk_size_) >=
868 2 * heap()->ReservedSemiSpaceSize());
869 ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
1202 870
1203 if (!to_space_.Setup(start, 871 if (!to_space_.Setup(chunk_base_,
1204 initial_semispace_capacity, 872 initial_semispace_capacity,
1205 maximum_semispace_capacity)) { 873 maximum_semispace_capacity)) {
1206 return false; 874 return false;
1207 } 875 }
1208 if (!from_space_.Setup(start + maximum_semispace_capacity, 876 if (!from_space_.Setup(chunk_base_ + reserved_semispace_capacity,
1209 initial_semispace_capacity, 877 initial_semispace_capacity,
1210 maximum_semispace_capacity)) { 878 maximum_semispace_capacity)) {
1211 return false; 879 return false;
1212 } 880 }
1213 881
1214 start_ = start; 882 start_ = chunk_base_;
1215 address_mask_ = ~(size - 1); 883 address_mask_ = ~(2 * reserved_semispace_capacity - 1);
1216 object_mask_ = address_mask_ | kHeapObjectTagMask; 884 object_mask_ = address_mask_ | kHeapObjectTagMask;
1217 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; 885 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
1218 886
1219 allocation_info_.top = to_space_.low(); 887 ResetAllocationInfo();
1220 allocation_info_.limit = to_space_.high();
1221 mc_forwarding_info_.top = NULL;
1222 mc_forwarding_info_.limit = NULL;
1223 888
1224 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1225 return true; 889 return true;
1226 } 890 }
1227 891
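The power-of-two size and alignment required above is what makes new-space containment testing a single mask-and-compare. A minimal standalone sketch of that style of test, assuming only the invariants stated in the comment (the function and parameter names here are illustrative, not the V8 API):

#include <cstdint>

// Sketch: 'size' covers both semispaces, is a power of two, and 'chunk_base'
// is aligned to 'size'. An address is inside the chunk exactly when its high
// bits match the base, so no range comparison is needed.
inline bool InNewSpaceChunk(uintptr_t address,
                            uintptr_t chunk_base,
                            uintptr_t size) {
  const uintptr_t address_mask = ~(size - 1);  // same form as address_mask_ above
  return (address & address_mask) == chunk_base;
}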
1228 892
1229 void NewSpace::TearDown() { 893 void NewSpace::TearDown() {
1230 if (allocated_histogram_) { 894 if (allocated_histogram_) {
1231 DeleteArray(allocated_histogram_); 895 DeleteArray(allocated_histogram_);
1232 allocated_histogram_ = NULL; 896 allocated_histogram_ = NULL;
1233 } 897 }
1234 if (promoted_histogram_) { 898 if (promoted_histogram_) {
1235 DeleteArray(promoted_histogram_); 899 DeleteArray(promoted_histogram_);
1236 promoted_histogram_ = NULL; 900 promoted_histogram_ = NULL;
1237 } 901 }
1238 902
1239 start_ = NULL; 903 start_ = NULL;
1240 allocation_info_.top = NULL; 904 allocation_info_.top = NULL;
1241 allocation_info_.limit = NULL; 905 allocation_info_.limit = NULL;
1242 mc_forwarding_info_.top = NULL;
1243 mc_forwarding_info_.limit = NULL;
1244 906
1245 to_space_.TearDown(); 907 to_space_.TearDown();
1246 from_space_.TearDown(); 908 from_space_.TearDown();
909
910 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
911
912 ASSERT(reservation_.IsReserved());
913 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
914 NOT_EXECUTABLE);
915 chunk_base_ = NULL;
916 chunk_size_ = 0;
1247 } 917 }
1248 918
1249 919
1250 void NewSpace::Flip() { 920 void NewSpace::Flip() {
1251 SemiSpace tmp = from_space_; 921 SemiSpace::Swap(&from_space_, &to_space_);
1252 from_space_ = to_space_;
1253 to_space_ = tmp;
1254 } 922 }
1255 923
1256 924
1257 void NewSpace::Grow() { 925 void NewSpace::Grow() {
1258 ASSERT(Capacity() < MaximumCapacity()); 926 ASSERT(Capacity() < MaximumCapacity());
1259 if (to_space_.Grow()) { 927 if (to_space_.Grow()) {
1260 // Only grow from space if we managed to grow to space. 928 // Only grow from space if we managed to grow to space.
1261 if (!from_space_.Grow()) { 929 if (!from_space_.Grow()) {
1262 // If we managed to grow to space but couldn't grow from space, 930 // If we managed to grow to space but couldn't grow from space,
1263 // attempt to shrink to space. 931 // attempt to shrink to space.
1264 if (!to_space_.ShrinkTo(from_space_.Capacity())) { 932 if (!to_space_.ShrinkTo(from_space_.Capacity())) {
1265 // We are in an inconsistent state because we could not 933 // We are in an inconsistent state because we could not
1266 // commit/uncommit memory from new space. 934 // commit/uncommit memory from new space.
1267 V8::FatalProcessOutOfMemory("Failed to grow new space."); 935 V8::FatalProcessOutOfMemory("Failed to grow new space.");
1268 } 936 }
1269 } 937 }
1270 } 938 }
1271 allocation_info_.limit = to_space_.high();
1272 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 939 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1273 } 940 }
1274 941
1275 942
1276 void NewSpace::Shrink() { 943 void NewSpace::Shrink() {
1277 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt()); 944 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
1278 int rounded_new_capacity = 945 int rounded_new_capacity =
1279 RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment())); 946 RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
1280 if (rounded_new_capacity < Capacity() && 947 if (rounded_new_capacity < Capacity() &&
1281 to_space_.ShrinkTo(rounded_new_capacity)) { 948 to_space_.ShrinkTo(rounded_new_capacity)) {
1282 // Only shrink from space if we managed to shrink to space. 949 // Only shrink from space if we managed to shrink to space.
1283 if (!from_space_.ShrinkTo(rounded_new_capacity)) { 950 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1284 // If we managed to shrink to space but couldn't shrink from 951 // If we managed to shrink to space but couldn't shrink from
1285 // space, attempt to grow to space again. 952 // space, attempt to grow to space again.
1286 if (!to_space_.GrowTo(from_space_.Capacity())) { 953 if (!to_space_.GrowTo(from_space_.Capacity())) {
1287 // We are in an inconsistent state because we could not 954 // We are in an inconsistent state because we could not
1288 // commit/uncommit memory from new space. 955 // commit/uncommit memory from new space.
1289 V8::FatalProcessOutOfMemory("Failed to shrink new space."); 956 V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1290 } 957 }
1291 } 958 }
1292 } 959 }
1293 allocation_info_.limit = to_space_.high(); 960 allocation_info_.limit = to_space_.page_high();
961 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
962 }
963
964
965 void NewSpace::UpdateAllocationInfo() {
966 allocation_info_.top = to_space_.page_low();
967 allocation_info_.limit = to_space_.page_high();
968
969 // Lower limit during incremental marking.
970 if (heap()->incremental_marking()->IsMarking() &&
971 inline_allocation_limit_step() != 0) {
972 Address new_limit =
973 allocation_info_.top + inline_allocation_limit_step();
974 allocation_info_.limit = Min(new_limit, allocation_info_.limit);
975 }
1294 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 976 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1295 } 977 }
1296 978
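Lowering the limit this way makes linear allocation hit the slow path roughly every inline_allocation_limit_step bytes, which is what gives incremental marking a chance to run a step between allocations. A rough standalone illustration of that limit computation (the names are placeholders, not the V8 API):

#include <algorithm>
#include <cstdint>

// Sketch: pick the effective allocation limit. With marking off the whole
// page is available; with marking on, allocation traps into the slow path
// after roughly 'step' bytes so a marking step can be performed.
inline uintptr_t EffectiveLimit(uintptr_t top,
                                uintptr_t page_limit,
                                uintptr_t step,
                                bool marking_is_active) {
  if (!marking_is_active || step == 0) return page_limit;
  return std::min(top + step, page_limit);
}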
1297 979
1298 void NewSpace::ResetAllocationInfo() { 980 void NewSpace::ResetAllocationInfo() {
1299 allocation_info_.top = to_space_.low(); 981 to_space_.Reset();
1300 allocation_info_.limit = to_space_.high(); 982 UpdateAllocationInfo();
1301 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 983 pages_used_ = 0;
984 // Clear all mark-bits in the to-space.
985 NewSpacePageIterator it(&to_space_);
986 while (it.has_next()) {
987 Bitmap::Clear(it.next());
988 }
1302 } 989 }
1303 990
1304 991
1305 void NewSpace::MCResetRelocationInfo() { 992 bool NewSpace::AddFreshPage() {
1306 mc_forwarding_info_.top = from_space_.low(); 993 Address top = allocation_info_.top;
1307 mc_forwarding_info_.limit = from_space_.high(); 994 if (NewSpacePage::IsAtStart(top)) {
1308 ASSERT_SEMISPACE_ALLOCATION_INFO(mc_forwarding_info_, from_space_); 995 // The current page is already empty. Don't try to make another.
1309 }
1310 996
1311 997 // We should only get here if someone asks to allocate more
1312 void NewSpace::MCCommitRelocationInfo() { 998 // than what can be stored in a single page.
1313 // Assumes that the spaces have been flipped so that mc_forwarding_info_ is 999 // TODO(gc): Change the limit on new-space allocation to prevent this
1314 // valid allocation info for the to space. 1000 // from happening (all such allocations should go directly to LOSpace).
1315 allocation_info_.top = mc_forwarding_info_.top; 1001 return false;
1316 allocation_info_.limit = to_space_.high(); 1002 }
1317 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 1003 if (!to_space_.AdvancePage()) {
1004 // Failed to get a new page in to-space.
1005 return false;
1006 }
1007 // Clear remainder of current page.
1008 int remaining_in_page =
1009 static_cast<int>(NewSpacePage::FromLimit(top)->body_limit() - top);
1010 heap()->CreateFillerObjectAt(top, remaining_in_page);
1011 pages_used_++;
1012 UpdateAllocationInfo();
1013 return true;
1318 } 1014 }
1319 1015
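AddFreshPage only advances to the next to-space page and plugs the unused tail of the old page with a filler; retrying the allocation is up to the caller. A hypothetical caller-side sketch, assuming a minimal bump-allocation interface (TryLinearAllocate is an invented helper, not part of this CL):

#include <cstddef>

// Hypothetical minimal interface, for illustration only.
struct BumpSpace {
  virtual void* TryLinearAllocate(size_t size_in_bytes) = 0;  // NULL if the page is full
  virtual bool AddFreshPage() = 0;                            // advance to the next page
  virtual ~BumpSpace() {}
};

// Sketch of a slow path: when the current page cannot satisfy the request,
// ask for a fresh page and retry once; a second failure means the space is
// full and the caller should fall back to a GC.
inline void* AllocateWithFreshPage(BumpSpace* space, size_t size_in_bytes) {
  if (void* result = space->TryLinearAllocate(size_in_bytes)) return result;
  if (!space->AddFreshPage()) return NULL;
  return space->TryLinearAllocate(size_in_bytes);
}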
1320 1016
1321 #ifdef DEBUG 1017 #ifdef DEBUG
1322 // We do not use the SemispaceIterator because verification doesn't assume 1018 // We do not use the SemiSpaceIterator because verification doesn't assume
1323 // that it works (it depends on the invariants we are checking). 1019 // that it works (it depends on the invariants we are checking).
1324 void NewSpace::Verify() { 1020 void NewSpace::Verify() {
1325 // The allocation pointer should be in the space or at the very end. 1021 // The allocation pointer should be in the space or at the very end.
1326 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 1022 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1327 1023
1328 // There should be objects packed in from the low address up to the 1024 // There should be objects packed in from the low address up to the
1329 // allocation pointer. 1025 // allocation pointer.
1330 Address current = to_space_.low(); 1026 Address current = to_space_.first_page()->body();
1331 while (current < top()) { 1027 CHECK_EQ(current, to_space_.space_start());
1332 HeapObject* object = HeapObject::FromAddress(current);
1333 1028
1334 // The first word should be a map, and we expect all map pointers to 1029 while (current != top()) {
1335 // be in map space. 1030 if (!NewSpacePage::IsAtEnd(current)) {
1336 Map* map = object->map(); 1031 // The allocation pointer should not be in the middle of an object.
1337 ASSERT(map->IsMap()); 1032 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1338 ASSERT(heap()->map_space()->Contains(map)); 1033 current < top());
1339 1034
1340 // The object should not be code or a map. 1035 HeapObject* object = HeapObject::FromAddress(current);
1341 ASSERT(!object->IsMap());
1342 ASSERT(!object->IsCode());
1343 1036
1344 // The object itself should look OK. 1037 // The first word should be a map, and we expect all map pointers to
1345 object->Verify(); 1038 // be in map space.
1039 Map* map = object->map();
1040 CHECK(map->IsMap());
1041 CHECK(heap()->map_space()->Contains(map));
1346 1042
1347 // All the interior pointers should be contained in the heap. 1043 // The object should not be code or a map.
1348 VerifyPointersVisitor visitor; 1044 CHECK(!object->IsMap());
1349 int size = object->Size(); 1045 CHECK(!object->IsCode());
1350 object->IterateBody(map->instance_type(), size, &visitor);
1351 1046
1352 current += size; 1047 // The object itself should look OK.
1048 object->Verify();
1049
1050 // All the interior pointers should be contained in the heap.
1051 VerifyPointersVisitor visitor;
1052 int size = object->Size();
1053 object->IterateBody(map->instance_type(), size, &visitor);
1054
1055 current += size;
1056 } else {
1057 // At end of page, switch to next page.
1058 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1059 // Next page should be valid.
1060 CHECK(!page->is_anchor());
1061 current = page->body();
1062 }
1353 } 1063 }
1354 1064
1355 // The allocation pointer should not be in the middle of an object. 1065 // Check semi-spaces.
1356 ASSERT(current == top()); 1066 ASSERT_EQ(from_space_.id(), kFromSpace);
1067 ASSERT_EQ(to_space_.id(), kToSpace);
1068 from_space_.Verify();
1069 to_space_.Verify();
1357 } 1070 }
1358 #endif 1071 #endif
1359 1072
1360
1361 bool SemiSpace::Commit() {
1362 ASSERT(!is_committed());
1363 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1364 start_, capacity_, executable())) {
1365 return false;
1366 }
1367 committed_ = true;
1368 return true;
1369 }
1370
1371
1372 bool SemiSpace::Uncommit() {
1373 ASSERT(is_committed());
1374 if (!heap()->isolate()->memory_allocator()->UncommitBlock(
1375 start_, capacity_)) {
1376 return false;
1377 }
1378 committed_ = false;
1379 return true;
1380 }
1381
1382
1383 // ----------------------------------------------------------------------------- 1073 // -----------------------------------------------------------------------------
1384 // SemiSpace implementation 1074 // SemiSpace implementation
1385 1075
1386 bool SemiSpace::Setup(Address start, 1076 bool SemiSpace::Setup(Address start,
1387 int initial_capacity, 1077 int initial_capacity,
1388 int maximum_capacity) { 1078 int maximum_capacity) {
1389 // Creates a space in the young generation. The constructor does not 1079 // Creates a space in the young generation. The constructor does not
1390 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of 1080 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of
1391 // memory of size 'capacity' when set up, and does not grow or shrink 1081 // memory of size 'capacity' when set up, and does not grow or shrink
1392 // otherwise. In the mark-compact collector, the memory region of the from 1082 // otherwise. In the mark-compact collector, the memory region of the from
1393 // space is used as the marking stack. It requires contiguous memory 1083 // space is used as the marking stack. It requires contiguous memory
1394 // addresses. 1084 // addresses.
1395 initial_capacity_ = initial_capacity; 1085 ASSERT(maximum_capacity >= Page::kPageSize);
1086 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
1396 capacity_ = initial_capacity; 1087 capacity_ = initial_capacity;
1397 maximum_capacity_ = maximum_capacity; 1088 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
1398 committed_ = false; 1089 committed_ = false;
1399
1400 start_ = start; 1090 start_ = start;
1401 address_mask_ = ~(maximum_capacity - 1); 1091 address_mask_ = ~(maximum_capacity - 1);
1402 object_mask_ = address_mask_ | kHeapObjectTagMask; 1092 object_mask_ = address_mask_ | kHeapObjectTagMask;
1403 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; 1093 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
1404 age_mark_ = start_; 1094 age_mark_ = start_;
1405 1095
1406 return Commit(); 1096 return Commit();
1407 } 1097 }
1408 1098
1409 1099
1410 void SemiSpace::TearDown() { 1100 void SemiSpace::TearDown() {
1411 start_ = NULL; 1101 start_ = NULL;
1412 capacity_ = 0; 1102 capacity_ = 0;
1413 } 1103 }
1414 1104
1415 1105
1106 bool SemiSpace::Commit() {
1107 ASSERT(!is_committed());
1108 int pages = capacity_ / Page::kPageSize;
1109 Address end = start_ + maximum_capacity_;
1110 Address start = end - pages * Page::kPageSize;
1111 if (!heap()->isolate()->memory_allocator()->CommitBlock(start,
1112 capacity_,
1113 executable())) {
1114 return false;
1115 }
1116
1117 NewSpacePage* page = anchor();
1118 for (int i = 1; i <= pages; i++) {
1119 NewSpacePage* new_page =
1120 NewSpacePage::Initialize(heap(), end - i * Page::kPageSize, this);
1121 new_page->InsertAfter(page);
1122 page = new_page;
1123 }
1124
1125 committed_ = true;
1126 Reset();
1127 return true;
1128 }
1129
1130
1131 bool SemiSpace::Uncommit() {
1132 ASSERT(is_committed());
1133 Address start = start_ + maximum_capacity_ - capacity_;
1134 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
1135 return false;
1136 }
1137 anchor()->set_next_page(anchor());
1138 anchor()->set_prev_page(anchor());
1139
1140 committed_ = false;
1141 return true;
1142 }
1143
1144
1416 bool SemiSpace::Grow() { 1145 bool SemiSpace::Grow() {
1417 // Double the semispace size but only up to maximum capacity. 1146 // Double the semispace size but only up to maximum capacity.
1418 int maximum_extra = maximum_capacity_ - capacity_; 1147 ASSERT(static_cast<size_t>(Page::kPageSize) > OS::AllocateAlignment());
1419 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())), 1148 int new_capacity = Min(maximum_capacity_,
1420 maximum_extra); 1149 RoundUp(capacity_ * 2, static_cast<int>(Page::kPageSize)));
1421 if (!heap()->isolate()->memory_allocator()->CommitBlock( 1150 return GrowTo(new_capacity);
1422 high(), extra, executable())) {
1423 return false;
1424 }
1425 capacity_ += extra;
1426 return true;
1427 } 1151 }
1428 1152
1429 1153
1430 bool SemiSpace::GrowTo(int new_capacity) { 1154 bool SemiSpace::GrowTo(int new_capacity) {
1155 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1431 ASSERT(new_capacity <= maximum_capacity_); 1156 ASSERT(new_capacity <= maximum_capacity_);
1432 ASSERT(new_capacity > capacity_); 1157 ASSERT(new_capacity > capacity_);
1158 int pages_before = capacity_ / Page::kPageSize;
1159 int pages_after = new_capacity / Page::kPageSize;
1160
1161 Address end = start_ + maximum_capacity_;
1162 Address start = end - new_capacity;
1433 size_t delta = new_capacity - capacity_; 1163 size_t delta = new_capacity - capacity_;
1164
1434 ASSERT(IsAligned(delta, OS::AllocateAlignment())); 1165 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1435 if (!heap()->isolate()->memory_allocator()->CommitBlock( 1166 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1436 high(), delta, executable())) { 1167 start, delta, executable())) {
1437 return false; 1168 return false;
1438 } 1169 }
1439 capacity_ = new_capacity; 1170 capacity_ = new_capacity;
1171 NewSpacePage* last_page = anchor()->prev_page();
1172 ASSERT(last_page != anchor());
1173 for (int i = pages_before + 1; i <= pages_after; i++) {
1174 Address page_address = end - i * Page::kPageSize;
1175 NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
1176 page_address,
1177 this);
1178 new_page->InsertAfter(last_page);
1179 Bitmap::Clear(new_page);
1180 // Duplicate the flags that were set on the old page.
1181 new_page->SetFlags(last_page->GetFlags(),
1182 NewSpacePage::kCopyOnFlipFlagsMask);
1183 last_page = new_page;
1184 }
1440 return true; 1185 return true;
1441 } 1186 }
1442 1187
1443 1188
1444 bool SemiSpace::ShrinkTo(int new_capacity) { 1189 bool SemiSpace::ShrinkTo(int new_capacity) {
1190 ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
1445 ASSERT(new_capacity >= initial_capacity_); 1191 ASSERT(new_capacity >= initial_capacity_);
1446 ASSERT(new_capacity < capacity_); 1192 ASSERT(new_capacity < capacity_);
1193 // Semispaces grow backwards from the end of their allocated capacity,
1194 // so we find the before and after start addresses relative to the
1195 // end of the space.
1196 Address space_end = start_ + maximum_capacity_;
1197 Address old_start = space_end - capacity_;
1447 size_t delta = capacity_ - new_capacity; 1198 size_t delta = capacity_ - new_capacity;
1448 ASSERT(IsAligned(delta, OS::AllocateAlignment())); 1199 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1449 if (!heap()->isolate()->memory_allocator()->UncommitBlock( 1200 if (!heap()->isolate()->memory_allocator()->UncommitBlock(old_start, delta)) {
1450 high() - delta, delta)) {
1451 return false; 1201 return false;
1452 } 1202 }
1453 capacity_ = new_capacity; 1203 capacity_ = new_capacity;
1454 return true; 1204
1205 int pages_after = capacity_ / Page::kPageSize;
1206 NewSpacePage* new_last_page =
1207 NewSpacePage::FromAddress(space_end - pages_after * Page::kPageSize);
1208 new_last_page->set_next_page(anchor());
1209 anchor()->set_prev_page(new_last_page);
1210 ASSERT(current_page_ == first_page());
1211
1212 return true;
1213 }
1214
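As the comment in ShrinkTo notes, the committed part of a semispace always ends at start_ + maximum_capacity_ and it is the low end that moves when the space grows or shrinks. A small standalone sketch of that address arithmetic, with assumed example numbers in the trailing comment:

#include <cstdint>

// Sketch: committed range of a semispace that grows backwards from the end
// of its reservation. The high end is fixed; growing commits pages just
// below the old low end, shrinking uncommits them again.
struct CommittedRange { uintptr_t low; uintptr_t high; };

inline CommittedRange SemiSpaceCommittedRange(uintptr_t start,
                                              uintptr_t maximum_capacity,
                                              uintptr_t capacity) {
  CommittedRange r;
  r.high = start + maximum_capacity;  // never moves
  r.low = r.high - capacity;          // moves down as capacity grows
  return r;
}

// Example (assumed numbers): start = 0x100000, maximum_capacity = 1 MB and
// capacity = 256 KB give the committed range [0x1C0000, 0x200000).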
1215
1216 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1217 anchor_.set_owner(this);
1218 // Fixup back-pointers to anchor. Address of anchor changes
1219 // when we swap.
1220 anchor_.prev_page()->set_next_page(&anchor_);
1221 anchor_.next_page()->set_prev_page(&anchor_);
1222
1223 bool becomes_to_space = (id_ == kFromSpace);
1224 id_ = becomes_to_space ? kToSpace : kFromSpace;
1225 NewSpacePage* page = anchor_.next_page();
1226 while (page != &anchor_) {
1227 page->set_owner(this);
1228 page->SetFlags(flags, mask);
1229 if (becomes_to_space) {
1230 page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
1231 page->SetFlag(MemoryChunk::IN_TO_SPACE);
1232 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1233 page->ResetLiveBytes();
1234 } else {
1235 page->SetFlag(MemoryChunk::IN_FROM_SPACE);
1236 page->ClearFlag(MemoryChunk::IN_TO_SPACE);
1237 }
1238 ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1239 ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
1240 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
1241 page = page->next_page();
1242 }
1243 }
1244
1245
1246 void SemiSpace::Reset() {
1247 ASSERT(anchor_.next_page() != &anchor_);
1248 current_page_ = anchor_.next_page();
1249 }
1250
1251
1252 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
1253 // We won't be swapping semispaces without data in them.
1254 ASSERT(from->anchor_.next_page() != &from->anchor_);
1255 ASSERT(to->anchor_.next_page() != &to->anchor_);
1256
1257 // Swap bits.
1258 SemiSpace tmp = *from;
1259 *from = *to;
1260 *to = tmp;
1261
1262 // Fixup back-pointers to the page list anchor now that its address
1263 // has changed.
1264 // Swap to/from-space bits on pages.
1265 // Copy GC flags from old active space (from-space) to new (to-space).
1266 intptr_t flags = from->current_page()->GetFlags();
1267 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
1268
1269 from->FlipPages(0, 0);
1270 }
1271
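Swap copies the two SemiSpace objects by value, so the embedded page-list anchors change address while the first and last pages still point at the old anchors; that is why FlipPages starts by re-wiring the neighbours. A generic sketch of that fix-up on a circular doubly linked list (the types are illustrative, not the V8 ones):

// Minimal circular doubly linked list with an embedded anchor node, the same
// shape as the new-space page lists.
struct ListNode {
  ListNode* next;
  ListNode* prev;
};

// After the anchor has been moved (e.g. by copying the object that contains
// it), its neighbours still point at the old location and must be re-wired.
inline void FixupAnchorAfterMove(ListNode* anchor) {
  anchor->prev->next = anchor;  // last real node -> anchor
  anchor->next->prev = anchor;  // first real node -> anchor
}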
1272
1273 void SemiSpace::set_age_mark(Address mark) {
1274 ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
1275 age_mark_ = mark;
1276 // Mark all pages up to the one containing mark.
1277 NewSpacePageIterator it(space_start(), mark);
1278 while (it.has_next()) {
1279 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
1280 }
1455 } 1281 }
1456 1282
1457 1283
1458 #ifdef DEBUG 1284 #ifdef DEBUG
1459 void SemiSpace::Print() { } 1285 void SemiSpace::Print() { }
1460 1286
1461 1287
1462 void SemiSpace::Verify() { } 1288 void SemiSpace::Verify() {
1289 bool is_from_space = (id_ == kFromSpace);
1290 NewSpacePage* page = anchor_.next_page();
1291 CHECK(anchor_.semi_space() == this);
1292 while (page != &anchor_) {
1293 CHECK(page->semi_space() == this);
1294 CHECK(page->InNewSpace());
1295 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1296 : MemoryChunk::IN_TO_SPACE));
1297 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1298 : MemoryChunk::IN_FROM_SPACE));
1299 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1300 if (!is_from_space) {
1301 // The pointers-from-here-are-interesting flag isn't updated dynamically
1302 // on from-space pages, so it might be out of sync with the marking state.
1303 if (page->heap()->incremental_marking()->IsMarking()) {
1304 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1305 } else {
1306 CHECK(!page->IsFlagSet(
1307 MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1308 }
1309 // TODO(gc): Check that the live_bytes_count_ field matches the
1310 // black marking on the page (if we make it match in new-space).
1311 }
1312 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1313 CHECK(page->prev_page()->next_page() == page);
1314 page = page->next_page();
1315 }
1316 }
1317
1318
1319 void SemiSpace::AssertValidRange(Address start, Address end) {
1320 // Addresses must belong to the same semi-space.
1321 NewSpacePage* page = NewSpacePage::FromAddress(start);
1322 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1323 SemiSpace* space = page->semi_space();
1324 CHECK_EQ(space, end_page->semi_space());
1325 // Start address is before end address, either on same page,
1326 // or end address is on a later page in the linked list of
1327 // semi-space pages.
1328 if (page == end_page) {
1329 CHECK(start <= end);
1330 } else {
1331 while (page != end_page) {
1332 page = page->next_page();
1333 CHECK_NE(page, space->anchor());
1334 }
1335 }
1336 }
1463 #endif 1337 #endif
1464 1338
1465 1339
1466 // ----------------------------------------------------------------------------- 1340 // -----------------------------------------------------------------------------
1467 // SemiSpaceIterator implementation. 1341 // SemiSpaceIterator implementation.
1468 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) { 1342 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
1469 Initialize(space, space->bottom(), space->top(), NULL); 1343 Initialize(space->bottom(), space->top(), NULL);
1470 } 1344 }
1471 1345
1472 1346
1473 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, 1347 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
1474 HeapObjectCallback size_func) { 1348 HeapObjectCallback size_func) {
1475 Initialize(space, space->bottom(), space->top(), size_func); 1349 Initialize(space->bottom(), space->top(), size_func);
1476 } 1350 }
1477 1351
1478 1352
1479 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) { 1353 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
1480 Initialize(space, start, space->top(), NULL); 1354 Initialize(start, space->top(), NULL);
1481 } 1355 }
1482 1356
1483 1357
1484 void SemiSpaceIterator::Initialize(NewSpace* space, Address start, 1358 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1359 Initialize(from, to, NULL);
1360 }
1361
1362
1363 void SemiSpaceIterator::Initialize(Address start,
1485 Address end, 1364 Address end,
1486 HeapObjectCallback size_func) { 1365 HeapObjectCallback size_func) {
1487 ASSERT(space->ToSpaceContains(start)); 1366 SemiSpace::AssertValidRange(start, end);
1488 ASSERT(space->ToSpaceLow() <= end
1489 && end <= space->ToSpaceHigh());
1490 space_ = &space->to_space_;
1491 current_ = start; 1367 current_ = start;
1492 limit_ = end; 1368 limit_ = end;
1493 size_func_ = size_func; 1369 size_func_ = size_func;
1494 } 1370 }
1495 1371
1496 1372
1497 #ifdef DEBUG 1373 #ifdef DEBUG
1498 // heap_histograms is shared, always clear it before using it. 1374 // heap_histograms is shared, always clear it before using it.
1499 static void ClearHistograms() { 1375 static void ClearHistograms() {
1500 Isolate* isolate = Isolate::Current(); 1376 Isolate* isolate = Isolate::Current();
(...skipping 115 matching lines...)
1616 promoted_histogram_[i].clear(); 1492 promoted_histogram_[i].clear();
1617 } 1493 }
1618 } 1494 }
1619 1495
1620 // Because the copying collector does not touch garbage objects, we iterate 1496 // Because the copying collector does not touch garbage objects, we iterate
1621 // the new space before a collection to get a histogram of allocated objects. 1497 // the new space before a collection to get a histogram of allocated objects.
1622 // This only happens when --log-gc flag is set. 1498 // This only happens when --log-gc flag is set.
1623 void NewSpace::CollectStatistics() { 1499 void NewSpace::CollectStatistics() {
1624 ClearHistograms(); 1500 ClearHistograms();
1625 SemiSpaceIterator it(this); 1501 SemiSpaceIterator it(this);
1626 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) 1502 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
1627 RecordAllocation(obj); 1503 RecordAllocation(obj);
1628 } 1504 }
1629 1505
1630 1506
1631 static void DoReportStatistics(Isolate* isolate, 1507 static void DoReportStatistics(Isolate* isolate,
1632 HistogramInfo* info, const char* description) { 1508 HistogramInfo* info, const char* description) {
1633 LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); 1509 LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
1634 // Lump all the string types together. 1510 // Lump all the string types together.
1635 int string_number = 0; 1511 int string_number = 0;
1636 int string_bytes = 0; 1512 int string_bytes = 0;
(...skipping 55 matching lines...)
1692 } 1568 }
1693 1569
1694 1570
1695 void NewSpace::RecordPromotion(HeapObject* obj) { 1571 void NewSpace::RecordPromotion(HeapObject* obj) {
1696 InstanceType type = obj->map()->instance_type(); 1572 InstanceType type = obj->map()->instance_type();
1697 ASSERT(0 <= type && type <= LAST_TYPE); 1573 ASSERT(0 <= type && type <= LAST_TYPE);
1698 promoted_histogram_[type].increment_number(1); 1574 promoted_histogram_[type].increment_number(1);
1699 promoted_histogram_[type].increment_bytes(obj->Size()); 1575 promoted_histogram_[type].increment_bytes(obj->Size());
1700 } 1576 }
1701 1577
1702
1703 // ----------------------------------------------------------------------------- 1578 // -----------------------------------------------------------------------------
1704 // Free lists for old object spaces implementation 1579 // Free lists for old object spaces implementation
1705 1580
1706 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { 1581 void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
1707 ASSERT(size_in_bytes > 0); 1582 ASSERT(size_in_bytes > 0);
1708 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 1583 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1709 1584
1710 // We write a map and possibly size information to the block. If the block 1585 // We write a map and possibly size information to the block. If the block
1711 // is big enough to be a ByteArray with at least one extra word (the next 1586 // is big enough to be a FreeSpace with at least one extra word (the next
1712 // pointer), we set its map to be the byte array map and its size to an 1587 // pointer), we set its map to be the free space map and its size to an
1713 // appropriate array length for the desired size from HeapObject::Size(). 1588 // appropriate array length for the desired size from HeapObject::Size().
1714 // If the block is too small (e.g. one or two words) to hold both a size 1589 // If the block is too small (e.g. one or two words) to hold both a size
1715 // field and a next pointer, we give it a filler map that gives it the 1590 // field and a next pointer, we give it a filler map that gives it the
1716 // correct size. 1591 // correct size.
1717 if (size_in_bytes > ByteArray::kHeaderSize) { 1592 if (size_in_bytes > FreeSpace::kHeaderSize) {
1718 set_map(heap->raw_unchecked_byte_array_map()); 1593 set_map(heap->raw_unchecked_free_space_map());
1719 // Can't use ByteArray::cast because it fails during deserialization. 1594 // Can't use FreeSpace::cast because it fails during deserialization.
1720 ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this); 1595 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
1721 this_as_byte_array->set_length(ByteArray::LengthFor(size_in_bytes)); 1596 this_as_free_space->set_size(size_in_bytes);
1722 } else if (size_in_bytes == kPointerSize) { 1597 } else if (size_in_bytes == kPointerSize) {
1723 set_map(heap->raw_unchecked_one_pointer_filler_map()); 1598 set_map(heap->raw_unchecked_one_pointer_filler_map());
1724 } else if (size_in_bytes == 2 * kPointerSize) { 1599 } else if (size_in_bytes == 2 * kPointerSize) {
1725 set_map(heap->raw_unchecked_two_pointer_filler_map()); 1600 set_map(heap->raw_unchecked_two_pointer_filler_map());
1726 } else { 1601 } else {
1727 UNREACHABLE(); 1602 UNREACHABLE();
1728 } 1603 }
1729 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during 1604 // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
1730 // deserialization because the byte array map is not done yet. 1605 // deserialization because the free space map is not done yet.
1731 } 1606 }
1732 1607
1733 1608
1734 Address FreeListNode::next(Heap* heap) { 1609 FreeListNode* FreeListNode::next() {
1735 ASSERT(IsFreeListNode(this)); 1610 ASSERT(IsFreeListNode(this));
1736 if (map() == heap->raw_unchecked_byte_array_map()) { 1611 if (map() == HEAP->raw_unchecked_free_space_map()) {
1612 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1613 return reinterpret_cast<FreeListNode*>(
1614 Memory::Address_at(address() + kNextOffset));
1615 } else {
1616 return reinterpret_cast<FreeListNode*>(
1617 Memory::Address_at(address() + kPointerSize));
1618 }
1619 }
1620
1621
1622 FreeListNode** FreeListNode::next_address() {
1623 ASSERT(IsFreeListNode(this));
1624 if (map() == HEAP->raw_unchecked_free_space_map()) {
1737 ASSERT(Size() >= kNextOffset + kPointerSize); 1625 ASSERT(Size() >= kNextOffset + kPointerSize);
1738 return Memory::Address_at(address() + kNextOffset); 1626 return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
1739 } else { 1627 } else {
1740 return Memory::Address_at(address() + kPointerSize); 1628 return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
1741 } 1629 }
1742 } 1630 }
1743 1631
1744 1632
1745 void FreeListNode::set_next(Heap* heap, Address next) { 1633 void FreeListNode::set_next(FreeListNode* next) {
1746 ASSERT(IsFreeListNode(this)); 1634 ASSERT(IsFreeListNode(this));
1747 if (map() == heap->raw_unchecked_byte_array_map()) { 1635 // While we are booting the VM the free space map will actually be null. So
1748 ASSERT(Size() >= kNextOffset + kPointerSize); 1636 // we have to make sure that we don't try to use it for anything at that
1749 Memory::Address_at(address() + kNextOffset) = next; 1637 // stage.
1750 } else { 1638 if (map() == HEAP->raw_unchecked_free_space_map()) {
1751 Memory::Address_at(address() + kPointerSize) = next; 1639 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
1752 } 1640 Memory::Address_at(address() + kNextOffset) =
1753 } 1641 reinterpret_cast<Address>(next);
1754 1642 } else {
1755 1643 Memory::Address_at(address() + kPointerSize) =
1756 OldSpaceFreeList::OldSpaceFreeList(Heap* heap, AllocationSpace owner) 1644 reinterpret_cast<Address>(next);
1757 : heap_(heap), 1645 }
1758 owner_(owner) { 1646 }
1647
1648
1649 FreeList::FreeList(PagedSpace* owner)
1650 : owner_(owner), heap_(owner->heap()) {
1759 Reset(); 1651 Reset();
1760 } 1652 }
1761 1653
1762 1654
1763 void OldSpaceFreeList::Reset() { 1655 void FreeList::Reset() {
1764 available_ = 0; 1656 available_ = 0;
1765 for (int i = 0; i < kFreeListsLength; i++) { 1657 small_list_ = NULL;
1766 free_[i].head_node_ = NULL; 1658 medium_list_ = NULL;
1767 } 1659 large_list_ = NULL;
1768 needs_rebuild_ = false; 1660 huge_list_ = NULL;
1769 finger_ = kHead; 1661 }
1770 free_[kHead].next_size_ = kEnd; 1662
1771 } 1663
1772 1664 int PagedSpace::FreeOrUnmapPage(Page* page, Address start, int size_in_bytes) {
1773 1665 Heap* heap = page->heap();
1774 void OldSpaceFreeList::RebuildSizeList() { 1666 // TODO(gc): When we count the live bytes per page we can free empty pages
1775 ASSERT(needs_rebuild_); 1667 // instead of sweeping. At that point this if should be turned into an
1776 int cur = kHead; 1668 // ASSERT that the area to be freed cannot be the entire page.
1777 for (int i = cur + 1; i < kFreeListsLength; i++) { 1669 if (size_in_bytes == Page::kObjectAreaSize &&
1778 if (free_[i].head_node_ != NULL) { 1670 heap->ShouldWeGiveBackAPageToTheOS()) {
1779 free_[cur].next_size_ = i; 1671 page->Unlink();
1780 cur = i; 1672 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
1673 heap->isolate()->memory_allocator()->Free(page);
1674 } else {
1675 heap->QueueMemoryChunkForFree(page);
1781 } 1676 }
1782 } 1677 return 0;
1783 free_[cur].next_size_ = kEnd; 1678 }
1784 needs_rebuild_ = false; 1679 return Free(start, size_in_bytes);
1785 } 1680 }
1786 1681
1787 1682
1788 int OldSpaceFreeList::Free(Address start, int size_in_bytes) { 1683 int FreeList::Free(Address start, int size_in_bytes) {
1789 #ifdef DEBUG 1684 if (size_in_bytes == 0) return 0;
1790 Isolate::Current()->memory_allocator()->ZapBlock(start, size_in_bytes);
1791 #endif
1792 FreeListNode* node = FreeListNode::FromAddress(start); 1685 FreeListNode* node = FreeListNode::FromAddress(start);
1793 node->set_size(heap_, size_in_bytes); 1686 node->set_size(heap_, size_in_bytes);
1794 1687
1795 // We don't use the freelists in compacting mode. This makes it more like a 1688 // Early return to drop too-small blocks on the floor.
1796 // GC that only has mark-sweep-compact and doesn't have a mark-sweep 1689 if (size_in_bytes < kSmallListMin) return size_in_bytes;
1797 // collector. 1690
1798 if (FLAG_always_compact) { 1691 // Insert other blocks at the head of a free list of the appropriate
1799 return size_in_bytes; 1692 // magnitude.
1800 } 1693 if (size_in_bytes <= kSmallListMax) {
1801 1694 node->set_next(small_list_);
1802 // Early return to drop too-small blocks on the floor (one or two word 1695 small_list_ = node;
1803 // blocks cannot hold a map pointer, a size field, and a pointer to the 1696 } else if (size_in_bytes <= kMediumListMax) {
1804 // next block in the free list). 1697 node->set_next(medium_list_);
1805 if (size_in_bytes < kMinBlockSize) { 1698 medium_list_ = node;
1806 return size_in_bytes; 1699 } else if (size_in_bytes <= kLargeListMax) {
1807 } 1700 node->set_next(large_list_);
1808 1701 large_list_ = node;
1809 // Insert other blocks at the head of an exact free list. 1702 } else {
1810 int index = size_in_bytes >> kPointerSizeLog2; 1703 node->set_next(huge_list_);
1811 node->set_next(heap_, free_[index].head_node_); 1704 huge_list_ = node;
1812 free_[index].head_node_ = node->address(); 1705 }
1813 available_ += size_in_bytes; 1706 available_ += size_in_bytes;
1814 needs_rebuild_ = true; 1707 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1815 return 0; 1708 return 0;
1816 } 1709 }
1817 1710
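Free blocks are filed into one of four lists purely by size, so freeing and the common allocation cases stay O(1) while only the huge list needs a linear search. A standalone sketch of the bucket choice; the byte thresholds below are placeholders for illustration, the real kSmallListMax/kMediumListMax/kLargeListMax constants live in spaces.h:

// Illustrative only: classify a free block by size. The placeholder limits
// are assumptions, not the actual constants used by FreeList.
enum FreeListCategory { kSmallFree, kMediumFree, kLargeFree, kHugeFree };

const int kIllustrativeSmallMax = 0x100;    // assumed
const int kIllustrativeMediumMax = 0x800;   // assumed
const int kIllustrativeLargeMax = 0x4000;   // assumed

inline FreeListCategory CategoryFor(int size_in_bytes) {
  if (size_in_bytes <= kIllustrativeSmallMax) return kSmallFree;
  if (size_in_bytes <= kIllustrativeMediumMax) return kMediumFree;
  if (size_in_bytes <= kIllustrativeLargeMax) return kLargeFree;
  return kHugeFree;  // huge blocks are searched linearly for a large-enough fit
}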
1818 1711
1819 MaybeObject* OldSpaceFreeList::Allocate(int size_in_bytes, int* wasted_bytes) { 1712 FreeListNode* FreeList::PickNodeFromList(FreeListNode** list, int* node_size) {
1713 FreeListNode* node = *list;
1714
1715 if (node == NULL) return NULL;
1716
1717 while (node != NULL &&
1718 Page::FromAddress(node->address())->IsEvacuationCandidate()) {
1719 available_ -= node->Size();
1720 node = node->next();
1721 }
1722
1723 if (node != NULL) {
1724 *node_size = node->Size();
1725 *list = node->next();
1726 } else {
1727 *list = NULL;
1728 }
1729
1730 return node;
1731 }
1732
1733
1734 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
1735 FreeListNode* node = NULL;
1736
1737 if (size_in_bytes <= kSmallAllocationMax) {
1738 node = PickNodeFromList(&small_list_, node_size);
1739 if (node != NULL) return node;
1740 }
1741
1742 if (size_in_bytes <= kMediumAllocationMax) {
1743 node = PickNodeFromList(&medium_list_, node_size);
1744 if (node != NULL) return node;
1745 }
1746
1747 if (size_in_bytes <= kLargeAllocationMax) {
1748 node = PickNodeFromList(&large_list_, node_size);
1749 if (node != NULL) return node;
1750 }
1751
1752 for (FreeListNode** cur = &huge_list_;
1753 *cur != NULL;
1754 cur = (*cur)->next_address()) {
1755 FreeListNode* cur_node = *cur;
1756 while (cur_node != NULL &&
1757 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
1758 available_ -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
1759 cur_node = cur_node->next();
1760 }
1761
1762 *cur = cur_node;
1763 if (cur_node == NULL) break;
1764
1765 ASSERT((*cur)->map() == HEAP->raw_unchecked_free_space_map());
1766 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
1767 int size = cur_as_free_space->Size();
1768 if (size >= size_in_bytes) {
1769 // Large enough node found. Unlink it from the list.
1770 node = *cur;
1771 *node_size = size;
1772 *cur = node->next();
1773 break;
1774 }
1775 }
1776
1777 return node;
1778 }
1779
1780
1781 // Allocation on the old space free list. If it succeeds then a new linear
1782 // allocation space has been set up with the top and limit of the space. If
1783 // the allocation fails then NULL is returned, and the caller can perform a GC
1784 // or allocate a new page before retrying.
1785 HeapObject* FreeList::Allocate(int size_in_bytes) {
1820 ASSERT(0 < size_in_bytes); 1786 ASSERT(0 < size_in_bytes);
1821 ASSERT(size_in_bytes <= kMaxBlockSize); 1787 ASSERT(size_in_bytes <= kMaxBlockSize);
1822 ASSERT(IsAligned(size_in_bytes, kPointerSize)); 1788 ASSERT(IsAligned(size_in_bytes, kPointerSize));
1823 1789 // Don't free list allocate if there is linear space available.
1824 if (needs_rebuild_) RebuildSizeList(); 1790 ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
1825 int index = size_in_bytes >> kPointerSizeLog2; 1791
1826 // Check for a perfect fit. 1792 int new_node_size = 0;
1827 if (free_[index].head_node_ != NULL) { 1793 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
1828 FreeListNode* node = FreeListNode::FromAddress(free_[index].head_node_); 1794 if (new_node == NULL) return NULL;
1829 // If this was the last block of its size, remove the size. 1795
1830 if ((free_[index].head_node_ = node->next(heap_)) == NULL) 1796 available_ -= new_node_size;
1831 RemoveSize(index); 1797 ASSERT(IsVeryLong() || available_ == SumFreeLists());
1832 available_ -= size_in_bytes; 1798
1833 *wasted_bytes = 0; 1799 int bytes_left = new_node_size - size_in_bytes;
1834 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. 1800 ASSERT(bytes_left >= 0);
1835 return node; 1801
1836 } 1802 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
1837 // Search the size list for the best fit. 1803 // Mark the old linear allocation area with a free space map so it can be
1838 int prev = finger_ < index ? finger_ : kHead; 1804 // skipped when scanning the heap. This also puts it back in the free list
1839 int cur = FindSize(index, &prev); 1805 // if it is big enough.
1840 ASSERT(index < cur); 1806 owner_->Free(owner_->top(), old_linear_size);
1841 if (cur == kEnd) { 1807 owner_->heap()->incremental_marking()->OldSpaceStep(
1842 // No large enough size in list. 1808 size_in_bytes - old_linear_size);
1843 *wasted_bytes = 0; 1809
1844 return Failure::RetryAfterGC(owner_); 1810 const int kThreshold = IncrementalMarking::kAllocatedThreshold;
1845 } 1811
1846 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep. 1812 // Memory in the linear allocation area is counted as allocated. We may free
1847 int rem = cur - index; 1813 // a little of this again immediately - see below.
1848 int rem_bytes = rem << kPointerSizeLog2; 1814 owner_->Allocate(new_node_size);
1849 FreeListNode* cur_node = FreeListNode::FromAddress(free_[cur].head_node_); 1815
1850 ASSERT(cur_node->Size() == (cur << kPointerSizeLog2)); 1816 if (bytes_left > kThreshold &&
1851 FreeListNode* rem_node = FreeListNode::FromAddress(free_[cur].head_node_ + 1817 owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
1852 size_in_bytes); 1818 FLAG_incremental_marking_steps) {
1853 // Distinguish the cases prev < rem < cur and rem <= prev < cur 1819 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
1854 // to avoid many redundant tests and calls to Insert/RemoveSize. 1820 // We don't want to give too large linear areas to the allocator while
1855 if (prev < rem) { 1821 // incremental marking is going on, because we won't check again whether
1856 // Simple case: insert rem between prev and cur. 1822 // we want to do another increment until the linear area is used up.
1857 finger_ = prev; 1823 owner_->Free(new_node->address() + size_in_bytes + linear_size,
1858 free_[prev].next_size_ = rem; 1824 new_node_size - size_in_bytes - linear_size);
1859 // If this was the last block of size cur, remove the size. 1825 owner_->SetTop(new_node->address() + size_in_bytes,
1860 if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) { 1826 new_node->address() + size_in_bytes + linear_size);
1861 free_[rem].next_size_ = free_[cur].next_size_; 1827 } else if (bytes_left > 0) {
1862 } else { 1828 // Normally we give the rest of the node to the allocator as its new
1863 free_[rem].next_size_ = cur; 1829 // linear allocation area.
1830 owner_->SetTop(new_node->address() + size_in_bytes,
1831 new_node->address() + new_node_size);
1832 } else {
1833 // TODO(gc) Try not freeing linear allocation region when bytes_left
1834 // are zero.
1835 owner_->SetTop(NULL, NULL);
1836 }
1837
1838 return new_node;
1839 }
1840
1841
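As the comment introducing FreeList::Allocate says, a NULL result only means the caller has to make memory available and try again. A hedged sketch of that caller pattern under an assumed minimal interface (Expand and CollectGarbage stand in for whatever policy the owning space really applies):

#include <cstddef>

// Hypothetical interface for illustration; the real PagedSpace/FreeList
// collaboration in this patch differs in detail.
struct FreeListSpace {
  virtual void* FreeListAllocate(int size_in_bytes) = 0;  // NULL on failure
  virtual bool Expand() = 0;                              // try to add a page
  virtual void CollectGarbage() = 0;                      // refills the free lists
  virtual ~FreeListSpace() {}
};

// Sketch: on failure, first try to grow the space, then fall back to a GC,
// retrying the free-list allocation after each step.
inline void* AllocateWithRetry(FreeListSpace* space, int size_in_bytes) {
  if (void* result = space->FreeListAllocate(size_in_bytes)) return result;
  if (space->Expand()) {
    if (void* result = space->FreeListAllocate(size_in_bytes)) return result;
  }
  space->CollectGarbage();
  return space->FreeListAllocate(size_in_bytes);
}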
1842 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
1843 intptr_t sum = 0;
1844 while (n != NULL) {
1845 if (Page::FromAddress(n->address()) == p) {
1846 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
1847 sum += free_space->Size();
1864 } 1848 }
1865 // Add the remainder block. 1849 n = n->next();
1866 rem_node->set_size(heap_, rem_bytes); 1850 }
1867 rem_node->set_next(heap_, free_[rem].head_node_); 1851 return sum;
1868 free_[rem].head_node_ = rem_node->address(); 1852 }
1869 } else { 1853
1870 // If this was the last block of size cur, remove the size. 1854
1871 if ((free_[cur].head_node_ = cur_node->next(heap_)) == NULL) { 1855 void FreeList::CountFreeListItems(Page* p, intptr_t* sizes) {
1872 finger_ = prev; 1856 sizes[0] = CountFreeListItemsInList(small_list_, p);
1873 free_[prev].next_size_ = free_[cur].next_size_; 1857 sizes[1] = CountFreeListItemsInList(medium_list_, p);
1874 } 1858 sizes[2] = CountFreeListItemsInList(large_list_, p);
1875 if (rem_bytes < kMinBlockSize) { 1859 sizes[3] = CountFreeListItemsInList(huge_list_, p);
1876 // Too-small remainder is wasted. 1860 }
1877 rem_node->set_size(heap_, rem_bytes);
1878 available_ -= size_in_bytes + rem_bytes;
1879 *wasted_bytes = rem_bytes;
1880 return cur_node;
1881 }
1882 // Add the remainder block and, if needed, insert its size.
1883 rem_node->set_size(heap_, rem_bytes);
1884 rem_node->set_next(heap_, free_[rem].head_node_);
1885 free_[rem].head_node_ = rem_node->address();
1886 if (rem_node->next(heap_) == NULL) InsertSize(rem);
1887 }
1888 available_ -= size_in_bytes;
1889 *wasted_bytes = 0;
1890 return cur_node;
1891 }
1892
1893
1894 void OldSpaceFreeList::MarkNodes() {
1895 for (int i = 0; i < kFreeListsLength; i++) {
1896 Address cur_addr = free_[i].head_node_;
1897 while (cur_addr != NULL) {
1898 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1899 cur_addr = cur_node->next(heap_);
1900 cur_node->SetMark();
1901 }
1902 }
1903 }
1904
1905 1861
1906 #ifdef DEBUG 1862 #ifdef DEBUG
1907 bool OldSpaceFreeList::Contains(FreeListNode* node) { 1863 intptr_t FreeList::SumFreeList(FreeListNode* cur) {
1908 for (int i = 0; i < kFreeListsLength; i++) { 1864 intptr_t sum = 0;
1909 Address cur_addr = free_[i].head_node_; 1865 while (cur != NULL) {
1910 while (cur_addr != NULL) { 1866 ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
1911 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr); 1867 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
1912 if (cur_node == node) return true; 1868 sum += cur_as_free_space->Size();
1913 cur_addr = cur_node->next(heap_); 1869 cur = cur->next();
1914 } 1870 }
1915 } 1871 return sum;
1872 }
1873
1874
1875 static const int kVeryLongFreeList = 500;
1876
1877
1878 int FreeList::FreeListLength(FreeListNode* cur) {
1879 int length = 0;
1880 while (cur != NULL) {
1881 length++;
1882 cur = cur->next();
1883 if (length == kVeryLongFreeList) return length;
1884 }
1885 return length;
1886 }
1887
1888
1889 bool FreeList::IsVeryLong() {
1890 if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
1891 if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
1892 if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
1893 if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
1916 return false; 1894 return false;
1917 } 1895 }
1896
1897
1898 // This can take a very long time because it is linear in the number of entries
1899 // on the free list, so it should not be called if FreeListLength returns
1900 // kVeryLongFreeList.
1901 intptr_t FreeList::SumFreeLists() {
1902 intptr_t sum = SumFreeList(small_list_);
1903 sum += SumFreeList(medium_list_);
1904 sum += SumFreeList(large_list_);
1905 sum += SumFreeList(huge_list_);
1906 return sum;
1907 }
1918 #endif 1908 #endif
1919 1909
1920 1910
1921 FixedSizeFreeList::FixedSizeFreeList(Heap* heap,
1922 AllocationSpace owner,
1923 int object_size)
1924 : heap_(heap), owner_(owner), object_size_(object_size) {
1925 Reset();
1926 }
1927
1928
1929 void FixedSizeFreeList::Reset() {
1930 available_ = 0;
1931 head_ = tail_ = NULL;
1932 }
1933
1934
1935 void FixedSizeFreeList::Free(Address start) {
1936 #ifdef DEBUG
1937 Isolate::Current()->memory_allocator()->ZapBlock(start, object_size_);
1938 #endif
1939 // We only use the freelists with mark-sweep.
1940 ASSERT(!HEAP->mark_compact_collector()->IsCompacting());
1941 FreeListNode* node = FreeListNode::FromAddress(start);
1942 node->set_size(heap_, object_size_);
1943 node->set_next(heap_, NULL);
1944 if (head_ == NULL) {
1945 tail_ = head_ = node->address();
1946 } else {
1947 FreeListNode::FromAddress(tail_)->set_next(heap_, node->address());
1948 tail_ = node->address();
1949 }
1950 available_ += object_size_;
1951 }
1952
1953
1954 MaybeObject* FixedSizeFreeList::Allocate() {
1955 if (head_ == NULL) {
1956 return Failure::RetryAfterGC(owner_);
1957 }
1958
1959 ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
1960 FreeListNode* node = FreeListNode::FromAddress(head_);
1961 head_ = node->next(heap_);
1962 available_ -= object_size_;
1963 return node;
1964 }
1965
1966
1967 void FixedSizeFreeList::MarkNodes() {
1968 Address cur_addr = head_;
1969 while (cur_addr != NULL && cur_addr != tail_) {
1970 FreeListNode* cur_node = FreeListNode::FromAddress(cur_addr);
1971 cur_addr = cur_node->next(heap_);
1972 cur_node->SetMark();
1973 }
1974 }
1975
1976
1977 // ----------------------------------------------------------------------------- 1911 // -----------------------------------------------------------------------------
1978 // OldSpace implementation 1912 // OldSpace implementation
1979 1913
1980 void OldSpace::PrepareForMarkCompact(bool will_compact) {
1981 // Call prepare of the super class.
1982 PagedSpace::PrepareForMarkCompact(will_compact);
1983
1984 if (will_compact) {
1985 // Reset relocation info. During a compacting collection, everything in
1986 // the space is considered 'available' and we will rediscover live data
1987 // and waste during the collection.
1988 MCResetRelocationInfo();
1989 ASSERT(Available() == Capacity());
1990 } else {
1991 // During a non-compacting collection, everything below the linear
1992 // allocation pointer is considered allocated (everything above is
1993 // available) and we will rediscover available and wasted bytes during
1994 // the collection.
1995 accounting_stats_.AllocateBytes(free_list_.available());
1996 accounting_stats_.FillWastedBytes(Waste());
1997 }
1998
1999 // Clear the free list before a full GC---it will be rebuilt afterward.
2000 free_list_.Reset();
2001 }
2002
2003
2004 void OldSpace::MCCommitRelocationInfo() {
2005 // Update fast allocation info.
2006 allocation_info_.top = mc_forwarding_info_.top;
2007 allocation_info_.limit = mc_forwarding_info_.limit;
2008 ASSERT(allocation_info_.VerifyPagedAllocation());
2009
2010 // The space is compacted and we haven't yet built free lists or
2011 // wasted any space.
2012 ASSERT(Waste() == 0);
2013 ASSERT(AvailableFree() == 0);
2014
2015 // Build the free list for the space.
2016 int computed_size = 0;
2017 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2018 while (it.has_next()) {
2019 Page* p = it.next();
2020 // Space below the relocation pointer is allocated.
2021 computed_size +=
2022 static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
2023 if (it.has_next()) {
2024 // Free the space at the top of the page.
2025 int extra_size =
2026 static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
2027 if (extra_size > 0) {
2028 int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
2029 extra_size);
2030 // The bytes we have just "freed" to add to the free list were
2031 // already accounted as available.
2032 accounting_stats_.WasteBytes(wasted_bytes);
2033 }
2034 }
2035 }
2036
2037 // Make sure the computed size - based on the used portion of the pages in
2038 // use - matches the size obtained while computing forwarding addresses.
2039 ASSERT(computed_size == Size());
2040 }
2041
2042
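MCCommitRelocationInfo above rebuilds the space's accounting after compaction by walking the pages in use: the bytes below each page's allocation watermark count as allocated, and the slack between the watermark and the end of the page goes back to the free list on every page except the last one, which keeps the linear allocation area. A small sketch of that per-page split, using offsets instead of real addresses and illustrative names:

#include <cstddef>
#include <vector>

struct PageSketch {
  std::ptrdiff_t object_area_start;
  std::ptrdiff_t object_area_end;
  std::ptrdiff_t watermark;  // everything below this is live after compaction
};

// Returns the total allocated bytes; 'freed' collects the per-page slack that
// would go back on the free list (every page except the last one).
static std::ptrdiff_t CommitRelocatedPages(const std::vector<PageSketch>& pages,
                                           std::vector<std::ptrdiff_t>* freed) {
  std::ptrdiff_t allocated = 0;
  for (std::size_t i = 0; i < pages.size(); ++i) {
    allocated += pages[i].watermark - pages[i].object_area_start;
    if (i + 1 < pages.size()) {
      freed->push_back(pages[i].object_area_end - pages[i].watermark);
    }
  }
  return allocated;
}
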
2043 bool NewSpace::ReserveSpace(int bytes) { 1914 bool NewSpace::ReserveSpace(int bytes) {
2044 // We can't reliably unpack a partial snapshot that needs more new space 1915 // We can't reliably unpack a partial snapshot that needs more new space
2045 // than the minimum NewSpace size. 1916 // than the minimum NewSpace size.
2046 ASSERT(bytes <= InitialCapacity()); 1917 ASSERT(bytes <= InitialCapacity());
2047 Address limit = allocation_info_.limit; 1918 Address limit = allocation_info_.limit;
2048 Address top = allocation_info_.top; 1919 Address top = allocation_info_.top;
2049 return limit - top >= bytes; 1920 return limit - top >= bytes;
2050 } 1921 }
2051 1922
2052 1923
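NewSpace::ReserveSpace above reduces to a bump-pointer availability check: the request fits if limit minus top is at least the requested byte count. A tiny sketch of that check with plain pointers (the AllocationInfo struct here is a simplified stand-in, not the V8 definition):

#include <cstdint>

struct AllocationInfo {
  uint8_t* top;    // next free byte in the linear allocation area
  uint8_t* limit;  // one past the last usable byte
};

// True if a linear (bump-pointer) allocation of 'bytes' would fit.
static bool CanReserve(const AllocationInfo& info, int bytes) {
  return info.limit - info.top >= bytes;
}
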
2053 void PagedSpace::FreePages(Page* prev, Page* last) { 1924 void PagedSpace::PrepareForMarkCompact() {
2054 if (last == AllocationTopPage()) { 1925 // We don't have a linear allocation area while sweeping. It will be restored
2055 // Pages are already at the end of used pages. 1926 // on the first allocation after the sweep.
2056 return; 1927 // Mark the old linear allocation area with a free space map so it can be
1928 // skipped when scanning the heap.
1929 int old_linear_size = static_cast<int>(limit() - top());
1930 Free(top(), old_linear_size);
1931 SetTop(NULL, NULL);
1932
1933 // Stop lazy sweeping and clear marking bits for unswept pages.
1934 if (first_unswept_page_ != NULL) {
1935 Page* last = last_unswept_page_->next_page();
1936 Page* p = first_unswept_page_;
1937 do {
1938 if (ShouldBeSweptLazily(p)) {
1939 ASSERT(!p->WasSwept());
1940 Bitmap::Clear(p);
1941 if (FLAG_gc_verbose) {
1942 PrintF("Sweeping 0x%" V8PRIxPTR " lazily abandoned.\n",
1943 reinterpret_cast<intptr_t>(p));
1944 }
1945 }
1946 p = p->next_page();
1947 } while (p != last);
2057 } 1948 }
1949 first_unswept_page_ = last_unswept_page_ = Page::FromAddress(NULL);
2058 1950
2059 Page* first = NULL; 1951 // Clear the free list before a full GC---it will be rebuilt afterward.
2060 1952 free_list_.Reset();
2061 // Remove pages from the list.
2062 if (prev == NULL) {
2063 first = first_page_;
2064 first_page_ = last->next_page();
2065 } else {
2066 first = prev->next_page();
2067 heap()->isolate()->memory_allocator()->SetNextPage(
2068 prev, last->next_page());
2069 }
2070
2071 // Attach it after the last page.
2072 heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
2073 last_page_ = last;
2074 heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
2075
2076 // Clean them up.
2077 do {
2078 first->InvalidateWatermark(true);
2079 first->SetAllocationWatermark(first->ObjectAreaStart());
2080 first->SetCachedAllocationWatermark(first->ObjectAreaStart());
2081 first->SetRegionMarks(Page::kAllRegionsCleanMarks);
2082 first = first->next_page();
2083 } while (first != NULL);
2084
2085 // Order of pages in this space might no longer be consistent with
2086 // order of pages in chunks.
2087 page_list_is_chunk_ordered_ = false;
2088 } 1953 }
2089 1954
2090 1955
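When lazy sweeping is abandoned in the new PrepareForMarkCompact above, the unswept range is walked with a do/while over next_page() until the page after last_unswept_page_ is reached, clearing the mark bitmap of every page that was still pending. The sketch below reproduces that traversal shape over a simplified page list; the Page struct, its swept flag, and the printf stand in for the real bitmap-clearing work.

#include <cstddef>
#include <cstdio>

struct Page {
  Page* next;
  bool swept;
  Page* next_page() const { return next; }
};

// Visit every page in [first, last] by following next_page() links, mirroring
// the do/while shape used when lazy sweeping is abandoned.
static void AbandonLazySweeping(Page* first, Page* last) {
  if (first == NULL) return;
  Page* stop = last->next_page();  // one past the last unswept page
  Page* p = first;
  do {
    if (!p->swept) {
      // The real code clears the page's mark bitmap here.
      std::printf("clearing marks on unswept page %p\n",
                  static_cast<void*>(p));
    }
    p = p->next_page();
  } while (p != stop);
}
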
2091 void PagedSpace::RelinkPageListInChunkOrder(bool deallocate_blocks) { 1956 bool PagedSpace::ReserveSpace(int size_in_bytes) {
2092 const bool add_to_freelist = true; 1957 ASSERT(size_in_bytes <= Page::kMaxHeapObjectSize);
1958 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
1959 Address current_top = allocation_info_.top;
1960 Address new_top = current_top + size_in_bytes;
1961 if (new_top <= allocation_info_.limit) return true;
2093 1962
2094 // Mark used and unused pages to properly fill unused pages 1963 HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2095 // after reordering. 1964 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2096 PageIterator all_pages_iterator(this, PageIterator::ALL_PAGES); 1965 if (new_area == NULL) return false;
2097 Page* last_in_use = AllocationTopPage();
2098 bool in_use = true;
2099 1966
2100 while (all_pages_iterator.has_next()) { 1967 int old_linear_size = static_cast<int>(limit() - top());
2101 Page* p = all_pages_iterator.next(); 1968 // Mark the old linear allocation area with a free space so it can be
2102 p->SetWasInUseBeforeMC(in_use); 1969 // skipped when scanning the heap. This also puts it back in the free list
2103 if (p == last_in_use) { 1970 // if it is big enough.
2104 // We passed a page containing the allocation top. All subsequent 1971 Free(top(), old_linear_size);
2105 // pages are not used.
2106 in_use = false;
2107 }
2108 }
2109 1972
2110 if (page_list_is_chunk_ordered_) return; 1973 SetTop(new_area->address(), new_area->address() + size_in_bytes);
2111 1974 Allocate(size_in_bytes);
2112 Page* new_last_in_use = Page::FromAddress(NULL);
2113 heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
2114 this, &first_page_, &last_page_, &new_last_in_use);
2115 ASSERT(new_last_in_use->is_valid());
2116
2117 if (new_last_in_use != last_in_use) {
2118 // The current allocation top points to a page that is now in the middle
2119 // of the page list. We should move the allocation top forward to the new last
2120 // used page so various object iterators will continue to work properly.
2121 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
2122 last_in_use->AllocationTop());
2123
2124 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
2125 if (size_in_bytes > 0) {
2126 Address start = last_in_use->AllocationTop();
2127 if (deallocate_blocks) {
2128 accounting_stats_.AllocateBytes(size_in_bytes);
2129 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2130 } else {
2131 heap()->CreateFillerObjectAt(start, size_in_bytes);
2132 }
2133 }
2134
2135 // The new last-in-use page was in the middle of the list before
2136 // sorting, so it is full.
2137 SetTop(new_last_in_use->AllocationTop());
2138
2139 ASSERT(AllocationTopPage() == new_last_in_use);
2140 ASSERT(AllocationTopPage()->WasInUseBeforeMC());
2141 }
2142
2143 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
2144 while (pages_in_use_iterator.has_next()) {
2145 Page* p = pages_in_use_iterator.next();
2146 if (!p->WasInUseBeforeMC()) {
2147 // Empty page is in the middle of a sequence of used pages.
2148 // Allocate it as a whole and deallocate immediately.
2149 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
2150 p->ObjectAreaStart());
2151
2152 p->SetAllocationWatermark(p->ObjectAreaStart());
2153 Address start = p->ObjectAreaStart();
2154 if (deallocate_blocks) {
2155 accounting_stats_.AllocateBytes(size_in_bytes);
2156 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2157 } else {
2158 heap()->CreateFillerObjectAt(start, size_in_bytes);
2159 }
2160 }
2161 }
2162
2163 page_list_is_chunk_ordered_ = true;
2164 }
2165
2166
2167 void PagedSpace::PrepareForMarkCompact(bool will_compact) {
2168 if (will_compact) {
2169 RelinkPageListInChunkOrder(false);
2170 }
2171 }
2172
2173
2174 bool PagedSpace::ReserveSpace(int bytes) {
2175 Address limit = allocation_info_.limit;
2176 Address top = allocation_info_.top;
2177 if (limit - top >= bytes) return true;
2178
2179 // There wasn't enough space in the current page. Let's put the rest
2180 // of the page on the free list and start a fresh page.
2181 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
2182
2183 Page* reserved_page = TopPageOf(allocation_info_);
2184 int bytes_left_to_reserve = bytes;
2185 while (bytes_left_to_reserve > 0) {
2186 if (!reserved_page->next_page()->is_valid()) {
2187 if (heap()->OldGenerationAllocationLimitReached()) return false;
2188 Expand(reserved_page);
2189 }
2190 bytes_left_to_reserve -= Page::kPageSize;
2191 reserved_page = reserved_page->next_page();
2192 if (!reserved_page->is_valid()) return false;
2193 }
2194 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
2195 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
2196 SetAllocationInfo(&allocation_info_,
2197 TopPageOf(allocation_info_)->next_page());
2198 return true; 1975 return true;
2199 } 1976 }
2200 1977
2201 1978
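The new PagedSpace::ReserveSpace follows a three-step fallback: use the current linear area if the request fits, otherwise take a block from the free list or the slow path, and only then retire the leftover linear area and install the new block as the current top/limit. A compact sketch of that chain with the allocator hooks stubbed out (all names here are illustrative, not the V8 API):

#include <cstddef>
#include <cstdint>

// Simplified stand-in for a paged space: just enough state to show the
// reserve fallback chain (linear area -> free list -> slow path).
class SpaceSketch {
 public:
  SpaceSketch(uint8_t* area, int area_size)
      : top_(area), limit_(area + area_size) {}

  bool ReserveSpace(int size_in_bytes) {
    // 1. Fast path: the request fits in the current linear area.
    if (limit_ - top_ >= size_in_bytes) return true;

    // 2. Otherwise find a block elsewhere: free list first, then slow path.
    uint8_t* new_area = AllocateFromFreeList(size_in_bytes);
    if (new_area == NULL) new_area = SlowAllocate(size_in_bytes);
    if (new_area == NULL) return false;

    // 3. Retire the leftover linear area so the heap stays parsable, then
    //    install the new block as the current linear allocation area.
    RetireLinearArea(top_, static_cast<int>(limit_ - top_));
    top_ = new_area;
    limit_ = new_area + size_in_bytes;
    return true;
  }

 private:
  // The real work happens in the space and its free list; these are stubs.
  uint8_t* AllocateFromFreeList(int) { return NULL; }
  uint8_t* SlowAllocate(int) { return NULL; }
  void RetireLinearArea(uint8_t*, int) {}

  uint8_t* top_;
  uint8_t* limit_;
};

Retiring the old linear area before switching matters because, per the comment in the diff, the leftover bytes are turned into a free-space block that the heap scanner can skip rather than being left as unparsable memory.
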
2202 // You have to call this last, since the implementation from PagedSpace 1979 // You have to call this last, since the implementation from PagedSpace
2203 // doesn't know that memory was 'promised' to large object space. 1980 // doesn't know that memory was 'promised' to large object space.
2204 bool LargeObjectSpace::ReserveSpace(int bytes) { 1981 bool LargeObjectSpace::ReserveSpace(int bytes) {
2205 return heap()->OldGenerationSpaceAvailable() >= bytes; 1982 return heap()->OldGenerationSpaceAvailable() >= bytes;
2206 } 1983 }
2207 1984
2208 1985
2209 // Slow case for normal allocation. Try in order: (1) allocate in the next 1986 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) {
2210 // page in the space, (2) allocate off the space's free list, (3) expand the 1987 if (IsSweepingComplete()) return true;
2211 // space, (4) fail. 1988
2212 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) { 1989 intptr_t freed_bytes = 0;
2213 // Linear allocation in this space has failed. If there is another page 1990 Page* last = last_unswept_page_->next_page();
2214 // in the space, move to that page and allocate there. This allocation 1991 Page* p = first_unswept_page_;
2215 // should succeed (size_in_bytes should not be greater than a page's 1992 do {
2216 // object area size). 1993 Page* next_page = p->next_page();
2217 Page* current_page = TopPageOf(allocation_info_); 1994 if (ShouldBeSweptLazily(p)) {
2218 if (current_page->next_page()->is_valid()) { 1995 if (FLAG_gc_verbose) {
2219 return AllocateInNextPage(current_page, size_in_bytes); 1996 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n",
1997 reinterpret_cast<intptr_t>(p));
1998 }
1999 freed_bytes += MarkCompactCollector::SweepConservatively(this, p);
2000 }
2001 p = next_page;
2002 } while (p != last && freed_bytes < bytes_to_sweep);
2003
2004 if (p == last) {
2005 last_unswept_page_ = first_unswept_page_ = Page::FromAddress(NULL);
2006 } else {
2007 first_unswept_page_ = p;
2220 } 2008 }
2221 2009
2222 // There is no next page in this space. Try free list allocation unless that 2010 heap()->LowerOldGenLimits(freed_bytes);
2223 // is currently forbidden.
2224 if (!heap()->linear_allocation()) {
2225 int wasted_bytes;
2226 Object* result;
2227 MaybeObject* maybe = free_list_.Allocate(size_in_bytes, &wasted_bytes);
2228 accounting_stats_.WasteBytes(wasted_bytes);
2229 if (maybe->ToObject(&result)) {
2230 accounting_stats_.AllocateBytes(size_in_bytes);
2231 2011
2232 HeapObject* obj = HeapObject::cast(result); 2012 heap()->FreeQueuedChunks();
2233 Page* p = Page::FromAddress(obj->address());
2234 2013
2235 if (obj->address() >= p->AllocationWatermark()) { 2014 return IsSweepingComplete();
2236 // There should be no hole between the allocation watermark 2015 }
2237 // and allocated object address.
2238 // Memory above the allocation watermark was not swept and
2239 // might contain garbage pointers to new space.
2240 ASSERT(obj->address() == p->AllocationWatermark());
2241 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2242 }
2243 2016
2244 return obj; 2017
2245 } 2018 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2019 if (allocation_info_.top >= allocation_info_.limit) return;
2020
2021 if (Page::FromAddress(allocation_info_.top)->IsEvacuationCandidate()) {
2022 // Create filler object to keep page iterable if it was iterable.
2023 int remaining =
2024 static_cast<int>(allocation_info_.limit - allocation_info_.top);
2025 heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
2026
2027 allocation_info_.top = NULL;
2028 allocation_info_.limit = NULL;
2246 } 2029 }
2030 }
2031
2032
2033 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2034 // Allocation in this space has failed.
2247 2035
2248 // Free list allocation failed and there is no next page. Fail if we have 2036 // Free list allocation failed and there is no next page. Fail if we have
2249 // hit the old generation size limit that should cause a garbage 2037 // hit the old generation size limit that should cause a garbage
2250 // collection. 2038 // collection.
2251 if (!heap()->always_allocate() && 2039 if (!heap()->always_allocate() &&
2252 heap()->OldGenerationAllocationLimitReached()) { 2040 heap()->OldGenerationAllocationLimitReached()) {
2253 return NULL; 2041 return NULL;
2254 } 2042 }
2255 2043
2044 // If there are unswept pages, advance the lazy sweeper.
2045 if (first_unswept_page_->is_valid()) {
2046 AdvanceSweeper(size_in_bytes);
2047
2048 // Retry the free list allocation.
2049 HeapObject* object = free_list_.Allocate(size_in_bytes);
2050 if (object != NULL) return object;
2051
2052 if (!IsSweepingComplete()) {
2053 AdvanceSweeper(kMaxInt);
2054
2055 // Retry the free list allocation.
2056 object = free_list_.Allocate(size_in_bytes);
2057 if (object != NULL) return object;
2058 }
2059 }
2060
2256 // Try to expand the space and allocate in the new next page. 2061 // Try to expand the space and allocate in the new next page.
2257 ASSERT(!current_page->next_page()->is_valid()); 2062 if (Expand()) {
2258 if (Expand(current_page)) { 2063 return free_list_.Allocate(size_in_bytes);
2259 return AllocateInNextPage(current_page, size_in_bytes);
2260 } 2064 }
2261 2065
2262 // Finally, fail. 2066 // Finally, fail.
2263 return NULL; 2067 return NULL;
2264 } 2068 }
2265 2069
2266 2070
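PagedSpace::SlowAllocateRaw above encodes the retry ladder used once linear allocation has failed: respect the old-generation limit, sweep roughly size_in_bytes worth of pages and retry the free list, finish sweeping and retry again, expand the space, and only then return NULL so the caller can trigger a GC. A control-flow sketch with every step stubbed out (the method names are illustrative, not the real heap interface):

#include <cstddef>
#include <climits>

// Control-flow sketch of the slow allocation path; every step is a stub.
class SlowPathSketch {
 public:
  SlowPathSketch() : always_allocate_(false) {}

  // Returns a block address or NULL, mirroring the retry ladder above.
  void* SlowAllocate(int size_in_bytes) {
    if (!always_allocate_ && OldGenerationLimitReached()) return NULL;

    if (HasUnsweptPages()) {
      AdvanceSweeper(size_in_bytes);          // sweep roughly enough bytes
      void* result = TryFreeList(size_in_bytes);
      if (result != NULL) return result;

      AdvanceSweeper(INT_MAX);                // finish sweeping completely
      result = TryFreeList(size_in_bytes);
      if (result != NULL) return result;
    }

    if (Expand()) return TryFreeList(size_in_bytes);
    return NULL;                              // caller is expected to GC
  }

 private:
  bool OldGenerationLimitReached() { return false; }
  bool HasUnsweptPages() { return false; }
  void AdvanceSweeper(int) {}
  void* TryFreeList(int) { return NULL; }
  bool Expand() { return false; }

  bool always_allocate_;
};
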
2267 void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
2268 current_page->SetAllocationWatermark(allocation_info_.top);
2269 int free_size =
2270 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2271 if (free_size > 0) {
2272 int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
2273 accounting_stats_.WasteBytes(wasted_bytes);
2274 }
2275 }
2276
2277
2278 void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
2279 current_page->SetAllocationWatermark(allocation_info_.top);
2280 int free_size =
2281 static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
2282 // In the fixed space free list all the free list items have the right size.
2283 // We use up the rest of the page while preserving this invariant.
2284 while (free_size >= object_size_in_bytes_) {
2285 free_list_.Free(allocation_info_.top);
2286 allocation_info_.top += object_size_in_bytes_;
2287 free_size -= object_size_in_bytes_;
2288 accounting_stats_.WasteBytes(object_size_in_bytes_);
2289 }
2290 }
2291
2292
2293 // Add the block at the top of the page to the space's free list, set the
2294 // allocation info to the next page (assumed to be one), and allocate
2295 // linearly there.
2296 HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
2297 int size_in_bytes) {
2298 ASSERT(current_page->next_page()->is_valid());
2299 Page* next_page = current_page->next_page();
2300 next_page->ClearGCFields();
2301 PutRestOfCurrentPageOnFreeList(current_page);
2302 SetAllocationInfo(&allocation_info_, next_page);
2303 return AllocateLinearly(&allocation_info_, size_in_bytes);
2304 }
2305
2306
2307 void OldSpace::DeallocateBlock(Address start,
2308 int size_in_bytes,
2309 bool add_to_freelist) {
2310 Free(start, size_in_bytes, add_to_freelist);
2311 }
2312
2313
2314 #ifdef DEBUG 2071 #ifdef DEBUG
2315 void PagedSpace::ReportCodeStatistics() { 2072 void PagedSpace::ReportCodeStatistics() {
2316 Isolate* isolate = Isolate::Current(); 2073 Isolate* isolate = Isolate::Current();
2317 CommentStatistic* comments_statistics = 2074 CommentStatistic* comments_statistics =
2318 isolate->paged_space_comments_statistics(); 2075 isolate->paged_space_comments_statistics();
2319 ReportCodeKindStatistics(); 2076 ReportCodeKindStatistics();
2320 PrintF("Code comment statistics (\" [ comment-txt : size/ " 2077 PrintF("Code comment statistics (\" [ comment-txt : size/ "
2321 "count (average)\"):\n"); 2078 "count (average)\"):\n");
2322 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) { 2079 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
2323 const CommentStatistic& cs = comments_statistics[i]; 2080 const CommentStatistic& cs = comments_statistics[i];
(...skipping 82 matching lines...)
2406 EnterComment(isolate, comment_txt, flat_delta); 2163 EnterComment(isolate, comment_txt, flat_delta);
2407 } 2164 }
2408 2165
2409 2166
2410 // Collects code size statistics: 2167 // Collects code size statistics:
2411 // - by code kind 2168 // - by code kind
2412 // - by code comment 2169 // - by code comment
2413 void PagedSpace::CollectCodeStatistics() { 2170 void PagedSpace::CollectCodeStatistics() {
2414 Isolate* isolate = heap()->isolate(); 2171 Isolate* isolate = heap()->isolate();
2415 HeapObjectIterator obj_it(this); 2172 HeapObjectIterator obj_it(this);
2416 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { 2173 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2417 if (obj->IsCode()) { 2174 if (obj->IsCode()) {
2418 Code* code = Code::cast(obj); 2175 Code* code = Code::cast(obj);
2419 isolate->code_kind_statistics()[code->kind()] += code->Size(); 2176 isolate->code_kind_statistics()[code->kind()] += code->Size();
2420 RelocIterator it(code); 2177 RelocIterator it(code);
2421 int delta = 0; 2178 int delta = 0;
2422 const byte* prev_pc = code->instruction_start(); 2179 const byte* prev_pc = code->instruction_start();
2423 while (!it.done()) { 2180 while (!it.done()) {
2424 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { 2181 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
2425 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); 2182 delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
2426 CollectCommentStatistics(isolate, &it); 2183 CollectCommentStatistics(isolate, &it);
2427 prev_pc = it.rinfo()->pc(); 2184 prev_pc = it.rinfo()->pc();
2428 } 2185 }
2429 it.next(); 2186 it.next();
2430 } 2187 }
2431 2188
2432 ASSERT(code->instruction_start() <= prev_pc && 2189 ASSERT(code->instruction_start() <= prev_pc &&
2433 prev_pc <= code->instruction_end()); 2190 prev_pc <= code->instruction_end());
2434 delta += static_cast<int>(code->instruction_end() - prev_pc); 2191 delta += static_cast<int>(code->instruction_end() - prev_pc);
2435 EnterComment(isolate, "NoComment", delta); 2192 EnterComment(isolate, "NoComment", delta);
2436 } 2193 }
2437 } 2194 }
2438 } 2195 }
2439 2196
2440 2197
2441 void OldSpace::ReportStatistics() { 2198 void PagedSpace::ReportStatistics() {
2442 int pct = static_cast<int>(Available() * 100 / Capacity()); 2199 int pct = static_cast<int>(Available() * 100 / Capacity());
2443 PrintF(" capacity: %" V8_PTR_PREFIX "d" 2200 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2444 ", waste: %" V8_PTR_PREFIX "d" 2201 ", waste: %" V8_PTR_PREFIX "d"
2445 ", available: %" V8_PTR_PREFIX "d, %%%d\n", 2202 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2446 Capacity(), Waste(), Available(), pct); 2203 Capacity(), Waste(), Available(), pct);
2447 2204
2205 if (was_swept_conservatively_) return;
2448 ClearHistograms(); 2206 ClearHistograms();
2449 HeapObjectIterator obj_it(this); 2207 HeapObjectIterator obj_it(this);
2450 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) 2208 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
2451 CollectHistogramInfo(obj); 2209 CollectHistogramInfo(obj);
2452 ReportHistogram(true); 2210 ReportHistogram(true);
2453 } 2211 }
2454 #endif 2212 #endif
2455 2213
2456 // ----------------------------------------------------------------------------- 2214 // -----------------------------------------------------------------------------
2457 // FixedSpace implementation 2215 // FixedSpace implementation
2458 2216
2459 void FixedSpace::PrepareForMarkCompact(bool will_compact) { 2217 void FixedSpace::PrepareForMarkCompact() {
2460 // Call prepare of the super class. 2218 // Call prepare of the super class.
2461 PagedSpace::PrepareForMarkCompact(will_compact); 2219 PagedSpace::PrepareForMarkCompact();
2462 2220
2463 if (will_compact) { 2221 // During a non-compacting collection, everything below the linear
2464 // Reset relocation info. 2222 // allocation pointer except wasted top-of-page blocks is considered
2465 MCResetRelocationInfo(); 2223 // allocated and we will rediscover available bytes during the
2466 2224 // collection.
2467 // During a compacting collection, everything in the space is considered 2225 accounting_stats_.AllocateBytes(free_list_.available());
2468 // 'available' (set by the call to MCResetRelocationInfo) and we will
2469 // rediscover live and wasted bytes during the collection.
2470 ASSERT(Available() == Capacity());
2471 } else {
2472 // During a non-compacting collection, everything below the linear
2473 // allocation pointer except wasted top-of-page blocks is considered
2474 // allocated and we will rediscover available bytes during the
2475 // collection.
2476 accounting_stats_.AllocateBytes(free_list_.available());
2477 }
2478 2226
2479 // Clear the free list before a full GC---it will be rebuilt afterward. 2227 // Clear the free list before a full GC---it will be rebuilt afterward.
2480 free_list_.Reset(); 2228 free_list_.Reset();
2481 } 2229 }
2482 2230
2483 2231
2484 void FixedSpace::MCCommitRelocationInfo() {
2485 // Update fast allocation info.
2486 allocation_info_.top = mc_forwarding_info_.top;
2487 allocation_info_.limit = mc_forwarding_info_.limit;
2488 ASSERT(allocation_info_.VerifyPagedAllocation());
2489
2490 // The space is compacted and we haven't yet wasted any space.
2491 ASSERT(Waste() == 0);
2492
2493 // Update allocation_top of each page in use and compute waste.
2494 int computed_size = 0;
2495 PageIterator it(this, PageIterator::PAGES_USED_BY_MC);
2496 while (it.has_next()) {
2497 Page* page = it.next();
2498 Address page_top = page->AllocationTop();
2499 computed_size += static_cast<int>(page_top - page->ObjectAreaStart());
2500 if (it.has_next()) {
2501 accounting_stats_.WasteBytes(
2502 static_cast<int>(page->ObjectAreaEnd() - page_top));
2503 page->SetAllocationWatermark(page_top);
2504 }
2505 }
2506
2507 // Make sure the computed size - based on the used portion of the
2508 // pages in use - matches the size we adjust during allocation.
2509 ASSERT(computed_size == Size());
2510 }
2511
2512
2513 // Slow case for normal allocation. Try in order: (1) allocate in the next
2514 // page in the space, (2) allocate off the space's free list, (3) expand the
2515 // space, (4) fail.
2516 HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
2517 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2518 // Linear allocation in this space has failed. If there is another page
2519 // in the space, move to that page and allocate there. This allocation
2520 // should succeed.
2521 Page* current_page = TopPageOf(allocation_info_);
2522 if (current_page->next_page()->is_valid()) {
2523 return AllocateInNextPage(current_page, size_in_bytes);
2524 }
2525
2526 // There is no next page in this space. Try free list allocation unless
2527 // that is currently forbidden. The fixed space free list implicitly assumes
2528 // that all free blocks are of the fixed size.
2529 if (!heap()->linear_allocation()) {
2530 Object* result;
2531 MaybeObject* maybe = free_list_.Allocate();
2532 if (maybe->ToObject(&result)) {
2533 accounting_stats_.AllocateBytes(size_in_bytes);
2534 HeapObject* obj = HeapObject::cast(result);
2535 Page* p = Page::FromAddress(obj->address());
2536
2537 if (obj->address() >= p->AllocationWatermark()) {
2538 // There should be no hole between the allocation watermark
2539 // and allocated object address.
2540 // Memory above the allocation watermark was not swept and
2541 // might contain garbage pointers to new space.
2542 ASSERT(obj->address() == p->AllocationWatermark());
2543 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2544 }
2545
2546 return obj;
2547 }
2548 }
2549
2550 // Free list allocation failed and there is no next page. Fail if we have
2551 // hit the old generation size limit that should cause a garbage
2552 // collection.
2553 if (!heap()->always_allocate() &&
2554 heap()->OldGenerationAllocationLimitReached()) {
2555 return NULL;
2556 }
2557
2558 // Try to expand the space and allocate in the new next page.
2559 ASSERT(!current_page->next_page()->is_valid());
2560 if (Expand(current_page)) {
2561 return AllocateInNextPage(current_page, size_in_bytes);
2562 }
2563
2564 // Finally, fail.
2565 return NULL;
2566 }
2567
2568
2569 // Move to the next page (there is assumed to be one) and allocate there.
2570 // The top of page block is always wasted, because it is too small to hold a
2571 // map.
2572 HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
2573 int size_in_bytes) {
2574 ASSERT(current_page->next_page()->is_valid());
2575 ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
2576 ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
2577 Page* next_page = current_page->next_page();
2578 next_page->ClearGCFields();
2579 current_page->SetAllocationWatermark(allocation_info_.top);
2580 accounting_stats_.WasteBytes(page_extra_);
2581 SetAllocationInfo(&allocation_info_, next_page);
2582 return AllocateLinearly(&allocation_info_, size_in_bytes);
2583 }
2584
2585
2586 void FixedSpace::DeallocateBlock(Address start,
2587 int size_in_bytes,
2588 bool add_to_freelist) {
2589 // Free-list elements in fixed space are assumed to have a fixed size.
2590 // We break the free block into chunks and add them to the free list
2591 // individually.
2592 int size = object_size_in_bytes();
2593 ASSERT(size_in_bytes % size == 0);
2594 Address end = start + size_in_bytes;
2595 for (Address a = start; a < end; a += size) {
2596 Free(a, add_to_freelist);
2597 }
2598 }
2599
2600
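FixedSpace::DeallocateBlock above relies on the invariant that every free-list entry has the fixed object size, so an arbitrary freed range is cut into object-size chunks and each chunk is freed individually. A minimal sketch of that chunking loop, with the free-list call reduced to a function pointer (names are illustrative):

#include <cassert>
#include <cstdint>

// Split [start, start + size_in_bytes) into fixed-size chunks and hand each
// chunk to 'free_chunk', mirroring the loop in FixedSpace::DeallocateBlock.
static void DeallocateFixedBlock(uint8_t* start,
                                 int size_in_bytes,
                                 int object_size,
                                 void (*free_chunk)(uint8_t*)) {
  assert(size_in_bytes % object_size == 0);
  uint8_t* end = start + size_in_bytes;
  for (uint8_t* a = start; a < end; a += object_size) {
    free_chunk(a);
  }
}
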
2601 #ifdef DEBUG
2602 void FixedSpace::ReportStatistics() {
2603 int pct = static_cast<int>(Available() * 100 / Capacity());
2604 PrintF(" capacity: %" V8_PTR_PREFIX "d"
2605 ", waste: %" V8_PTR_PREFIX "d"
2606 ", available: %" V8_PTR_PREFIX "d, %%%d\n",
2607 Capacity(), Waste(), Available(), pct);
2608
2609 ClearHistograms();
2610 HeapObjectIterator obj_it(this);
2611 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
2612 CollectHistogramInfo(obj);
2613 ReportHistogram(false);
2614 }
2615 #endif
2616
2617
2618 // ----------------------------------------------------------------------------- 2232 // -----------------------------------------------------------------------------
2619 // MapSpace implementation 2233 // MapSpace implementation
2620 2234
2621 void MapSpace::PrepareForMarkCompact(bool will_compact) {
2622 // Call prepare of the super class.
2623 FixedSpace::PrepareForMarkCompact(will_compact);
2624
2625 if (will_compact) {
2626 // Initialize map index entry.
2627 int page_count = 0;
2628 PageIterator it(this, PageIterator::ALL_PAGES);
2629 while (it.has_next()) {
2630 ASSERT_MAP_PAGE_INDEX(page_count);
2631
2632 Page* p = it.next();
2633 ASSERT(p->mc_page_index == page_count);
2634
2635 page_addresses_[page_count++] = p->address();
2636 }
2637 }
2638 }
2639
2640
2641 #ifdef DEBUG 2235 #ifdef DEBUG
2642 void MapSpace::VerifyObject(HeapObject* object) { 2236 void MapSpace::VerifyObject(HeapObject* object) {
2643 // The object should be a map or a free-list node. 2237 // The object should be a map or a free-list node.
2644 ASSERT(object->IsMap() || object->IsByteArray()); 2238 ASSERT(object->IsMap() || object->IsFreeSpace());
2645 } 2239 }
2646 #endif 2240 #endif
2647 2241
2648 2242
2649 // ----------------------------------------------------------------------------- 2243 // -----------------------------------------------------------------------------
2650 // GlobalPropertyCellSpace implementation 2244 // GlobalPropertyCellSpace implementation
2651 2245
2652 #ifdef DEBUG 2246 #ifdef DEBUG
2653 void CellSpace::VerifyObject(HeapObject* object) { 2247 void CellSpace::VerifyObject(HeapObject* object) {
2654 // The object should be a global object property cell or a free-list node. 2248 // The object should be a global object property cell or a free-list node.
2655 ASSERT(object->IsJSGlobalPropertyCell() || 2249 ASSERT(object->IsJSGlobalPropertyCell() ||
2656 object->map() == heap()->two_pointer_filler_map()); 2250 object->map() == heap()->two_pointer_filler_map());
2657 } 2251 }
2658 #endif 2252 #endif
2659 2253
2660 2254
2661 // ----------------------------------------------------------------------------- 2255 // -----------------------------------------------------------------------------
2662 // LargeObjectIterator 2256 // LargeObjectIterator
2663 2257
2664 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { 2258 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2665 current_ = space->first_chunk_; 2259 current_ = space->first_page_;
2666 size_func_ = NULL; 2260 size_func_ = NULL;
2667 } 2261 }
2668 2262
2669 2263
2670 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space, 2264 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
2671 HeapObjectCallback size_func) { 2265 HeapObjectCallback size_func) {
2672 current_ = space->first_chunk_; 2266 current_ = space->first_page_;
2673 size_func_ = size_func; 2267 size_func_ = size_func;
2674 } 2268 }
2675 2269
2676 2270
2677 HeapObject* LargeObjectIterator::next() { 2271 HeapObject* LargeObjectIterator::Next() {
2678 if (current_ == NULL) return NULL; 2272 if (current_ == NULL) return NULL;
2679 2273
2680 HeapObject* object = current_->GetObject(); 2274 HeapObject* object = current_->GetObject();
2681 current_ = current_->next(); 2275 current_ = current_->next_page();
2682 return object; 2276 return object;
2683 } 2277 }
2684 2278
2685 2279
2686 // ----------------------------------------------------------------------------- 2280 // -----------------------------------------------------------------------------
2687 // LargeObjectChunk
2688
2689 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
2690 Executability executable) {
2691 size_t requested = ChunkSizeFor(size_in_bytes);
2692 size_t size;
2693 size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
2694 Isolate* isolate = Isolate::Current();
2695 void* mem = isolate->memory_allocator()->AllocateRawMemory(
2696 requested + guard_size, &size, executable);
2697 if (mem == NULL) return NULL;
2698
2699 // The start of the chunk may be overlaid with a page, so we have to
2700 // make sure that the page flags fit in the size field.
2701 ASSERT((size & Page::kPageFlagMask) == 0);
2702
2703 LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
2704 if (size < requested + guard_size) {
2705 isolate->memory_allocator()->FreeRawMemory(
2706 mem, size, executable);
2707 LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
2708 return NULL;
2709 }
2710
2711 if (guard_size != 0) {
2712 OS::Guard(mem, guard_size);
2713 size -= guard_size;
2714 mem = static_cast<Address>(mem) + guard_size;
2715 }
2716
2717 ObjectSpace space = (executable == EXECUTABLE)
2718 ? kObjectSpaceCodeSpace
2719 : kObjectSpaceLoSpace;
2720 isolate->memory_allocator()->PerformAllocationCallback(
2721 space, kAllocationActionAllocate, size);
2722
2723 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
2724 chunk->size_ = size;
2725 chunk->GetPage()->heap_ = isolate->heap();
2726 return chunk;
2727 }
2728
2729
2730 void LargeObjectChunk::Free(Executability executable) {
2731 size_t guard_size = (executable == EXECUTABLE) ? Page::kPageSize : 0;
2732 ObjectSpace space =
2733 (executable == EXECUTABLE) ? kObjectSpaceCodeSpace : kObjectSpaceLoSpace;
2734 // Do not access instance fields after FreeRawMemory!
2735 Address my_address = address();
2736 size_t my_size = size();
2737 Isolate* isolate = GetPage()->heap_->isolate();
2738 MemoryAllocator* a = isolate->memory_allocator();
2739 a->FreeRawMemory(my_address - guard_size, my_size + guard_size, executable);
2740 a->PerformAllocationCallback(space, kAllocationActionFree, my_size);
2741 LOG(isolate, DeleteEvent("LargeObjectChunk", my_address));
2742 }
2743
2744
2745 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
2746 int os_alignment = static_cast<int>(OS::AllocateAlignment());
2747 if (os_alignment < Page::kPageSize) {
2748 size_in_bytes += (Page::kPageSize - os_alignment);
2749 }
2750 return size_in_bytes + Page::kObjectStartOffset;
2751 }
2752
2753 // -----------------------------------------------------------------------------
2754 // LargeObjectSpace 2281 // LargeObjectSpace
2755 2282
2756 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id) 2283 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
2757 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis 2284 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2758 first_chunk_(NULL), 2285 first_page_(NULL),
2759 size_(0), 2286 size_(0),
2760 page_count_(0), 2287 page_count_(0),
2761 objects_size_(0) {} 2288 objects_size_(0) {}
2762 2289
2763 2290
2764 bool LargeObjectSpace::Setup() { 2291 bool LargeObjectSpace::Setup() {
2765 first_chunk_ = NULL; 2292 first_page_ = NULL;
2766 size_ = 0; 2293 size_ = 0;
2767 page_count_ = 0; 2294 page_count_ = 0;
2768 objects_size_ = 0; 2295 objects_size_ = 0;
2769 return true; 2296 return true;
2770 } 2297 }
2771 2298
2772 2299
2773 void LargeObjectSpace::TearDown() { 2300 void LargeObjectSpace::TearDown() {
2774 while (first_chunk_ != NULL) { 2301 while (first_page_ != NULL) {
2775 LargeObjectChunk* chunk = first_chunk_; 2302 LargePage* page = first_page_;
2776 first_chunk_ = first_chunk_->next(); 2303 first_page_ = first_page_->next_page();
2777 chunk->Free(chunk->GetPage()->PageExecutability()); 2304 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
2305
2306 ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
2307 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2308 space, kAllocationActionFree, page->size());
2309 heap()->isolate()->memory_allocator()->Free(page);
2778 } 2310 }
2779 Setup(); 2311 Setup();
2780 } 2312 }
2781 2313
2782 2314
2783 MaybeObject* LargeObjectSpace::AllocateRawInternal(int requested_size, 2315 MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
2784 int object_size, 2316 Executability executable) {
2785 Executability executable) {
2786 ASSERT(0 < object_size && object_size <= requested_size);
2787
2788 // Check if we want to force a GC before growing the old space further. 2317 // Check if we want to force a GC before growing the old space further.
2789 // If so, fail the allocation. 2318 // If so, fail the allocation.
2790 if (!heap()->always_allocate() && 2319 if (!heap()->always_allocate() &&
2791 heap()->OldGenerationAllocationLimitReached()) { 2320 heap()->OldGenerationAllocationLimitReached()) {
2792 return Failure::RetryAfterGC(identity()); 2321 return Failure::RetryAfterGC(identity());
2793 } 2322 }
2794 2323
2795 LargeObjectChunk* chunk = LargeObjectChunk::New(requested_size, executable); 2324 LargePage* page = heap()->isolate()->memory_allocator()->
2796 if (chunk == NULL) { 2325 AllocateLargePage(object_size, executable, this);
2797 return Failure::RetryAfterGC(identity()); 2326 if (page == NULL) return Failure::RetryAfterGC(identity());
2798 } 2327 ASSERT(page->body_size() >= object_size);
2799 2328
2800 size_ += static_cast<int>(chunk->size()); 2329 size_ += static_cast<int>(page->size());
2801 objects_size_ += requested_size; 2330 objects_size_ += object_size;
2802 page_count_++; 2331 page_count_++;
2803 chunk->set_next(first_chunk_); 2332 page->set_next_page(first_page_);
2804 first_chunk_ = chunk; 2333 first_page_ = page;
2805 2334
2806 // Initialize page header. 2335 heap()->incremental_marking()->OldSpaceStep(object_size);
2807 Page* page = chunk->GetPage(); 2336 return page->GetObject();
2808 Address object_address = page->ObjectAreaStart();
2809
2810 // Clear the low order bit of the second word in the page to flag it as a
2811 // large object page. If the chunk_size happened to be written there, its
2812 // low order bit should already be clear.
2813 page->SetIsLargeObjectPage(true);
2814 page->SetPageExecutability(executable);
2815 page->SetRegionMarks(Page::kAllRegionsCleanMarks);
2816 return HeapObject::FromAddress(object_address);
2817 }
2818
2819
2820 MaybeObject* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
2821 ASSERT(0 < size_in_bytes);
2822 return AllocateRawInternal(size_in_bytes,
2823 size_in_bytes,
2824 EXECUTABLE);
2825 }
2826
2827
2828 MaybeObject* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
2829 ASSERT(0 < size_in_bytes);
2830 return AllocateRawInternal(size_in_bytes,
2831 size_in_bytes,
2832 NOT_EXECUTABLE);
2833 }
2834
2835
2836 MaybeObject* LargeObjectSpace::AllocateRaw(int size_in_bytes) {
2837 ASSERT(0 < size_in_bytes);
2838 return AllocateRawInternal(size_in_bytes,
2839 size_in_bytes,
2840 NOT_EXECUTABLE);
2841 } 2337 }
2842 2338
2843 2339
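In the rewritten large-object path each allocation gets a dedicated LargePage, which is prepended to a singly linked list while the size, object-size, and page counters are updated. The sketch below mirrors that bookkeeping with malloc standing in for the memory allocator and a plain header struct standing in for LargePage; it is an illustration of the list management, not the V8 allocator.

#include <cstddef>
#include <cstdlib>

struct LargePageSketch {
  LargePageSketch* next;
  std::size_t size;
  // The object payload follows the header in this simplified layout.
};

class LargeObjectSpaceSketch {
 public:
  LargeObjectSpaceSketch() : first_page_(NULL), size_(0), page_count_(0) {}

  // One dedicated "page" per object, pushed onto a singly linked list.
  void* AllocateRaw(std::size_t object_size) {
    std::size_t total = sizeof(LargePageSketch) + object_size;
    LargePageSketch* page =
        static_cast<LargePageSketch*>(std::malloc(total));
    if (page == NULL) return NULL;  // the real code retries after a GC instead

    page->size = total;
    page->next = first_page_;       // prepend: newest page first
    first_page_ = page;
    size_ += total;
    page_count_++;
    return page + 1;                // payload starts right after the header
  }

 private:
  LargePageSketch* first_page_;
  std::size_t size_;
  int page_count_;
};
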
2844 // GC support 2340 // GC support
2845 MaybeObject* LargeObjectSpace::FindObject(Address a) { 2341 MaybeObject* LargeObjectSpace::FindObject(Address a) {
2846 for (LargeObjectChunk* chunk = first_chunk_; 2342 for (LargePage* page = first_page_;
2847 chunk != NULL; 2343 page != NULL;
2848 chunk = chunk->next()) { 2344 page = page->next_page()) {
2849 Address chunk_address = chunk->address(); 2345 Address page_address = page->address();
2850 if (chunk_address <= a && a < chunk_address + chunk->size()) { 2346 if (page_address <= a && a < page_address + page->size()) {
2851 return chunk->GetObject(); 2347 return page->GetObject();
2852 } 2348 }
2853 } 2349 }
2854 return Failure::Exception(); 2350 return Failure::Exception();
2855 } 2351 }
2856 2352
2857 2353
2858 LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) { 2354 LargePage* LargeObjectSpace::FindPageContainingPc(Address pc) {
2859 // TODO(853): Change this implementation to only find executable 2355 // TODO(853): Change this implementation to only find executable
2860 // chunks and use some kind of hash-based approach to speed it up. 2356 // chunks and use some kind of hash-based approach to speed it up.
2861 for (LargeObjectChunk* chunk = first_chunk_; 2357 for (LargePage* chunk = first_page_;
2862 chunk != NULL; 2358 chunk != NULL;
2863 chunk = chunk->next()) { 2359 chunk = chunk->next_page()) {
2864 Address chunk_address = chunk->address(); 2360 Address chunk_address = chunk->address();
2865 if (chunk_address <= pc && pc < chunk_address + chunk->size()) { 2361 if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
2866 return chunk; 2362 return chunk;
2867 } 2363 }
2868 } 2364 }
2869 return NULL; 2365 return NULL;
2870 } 2366 }
2871 2367
2872 2368
2873 void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
2874 LargeObjectIterator it(this);
2875 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
2876 // We only have code, sequential strings, or fixed arrays in large
2877 // object space, and only fixed arrays can possibly contain pointers to
2878 // the young generation.
2879 if (object->IsFixedArray()) {
2880 Page* page = Page::FromAddress(object->address());
2881 uint32_t marks = page->GetRegionMarks();
2882 uint32_t newmarks = Page::kAllRegionsCleanMarks;
2883
2884 if (marks != Page::kAllRegionsCleanMarks) {
2885 // For a large page a single dirty mark corresponds to several
2886 // regions (modulo 32). So we treat a large page as a sequence of
2887 // normal pages of size Page::kPageSize having same dirty marks
2888 // and subsequently iterate dirty regions on each of these pages.
2889 Address start = object->address();
2890 Address end = page->ObjectAreaEnd();
2891 Address object_end = start + object->Size();
2892
2893 // Iterate regions of the first normal page covering object.
2894 uint32_t first_region_number = page->GetRegionNumberForAddress(start);
2895 newmarks |=
2896 heap()->IterateDirtyRegions(marks >> first_region_number,
2897 start,
2898 end,
2899 &Heap::IteratePointersInDirtyRegion,
2900 copy_object) << first_region_number;
2901
2902 start = end;
2903 end = start + Page::kPageSize;
2904 while (end <= object_end) {
2905 // Iterate next 32 regions.
2906 newmarks |=
2907 heap()->IterateDirtyRegions(marks,
2908 start,
2909 end,
2910 &Heap::IteratePointersInDirtyRegion,
2911 copy_object);
2912 start = end;
2913 end = start + Page::kPageSize;
2914 }
2915
2916 if (start != object_end) {
2917 // Iterate the last piece of an object which is less than
2918 // Page::kPageSize.
2919 newmarks |=
2920 heap()->IterateDirtyRegions(marks,
2921 start,
2922 object_end,
2923 &Heap::IteratePointersInDirtyRegion,
2924 copy_object);
2925 }
2926
2927 page->SetRegionMarks(newmarks);
2928 }
2929 }
2930 }
2931 }
2932
2933
2934 void LargeObjectSpace::FreeUnmarkedObjects() { 2369 void LargeObjectSpace::FreeUnmarkedObjects() {
2935 LargeObjectChunk* previous = NULL; 2370 LargePage* previous = NULL;
2936 LargeObjectChunk* current = first_chunk_; 2371 LargePage* current = first_page_;
2937 while (current != NULL) { 2372 while (current != NULL) {
2938 HeapObject* object = current->GetObject(); 2373 HeapObject* object = current->GetObject();
2939 if (object->IsMarked()) { 2374 // Can this large page contain pointers to non-trivial objects? No other
2940 object->ClearMark(); 2375 // pointer object is this big.
2941 heap()->mark_compact_collector()->tracer()->decrement_marked_count(); 2376 bool is_pointer_object = object->IsFixedArray();
2377 MarkBit mark_bit = Marking::MarkBitFrom(object);
2378 if (mark_bit.Get()) {
2379 mark_bit.Clear();
2380 MemoryChunk::IncrementLiveBytes(object->address(), -object->Size());
2942 previous = current; 2381 previous = current;
2943 current = current->next(); 2382 current = current->next_page();
2944 } else { 2383 } else {
2384 LargePage* page = current;
2945 // Cut the chunk out from the chunk list. 2385 // Cut the chunk out from the chunk list.
2946 LargeObjectChunk* current_chunk = current; 2386 current = current->next_page();
2947 current = current->next();
2948 if (previous == NULL) { 2387 if (previous == NULL) {
2949 first_chunk_ = current; 2388 first_page_ = current;
2950 } else { 2389 } else {
2951 previous->set_next(current); 2390 previous->set_next_page(current);
2952 } 2391 }
2953 2392
2954 // Free the chunk. 2393 // Free the chunk.
2955 heap()->mark_compact_collector()->ReportDeleteIfNeeded( 2394 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2956 object, heap()->isolate()); 2395 object, heap()->isolate());
2957 LiveObjectList::ProcessNonLive(object); 2396 size_ -= static_cast<int>(page->size());
2958
2959 size_ -= static_cast<int>(current_chunk->size());
2960 objects_size_ -= object->Size(); 2397 objects_size_ -= object->Size();
2961 page_count_--; 2398 page_count_--;
2962 current_chunk->Free(current_chunk->GetPage()->PageExecutability()); 2399
2400 if (is_pointer_object) {
2401 heap()->QueueMemoryChunkForFree(page);
2402 } else {
2403 heap()->isolate()->memory_allocator()->Free(page);
2404 }
2963 } 2405 }
2964 } 2406 }
2407 heap()->FreeQueuedChunks();
2965 } 2408 }
2966 2409
2967 2410
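FreeUnmarkedObjects above is the classic delete-while-iterating walk over a singly linked list: a previous pointer trails current, marked pages get their mark cleared and survive, and unmarked pages are unlinked (patching either the list head or previous->next) and released. A stand-alone sketch of that unlink pattern, with free() standing in for returning the page to the allocator:

#include <cstddef>
#include <cstdlib>

struct PageNode {
  PageNode* next;
  bool marked;
};

// Remove and free every unmarked node; returns the (possibly new) list head.
static PageNode* FreeUnmarked(PageNode* first) {
  PageNode* previous = NULL;
  PageNode* current = first;
  while (current != NULL) {
    if (current->marked) {
      current->marked = false;  // clear the mark for the next GC cycle
      previous = current;
      current = current->next;
    } else {
      PageNode* dead = current;
      current = current->next;
      if (previous == NULL) {
        first = current;        // unlinking at the head moves the list head
      } else {
        previous->next = current;
      }
      std::free(dead);          // the real code hands the page back to the allocator
    }
  }
  return first;
}
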
2968 bool LargeObjectSpace::Contains(HeapObject* object) { 2411 bool LargeObjectSpace::Contains(HeapObject* object) {
2969 Address address = object->address(); 2412 Address address = object->address();
2970 if (heap()->new_space()->Contains(address)) { 2413 MemoryChunk* chunk = MemoryChunk::FromAddress(address);
2971 return false;
2972 }
2973 Page* page = Page::FromAddress(address);
2974 2414
2975 SLOW_ASSERT(!page->IsLargeObjectPage() 2415 bool owned = (chunk->owner() == this);
2976 || !FindObject(address)->IsFailure());
2977 2416
2978 return page->IsLargeObjectPage(); 2417 SLOW_ASSERT(!owned || !FindObject(address)->IsFailure());
2418
2419 return owned;
2979 } 2420 }
2980 2421
2981 2422
2982 #ifdef DEBUG 2423 #ifdef DEBUG
2983 // We do not assume that the large object iterator works, because it depends 2424 // We do not assume that the large object iterator works, because it depends
2984 // on the invariants we are checking during verification. 2425 // on the invariants we are checking during verification.
2985 void LargeObjectSpace::Verify() { 2426 void LargeObjectSpace::Verify() {
2986 for (LargeObjectChunk* chunk = first_chunk_; 2427 for (LargePage* chunk = first_page_;
2987 chunk != NULL; 2428 chunk != NULL;
2988 chunk = chunk->next()) { 2429 chunk = chunk->next_page()) {
2989 // Each chunk contains an object that starts at the large object page's 2430 // Each chunk contains an object that starts at the large object page's
2990 // object area start. 2431 // object area start.
2991 HeapObject* object = chunk->GetObject(); 2432 HeapObject* object = chunk->GetObject();
2992 Page* page = Page::FromAddress(object->address()); 2433 Page* page = Page::FromAddress(object->address());
2993 ASSERT(object->address() == page->ObjectAreaStart()); 2434 ASSERT(object->address() == page->ObjectAreaStart());
2994 2435
2995 // The first word should be a map, and we expect all map pointers to be 2436 // The first word should be a map, and we expect all map pointers to be
2996 // in map space. 2437 // in map space.
2997 Map* map = object->map(); 2438 Map* map = object->map();
2998 ASSERT(map->IsMap()); 2439 ASSERT(map->IsMap());
2999 ASSERT(heap()->map_space()->Contains(map)); 2440 ASSERT(heap()->map_space()->Contains(map));
3000 2441
3001 // We have only code, sequential strings, external strings 2442 // We have only code, sequential strings, external strings
3002 // (sequential strings that have been morphed into external 2443 // (sequential strings that have been morphed into external
3003 // strings), fixed arrays, and byte arrays in large object space. 2444 // strings), fixed arrays, and byte arrays in large object space.
3004 ASSERT(object->IsCode() || object->IsSeqString() || 2445 ASSERT(object->IsCode() || object->IsSeqString() ||
3005 object->IsExternalString() || object->IsFixedArray() || 2446 object->IsExternalString() || object->IsFixedArray() ||
3006 object->IsFixedDoubleArray() || object->IsByteArray()); 2447 object->IsFixedDoubleArray() || object->IsByteArray());
3007 2448
3008 // The object itself should look OK. 2449 // The object itself should look OK.
3009 object->Verify(); 2450 object->Verify();
3010 2451
3011 // Byte arrays and strings don't have interior pointers. 2452 // Byte arrays and strings don't have interior pointers.
3012 if (object->IsCode()) { 2453 if (object->IsCode()) {
3013 VerifyPointersVisitor code_visitor; 2454 VerifyPointersVisitor code_visitor;
3014 object->IterateBody(map->instance_type(), 2455 object->IterateBody(map->instance_type(),
3015 object->Size(), 2456 object->Size(),
3016 &code_visitor); 2457 &code_visitor);
3017 } else if (object->IsFixedArray()) { 2458 } else if (object->IsFixedArray()) {
3018 // We loop over fixed arrays ourselves, rather than using the visitor,
3019 // because the visitor doesn't support the start/offset iteration
3020 // needed for IsRegionDirty.
3021 FixedArray* array = FixedArray::cast(object); 2459 FixedArray* array = FixedArray::cast(object);
3022 for (int j = 0; j < array->length(); j++) { 2460 for (int j = 0; j < array->length(); j++) {
3023 Object* element = array->get(j); 2461 Object* element = array->get(j);
3024 if (element->IsHeapObject()) { 2462 if (element->IsHeapObject()) {
3025 HeapObject* element_object = HeapObject::cast(element); 2463 HeapObject* element_object = HeapObject::cast(element);
3026 ASSERT(heap()->Contains(element_object)); 2464 ASSERT(heap()->Contains(element_object));
3027 ASSERT(element_object->map()->IsMap()); 2465 ASSERT(element_object->map()->IsMap());
3028 if (heap()->InNewSpace(element_object)) {
3029 Address array_addr = object->address();
3030 Address element_addr = array_addr + FixedArray::kHeaderSize +
3031 j * kPointerSize;
3032
3033 ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
3034 }
3035 } 2466 }
3036 } 2467 }
3037 } 2468 }
3038 } 2469 }
3039 } 2470 }
3040 2471
3041 2472
3042 void LargeObjectSpace::Print() { 2473 void LargeObjectSpace::Print() {
3043 LargeObjectIterator it(this); 2474 LargeObjectIterator it(this);
3044 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { 2475 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3045 obj->Print(); 2476 obj->Print();
3046 } 2477 }
3047 } 2478 }
3048 2479
3049 2480
3050 void LargeObjectSpace::ReportStatistics() { 2481 void LargeObjectSpace::ReportStatistics() {
3051 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_); 2482 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_);
3052 int num_objects = 0; 2483 int num_objects = 0;
3053 ClearHistograms(); 2484 ClearHistograms();
3054 LargeObjectIterator it(this); 2485 LargeObjectIterator it(this);
3055 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { 2486 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
3056 num_objects++; 2487 num_objects++;
3057 CollectHistogramInfo(obj); 2488 CollectHistogramInfo(obj);
3058 } 2489 }
3059 2490
3060 PrintF(" number of objects %d, " 2491 PrintF(" number of objects %d, "
3061 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_); 2492 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
3062 if (num_objects > 0) ReportHistogram(false); 2493 if (num_objects > 0) ReportHistogram(false);
3063 } 2494 }
3064 2495
3065 2496
3066 void LargeObjectSpace::CollectCodeStatistics() { 2497 void LargeObjectSpace::CollectCodeStatistics() {
3067 Isolate* isolate = heap()->isolate(); 2498 Isolate* isolate = heap()->isolate();
3068 LargeObjectIterator obj_it(this); 2499 LargeObjectIterator obj_it(this);
3069 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { 2500 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
3070 if (obj->IsCode()) { 2501 if (obj->IsCode()) {
3071 Code* code = Code::cast(obj); 2502 Code* code = Code::cast(obj);
3072 isolate->code_kind_statistics()[code->kind()] += code->Size(); 2503 isolate->code_kind_statistics()[code->kind()] += code->Size();
3073 } 2504 }
3074 } 2505 }
3075 } 2506 }
3076 #endif // DEBUG 2507 #endif // DEBUG
3077 2508
3078 } } // namespace v8::internal 2509 } } // namespace v8::internal