1 // Copyright 2011 the V8 project authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "src/v8.h" | |
6 | |
7 #include "src/base/platform/platform.h" | |
8 #include "src/full-codegen.h" | |
9 #include "src/macro-assembler.h" | |
10 #include "src/mark-compact.h" | |
11 #include "src/msan.h" | |
12 | |
13 namespace v8 { | |
14 namespace internal { | |
15 | |
16 | |
17 // ---------------------------------------------------------------------------- | |
18 // HeapObjectIterator | |
19 | |
20 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) { | |
21 // You can't actually iterate over the anchor page. It is not a real page, | |
22 // just an anchor for the doubly linked page list. Initialize as if we have | |
23 // reached the end of the anchor page, then the first iteration will move on | |
24 // to the first page. | |
25 Initialize(space, | |
26 NULL, | |
27 NULL, | |
28 kAllPagesInSpace, | |
29 NULL); | |
30 } | |
31 | |
32 | |
33 HeapObjectIterator::HeapObjectIterator(PagedSpace* space, | |
34 HeapObjectCallback size_func) { | |
35 // You can't actually iterate over the anchor page. It is not a real page, | |
36 // just an anchor for the doubly linked page list. Initialize the current | |
37 // address and end as NULL, then the first iteration will move on | |
38 // to the first page. | |
39 Initialize(space, | |
40 NULL, | |
41 NULL, | |
42 kAllPagesInSpace, | |
43 size_func); | |
44 } | |
45 | |
46 | |
47 HeapObjectIterator::HeapObjectIterator(Page* page, | |
48 HeapObjectCallback size_func) { | |
49 Space* owner = page->owner(); | |
50 DCHECK(owner == page->heap()->old_pointer_space() || | |
51 owner == page->heap()->old_data_space() || | |
52 owner == page->heap()->map_space() || | |
53 owner == page->heap()->cell_space() || | |
54 owner == page->heap()->property_cell_space() || | |
55 owner == page->heap()->code_space()); | |
56 Initialize(reinterpret_cast<PagedSpace*>(owner), | |
57 page->area_start(), | |
58 page->area_end(), | |
59 kOnePageOnly, | |
60 size_func); | |
61 DCHECK(page->WasSweptPrecisely() || page->SweepingCompleted()); | |
62 } | |
63 | |
64 | |
65 void HeapObjectIterator::Initialize(PagedSpace* space, | |
66 Address cur, Address end, | |
67 HeapObjectIterator::PageMode mode, | |
68 HeapObjectCallback size_f) { | |
69 // Check that we actually can iterate this space. | |
70 DCHECK(space->swept_precisely()); | |
71 | |
72 space_ = space; | |
73 cur_addr_ = cur; | |
74 cur_end_ = end; | |
75 page_mode_ = mode; | |
76 size_func_ = size_f; | |
77 } | |
78 | |
79 | |
80 // We have hit the end of the page and should advance to the next block of | |
81 // objects, i.e. move on to the next page if there is one. | |
82 bool HeapObjectIterator::AdvanceToNextPage() { | |
83 DCHECK(cur_addr_ == cur_end_); | |
84 if (page_mode_ == kOnePageOnly) return false; | |
85 Page* cur_page; | |
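// A NULL current address means the iteration has not started yet: begin at the
// space's anchor page so that next_page() below yields the first real page.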
86 if (cur_addr_ == NULL) { | |
87 cur_page = space_->anchor(); | |
88 } else { | |
89 cur_page = Page::FromAddress(cur_addr_ - 1); | |
90 DCHECK(cur_addr_ == cur_page->area_end()); | |
91 } | |
92 cur_page = cur_page->next_page(); | |
93 if (cur_page == space_->anchor()) return false; | |
94 cur_addr_ = cur_page->area_start(); | |
95 cur_end_ = cur_page->area_end(); | |
96 DCHECK(cur_page->WasSweptPrecisely()); | |
97 return true; | |
98 } | |
99 | |
100 | |
101 // ----------------------------------------------------------------------------- | |
102 // CodeRange | |
103 | |
104 | |
105 CodeRange::CodeRange(Isolate* isolate) | |
106 : isolate_(isolate), | |
107 code_range_(NULL), | |
108 free_list_(0), | |
109 allocation_list_(0), | |
110 current_allocation_block_index_(0) { | |
111 } | |
112 | |
113 | |
114 bool CodeRange::SetUp(size_t requested) { | |
115 DCHECK(code_range_ == NULL); | |
116 | |
117 if (requested == 0) { | |
118 // When a target requires the code range feature, we put all code objects | |
119 // in a kMaximalCodeRangeSize range of virtual address space, so that | |
120 // they can call each other with near calls. | |
121 if (kRequiresCodeRange) { | |
122 requested = kMaximalCodeRangeSize; | |
123 } else { | |
124 return true; | |
125 } | |
126 } | |
127 | |
128 DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize); | |
129 code_range_ = new base::VirtualMemory(requested); | |
130 CHECK(code_range_ != NULL); | |
131 if (!code_range_->IsReserved()) { | |
132 delete code_range_; | |
133 code_range_ = NULL; | |
134 return false; | |
135 } | |
136 | |
137 // We are sure that we have mapped a block of the requested size. | |
138 DCHECK(code_range_->size() == requested); | |
139 LOG(isolate_, | |
140 NewEvent("CodeRange", code_range_->address(), requested)); | |
141 Address base = reinterpret_cast<Address>(code_range_->address()); | |
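// Round the usable base up to MemoryChunk::kAlignment; the bytes skipped by
// the rounding are excluded from the initial free block added below.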
142 Address aligned_base = | |
143 RoundUp(reinterpret_cast<Address>(code_range_->address()), | |
144 MemoryChunk::kAlignment); | |
145 size_t size = code_range_->size() - (aligned_base - base); | |
146 allocation_list_.Add(FreeBlock(aligned_base, size)); | |
147 current_allocation_block_index_ = 0; | |
148 return true; | |
149 } | |
150 | |
151 | |
152 int CodeRange::CompareFreeBlockAddress(const FreeBlock* left, | |
153 const FreeBlock* right) { | |
154 // The entire point of CodeRange is that the difference between two | |
155 // addresses in the range can be represented as a signed 32-bit int, | |
156 // so the cast is semantically correct. | |
157 return static_cast<int>(left->start - right->start); | |
158 } | |
159 | |
160 | |
161 bool CodeRange::GetNextAllocationBlock(size_t requested) { | |
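// First scan the remaining blocks on allocation_list_; if none is large
// enough, merge the free list back in and rescan from the beginning.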
162 for (current_allocation_block_index_++; | |
163 current_allocation_block_index_ < allocation_list_.length(); | |
164 current_allocation_block_index_++) { | |
165 if (requested <= allocation_list_[current_allocation_block_index_].size) { | |
166 return true; // Found a large enough allocation block. | |
167 } | |
168 } | |
169 | |
170 // Sort and merge the free blocks on the free list and the allocation list. | |
171 free_list_.AddAll(allocation_list_); | |
172 allocation_list_.Clear(); | |
173 free_list_.Sort(&CompareFreeBlockAddress); | |
174 for (int i = 0; i < free_list_.length();) { | |
175 FreeBlock merged = free_list_[i]; | |
176 i++; | |
177 // Add adjacent free blocks to the current merged block. | |
178 while (i < free_list_.length() && | |
179 free_list_[i].start == merged.start + merged.size) { | |
180 merged.size += free_list_[i].size; | |
181 i++; | |
182 } | |
183 if (merged.size > 0) { | |
184 allocation_list_.Add(merged); | |
185 } | |
186 } | |
187 free_list_.Clear(); | |
188 | |
189 for (current_allocation_block_index_ = 0; | |
190 current_allocation_block_index_ < allocation_list_.length(); | |
191 current_allocation_block_index_++) { | |
192 if (requested <= allocation_list_[current_allocation_block_index_].size) { | |
193 return true; // Found a large enough allocation block. | |
194 } | |
195 } | |
196 current_allocation_block_index_ = 0; | |
197 // Code range is full or too fragmented. | |
198 return false; | |
199 } | |
200 | |
201 | |
202 Address CodeRange::AllocateRawMemory(const size_t requested_size, | |
203 const size_t commit_size, | |
204 size_t* allocated) { | |
205 DCHECK(commit_size <= requested_size); | |
206 DCHECK(current_allocation_block_index_ < allocation_list_.length()); | |
207 if (requested_size > allocation_list_[current_allocation_block_index_].size) { | |
208 // Find an allocation block large enough. | |
209 if (!GetNextAllocationBlock(requested_size)) return NULL; | |
210 } | |
211 // Commit the requested memory at the start of the current allocation block. | |
212 size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment); | |
213 FreeBlock current = allocation_list_[current_allocation_block_index_]; | |
214 if (aligned_requested >= (current.size - Page::kPageSize)) { | |
215 // Don't leave a small free block behind; it would be useless for a later large object or chunk. | |
216 *allocated = current.size; | |
217 } else { | |
218 *allocated = aligned_requested; | |
219 } | |
220 DCHECK(*allocated <= current.size); | |
221 DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment)); | |
222 if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_, | |
223 current.start, | |
224 commit_size, | |
225 *allocated)) { | |
226 *allocated = 0; | |
227 return NULL; | |
228 } | |
229 allocation_list_[current_allocation_block_index_].start += *allocated; | |
230 allocation_list_[current_allocation_block_index_].size -= *allocated; | |
231 if (*allocated == current.size) { | |
232 // This block is used up, get the next one. | |
233 if (!GetNextAllocationBlock(0)) return NULL; | |
234 } | |
235 return current.start; | |
236 } | |
237 | |
238 | |
239 bool CodeRange::CommitRawMemory(Address start, size_t length) { | |
240 return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE); | |
241 } | |
242 | |
243 | |
244 bool CodeRange::UncommitRawMemory(Address start, size_t length) { | |
245 return code_range_->Uncommit(start, length); | |
246 } | |
247 | |
248 | |
249 void CodeRange::FreeRawMemory(Address address, size_t length) { | |
250 DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment)); | |
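// Freed blocks are parked on free_list_ and merged back into
// allocation_list_ lazily by GetNextAllocationBlock().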
251 free_list_.Add(FreeBlock(address, length)); | |
252 code_range_->Uncommit(address, length); | |
253 } | |
254 | |
255 | |
256 void CodeRange::TearDown() { | |
257 delete code_range_; // Frees all memory in the virtual memory range. | |
258 code_range_ = NULL; | |
259 free_list_.Free(); | |
260 allocation_list_.Free(); | |
261 } | |
262 | |
263 | |
264 // ----------------------------------------------------------------------------- | |
265 // MemoryAllocator | |
266 // | |
267 | |
268 MemoryAllocator::MemoryAllocator(Isolate* isolate) | |
269 : isolate_(isolate), | |
270 capacity_(0), | |
271 capacity_executable_(0), | |
272 size_(0), | |
273 size_executable_(0), | |
274 lowest_ever_allocated_(reinterpret_cast<void*>(-1)), | |
275 highest_ever_allocated_(reinterpret_cast<void*>(0)) { | |
276 } | |
277 | |
278 | |
279 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) { | |
280 capacity_ = RoundUp(capacity, Page::kPageSize); | |
281 capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize); | |
282 DCHECK_GE(capacity_, capacity_executable_); | |
283 | |
284 size_ = 0; | |
285 size_executable_ = 0; | |
286 | |
287 return true; | |
288 } | |
289 | |
290 | |
291 void MemoryAllocator::TearDown() { | |
292 // Check that spaces were torn down before MemoryAllocator. | |
293 DCHECK(size_ == 0); | |
294 // TODO(gc) this will be true again when we fix FreeMemory. | |
295 // DCHECK(size_executable_ == 0); | |
296 capacity_ = 0; | |
297 capacity_executable_ = 0; | |
298 } | |
299 | |
300 | |
301 bool MemoryAllocator::CommitMemory(Address base, | |
302 size_t size, | |
303 Executability executable) { | |
304 if (!base::VirtualMemory::CommitRegion(base, size, | |
305 executable == EXECUTABLE)) { | |
306 return false; | |
307 } | |
308 UpdateAllocatedSpaceLimits(base, base + size); | |
309 return true; | |
310 } | |
311 | |
312 | |
313 void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation, | |
314 Executability executable) { | |
315 // TODO(gc) make code_range part of memory allocator? | |
316 DCHECK(reservation->IsReserved()); | |
317 size_t size = reservation->size(); | |
318 DCHECK(size_ >= size); | |
319 size_ -= size; | |
320 | |
321 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | |
322 | |
323 if (executable == EXECUTABLE) { | |
324 DCHECK(size_executable_ >= size); | |
325 size_executable_ -= size; | |
326 } | |
327 // Code which is part of the code-range does not have its own VirtualMemory. | |
328 DCHECK(isolate_->code_range() == NULL || | |
329 !isolate_->code_range()->contains( | |
330 static_cast<Address>(reservation->address()))); | |
331 DCHECK(executable == NOT_EXECUTABLE || | |
332 isolate_->code_range() == NULL || | |
333 !isolate_->code_range()->valid()); | |
334 reservation->Release(); | |
335 } | |
336 | |
337 | |
338 void MemoryAllocator::FreeMemory(Address base, | |
339 size_t size, | |
340 Executability executable) { | |
341 // TODO(gc) make code_range part of memory allocator? | |
342 DCHECK(size_ >= size); | |
343 size_ -= size; | |
344 | |
345 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | |
346 | |
347 if (executable == EXECUTABLE) { | |
348 DCHECK(size_executable_ >= size); | |
349 size_executable_ -= size; | |
350 } | |
351 if (isolate_->code_range() != NULL && | |
352 isolate_->code_range()->contains(static_cast<Address>(base))) { | |
353 DCHECK(executable == EXECUTABLE); | |
354 isolate_->code_range()->FreeRawMemory(base, size); | |
355 } else { | |
356 DCHECK(executable == NOT_EXECUTABLE || | |
357 isolate_->code_range() == NULL || | |
358 !isolate_->code_range()->valid()); | |
359 bool result = base::VirtualMemory::ReleaseRegion(base, size); | |
360 USE(result); | |
361 DCHECK(result); | |
362 } | |
363 } | |
364 | |
365 | |
366 Address MemoryAllocator::ReserveAlignedMemory(size_t size, | |
367 size_t alignment, | |
368 base::VirtualMemory* controller) { | |
369 base::VirtualMemory reservation(size, alignment); | |
370 | |
371 if (!reservation.IsReserved()) return NULL; | |
372 size_ += reservation.size(); | |
373 Address base = RoundUp(static_cast<Address>(reservation.address()), | |
374 alignment); | |
375 controller->TakeControl(&reservation); | |
376 return base; | |
377 } | |
378 | |
379 | |
380 Address MemoryAllocator::AllocateAlignedMemory( | |
381 size_t reserve_size, size_t commit_size, size_t alignment, | |
382 Executability executable, base::VirtualMemory* controller) { | |
383 DCHECK(commit_size <= reserve_size); | |
384 base::VirtualMemory reservation; | |
385 Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation); | |
386 if (base == NULL) return NULL; | |
387 | |
388 if (executable == EXECUTABLE) { | |
389 if (!CommitExecutableMemory(&reservation, | |
390 base, | |
391 commit_size, | |
392 reserve_size)) { | |
393 base = NULL; | |
394 } | |
395 } else { | |
396 if (reservation.Commit(base, commit_size, false)) { | |
397 UpdateAllocatedSpaceLimits(base, base + commit_size); | |
398 } else { | |
399 base = NULL; | |
400 } | |
401 } | |
402 | |
403 if (base == NULL) { | |
404 // Failed to commit the body. Release the mapping and any partially | |
405 // committed regions inside it. | |
406 reservation.Release(); | |
407 return NULL; | |
408 } | |
409 | |
410 controller->TakeControl(&reservation); | |
411 return base; | |
412 } | |
413 | |
414 | |
415 void Page::InitializeAsAnchor(PagedSpace* owner) { | |
416 set_owner(owner); | |
417 set_prev_page(this); | |
418 set_next_page(this); | |
419 } | |
420 | |
421 | |
422 NewSpacePage* NewSpacePage::Initialize(Heap* heap, | |
423 Address start, | |
424 SemiSpace* semi_space) { | |
425 Address area_start = start + NewSpacePage::kObjectStartOffset; | |
426 Address area_end = start + Page::kPageSize; | |
427 | |
428 MemoryChunk* chunk = MemoryChunk::Initialize(heap, | |
429 start, | |
430 Page::kPageSize, | |
431 area_start, | |
432 area_end, | |
433 NOT_EXECUTABLE, | |
434 semi_space); | |
435 chunk->set_next_chunk(NULL); | |
436 chunk->set_prev_chunk(NULL); | |
437 chunk->initialize_scan_on_scavenge(true); | |
438 bool in_to_space = (semi_space->id() != kFromSpace); | |
439 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE | |
440 : MemoryChunk::IN_FROM_SPACE); | |
441 DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE | |
442 : MemoryChunk::IN_TO_SPACE)); | |
443 NewSpacePage* page = static_cast<NewSpacePage*>(chunk); | |
444 heap->incremental_marking()->SetNewSpacePageFlags(page); | |
445 return page; | |
446 } | |
447 | |
448 | |
449 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { | |
450 set_owner(semi_space); | |
451 set_next_chunk(this); | |
452 set_prev_chunk(this); | |
453 // Clearing all flags marks this anchor page as not being in new-space; | |
454 // all real new-space pages have their new-space flags set. | |
455 SetFlags(0, ~0); | |
456 } | |
457 | |
458 | |
459 MemoryChunk* MemoryChunk::Initialize(Heap* heap, | |
460 Address base, | |
461 size_t size, | |
462 Address area_start, | |
463 Address area_end, | |
464 Executability executable, | |
465 Space* owner) { | |
466 MemoryChunk* chunk = FromAddress(base); | |
467 | |
468 DCHECK(base == chunk->address()); | |
469 | |
470 chunk->heap_ = heap; | |
471 chunk->size_ = size; | |
472 chunk->area_start_ = area_start; | |
473 chunk->area_end_ = area_end; | |
474 chunk->flags_ = 0; | |
475 chunk->set_owner(owner); | |
476 chunk->InitializeReservedMemory(); | |
477 chunk->slots_buffer_ = NULL; | |
478 chunk->skip_list_ = NULL; | |
479 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; | |
480 chunk->progress_bar_ = 0; | |
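// The high-water mark is stored as an offset from the chunk base; it starts
// at the object area and is raised as the allocation top advances.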
481 chunk->high_water_mark_ = static_cast<int>(area_start - base); | |
482 chunk->set_parallel_sweeping(SWEEPING_DONE); | |
483 chunk->available_in_small_free_list_ = 0; | |
484 chunk->available_in_medium_free_list_ = 0; | |
485 chunk->available_in_large_free_list_ = 0; | |
486 chunk->available_in_huge_free_list_ = 0; | |
487 chunk->non_available_small_blocks_ = 0; | |
488 chunk->ResetLiveBytes(); | |
489 Bitmap::Clear(chunk); | |
490 chunk->initialize_scan_on_scavenge(false); | |
491 chunk->SetFlag(WAS_SWEPT_PRECISELY); | |
492 | |
493 DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); | |
494 DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); | |
495 | |
496 if (executable == EXECUTABLE) { | |
497 chunk->SetFlag(IS_EXECUTABLE); | |
498 } | |
499 | |
500 if (owner == heap->old_data_space()) { | |
501 chunk->SetFlag(CONTAINS_ONLY_DATA); | |
502 } | |
503 | |
504 return chunk; | |
505 } | |
506 | |
507 | |
508 // Commit MemoryChunk area to the requested size. | |
509 bool MemoryChunk::CommitArea(size_t requested) { | |
510 size_t guard_size = IsFlagSet(IS_EXECUTABLE) ? | |
511 MemoryAllocator::CodePageGuardSize() : 0; | |
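// The header spans from the chunk base up to area_start(); on executable
// chunks the leading guard page is not counted as part of the header.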
512 size_t header_size = area_start() - address() - guard_size; | |
513 size_t commit_size = | |
514 RoundUp(header_size + requested, base::OS::CommitPageSize()); | |
515 size_t committed_size = RoundUp(header_size + (area_end() - area_start()), | |
516 base::OS::CommitPageSize()); | |
517 | |
518 if (commit_size > committed_size) { | |
519 // The commit size should be less than or equal to the reserved size. | |
520 DCHECK(commit_size <= size() - 2 * guard_size); | |
521 // Append the committed area. | |
522 Address start = address() + committed_size + guard_size; | |
523 size_t length = commit_size - committed_size; | |
524 if (reservation_.IsReserved()) { | |
525 Executability executable = IsFlagSet(IS_EXECUTABLE) | |
526 ? EXECUTABLE : NOT_EXECUTABLE; | |
527 if (!heap()->isolate()->memory_allocator()->CommitMemory( | |
528 start, length, executable)) { | |
529 return false; | |
530 } | |
531 } else { | |
532 CodeRange* code_range = heap_->isolate()->code_range(); | |
533 DCHECK(code_range != NULL && code_range->valid() && | |
534 IsFlagSet(IS_EXECUTABLE)); | |
535 if (!code_range->CommitRawMemory(start, length)) return false; | |
536 } | |
537 | |
538 if (Heap::ShouldZapGarbage()) { | |
539 heap_->isolate()->memory_allocator()->ZapBlock(start, length); | |
540 } | |
541 } else if (commit_size < committed_size) { | |
542 DCHECK(commit_size > 0); | |
543 // Shrink the committed area. | |
544 size_t length = committed_size - commit_size; | |
545 Address start = address() + committed_size + guard_size - length; | |
546 if (reservation_.IsReserved()) { | |
547 if (!reservation_.Uncommit(start, length)) return false; | |
548 } else { | |
549 CodeRange* code_range = heap_->isolate()->code_range(); | |
550 DCHECK(code_range != NULL && code_range->valid() && | |
551 IsFlagSet(IS_EXECUTABLE)); | |
552 if (!code_range->UncommitRawMemory(start, length)) return false; | |
553 } | |
554 } | |
555 | |
556 area_end_ = area_start_ + requested; | |
557 return true; | |
558 } | |
559 | |
560 | |
561 void MemoryChunk::InsertAfter(MemoryChunk* other) { | |
562 MemoryChunk* other_next = other->next_chunk(); | |
563 | |
564 set_next_chunk(other_next); | |
565 set_prev_chunk(other); | |
566 other_next->set_prev_chunk(this); | |
567 other->set_next_chunk(this); | |
568 } | |
569 | |
570 | |
571 void MemoryChunk::Unlink() { | |
572 MemoryChunk* next_element = next_chunk(); | |
573 MemoryChunk* prev_element = prev_chunk(); | |
574 next_element->set_prev_chunk(prev_element); | |
575 prev_element->set_next_chunk(next_element); | |
576 set_prev_chunk(NULL); | |
577 set_next_chunk(NULL); | |
578 } | |
579 | |
580 | |
581 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size, | |
582 intptr_t commit_area_size, | |
583 Executability executable, | |
584 Space* owner) { | |
585 DCHECK(commit_area_size <= reserve_area_size); | |
586 | |
587 size_t chunk_size; | |
588 Heap* heap = isolate_->heap(); | |
589 Address base = NULL; | |
590 base::VirtualMemory reservation; | |
591 Address area_start = NULL; | |
592 Address area_end = NULL; | |
593 | |
594 // | |
595 // MemoryChunk layout: | |
596 // | |
597 // Executable | |
598 // +----------------------------+<- base aligned with MemoryChunk::kAlignment | |
599 // | Header | | |
600 // +----------------------------+<- base + CodePageGuardStartOffset | |
601 // | Guard | | |
602 // +----------------------------+<- area_start_ | |
603 // | Area | | |
604 // +----------------------------+<- area_end_ (area_start + commit_area_size) | |
605 // | Committed but not used | | |
606 // +----------------------------+<- aligned at OS page boundary | |
607 // | Reserved but not committed | | |
608 // +----------------------------+<- aligned at OS page boundary | |
609 // | Guard | | |
610 // +----------------------------+<- base + chunk_size | |
611 // | |
612 // Non-executable | |
613 // +----------------------------+<- base aligned with MemoryChunk::kAlignment | |
614 // | Header | | |
615 // +----------------------------+<- area_start_ (base + kObjectStartOffset) | |
616 // | Area | | |
617 // +----------------------------+<- area_end_ (area_start + commit_area_size) | |
618 // | Committed but not used | | |
619 // +----------------------------+<- aligned at OS page boundary | |
620 // | Reserved but not committed | | |
621 // +----------------------------+<- base + chunk_size | |
622 // | |
623 | |
624 if (executable == EXECUTABLE) { | |
625 chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size, | |
626 base::OS::CommitPageSize()) + CodePageGuardSize(); | |
627 | |
628 // Check executable memory limit. | |
629 if (size_executable_ + chunk_size > capacity_executable_) { | |
630 LOG(isolate_, | |
631 StringEvent("MemoryAllocator::AllocateRawMemory", | |
632 "V8 Executable Allocation capacity exceeded")); | |
633 return NULL; | |
634 } | |
635 | |
636 // Size of header (not executable) plus area (executable). | |
637 size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size, | |
638 base::OS::CommitPageSize()); | |
639 // Allocate executable memory either from code range or from the | |
640 // OS. | |
641 if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) { | |
642 base = isolate_->code_range()->AllocateRawMemory(chunk_size, | |
643 commit_size, | |
644 &chunk_size); | |
645 DCHECK(IsAligned(reinterpret_cast<intptr_t>(base), | |
646 MemoryChunk::kAlignment)); | |
647 if (base == NULL) return NULL; | |
648 size_ += chunk_size; | |
649 // Update executable memory size. | |
650 size_executable_ += chunk_size; | |
651 } else { | |
652 base = AllocateAlignedMemory(chunk_size, | |
653 commit_size, | |
654 MemoryChunk::kAlignment, | |
655 executable, | |
656 &reservation); | |
657 if (base == NULL) return NULL; | |
658 // Update executable memory size. | |
659 size_executable_ += reservation.size(); | |
660 } | |
661 | |
662 if (Heap::ShouldZapGarbage()) { | |
663 ZapBlock(base, CodePageGuardStartOffset()); | |
664 ZapBlock(base + CodePageAreaStartOffset(), commit_area_size); | |
665 } | |
666 | |
667 area_start = base + CodePageAreaStartOffset(); | |
668 area_end = area_start + commit_area_size; | |
669 } else { | |
670 chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size, | |
671 base::OS::CommitPageSize()); | |
672 size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset + | |
673 commit_area_size, base::OS::CommitPageSize()); | |
674 base = AllocateAlignedMemory(chunk_size, | |
675 commit_size, | |
676 MemoryChunk::kAlignment, | |
677 executable, | |
678 &reservation); | |
679 | |
680 if (base == NULL) return NULL; | |
681 | |
682 if (Heap::ShouldZapGarbage()) { | |
683 ZapBlock(base, Page::kObjectStartOffset + commit_area_size); | |
684 } | |
685 | |
686 area_start = base + Page::kObjectStartOffset; | |
687 area_end = area_start + commit_area_size; | |
688 } | |
689 | |
690 // Use chunk_size for statistics and callbacks because we assume that they | |
691 // treat reserved but not-yet committed memory regions of chunks as allocated. | |
692 isolate_->counters()->memory_allocated()-> | |
693 Increment(static_cast<int>(chunk_size)); | |
694 | |
695 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); | |
696 if (owner != NULL) { | |
697 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); | |
698 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); | |
699 } | |
700 | |
701 MemoryChunk* result = MemoryChunk::Initialize(heap, | |
702 base, | |
703 chunk_size, | |
704 area_start, | |
705 area_end, | |
706 executable, | |
707 owner); | |
708 result->set_reserved_memory(&reservation); | |
709 MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size); | |
710 return result; | |
711 } | |
712 | |
713 | |
714 void Page::ResetFreeListStatistics() { | |
715 non_available_small_blocks_ = 0; | |
716 available_in_small_free_list_ = 0; | |
717 available_in_medium_free_list_ = 0; | |
718 available_in_large_free_list_ = 0; | |
719 available_in_huge_free_list_ = 0; | |
720 } | |
721 | |
722 | |
723 Page* MemoryAllocator::AllocatePage(intptr_t size, | |
724 PagedSpace* owner, | |
725 Executability executable) { | |
726 MemoryChunk* chunk = AllocateChunk(size, size, executable, owner); | |
727 | |
728 if (chunk == NULL) return NULL; | |
729 | |
730 return Page::Initialize(isolate_->heap(), chunk, executable, owner); | |
731 } | |
732 | |
733 | |
734 LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size, | |
735 Space* owner, | |
736 Executability executable) { | |
737 MemoryChunk* chunk = AllocateChunk(object_size, | |
738 object_size, | |
739 executable, | |
740 owner); | |
741 if (chunk == NULL) return NULL; | |
742 return LargePage::Initialize(isolate_->heap(), chunk); | |
743 } | |
744 | |
745 | |
746 void MemoryAllocator::Free(MemoryChunk* chunk) { | |
747 LOG(isolate_, DeleteEvent("MemoryChunk", chunk)); | |
748 if (chunk->owner() != NULL) { | |
749 ObjectSpace space = | |
750 static_cast<ObjectSpace>(1 << chunk->owner()->identity()); | |
751 PerformAllocationCallback(space, kAllocationActionFree, chunk->size()); | |
752 } | |
753 | |
754 isolate_->heap()->RememberUnmappedPage( | |
755 reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate()); | |
756 | |
757 delete chunk->slots_buffer(); | |
758 delete chunk->skip_list(); | |
759 | |
760 base::VirtualMemory* reservation = chunk->reserved_memory(); | |
761 if (reservation->IsReserved()) { | |
762 FreeMemory(reservation, chunk->executable()); | |
763 } else { | |
764 FreeMemory(chunk->address(), | |
765 chunk->size(), | |
766 chunk->executable()); | |
767 } | |
768 } | |
769 | |
770 | |
771 bool MemoryAllocator::CommitBlock(Address start, | |
772 size_t size, | |
773 Executability executable) { | |
774 if (!CommitMemory(start, size, executable)) return false; | |
775 | |
776 if (Heap::ShouldZapGarbage()) { | |
777 ZapBlock(start, size); | |
778 } | |
779 | |
780 isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size)); | |
781 return true; | |
782 } | |
783 | |
784 | |
785 bool MemoryAllocator::UncommitBlock(Address start, size_t size) { | |
786 if (!base::VirtualMemory::UncommitRegion(start, size)) return false; | |
787 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size)); | |
788 return true; | |
789 } | |
790 | |
791 | |
792 void MemoryAllocator::ZapBlock(Address start, size_t size) { | |
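// Fill the block with the kZapValue pattern one pointer-sized word at a time
// so that stale pointers into freed or uninitialized memory are easy to spot.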
793 for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) { | |
794 Memory::Address_at(start + s) = kZapValue; | |
795 } | |
796 } | |
797 | |
798 | |
799 void MemoryAllocator::PerformAllocationCallback(ObjectSpace space, | |
800 AllocationAction action, | |
801 size_t size) { | |
802 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { | |
803 MemoryAllocationCallbackRegistration registration = | |
804 memory_allocation_callbacks_[i]; | |
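// space and action are bit flags, so mask-and-compare selects exactly the
// callbacks registered for this space/action combination.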
805 if ((registration.space & space) == space && | |
806 (registration.action & action) == action) | |
807 registration.callback(space, action, static_cast<int>(size)); | |
808 } | |
809 } | |
810 | |
811 | |
812 bool MemoryAllocator::MemoryAllocationCallbackRegistered( | |
813 MemoryAllocationCallback callback) { | |
814 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { | |
815 if (memory_allocation_callbacks_[i].callback == callback) return true; | |
816 } | |
817 return false; | |
818 } | |
819 | |
820 | |
821 void MemoryAllocator::AddMemoryAllocationCallback( | |
822 MemoryAllocationCallback callback, | |
823 ObjectSpace space, | |
824 AllocationAction action) { | |
825 DCHECK(callback != NULL); | |
826 MemoryAllocationCallbackRegistration registration(callback, space, action); | |
827 DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback)); | |
828 return memory_allocation_callbacks_.Add(registration); | |
829 } | |
830 | |
831 | |
832 void MemoryAllocator::RemoveMemoryAllocationCallback( | |
833 MemoryAllocationCallback callback) { | |
834 DCHECK(callback != NULL); | |
835 for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) { | |
836 if (memory_allocation_callbacks_[i].callback == callback) { | |
837 memory_allocation_callbacks_.Remove(i); | |
838 return; | |
839 } | |
840 } | |
841 UNREACHABLE(); | |
842 } | |
843 | |
844 | |
845 #ifdef DEBUG | |
846 void MemoryAllocator::ReportStatistics() { | |
847 float pct = static_cast<float>(capacity_ - size_) / capacity_; | |
848 PrintF(" capacity: %" V8_PTR_PREFIX "d" | |
849 ", used: %" V8_PTR_PREFIX "d" | |
850 ", available: %%%d\n\n", | |
851 capacity_, size_, static_cast<int>(pct*100)); | |
852 } | |
853 #endif | |
854 | |
855 | |
856 int MemoryAllocator::CodePageGuardStartOffset() { | |
857 // We are guarding code pages: the first OS page after the header | |
858 // will be protected as non-writable. | |
859 return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize()); | |
860 } | |
861 | |
862 | |
863 int MemoryAllocator::CodePageGuardSize() { | |
864 return static_cast<int>(base::OS::CommitPageSize()); | |
865 } | |
866 | |
867 | |
868 int MemoryAllocator::CodePageAreaStartOffset() { | |
869 // We are guarding code pages: the first OS page after the header | |
870 // will be protected as non-writable. | |
871 return CodePageGuardStartOffset() + CodePageGuardSize(); | |
872 } | |
873 | |
874 | |
875 int MemoryAllocator::CodePageAreaEndOffset() { | |
876 // We are guarding code pages: the last OS page will be protected as | |
877 // non-writable. | |
878 return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize()); | |
879 } | |
880 | |
881 | |
882 bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm, | |
883 Address start, | |
884 size_t commit_size, | |
885 size_t reserved_size) { | |
886 // Commit page header (not executable). | |
887 if (!vm->Commit(start, | |
888 CodePageGuardStartOffset(), | |
889 false)) { | |
890 return false; | |
891 } | |
892 | |
893 // Create guard page after the header. | |
894 if (!vm->Guard(start + CodePageGuardStartOffset())) { | |
895 return false; | |
896 } | |
897 | |
898 // Commit page body (executable). | |
899 if (!vm->Commit(start + CodePageAreaStartOffset(), | |
900 commit_size - CodePageGuardStartOffset(), | |
901 true)) { | |
902 return false; | |
903 } | |
904 | |
905 // Create guard page before the end. | |
906 if (!vm->Guard(start + reserved_size - CodePageGuardSize())) { | |
907 return false; | |
908 } | |
909 | |
910 UpdateAllocatedSpaceLimits(start, | |
911 start + CodePageAreaStartOffset() + | |
912 commit_size - CodePageGuardStartOffset()); | |
913 return true; | |
914 } | |
915 | |
916 | |
917 // ----------------------------------------------------------------------------- | |
918 // MemoryChunk implementation | |
919 | |
920 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) { | |
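// Adjusts live-byte accounting for an object changed in place by the mutator.
// If the page has not been swept yet, the owner's unswept-free-bytes counter
// is adjusted in the opposite direction so the totals stay consistent.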
921 MemoryChunk* chunk = MemoryChunk::FromAddress(address); | |
922 if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) { | |
923 static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by); | |
924 } | |
925 chunk->IncrementLiveBytes(by); | |
926 } | |
927 | |
928 | |
929 // ----------------------------------------------------------------------------- | |
930 // PagedSpace implementation | |
931 | |
932 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id, | |
933 Executability executable) | |
934 : Space(heap, id, executable), | |
935 free_list_(this), | |
936 swept_precisely_(true), | |
937 unswept_free_bytes_(0), | |
938 end_of_unswept_pages_(NULL), | |
939 emergency_memory_(NULL) { | |
940 if (id == CODE_SPACE) { | |
941 area_size_ = heap->isolate()->memory_allocator()-> | |
942 CodePageAreaSize(); | |
943 } else { | |
944 area_size_ = Page::kPageSize - Page::kObjectStartOffset; | |
945 } | |
946 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) | |
947 * AreaSize(); | |
948 accounting_stats_.Clear(); | |
949 | |
950 allocation_info_.set_top(NULL); | |
951 allocation_info_.set_limit(NULL); | |
952 | |
953 anchor_.InitializeAsAnchor(this); | |
954 } | |
955 | |
956 | |
957 bool PagedSpace::SetUp() { | |
958 return true; | |
959 } | |
960 | |
961 | |
962 bool PagedSpace::HasBeenSetUp() { | |
963 return true; | |
964 } | |
965 | |
966 | |
967 void PagedSpace::TearDown() { | |
968 PageIterator iterator(this); | |
969 while (iterator.has_next()) { | |
970 heap()->isolate()->memory_allocator()->Free(iterator.next()); | |
971 } | |
972 anchor_.set_next_page(&anchor_); | |
973 anchor_.set_prev_page(&anchor_); | |
974 accounting_stats_.Clear(); | |
975 } | |
976 | |
977 | |
978 size_t PagedSpace::CommittedPhysicalMemory() { | |
979 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); | |
980 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | |
981 size_t size = 0; | |
982 PageIterator it(this); | |
983 while (it.has_next()) { | |
984 size += it.next()->CommittedPhysicalMemory(); | |
985 } | |
986 return size; | |
987 } | |
988 | |
989 | |
990 Object* PagedSpace::FindObject(Address addr) { | |
991 // Note: this function can only be called on precisely swept spaces. | |
992 DCHECK(!heap()->mark_compact_collector()->in_use()); | |
993 | |
994 if (!Contains(addr)) return Smi::FromInt(0); // Signaling not found. | |
995 | |
996 Page* p = Page::FromAddress(addr); | |
997 HeapObjectIterator it(p, NULL); | |
998 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | |
999 Address cur = obj->address(); | |
1000 Address next = cur + obj->Size(); | |
1001 if ((cur <= addr) && (addr < next)) return obj; | |
1002 } | |
1003 | |
1004 UNREACHABLE(); | |
1005 return Smi::FromInt(0); | |
1006 } | |
1007 | |
1008 | |
1009 bool PagedSpace::CanExpand() { | |
1010 DCHECK(max_capacity_ % AreaSize() == 0); | |
1011 | |
1012 if (Capacity() == max_capacity_) return false; | |
1013 | |
1014 DCHECK(Capacity() < max_capacity_); | |
1015 | |
1016 // Are we going to exceed capacity for this space? | |
1017 if ((Capacity() + Page::kPageSize) > max_capacity_) return false; | |
1018 | |
1019 return true; | |
1020 } | |
1021 | |
1022 | |
1023 bool PagedSpace::Expand() { | |
1024 if (!CanExpand()) return false; | |
1025 | |
1026 intptr_t size = AreaSize(); | |
1027 | |
1028 if (anchor_.next_page() == &anchor_) { | |
1029 size = SizeOfFirstPage(); | |
1030 } | |
1031 | |
1032 Page* p = heap()->isolate()->memory_allocator()->AllocatePage( | |
1033 size, this, executable()); | |
1034 if (p == NULL) return false; | |
1035 | |
1036 DCHECK(Capacity() <= max_capacity_); | |
1037 | |
1038 p->InsertAfter(anchor_.prev_page()); | |
1039 | |
1040 return true; | |
1041 } | |
1042 | |
1043 | |
1044 intptr_t PagedSpace::SizeOfFirstPage() { | |
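// The first page of each space is allocated smaller than a regular page,
// which keeps the initial footprint of a fresh isolate small; the sizes
// below are per-space heuristics, capped at AreaSize() at the end.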
1045 int size = 0; | |
1046 switch (identity()) { | |
1047 case OLD_POINTER_SPACE: | |
1048 size = 112 * kPointerSize * KB; | |
1049 break; | |
1050 case OLD_DATA_SPACE: | |
1051 size = 192 * KB; | |
1052 break; | |
1053 case MAP_SPACE: | |
1054 size = 16 * kPointerSize * KB; | |
1055 break; | |
1056 case CELL_SPACE: | |
1057 size = 16 * kPointerSize * KB; | |
1058 break; | |
1059 case PROPERTY_CELL_SPACE: | |
1060 size = 8 * kPointerSize * KB; | |
1061 break; | |
1062 case CODE_SPACE: { | |
1063 CodeRange* code_range = heap()->isolate()->code_range(); | |
1064 if (code_range != NULL && code_range->valid()) { | |
1065 // When code range exists, code pages are allocated in a special way | |
1066 // (from the reserved code range). That part of the code is not yet | |
1067 // upgraded to handle small pages. | |
1068 size = AreaSize(); | |
1069 } else { | |
1070 size = RoundUp( | |
1071 480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100, | |
1072 kPointerSize); | |
1073 } | |
1074 break; | |
1075 } | |
1076 default: | |
1077 UNREACHABLE(); | |
1078 } | |
1079 return Min(size, AreaSize()); | |
1080 } | |
1081 | |
1082 | |
1083 int PagedSpace::CountTotalPages() { | |
1084 PageIterator it(this); | |
1085 int count = 0; | |
1086 while (it.has_next()) { | |
1087 it.next(); | |
1088 count++; | |
1089 } | |
1090 return count; | |
1091 } | |
1092 | |
1093 | |
1094 void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) { | |
1095 sizes->huge_size_ = page->available_in_huge_free_list(); | |
1096 sizes->small_size_ = page->available_in_small_free_list(); | |
1097 sizes->medium_size_ = page->available_in_medium_free_list(); | |
1098 sizes->large_size_ = page->available_in_large_free_list(); | |
1099 } | |
1100 | |
1101 | |
1102 void PagedSpace::ResetFreeListStatistics() { | |
1103 PageIterator page_iterator(this); | |
1104 while (page_iterator.has_next()) { | |
1105 Page* page = page_iterator.next(); | |
1106 page->ResetFreeListStatistics(); | |
1107 } | |
1108 } | |
1109 | |
1110 | |
1111 void PagedSpace::IncreaseCapacity(int size) { | |
1112 accounting_stats_.ExpandSpace(size); | |
1113 } | |
1114 | |
1115 | |
1116 void PagedSpace::ReleasePage(Page* page) { | |
1117 DCHECK(page->LiveBytes() == 0); | |
1118 DCHECK(AreaSize() == page->area_size()); | |
1119 | |
1120 if (page->WasSwept()) { | |
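// Evict the page's entries from the free list; the evicted bytes are counted
// as allocated here so that the ShrinkSpace() call at the end of this
// function keeps the accounting consistent.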
1121 intptr_t size = free_list_.EvictFreeListItems(page); | |
1122 accounting_stats_.AllocateBytes(size); | |
1123 DCHECK_EQ(AreaSize(), static_cast<int>(size)); | |
1124 } else { | |
1125 DecreaseUnsweptFreeBytes(page); | |
1126 } | |
1127 | |
1128 if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) { | |
1129 heap()->decrement_scan_on_scavenge_pages(); | |
1130 page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE); | |
1131 } | |
1132 | |
1133 DCHECK(!free_list_.ContainsPageFreeListItems(page)); | |
1134 | |
1135 if (Page::FromAllocationTop(allocation_info_.top()) == page) { | |
1136 allocation_info_.set_top(NULL); | |
1137 allocation_info_.set_limit(NULL); | |
1138 } | |
1139 | |
1140 page->Unlink(); | |
1141 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { | |
1142 heap()->isolate()->memory_allocator()->Free(page); | |
1143 } else { | |
1144 heap()->QueueMemoryChunkForFree(page); | |
1145 } | |
1146 | |
1147 DCHECK(Capacity() > 0); | |
1148 accounting_stats_.ShrinkSpace(AreaSize()); | |
1149 } | |
1150 | |
1151 | |
1152 void PagedSpace::CreateEmergencyMemory() { | |
1153 emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk( | |
1154 AreaSize(), AreaSize(), executable(), this); | |
1155 } | |
1156 | |
1157 | |
1158 void PagedSpace::FreeEmergencyMemory() { | |
1159 Page* page = static_cast<Page*>(emergency_memory_); | |
1160 DCHECK(page->LiveBytes() == 0); | |
1161 DCHECK(AreaSize() == page->area_size()); | |
1162 DCHECK(!free_list_.ContainsPageFreeListItems(page)); | |
1163 heap()->isolate()->memory_allocator()->Free(page); | |
1164 emergency_memory_ = NULL; | |
1165 } | |
1166 | |
1167 | |
1168 void PagedSpace::UseEmergencyMemory() { | |
1169 Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this); | |
1170 page->InsertAfter(anchor_.prev_page()); | |
1171 emergency_memory_ = NULL; | |
1172 } | |
1173 | |
1174 | |
1175 #ifdef DEBUG | |
1176 void PagedSpace::Print() { } | |
1177 #endif | |
1178 | |
1179 #ifdef VERIFY_HEAP | |
1180 void PagedSpace::Verify(ObjectVisitor* visitor) { | |
1181 // We can only iterate over the pages if they were swept precisely. | |
1182 if (!swept_precisely_) return; | |
1183 | |
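// An empty linear allocation area (top == limit) is not required to point
// into any page of this space.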
1184 bool allocation_pointer_found_in_space = | |
1185 (allocation_info_.top() == allocation_info_.limit()); | |
1186 PageIterator page_iterator(this); | |
1187 while (page_iterator.has_next()) { | |
1188 Page* page = page_iterator.next(); | |
1189 CHECK(page->owner() == this); | |
1190 if (page == Page::FromAllocationTop(allocation_info_.top())) { | |
1191 allocation_pointer_found_in_space = true; | |
1192 } | |
1193 CHECK(page->WasSweptPrecisely()); | |
1194 HeapObjectIterator it(page, NULL); | |
1195 Address end_of_previous_object = page->area_start(); | |
1196 Address top = page->area_end(); | |
1197 int black_size = 0; | |
1198 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { | |
1199 CHECK(end_of_previous_object <= object->address()); | |
1200 | |
1201 // The first word should be a map, and we expect all map pointers to | |
1202 // be in map space. | |
1203 Map* map = object->map(); | |
1204 CHECK(map->IsMap()); | |
1205 CHECK(heap()->map_space()->Contains(map)); | |
1206 | |
1207 // Perform space-specific object verification. | |
1208 VerifyObject(object); | |
1209 | |
1210 // The object itself should look OK. | |
1211 object->ObjectVerify(); | |
1212 | |
1213 // All the interior pointers should be contained in the heap. | |
1214 int size = object->Size(); | |
1215 object->IterateBody(map->instance_type(), size, visitor); | |
1216 if (Marking::IsBlack(Marking::MarkBitFrom(object))) { | |
1217 black_size += size; | |
1218 } | |
1219 | |
1220 CHECK(object->address() + size <= top); | |
1221 end_of_previous_object = object->address() + size; | |
1222 } | |
1223 CHECK_LE(black_size, page->LiveBytes()); | |
1224 } | |
1225 CHECK(allocation_pointer_found_in_space); | |
1226 } | |
1227 #endif // VERIFY_HEAP | |
1228 | |
1229 // ----------------------------------------------------------------------------- | |
1230 // NewSpace implementation | |
1231 | |
1232 | |
1233 bool NewSpace::SetUp(int reserved_semispace_capacity, | |
1234 int maximum_semispace_capacity) { | |
1235 // Set up the new space from a single contiguous block of memory that is | |
1236 // reserved below and divided into two semi-spaces. To support fast | |
1237 // containment testing in the new space, the size of this chunk must be a | |
1238 // power of two and it must be aligned to its size. | |
1239 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); | |
1240 | |
1241 size_t size = 2 * reserved_semispace_capacity; | |
1242 Address base = | |
1243 heap()->isolate()->memory_allocator()->ReserveAlignedMemory( | |
1244 size, size, &reservation_); | |
1245 if (base == NULL) return false; | |
1246 | |
1247 chunk_base_ = base; | |
1248 chunk_size_ = static_cast<uintptr_t>(size); | |
1249 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); | |
1250 | |
1251 DCHECK(initial_semispace_capacity <= maximum_semispace_capacity); | |
1252 DCHECK(IsPowerOf2(maximum_semispace_capacity)); | |
1253 | |
1254 // Allocate and set up the histogram arrays if necessary. | |
1255 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | |
1256 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); | |
1257 | |
1258 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ | |
1259 promoted_histogram_[name].set_name(#name); | |
1260 INSTANCE_TYPE_LIST(SET_NAME) | |
1261 #undef SET_NAME | |
1262 | |
1263 DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize()); | |
1264 DCHECK(static_cast<intptr_t>(chunk_size_) >= | |
1265 2 * heap()->ReservedSemiSpaceSize()); | |
1266 DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0)); | |
1267 | |
1268 to_space_.SetUp(chunk_base_, | |
1269 initial_semispace_capacity, | |
1270 maximum_semispace_capacity); | |
1271 from_space_.SetUp(chunk_base_ + reserved_semispace_capacity, | |
1272 initial_semispace_capacity, | |
1273 maximum_semispace_capacity); | |
1274 if (!to_space_.Commit()) { | |
1275 return false; | |
1276 } | |
1277 DCHECK(!from_space_.is_committed()); // No need to use memory yet. | |
1278 | |
1279 start_ = chunk_base_; | |
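// The reservation is a power of two aligned to its size, so masking an
// address with address_mask_ and comparing against start_ is a constant-time
// containment test; object_mask_/object_expected_ extend it to tagged
// heap object pointers.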
1280 address_mask_ = ~(2 * reserved_semispace_capacity - 1); | |
1281 object_mask_ = address_mask_ | kHeapObjectTagMask; | |
1282 object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag; | |
1283 | |
1284 ResetAllocationInfo(); | |
1285 | |
1286 return true; | |
1287 } | |
1288 | |
1289 | |
1290 void NewSpace::TearDown() { | |
1291 if (allocated_histogram_) { | |
1292 DeleteArray(allocated_histogram_); | |
1293 allocated_histogram_ = NULL; | |
1294 } | |
1295 if (promoted_histogram_) { | |
1296 DeleteArray(promoted_histogram_); | |
1297 promoted_histogram_ = NULL; | |
1298 } | |
1299 | |
1300 start_ = NULL; | |
1301 allocation_info_.set_top(NULL); | |
1302 allocation_info_.set_limit(NULL); | |
1303 | |
1304 to_space_.TearDown(); | |
1305 from_space_.TearDown(); | |
1306 | |
1307 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); | |
1308 | |
1309 DCHECK(reservation_.IsReserved()); | |
1310 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, | |
1311 NOT_EXECUTABLE); | |
1312 chunk_base_ = NULL; | |
1313 chunk_size_ = 0; | |
1314 } | |
1315 | |
1316 | |
1317 void NewSpace::Flip() { | |
1318 SemiSpace::Swap(&from_space_, &to_space_); | |
1319 } | |
1320 | |
1321 | |
1322 void NewSpace::Grow() { | |
1323 // Double the semispace size but only up to maximum capacity. | |
1324 DCHECK(Capacity() < MaximumCapacity()); | |
1325 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity())); | |
1326 if (to_space_.GrowTo(new_capacity)) { | |
1327 // Only grow from space if we managed to grow to-space. | |
1328 if (!from_space_.GrowTo(new_capacity)) { | |
1329 // If we managed to grow to-space but couldn't grow from-space, | |
1330 // attempt to shrink to-space. | |
1331 if (!to_space_.ShrinkTo(from_space_.Capacity())) { | |
1332 // We are in an inconsistent state because we could not | |
1333 // commit/uncommit memory from new space. | |
1334 V8::FatalProcessOutOfMemory("Failed to grow new space."); | |
1335 } | |
1336 } | |
1337 } | |
1338 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | |
1339 } | |
1340 | |
1341 | |
1342 void NewSpace::Shrink() { | |
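// Shrink to roughly twice the currently used size, rounded up to a page
// multiple, but never below the initial capacity.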
1343 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt()); | |
1344 int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize); | |
1345 if (rounded_new_capacity < Capacity() && | |
1346 to_space_.ShrinkTo(rounded_new_capacity)) { | |
1347 // Only shrink from-space if we managed to shrink to-space. | |
1348 from_space_.Reset(); | |
1349 if (!from_space_.ShrinkTo(rounded_new_capacity)) { | |
1350 // If we managed to shrink to-space but couldn't shrink from | |
1351 // space, attempt to grow to-space again. | |
1352 if (!to_space_.GrowTo(from_space_.Capacity())) { | |
1353 // We are in an inconsistent state because we could not | |
1354 // commit/uncommit memory from new space. | |
1355 V8::FatalProcessOutOfMemory("Failed to shrink new space."); | |
1356 } | |
1357 } | |
1358 } | |
1359 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | |
1360 } | |
1361 | |
1362 | |
1363 void NewSpace::UpdateAllocationInfo() { | |
1364 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | |
1365 allocation_info_.set_top(to_space_.page_low()); | |
1366 allocation_info_.set_limit(to_space_.page_high()); | |
1367 UpdateInlineAllocationLimit(0); | |
1368 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | |
1369 } | |
1370 | |
1371 | |
1372 void NewSpace::ResetAllocationInfo() { | |
1373 to_space_.Reset(); | |
1374 UpdateAllocationInfo(); | |
1375 pages_used_ = 0; | |
1376 // Clear all mark-bits in the to-space. | |
1377 NewSpacePageIterator it(&to_space_); | |
1378 while (it.has_next()) { | |
1379 Bitmap::Clear(it.next()); | |
1380 } | |
1381 } | |
1382 | |
1383 | |
1384 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) { | |
1385 if (heap()->inline_allocation_disabled()) { | |
1386 // Use the lowest possible limit since linear allocation is disabled. | |
1387 Address high = to_space_.page_high(); | |
1388 Address new_top = allocation_info_.top() + size_in_bytes; | |
1389 allocation_info_.set_limit(Min(new_top, high)); | |
1390 } else if (inline_allocation_limit_step() == 0) { | |
1391 // Normal limit is the end of the current page. | |
1392 allocation_info_.set_limit(to_space_.page_high()); | |
1393 } else { | |
1394 // Lower limit during incremental marking. | |
1395 Address high = to_space_.page_high(); | |
1396 Address new_top = allocation_info_.top() + size_in_bytes; | |
1397 Address new_limit = new_top + inline_allocation_limit_step_; | |
1398 allocation_info_.set_limit(Min(new_limit, high)); | |
1399 } | |
1400 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | |
1401 } | |
1402 | |
1403 | |
1404 bool NewSpace::AddFreshPage() { | |
1405 Address top = allocation_info_.top(); | |
1406 if (NewSpacePage::IsAtStart(top)) { | |
1407 // The current page is already empty. Don't try to make another. | |
1408 | |
1409 // We should only get here if someone asks to allocate more | |
1410 // than what can be stored in a single page. | |
1411 // TODO(gc): Change the limit on new-space allocation to prevent this | |
1412 // from happening (all such allocations should go directly to LOSpace). | |
1413 return false; | |
1414 } | |
1415 if (!to_space_.AdvancePage()) { | |
1416 // Failed to get a new page in to-space. | |
1417 return false; | |
1418 } | |
1419 | |
1420 // Clear remainder of current page. | |
1421 Address limit = NewSpacePage::FromLimit(top)->area_end(); | |
1422 if (heap()->gc_state() == Heap::SCAVENGE) { | |
1423 heap()->promotion_queue()->SetNewLimit(limit); | |
1424 heap()->promotion_queue()->ActivateGuardIfOnTheSamePage(); | |
1425 } | |
1426 | |
1427 int remaining_in_page = static_cast<int>(limit - top); | |
1428 heap()->CreateFillerObjectAt(top, remaining_in_page); | |
1429 pages_used_++; | |
1430 UpdateAllocationInfo(); | |
1431 | |
1432 return true; | |
1433 } | |
1434 | |
1435 | |
1436 AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) { | |
1437 Address old_top = allocation_info_.top(); | |
1438 Address high = to_space_.page_high(); | |
1439 if (allocation_info_.limit() < high) { | |
1440 // Either the limit has been lowered because linear allocation was disabled | |
1441 // or because incremental marking wants to get a chance to do a step. Set | |
1442 // the new limit accordingly. | |
1443 Address new_top = old_top + size_in_bytes; | |
1444 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); | |
1445 heap()->incremental_marking()->Step( | |
1446 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); | |
1447 UpdateInlineAllocationLimit(size_in_bytes); | |
1448 top_on_previous_step_ = new_top; | |
1449 return AllocateRaw(size_in_bytes); | |
1450 } else if (AddFreshPage()) { | |
1451 // Switched to new page. Try allocating again. | |
1452 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); | |
1453 heap()->incremental_marking()->Step( | |
1454 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); | |
1455 top_on_previous_step_ = to_space_.page_low(); | |
1456 return AllocateRaw(size_in_bytes); | |
1457 } else { | |
1458 return AllocationResult::Retry(); | |
1459 } | |
1460 } | |
1461 | |
1462 | |
1463 #ifdef VERIFY_HEAP | |
1464 // We do not use the SemiSpaceIterator because verification doesn't assume | |
1465 // that it works (it depends on the invariants we are checking). | |
1466 void NewSpace::Verify() { | |
1467 // The allocation pointer should be in the space or at the very end. | |
1468 DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | |
1469 | |
1470 // There should be objects packed in from the low address up to the | |
1471 // allocation pointer. | |
1472 Address current = to_space_.first_page()->area_start(); | |
1473 CHECK_EQ(current, to_space_.space_start()); | |
1474 | |
1475 while (current != top()) { | |
1476 if (!NewSpacePage::IsAtEnd(current)) { | |
1477 // The allocation pointer should not be in the middle of an object. | |
1478 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) || | |
1479 current < top()); | |
1480 | |
1481 HeapObject* object = HeapObject::FromAddress(current); | |
1482 | |
1483 // The first word should be a map, and we expect all map pointers to | |
1484 // be in map space. | |
1485 Map* map = object->map(); | |
1486 CHECK(map->IsMap()); | |
1487 CHECK(heap()->map_space()->Contains(map)); | |
1488 | |
1489 // The object should not be code or a map. | |
1490 CHECK(!object->IsMap()); | |
1491 CHECK(!object->IsCode()); | |
1492 | |
1493 // The object itself should look OK. | |
1494 object->ObjectVerify(); | |
1495 | |
1496 // All the interior pointers should be contained in the heap. | |
1497 VerifyPointersVisitor visitor; | |
1498 int size = object->Size(); | |
1499 object->IterateBody(map->instance_type(), size, &visitor); | |
1500 | |
1501 current += size; | |
1502 } else { | |
1503 // At end of page, switch to next page. | |
1504 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page(); | |
1505 // Next page should be valid. | |
1506 CHECK(!page->is_anchor()); | |
1507 current = page->area_start(); | |
1508 } | |
1509 } | |
1510 | |
1511 // Check semi-spaces. | |
1512 CHECK_EQ(from_space_.id(), kFromSpace); | |
1513 CHECK_EQ(to_space_.id(), kToSpace); | |
1514 from_space_.Verify(); | |
1515 to_space_.Verify(); | |
1516 } | |
1517 #endif | |
1518 | |
1519 // ----------------------------------------------------------------------------- | |
1520 // SemiSpace implementation | |
1521 | |
1522 void SemiSpace::SetUp(Address start, | |
1523 int initial_capacity, | |
1524 int maximum_capacity) { | |
1525 // Creates a space in the young generation. The constructor does not | |
1526 // allocate memory from the OS. A SemiSpace is given a contiguous chunk of | |
1527 // memory of size 'capacity' when set up, and does not grow or shrink | |
1528 // otherwise. In the mark-compact collector, the memory region of the from | |
1529 // space is used as the marking stack. It requires contiguous memory | |
1530 // addresses. | |
1531 DCHECK(maximum_capacity >= Page::kPageSize); | |
1532 initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize); | |
1533 capacity_ = initial_capacity; | |
1534 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); | |
1535 maximum_committed_ = 0; | |
1536 committed_ = false; | |
1537 start_ = start; | |
1538 address_mask_ = ~(maximum_capacity - 1); | |
1539 object_mask_ = address_mask_ | kHeapObjectTagMask; | |
1540 object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag; | |
1541 age_mark_ = start_; | |
1542 } | |
1543 | |
1544 | |
1545 void SemiSpace::TearDown() { | |
1546 start_ = NULL; | |
1547 capacity_ = 0; | |
1548 } | |
1549 | |
1550 | |
1551 bool SemiSpace::Commit() { | |
1552 DCHECK(!is_committed()); | |
1553 int pages = capacity_ / Page::kPageSize; | |
1554 if (!heap()->isolate()->memory_allocator()->CommitBlock(start_, | |
1555 capacity_, | |
1556 executable())) { | |
1557 return false; | |
1558 } | |
1559 | |
1560 NewSpacePage* current = anchor(); | |
1561 for (int i = 0; i < pages; i++) { | |
1562 NewSpacePage* new_page = | |
1563 NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this); | |
1564 new_page->InsertAfter(current); | |
1565 current = new_page; | |
1566 } | |
1567 | |
1568 SetCapacity(capacity_); | |
1569 committed_ = true; | |
1570 Reset(); | |
1571 return true; | |
1572 } | |
1573 | |
1574 | |
1575 bool SemiSpace::Uncommit() { | |
1576 DCHECK(is_committed()); | |
1577 Address start = start_ + maximum_capacity_ - capacity_; | |
1578 if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) { | |
1579 return false; | |
1580 } | |
1581 anchor()->set_next_page(anchor()); | |
1582 anchor()->set_prev_page(anchor()); | |
1583 | |
1584 committed_ = false; | |
1585 return true; | |
1586 } | |
1587 | |
1588 | |
1589 size_t SemiSpace::CommittedPhysicalMemory() { | |
1590 if (!is_committed()) return 0; | |
1591 size_t size = 0; | |
1592 NewSpacePageIterator it(this); | |
1593 while (it.has_next()) { | |
1594 size += it.next()->CommittedPhysicalMemory(); | |
1595 } | |
1596 return size; | |
1597 } | |
1598 | |
1599 | |
1600 bool SemiSpace::GrowTo(int new_capacity) { | |
1601 if (!is_committed()) { | |
1602 if (!Commit()) return false; | |
1603 } | |
1604 DCHECK((new_capacity & Page::kPageAlignmentMask) == 0); | |
1605 DCHECK(new_capacity <= maximum_capacity_); | |
1606 DCHECK(new_capacity > capacity_); | |
1607 int pages_before = capacity_ / Page::kPageSize; | |
1608 int pages_after = new_capacity / Page::kPageSize; | |
1609 | |
1610 size_t delta = new_capacity - capacity_; | |
1611 | |
1612 DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); | |
1613 if (!heap()->isolate()->memory_allocator()->CommitBlock( | |
1614 start_ + capacity_, delta, executable())) { | |
1615 return false; | |
1616 } | |
1617 SetCapacity(new_capacity); | |
1618 NewSpacePage* last_page = anchor()->prev_page(); | |
1619 DCHECK(last_page != anchor()); | |
1620 for (int i = pages_before; i < pages_after; i++) { | |
1621 Address page_address = start_ + i * Page::kPageSize; | |
1622 NewSpacePage* new_page = NewSpacePage::Initialize(heap(), | |
1623 page_address, | |
1624 this); | |
1625 new_page->InsertAfter(last_page); | |
1626 Bitmap::Clear(new_page); | |
1627     // Duplicate the flags that were set on the old page. | |
1628 new_page->SetFlags(last_page->GetFlags(), | |
1629 NewSpacePage::kCopyOnFlipFlagsMask); | |
1630 last_page = new_page; | |
1631 } | |
1632 return true; | |
1633 } | |
1634 | |
1635 | |
1636 bool SemiSpace::ShrinkTo(int new_capacity) { | |
1637 DCHECK((new_capacity & Page::kPageAlignmentMask) == 0); | |
1638 DCHECK(new_capacity >= initial_capacity_); | |
1639 DCHECK(new_capacity < capacity_); | |
1640 if (is_committed()) { | |
1641 size_t delta = capacity_ - new_capacity; | |
1642 DCHECK(IsAligned(delta, base::OS::AllocateAlignment())); | |
1643 | |
1644 MemoryAllocator* allocator = heap()->isolate()->memory_allocator(); | |
1645 if (!allocator->UncommitBlock(start_ + new_capacity, delta)) { | |
1646 return false; | |
1647 } | |
1648 | |
1649 int pages_after = new_capacity / Page::kPageSize; | |
1650 NewSpacePage* new_last_page = | |
1651 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize); | |
1652 new_last_page->set_next_page(anchor()); | |
1653 anchor()->set_prev_page(new_last_page); | |
1654 DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page)); | |
1655 } | |
1656 | |
1657 SetCapacity(new_capacity); | |
1658 | |
1659 return true; | |
1660 } | |
1661 | |
1662 | |
1663 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { | |
1664 anchor_.set_owner(this); | |
1665   // Fix up back-pointers to the anchor. The anchor's address changes | |
1666 // when we swap. | |
1667 anchor_.prev_page()->set_next_page(&anchor_); | |
1668 anchor_.next_page()->set_prev_page(&anchor_); | |
1669 | |
1670 bool becomes_to_space = (id_ == kFromSpace); | |
1671 id_ = becomes_to_space ? kToSpace : kFromSpace; | |
1672 NewSpacePage* page = anchor_.next_page(); | |
1673 while (page != &anchor_) { | |
1674 page->set_owner(this); | |
1675 page->SetFlags(flags, mask); | |
1676 if (becomes_to_space) { | |
1677 page->ClearFlag(MemoryChunk::IN_FROM_SPACE); | |
1678 page->SetFlag(MemoryChunk::IN_TO_SPACE); | |
1679 page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); | |
1680 page->ResetLiveBytes(); | |
1681 } else { | |
1682 page->SetFlag(MemoryChunk::IN_FROM_SPACE); | |
1683 page->ClearFlag(MemoryChunk::IN_TO_SPACE); | |
1684 } | |
1685 DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); | |
1686 DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) || | |
1687 page->IsFlagSet(MemoryChunk::IN_FROM_SPACE)); | |
1688 page = page->next_page(); | |
1689 } | |
1690 } | |
1691 | |
1692 | |
1693 void SemiSpace::Reset() { | |
1694 DCHECK(anchor_.next_page() != &anchor_); | |
1695 current_page_ = anchor_.next_page(); | |
1696 } | |
1697 | |
1698 | |
1699 void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) { | |
1700 // We won't be swapping semispaces without data in them. | |
1701 DCHECK(from->anchor_.next_page() != &from->anchor_); | |
1702 DCHECK(to->anchor_.next_page() != &to->anchor_); | |
1703 | |
1704 // Swap bits. | |
1705 SemiSpace tmp = *from; | |
1706 *from = *to; | |
1707 *to = tmp; | |
1708 | |
1709   // Fix up back-pointers to the page list anchor now that its address | |
1710 // has changed. | |
1711 // Swap to/from-space bits on pages. | |
1712 // Copy GC flags from old active space (from-space) to new (to-space). | |
1713 intptr_t flags = from->current_page()->GetFlags(); | |
1714 to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask); | |
1715 | |
1716 from->FlipPages(0, 0); | |
1717 } | |
1718 | |
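// Editor's note (context, not taken from this section): the usual caller is
// NewSpace::Flip(), invoked when a scavenge starts.  After the swap,
// to-space is the empty semispace into which survivors will be evacuated
// and from-space holds the objects allocated since the previous scavenge.
// Because the two SemiSpace objects are swapped by value, FlipPages() has to
// re-point each page's owner and the neighbouring prev/next links at the
// anchors' new addresses, and to flip the IN_FROM_SPACE / IN_TO_SPACE flags.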
1719 | |
1720 void SemiSpace::SetCapacity(int new_capacity) { | |
1721 capacity_ = new_capacity; | |
1722 if (capacity_ > maximum_committed_) { | |
1723 maximum_committed_ = capacity_; | |
1724 } | |
1725 } | |
1726 | |
1727 | |
1728 void SemiSpace::set_age_mark(Address mark) { | |
1729 DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this); | |
1730 age_mark_ = mark; | |
1731 // Mark all pages up to the one containing mark. | |
1732 NewSpacePageIterator it(space_start(), mark); | |
1733 while (it.has_next()) { | |
1734 it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK); | |
1735 } | |
1736 } | |
1737 | |
1738 | |
1739 #ifdef DEBUG | |
1740 void SemiSpace::Print() { } | |
1741 #endif | |
1742 | |
1743 #ifdef VERIFY_HEAP | |
1744 void SemiSpace::Verify() { | |
1745 bool is_from_space = (id_ == kFromSpace); | |
1746 NewSpacePage* page = anchor_.next_page(); | |
1747 CHECK(anchor_.semi_space() == this); | |
1748 while (page != &anchor_) { | |
1749 CHECK(page->semi_space() == this); | |
1750 CHECK(page->InNewSpace()); | |
1751 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE | |
1752 : MemoryChunk::IN_TO_SPACE)); | |
1753 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE | |
1754 : MemoryChunk::IN_FROM_SPACE)); | |
1755 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING)); | |
1756 if (!is_from_space) { | |
1757 // The pointers-from-here-are-interesting flag isn't updated dynamically | |
1758 // on from-space pages, so it might be out of sync with the marking state. | |
1759 if (page->heap()->incremental_marking()->IsMarking()) { | |
1760 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); | |
1761 } else { | |
1762 CHECK(!page->IsFlagSet( | |
1763 MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)); | |
1764 } | |
1765 // TODO(gc): Check that the live_bytes_count_ field matches the | |
1766 // black marking on the page (if we make it match in new-space). | |
1767 } | |
1768 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)); | |
1769 CHECK(page->prev_page()->next_page() == page); | |
1770 page = page->next_page(); | |
1771 } | |
1772 } | |
1773 #endif | |
1774 | |
1775 #ifdef DEBUG | |
1776 void SemiSpace::AssertValidRange(Address start, Address end) { | |
1777   // Addresses must belong to the same semi-space. | |
1778 NewSpacePage* page = NewSpacePage::FromLimit(start); | |
1779 NewSpacePage* end_page = NewSpacePage::FromLimit(end); | |
1780 SemiSpace* space = page->semi_space(); | |
1781 CHECK_EQ(space, end_page->semi_space()); | |
1782 // Start address is before end address, either on same page, | |
1783 // or end address is on a later page in the linked list of | |
1784 // semi-space pages. | |
1785 if (page == end_page) { | |
1786 CHECK(start <= end); | |
1787 } else { | |
1788 while (page != end_page) { | |
1789 page = page->next_page(); | |
1790 CHECK_NE(page, space->anchor()); | |
1791 } | |
1792 } | |
1793 } | |
1794 #endif | |
1795 | |
1796 | |
1797 // ----------------------------------------------------------------------------- | |
1798 // SemiSpaceIterator implementation. | |
1799 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) { | |
1800 Initialize(space->bottom(), space->top(), NULL); | |
1801 } | |
1802 | |
1803 | |
1804 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, | |
1805 HeapObjectCallback size_func) { | |
1806 Initialize(space->bottom(), space->top(), size_func); | |
1807 } | |
1808 | |
1809 | |
1810 SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) { | |
1811 Initialize(start, space->top(), NULL); | |
1812 } | |
1813 | |
1814 | |
1815 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) { | |
1816 Initialize(from, to, NULL); | |
1817 } | |
1818 | |
1819 | |
1820 void SemiSpaceIterator::Initialize(Address start, | |
1821 Address end, | |
1822 HeapObjectCallback size_func) { | |
1823 SemiSpace::AssertValidRange(start, end); | |
1824 current_ = start; | |
1825 limit_ = end; | |
1826 size_func_ = size_func; | |
1827 } | |
1828 | |
1829 | |
1830 #ifdef DEBUG | |
1831 // heap_histograms is shared; always clear it before using it. | |
1832 static void ClearHistograms(Isolate* isolate) { | |
1833 // We reset the name each time, though it hasn't changed. | |
1834 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name); | |
1835 INSTANCE_TYPE_LIST(DEF_TYPE_NAME) | |
1836 #undef DEF_TYPE_NAME | |
1837 | |
1838 #define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear(); | |
1839 INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM) | |
1840 #undef CLEAR_HISTOGRAM | |
1841 | |
1842 isolate->js_spill_information()->Clear(); | |
1843 } | |
1844 | |
1845 | |
1846 static void ClearCodeKindStatistics(int* code_kind_statistics) { | |
1847 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { | |
1848 code_kind_statistics[i] = 0; | |
1849 } | |
1850 } | |
1851 | |
1852 | |
1853 static void ReportCodeKindStatistics(int* code_kind_statistics) { | |
1854 PrintF("\n Code kind histograms: \n"); | |
1855 for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) { | |
1856 if (code_kind_statistics[i] > 0) { | |
1857 PrintF(" %-20s: %10d bytes\n", | |
1858 Code::Kind2String(static_cast<Code::Kind>(i)), | |
1859 code_kind_statistics[i]); | |
1860 } | |
1861 } | |
1862 PrintF("\n"); | |
1863 } | |
1864 | |
1865 | |
1866 static int CollectHistogramInfo(HeapObject* obj) { | |
1867 Isolate* isolate = obj->GetIsolate(); | |
1868 InstanceType type = obj->map()->instance_type(); | |
1869 DCHECK(0 <= type && type <= LAST_TYPE); | |
1870 DCHECK(isolate->heap_histograms()[type].name() != NULL); | |
1871 isolate->heap_histograms()[type].increment_number(1); | |
1872 isolate->heap_histograms()[type].increment_bytes(obj->Size()); | |
1873 | |
1874 if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) { | |
1875 JSObject::cast(obj)->IncrementSpillStatistics( | |
1876 isolate->js_spill_information()); | |
1877 } | |
1878 | |
1879 return obj->Size(); | |
1880 } | |
1881 | |
1882 | |
1883 static void ReportHistogram(Isolate* isolate, bool print_spill) { | |
1884 PrintF("\n Object Histogram:\n"); | |
1885 for (int i = 0; i <= LAST_TYPE; i++) { | |
1886 if (isolate->heap_histograms()[i].number() > 0) { | |
1887 PrintF(" %-34s%10d (%10d bytes)\n", | |
1888 isolate->heap_histograms()[i].name(), | |
1889 isolate->heap_histograms()[i].number(), | |
1890 isolate->heap_histograms()[i].bytes()); | |
1891 } | |
1892 } | |
1893 PrintF("\n"); | |
1894 | |
1895 // Summarize string types. | |
1896 int string_number = 0; | |
1897 int string_bytes = 0; | |
1898 #define INCREMENT(type, size, name, camel_name) \ | |
1899 string_number += isolate->heap_histograms()[type].number(); \ | |
1900 string_bytes += isolate->heap_histograms()[type].bytes(); | |
1901 STRING_TYPE_LIST(INCREMENT) | |
1902 #undef INCREMENT | |
1903 if (string_number > 0) { | |
1904 PrintF(" %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number, | |
1905 string_bytes); | |
1906 } | |
1907 | |
1908 if (FLAG_collect_heap_spill_statistics && print_spill) { | |
1909 isolate->js_spill_information()->Print(); | |
1910 } | |
1911 } | |
1912 #endif // DEBUG | |
1913 | |
1914 | |
1915 // Support for statistics gathering for --heap-stats and --log-gc. | |
1916 void NewSpace::ClearHistograms() { | |
1917 for (int i = 0; i <= LAST_TYPE; i++) { | |
1918 allocated_histogram_[i].clear(); | |
1919 promoted_histogram_[i].clear(); | |
1920 } | |
1921 } | |
1922 | |
1923 | |
1924 // Because the copying collector does not touch garbage objects, we iterate | |
1925 // the new space before a collection to get a histogram of allocated objects. | |
1926 // This only happens when the --log-gc flag is set. | |
1927 void NewSpace::CollectStatistics() { | |
1928 ClearHistograms(); | |
1929 SemiSpaceIterator it(this); | |
1930 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) | |
1931 RecordAllocation(obj); | |
1932 } | |
1933 | |
1934 | |
1935 static void DoReportStatistics(Isolate* isolate, | |
1936 HistogramInfo* info, const char* description) { | |
1937 LOG(isolate, HeapSampleBeginEvent("NewSpace", description)); | |
1938 // Lump all the string types together. | |
1939 int string_number = 0; | |
1940 int string_bytes = 0; | |
1941 #define INCREMENT(type, size, name, camel_name) \ | |
1942 string_number += info[type].number(); \ | |
1943 string_bytes += info[type].bytes(); | |
1944 STRING_TYPE_LIST(INCREMENT) | |
1945 #undef INCREMENT | |
1946 if (string_number > 0) { | |
1947 LOG(isolate, | |
1948 HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes)); | |
1949 } | |
1950 | |
1951 // Then do the other types. | |
1952 for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) { | |
1953 if (info[i].number() > 0) { | |
1954 LOG(isolate, | |
1955 HeapSampleItemEvent(info[i].name(), info[i].number(), | |
1956 info[i].bytes())); | |
1957 } | |
1958 } | |
1959 LOG(isolate, HeapSampleEndEvent("NewSpace", description)); | |
1960 } | |
1961 | |
1962 | |
1963 void NewSpace::ReportStatistics() { | |
1964 #ifdef DEBUG | |
1965 if (FLAG_heap_stats) { | |
1966 float pct = static_cast<float>(Available()) / Capacity(); | |
1967 PrintF(" capacity: %" V8_PTR_PREFIX "d" | |
1968 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | |
1969 Capacity(), Available(), static_cast<int>(pct*100)); | |
1970 PrintF("\n Object Histogram:\n"); | |
1971 for (int i = 0; i <= LAST_TYPE; i++) { | |
1972 if (allocated_histogram_[i].number() > 0) { | |
1973 PrintF(" %-34s%10d (%10d bytes)\n", | |
1974 allocated_histogram_[i].name(), | |
1975 allocated_histogram_[i].number(), | |
1976 allocated_histogram_[i].bytes()); | |
1977 } | |
1978 } | |
1979 PrintF("\n"); | |
1980 } | |
1981 #endif // DEBUG | |
1982 | |
1983 if (FLAG_log_gc) { | |
1984 Isolate* isolate = heap()->isolate(); | |
1985 DoReportStatistics(isolate, allocated_histogram_, "allocated"); | |
1986 DoReportStatistics(isolate, promoted_histogram_, "promoted"); | |
1987 } | |
1988 } | |
1989 | |
1990 | |
1991 void NewSpace::RecordAllocation(HeapObject* obj) { | |
1992 InstanceType type = obj->map()->instance_type(); | |
1993 DCHECK(0 <= type && type <= LAST_TYPE); | |
1994 allocated_histogram_[type].increment_number(1); | |
1995 allocated_histogram_[type].increment_bytes(obj->Size()); | |
1996 } | |
1997 | |
1998 | |
1999 void NewSpace::RecordPromotion(HeapObject* obj) { | |
2000 InstanceType type = obj->map()->instance_type(); | |
2001 DCHECK(0 <= type && type <= LAST_TYPE); | |
2002 promoted_histogram_[type].increment_number(1); | |
2003 promoted_histogram_[type].increment_bytes(obj->Size()); | |
2004 } | |
2005 | |
2006 | |
2007 size_t NewSpace::CommittedPhysicalMemory() { | |
2008 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); | |
2009 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | |
2010 size_t size = to_space_.CommittedPhysicalMemory(); | |
2011 if (from_space_.is_committed()) { | |
2012 size += from_space_.CommittedPhysicalMemory(); | |
2013 } | |
2014 return size; | |
2015 } | |
2016 | |
2017 | |
2018 // ----------------------------------------------------------------------------- | |
2019 // Free lists for old object spaces implementation | |
2020 | |
2021 void FreeListNode::set_size(Heap* heap, int size_in_bytes) { | |
2022 DCHECK(size_in_bytes > 0); | |
2023 DCHECK(IsAligned(size_in_bytes, kPointerSize)); | |
2024 | |
2025 // We write a map and possibly size information to the block. If the block | |
2026 // is big enough to be a FreeSpace with at least one extra word (the next | |
2027 // pointer), we set its map to be the free space map and its size to an | |
2028 // appropriate array length for the desired size from HeapObject::Size(). | |
2029   // If the block is too small (e.g., one or two words) to hold both a size | |
2030 // field and a next pointer, we give it a filler map that gives it the | |
2031 // correct size. | |
2032 if (size_in_bytes > FreeSpace::kHeaderSize) { | |
2033 // Can't use FreeSpace::cast because it fails during deserialization. | |
2034 // We have to set the size first with a release store before we store | |
2035 // the map because a concurrent store buffer scan on scavenge must not | |
2036 // observe a map with an invalid size. | |
2037 FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this); | |
2038 this_as_free_space->nobarrier_set_size(size_in_bytes); | |
2039 synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map()); | |
2040 } else if (size_in_bytes == kPointerSize) { | |
2041 set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map()); | |
2042 } else if (size_in_bytes == 2 * kPointerSize) { | |
2043 set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map()); | |
2044 } else { | |
2045 UNREACHABLE(); | |
2046 } | |
2047 // We would like to DCHECK(Size() == size_in_bytes) but this would fail during | |
2048 // deserialization because the free space map is not done yet. | |
2049 } | |
2050 | |
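// Editor's note (illustrative, not from the original source): the effect of
// the three cases above, for a caller such as FreeList::Free() below, is:
//
//   node->set_size(heap, kPointerSize);       // -> one_pointer_filler_map
//   node->set_size(heap, 2 * kPointerSize);   // -> two_pointer_filler_map
//   node->set_size(heap, 16 * kPointerSize);  // -> free_space_map, with the
//                                             //    size stored in the block
//
// In every case the block ends up with a map that lets heap iteration skip
// it correctly; only blocks of at least kSmallListMin bytes are actually
// linked into a free list (see FreeList::Free() below).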
2051 | |
2052 FreeListNode* FreeListNode::next() { | |
2053 DCHECK(IsFreeListNode(this)); | |
2054 if (map() == GetHeap()->raw_unchecked_free_space_map()) { | |
2055 DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize); | |
2056 return reinterpret_cast<FreeListNode*>( | |
2057 Memory::Address_at(address() + kNextOffset)); | |
2058 } else { | |
2059 return reinterpret_cast<FreeListNode*>( | |
2060 Memory::Address_at(address() + kPointerSize)); | |
2061 } | |
2062 } | |
2063 | |
2064 | |
2065 FreeListNode** FreeListNode::next_address() { | |
2066 DCHECK(IsFreeListNode(this)); | |
2067 if (map() == GetHeap()->raw_unchecked_free_space_map()) { | |
2068 DCHECK(Size() >= kNextOffset + kPointerSize); | |
2069 return reinterpret_cast<FreeListNode**>(address() + kNextOffset); | |
2070 } else { | |
2071 return reinterpret_cast<FreeListNode**>(address() + kPointerSize); | |
2072 } | |
2073 } | |
2074 | |
2075 | |
2076 void FreeListNode::set_next(FreeListNode* next) { | |
2077 DCHECK(IsFreeListNode(this)); | |
2078 // While we are booting the VM the free space map will actually be null. So | |
2079 // we have to make sure that we don't try to use it for anything at that | |
2080 // stage. | |
2081 if (map() == GetHeap()->raw_unchecked_free_space_map()) { | |
2082 DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize); | |
2083 base::NoBarrier_Store( | |
2084 reinterpret_cast<base::AtomicWord*>(address() + kNextOffset), | |
2085 reinterpret_cast<base::AtomicWord>(next)); | |
2086 } else { | |
2087 base::NoBarrier_Store( | |
2088 reinterpret_cast<base::AtomicWord*>(address() + kPointerSize), | |
2089 reinterpret_cast<base::AtomicWord>(next)); | |
2090 } | |
2091 } | |
2092 | |
2093 | |
2094 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) { | |
2095 intptr_t free_bytes = 0; | |
2096 if (category->top() != NULL) { | |
2097 // This is safe (not going to deadlock) since Concatenate operations | |
2098 // are never performed on the same free lists at the same time in | |
2099 // reverse order. | |
2100 base::LockGuard<base::Mutex> target_lock_guard(mutex()); | |
2101 base::LockGuard<base::Mutex> source_lock_guard(category->mutex()); | |
2102 DCHECK(category->end_ != NULL); | |
2103 free_bytes = category->available(); | |
2104 if (end_ == NULL) { | |
2105 end_ = category->end(); | |
2106 } else { | |
2107 category->end()->set_next(top()); | |
2108 } | |
2109 set_top(category->top()); | |
2110 base::NoBarrier_Store(&top_, category->top_); | |
2111 available_ += category->available(); | |
2112 category->Reset(); | |
2113 } | |
2114 return free_bytes; | |
2115 } | |
2116 | |
2117 | |
2118 void FreeListCategory::Reset() { | |
2119 set_top(NULL); | |
2120 set_end(NULL); | |
2121 set_available(0); | |
2122 } | |
2123 | |
2124 | |
2125 intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) { | |
2126 int sum = 0; | |
2127 FreeListNode* t = top(); | |
2128 FreeListNode** n = &t; | |
2129 while (*n != NULL) { | |
2130 if (Page::FromAddress((*n)->address()) == p) { | |
2131 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n); | |
2132 sum += free_space->Size(); | |
2133 *n = (*n)->next(); | |
2134 } else { | |
2135 n = (*n)->next_address(); | |
2136 } | |
2137 } | |
2138 set_top(t); | |
2139 if (top() == NULL) { | |
2140 set_end(NULL); | |
2141 } | |
2142 available_ -= sum; | |
2143 return sum; | |
2144 } | |
2145 | |
2146 | |
2147 bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) { | |
2148 FreeListNode* node = top(); | |
2149 while (node != NULL) { | |
2150 if (Page::FromAddress(node->address()) == p) return true; | |
2151 node = node->next(); | |
2152 } | |
2153 return false; | |
2154 } | |
2155 | |
2156 | |
2157 FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) { | |
2158 FreeListNode* node = top(); | |
2159 | |
2160 if (node == NULL) return NULL; | |
2161 | |
2162 while (node != NULL && | |
2163 Page::FromAddress(node->address())->IsEvacuationCandidate()) { | |
2164 available_ -= reinterpret_cast<FreeSpace*>(node)->Size(); | |
2165 node = node->next(); | |
2166 } | |
2167 | |
2168 if (node != NULL) { | |
2169 set_top(node->next()); | |
2170 *node_size = reinterpret_cast<FreeSpace*>(node)->Size(); | |
2171 available_ -= *node_size; | |
2172 } else { | |
2173 set_top(NULL); | |
2174 } | |
2175 | |
2176 if (top() == NULL) { | |
2177 set_end(NULL); | |
2178 } | |
2179 | |
2180 return node; | |
2181 } | |
2182 | |
2183 | |
2184 FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes, | |
2185 int *node_size) { | |
2186 FreeListNode* node = PickNodeFromList(node_size); | |
2187 if (node != NULL && *node_size < size_in_bytes) { | |
2188 Free(node, *node_size); | |
2189 *node_size = 0; | |
2190 return NULL; | |
2191 } | |
2192 return node; | |
2193 } | |
2194 | |
2195 | |
2196 void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) { | |
2197 node->set_next(top()); | |
2198 set_top(node); | |
2199 if (end_ == NULL) { | |
2200 end_ = node; | |
2201 } | |
2202 available_ += size_in_bytes; | |
2203 } | |
2204 | |
2205 | |
2206 void FreeListCategory::RepairFreeList(Heap* heap) { | |
2207 FreeListNode* n = top(); | |
2208 while (n != NULL) { | |
2209 Map** map_location = reinterpret_cast<Map**>(n->address()); | |
2210 if (*map_location == NULL) { | |
2211 *map_location = heap->free_space_map(); | |
2212 } else { | |
2213 DCHECK(*map_location == heap->free_space_map()); | |
2214 } | |
2215 n = n->next(); | |
2216 } | |
2217 } | |
2218 | |
2219 | |
2220 FreeList::FreeList(PagedSpace* owner) | |
2221 : owner_(owner), heap_(owner->heap()) { | |
2222 Reset(); | |
2223 } | |
2224 | |
2225 | |
2226 intptr_t FreeList::Concatenate(FreeList* free_list) { | |
2227 intptr_t free_bytes = 0; | |
2228 free_bytes += small_list_.Concatenate(free_list->small_list()); | |
2229 free_bytes += medium_list_.Concatenate(free_list->medium_list()); | |
2230 free_bytes += large_list_.Concatenate(free_list->large_list()); | |
2231 free_bytes += huge_list_.Concatenate(free_list->huge_list()); | |
2232 return free_bytes; | |
2233 } | |
2234 | |
2235 | |
2236 void FreeList::Reset() { | |
2237 small_list_.Reset(); | |
2238 medium_list_.Reset(); | |
2239 large_list_.Reset(); | |
2240 huge_list_.Reset(); | |
2241 } | |
2242 | |
2243 | |
2244 int FreeList::Free(Address start, int size_in_bytes) { | |
2245 if (size_in_bytes == 0) return 0; | |
2246 | |
2247 FreeListNode* node = FreeListNode::FromAddress(start); | |
2248 node->set_size(heap_, size_in_bytes); | |
2249 Page* page = Page::FromAddress(start); | |
2250 | |
2251 // Early return to drop too-small blocks on the floor. | |
2252 if (size_in_bytes < kSmallListMin) { | |
2253 page->add_non_available_small_blocks(size_in_bytes); | |
2254 return size_in_bytes; | |
2255 } | |
2256 | |
2257 // Insert other blocks at the head of a free list of the appropriate | |
2258 // magnitude. | |
2259 if (size_in_bytes <= kSmallListMax) { | |
2260 small_list_.Free(node, size_in_bytes); | |
2261 page->add_available_in_small_free_list(size_in_bytes); | |
2262 } else if (size_in_bytes <= kMediumListMax) { | |
2263 medium_list_.Free(node, size_in_bytes); | |
2264 page->add_available_in_medium_free_list(size_in_bytes); | |
2265 } else if (size_in_bytes <= kLargeListMax) { | |
2266 large_list_.Free(node, size_in_bytes); | |
2267 page->add_available_in_large_free_list(size_in_bytes); | |
2268 } else { | |
2269 huge_list_.Free(node, size_in_bytes); | |
2270 page->add_available_in_huge_free_list(size_in_bytes); | |
2271 } | |
2272 | |
2273 DCHECK(IsVeryLong() || available() == SumFreeLists()); | |
2274 return 0; | |
2275 } | |
2276 | |
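// Editor's note (illustrative): the thresholds above (kSmallListMin,
// kSmallListMax, kMediumListMax, kLargeListMax, declared in spaces.h) give
// Free() the following behaviour for a caller such as the sweeper:
//
//   int wasted = free_list->Free(start, size_in_bytes);
//   // wasted == size_in_bytes if the block was below kSmallListMin and was
//   // dropped on the floor (only the page's wasted-bytes counter grows);
//   // wasted == 0 otherwise, and the block is now the head of whichever of
//   // the small/medium/large/huge lists matches its size.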
2277 | |
2278 FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) { | |
2279 FreeListNode* node = NULL; | |
2280 Page* page = NULL; | |
2281 | |
2282 if (size_in_bytes <= kSmallAllocationMax) { | |
2283 node = small_list_.PickNodeFromList(node_size); | |
2284 if (node != NULL) { | |
2285 DCHECK(size_in_bytes <= *node_size); | |
2286 page = Page::FromAddress(node->address()); | |
2287 page->add_available_in_small_free_list(-(*node_size)); | |
2288 DCHECK(IsVeryLong() || available() == SumFreeLists()); | |
2289 return node; | |
2290 } | |
2291 } | |
2292 | |
2293 if (size_in_bytes <= kMediumAllocationMax) { | |
2294 node = medium_list_.PickNodeFromList(node_size); | |
2295 if (node != NULL) { | |
2296 DCHECK(size_in_bytes <= *node_size); | |
2297 page = Page::FromAddress(node->address()); | |
2298 page->add_available_in_medium_free_list(-(*node_size)); | |
2299 DCHECK(IsVeryLong() || available() == SumFreeLists()); | |
2300 return node; | |
2301 } | |
2302 } | |
2303 | |
2304 if (size_in_bytes <= kLargeAllocationMax) { | |
2305 node = large_list_.PickNodeFromList(node_size); | |
2306 if (node != NULL) { | |
2307 DCHECK(size_in_bytes <= *node_size); | |
2308 page = Page::FromAddress(node->address()); | |
2309 page->add_available_in_large_free_list(-(*node_size)); | |
2310 DCHECK(IsVeryLong() || available() == SumFreeLists()); | |
2311 return node; | |
2312 } | |
2313 } | |
2314 | |
2315 int huge_list_available = huge_list_.available(); | |
2316 FreeListNode* top_node = huge_list_.top(); | |
2317 for (FreeListNode** cur = &top_node; | |
2318 *cur != NULL; | |
2319 cur = (*cur)->next_address()) { | |
2320 FreeListNode* cur_node = *cur; | |
2321 while (cur_node != NULL && | |
2322 Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) { | |
2323 int size = reinterpret_cast<FreeSpace*>(cur_node)->Size(); | |
2324 huge_list_available -= size; | |
2325 page = Page::FromAddress(cur_node->address()); | |
2326 page->add_available_in_huge_free_list(-size); | |
2327 cur_node = cur_node->next(); | |
2328 } | |
2329 | |
2330 *cur = cur_node; | |
2331 if (cur_node == NULL) { | |
2332 huge_list_.set_end(NULL); | |
2333 break; | |
2334 } | |
2335 | |
2336 DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map()); | |
2337 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur); | |
2338 int size = cur_as_free_space->Size(); | |
2339 if (size >= size_in_bytes) { | |
2340 // Large enough node found. Unlink it from the list. | |
2341 node = *cur; | |
2342 *cur = node->next(); | |
2343 *node_size = size; | |
2344 huge_list_available -= size; | |
2345 page = Page::FromAddress(node->address()); | |
2346 page->add_available_in_huge_free_list(-size); | |
2347 break; | |
2348 } | |
2349 } | |
2350 | |
2351 huge_list_.set_top(top_node); | |
2352 if (huge_list_.top() == NULL) { | |
2353 huge_list_.set_end(NULL); | |
2354 } | |
2355 huge_list_.set_available(huge_list_available); | |
2356 | |
2357 if (node != NULL) { | |
2358 DCHECK(IsVeryLong() || available() == SumFreeLists()); | |
2359 return node; | |
2360 } | |
2361 | |
2362 if (size_in_bytes <= kSmallListMax) { | |
2363 node = small_list_.PickNodeFromList(size_in_bytes, node_size); | |
2364 if (node != NULL) { | |
2365 DCHECK(size_in_bytes <= *node_size); | |
2366 page = Page::FromAddress(node->address()); | |
2367 page->add_available_in_small_free_list(-(*node_size)); | |
2368 } | |
2369 } else if (size_in_bytes <= kMediumListMax) { | |
2370 node = medium_list_.PickNodeFromList(size_in_bytes, node_size); | |
2371 if (node != NULL) { | |
2372 DCHECK(size_in_bytes <= *node_size); | |
2373 page = Page::FromAddress(node->address()); | |
2374 page->add_available_in_medium_free_list(-(*node_size)); | |
2375 } | |
2376 } else if (size_in_bytes <= kLargeListMax) { | |
2377 node = large_list_.PickNodeFromList(size_in_bytes, node_size); | |
2378 if (node != NULL) { | |
2379 DCHECK(size_in_bytes <= *node_size); | |
2380 page = Page::FromAddress(node->address()); | |
2381 page->add_available_in_large_free_list(-(*node_size)); | |
2382 } | |
2383 } | |
2384 | |
2385 DCHECK(IsVeryLong() || available() == SumFreeLists()); | |
2386 return node; | |
2387 } | |
2388 | |
2389 | |
2390 // Allocation on the old space free list. If it succeeds then a new linear | |
2391 // allocation space has been set up with the top and limit of the space. If | |
2392 // the allocation fails then NULL is returned, and the caller can perform a GC | |
2393 // or allocate a new page before retrying. | |
2394 HeapObject* FreeList::Allocate(int size_in_bytes) { | |
2395 DCHECK(0 < size_in_bytes); | |
2396 DCHECK(size_in_bytes <= kMaxBlockSize); | |
2397 DCHECK(IsAligned(size_in_bytes, kPointerSize)); | |
2398 // Don't free list allocate if there is linear space available. | |
2399 DCHECK(owner_->limit() - owner_->top() < size_in_bytes); | |
2400 | |
2401 int old_linear_size = static_cast<int>(owner_->limit() - owner_->top()); | |
2402 // Mark the old linear allocation area with a free space map so it can be | |
2403 // skipped when scanning the heap. This also puts it back in the free list | |
2404 // if it is big enough. | |
2405 owner_->Free(owner_->top(), old_linear_size); | |
2406 | |
2407 owner_->heap()->incremental_marking()->OldSpaceStep( | |
2408 size_in_bytes - old_linear_size); | |
2409 | |
2410 int new_node_size = 0; | |
2411 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); | |
2412 if (new_node == NULL) { | |
2413 owner_->SetTopAndLimit(NULL, NULL); | |
2414 return NULL; | |
2415 } | |
2416 | |
2417 int bytes_left = new_node_size - size_in_bytes; | |
2418 DCHECK(bytes_left >= 0); | |
2419 | |
2420 #ifdef DEBUG | |
2421 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { | |
2422 reinterpret_cast<Object**>(new_node->address())[i] = | |
2423 Smi::FromInt(kCodeZapValue); | |
2424 } | |
2425 #endif | |
2426 | |
2427 // The old-space-step might have finished sweeping and restarted marking. | |
2428 // Verify that it did not turn the page of the new node into an evacuation | |
2429 // candidate. | |
2430 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); | |
2431 | |
2432 const int kThreshold = IncrementalMarking::kAllocatedThreshold; | |
2433 | |
2434 // Memory in the linear allocation area is counted as allocated. We may free | |
2435 // a little of this again immediately - see below. | |
2436 owner_->Allocate(new_node_size); | |
2437 | |
2438 if (owner_->heap()->inline_allocation_disabled()) { | |
2439 // Keep the linear allocation area empty if requested to do so, just | |
2440     // return the area to the free list instead. | |
2441 owner_->Free(new_node->address() + size_in_bytes, bytes_left); | |
2442 DCHECK(owner_->top() == NULL && owner_->limit() == NULL); | |
2443 } else if (bytes_left > kThreshold && | |
2444 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && | |
2445 FLAG_incremental_marking_steps) { | |
2446 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); | |
2447 // We don't want to give too large linear areas to the allocator while | |
2448 // incremental marking is going on, because we won't check again whether | |
2449 // we want to do another increment until the linear area is used up. | |
2450 owner_->Free(new_node->address() + size_in_bytes + linear_size, | |
2451 new_node_size - size_in_bytes - linear_size); | |
2452 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, | |
2453 new_node->address() + size_in_bytes + linear_size); | |
2454 } else if (bytes_left > 0) { | |
2455 // Normally we give the rest of the node to the allocator as its new | |
2456 // linear allocation area. | |
2457 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, | |
2458 new_node->address() + new_node_size); | |
2459 } else { | |
2460     // TODO(gc) Try not freeing the linear allocation region when bytes_left | |
2461     // is zero. | |
2462 owner_->SetTopAndLimit(NULL, NULL); | |
2463 } | |
2464 | |
2465 return new_node; | |
2466 } | |
2467 | |
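// Editor's note (usage sketch mirroring the contract described above; one
// real caller is PagedSpace::SlowAllocateRaw() later in this file):
//
//   HeapObject* object = free_list_.Allocate(size_in_bytes);
//   if (object == NULL) {
//     // No suitable node was found and top/limit are now NULL: the caller
//     // must refill the free list (sweep), expand the space, or trigger a
//     // GC before retrying.
//   } else {
//     // 'object' points to at least size_in_bytes of raw memory; any tail
//     // of the chosen node has either become the new linear allocation
//     // area or been returned to the free list.
//   }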
2468 | |
2469 intptr_t FreeList::EvictFreeListItems(Page* p) { | |
2470 intptr_t sum = huge_list_.EvictFreeListItemsInList(p); | |
2471 p->set_available_in_huge_free_list(0); | |
2472 | |
2473 if (sum < p->area_size()) { | |
2474 sum += small_list_.EvictFreeListItemsInList(p) + | |
2475 medium_list_.EvictFreeListItemsInList(p) + | |
2476 large_list_.EvictFreeListItemsInList(p); | |
2477 p->set_available_in_small_free_list(0); | |
2478 p->set_available_in_medium_free_list(0); | |
2479 p->set_available_in_large_free_list(0); | |
2480 } | |
2481 | |
2482 return sum; | |
2483 } | |
2484 | |
2485 | |
2486 bool FreeList::ContainsPageFreeListItems(Page* p) { | |
2487 return huge_list_.EvictFreeListItemsInList(p) || | |
2488 small_list_.EvictFreeListItemsInList(p) || | |
2489 medium_list_.EvictFreeListItemsInList(p) || | |
2490 large_list_.EvictFreeListItemsInList(p); | |
2491 } | |
2492 | |
2493 | |
2494 void FreeList::RepairLists(Heap* heap) { | |
2495 small_list_.RepairFreeList(heap); | |
2496 medium_list_.RepairFreeList(heap); | |
2497 large_list_.RepairFreeList(heap); | |
2498 huge_list_.RepairFreeList(heap); | |
2499 } | |
2500 | |
2501 | |
2502 #ifdef DEBUG | |
2503 intptr_t FreeListCategory::SumFreeList() { | |
2504 intptr_t sum = 0; | |
2505 FreeListNode* cur = top(); | |
2506 while (cur != NULL) { | |
2507 DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map()); | |
2508 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); | |
2509 sum += cur_as_free_space->nobarrier_size(); | |
2510 cur = cur->next(); | |
2511 } | |
2512 return sum; | |
2513 } | |
2514 | |
2515 | |
2516 static const int kVeryLongFreeList = 500; | |
2517 | |
2518 | |
2519 int FreeListCategory::FreeListLength() { | |
2520 int length = 0; | |
2521 FreeListNode* cur = top(); | |
2522 while (cur != NULL) { | |
2523 length++; | |
2524 cur = cur->next(); | |
2525 if (length == kVeryLongFreeList) return length; | |
2526 } | |
2527 return length; | |
2528 } | |
2529 | |
2530 | |
2531 bool FreeList::IsVeryLong() { | |
2532 if (small_list_.FreeListLength() == kVeryLongFreeList) return true; | |
2533 if (medium_list_.FreeListLength() == kVeryLongFreeList) return true; | |
2534 if (large_list_.FreeListLength() == kVeryLongFreeList) return true; | |
2535 if (huge_list_.FreeListLength() == kVeryLongFreeList) return true; | |
2536 return false; | |
2537 } | |
2538 | |
2539 | |
2540 // This can take a very long time because it is linear in the number of entries | |
2541 // on the free list, so it should not be called if FreeListLength returns | |
2542 // kVeryLongFreeList. | |
2543 intptr_t FreeList::SumFreeLists() { | |
2544 intptr_t sum = small_list_.SumFreeList(); | |
2545 sum += medium_list_.SumFreeList(); | |
2546 sum += large_list_.SumFreeList(); | |
2547 sum += huge_list_.SumFreeList(); | |
2548 return sum; | |
2549 } | |
2550 #endif | |
2551 | |
2552 | |
2553 // ----------------------------------------------------------------------------- | |
2554 // OldSpace implementation | |
2555 | |
2556 void PagedSpace::PrepareForMarkCompact() { | |
2557 // We don't have a linear allocation area while sweeping. It will be restored | |
2558 // on the first allocation after the sweep. | |
2559 EmptyAllocationInfo(); | |
2560 | |
2561 // This counter will be increased for pages which will be swept by the | |
2562 // sweeper threads. | |
2563 unswept_free_bytes_ = 0; | |
2564 | |
2565 // Clear the free list before a full GC---it will be rebuilt afterward. | |
2566 free_list_.Reset(); | |
2567 } | |
2568 | |
2569 | |
2570 intptr_t PagedSpace::SizeOfObjects() { | |
2571 DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() || | |
2572 (unswept_free_bytes_ == 0)); | |
2573 return Size() - unswept_free_bytes_ - (limit() - top()); | |
2574 } | |
2575 | |
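// Editor's note (illustrative arithmetic): Size() still includes the free
// bytes sitting on pages the concurrent sweeper has not processed yet
// (unswept_free_bytes_) and the whole current linear allocation area, so the
// live-object estimate subtracts the unswept free bytes and the unused tail
// of the linear area.  For example, with Size() == 4096 KB, 512 KB of
// unswept free bytes and 64 KB of linear area left, SizeOfObjects() reports
// 4096 KB - 512 KB - 64 KB == 3520 KB.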
2576 | |
2577 // After booting, we have created a map that represents free space | |
2578 // on the heap. If there was already a free list then the elements on it | |
2579 // were created with the wrong FreeSpaceMap (normally NULL), so we need to | |
2580 // fix them. | |
2581 void PagedSpace::RepairFreeListsAfterBoot() { | |
2582 free_list_.RepairLists(heap()); | |
2583 } | |
2584 | |
2585 | |
2586 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { | |
2587 if (allocation_info_.top() >= allocation_info_.limit()) return; | |
2588 | |
2589 if (Page::FromAllocationTop(allocation_info_.top())-> | |
2590 IsEvacuationCandidate()) { | |
2591 // Create filler object to keep page iterable if it was iterable. | |
2592 int remaining = | |
2593 static_cast<int>(allocation_info_.limit() - allocation_info_.top()); | |
2594 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining); | |
2595 | |
2596 allocation_info_.set_top(NULL); | |
2597 allocation_info_.set_limit(NULL); | |
2598 } | |
2599 } | |
2600 | |
2601 | |
2602 HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation( | |
2603 int size_in_bytes) { | |
2604 MarkCompactCollector* collector = heap()->mark_compact_collector(); | |
2605 if (collector->sweeping_in_progress()) { | |
2606 // Wait for the sweeper threads here and complete the sweeping phase. | |
2607 collector->EnsureSweepingCompleted(); | |
2608 | |
2609 // After waiting for the sweeper threads, there may be new free-list | |
2610 // entries. | |
2611 return free_list_.Allocate(size_in_bytes); | |
2612 } | |
2613 return NULL; | |
2614 } | |
2615 | |
2616 | |
2617 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { | |
2618 // Allocation in this space has failed. | |
2619 | |
2620 MarkCompactCollector* collector = heap()->mark_compact_collector(); | |
2621 // Sweeping is still in progress. | |
2622 if (collector->sweeping_in_progress()) { | |
2623 // First try to refill the free-list, concurrent sweeper threads | |
2624 // may have freed some objects in the meantime. | |
2625 collector->RefillFreeList(this); | |
2626 | |
2627 // Retry the free list allocation. | |
2628 HeapObject* object = free_list_.Allocate(size_in_bytes); | |
2629 if (object != NULL) return object; | |
2630 | |
2631 // If sweeping is still in progress try to sweep pages on the main thread. | |
2632 int free_chunk = | |
2633 collector->SweepInParallel(this, size_in_bytes); | |
2634 collector->RefillFreeList(this); | |
2635 if (free_chunk >= size_in_bytes) { | |
2636 HeapObject* object = free_list_.Allocate(size_in_bytes); | |
2637 // We should be able to allocate an object here since we just freed that | |
2638 // much memory. | |
2639 DCHECK(object != NULL); | |
2640 if (object != NULL) return object; | |
2641 } | |
2642 } | |
2643 | |
2644 // Free list allocation failed and there is no next page. Fail if we have | |
2645 // hit the old generation size limit that should cause a garbage | |
2646 // collection. | |
2647 if (!heap()->always_allocate() | |
2648 && heap()->OldGenerationAllocationLimitReached()) { | |
2649 // If sweeper threads are active, wait for them at that point and steal | |
2650     // elements from their free-lists. | |
2651 HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); | |
2652 if (object != NULL) return object; | |
2653 } | |
2654 | |
2655 // Try to expand the space and allocate in the new next page. | |
2656 if (Expand()) { | |
2657 DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available()); | |
2658 return free_list_.Allocate(size_in_bytes); | |
2659 } | |
2660 | |
2661 // If sweeper threads are active, wait for them at that point and steal | |
2662   // elements from their free-lists. Allocation may still fail here, which | |
2663 // would indicate that there is not enough memory for the given allocation. | |
2664 return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes); | |
2665 } | |
2666 | |
2667 | |
2668 #ifdef DEBUG | |
2669 void PagedSpace::ReportCodeStatistics(Isolate* isolate) { | |
2670 CommentStatistic* comments_statistics = | |
2671 isolate->paged_space_comments_statistics(); | |
2672 ReportCodeKindStatistics(isolate->code_kind_statistics()); | |
2673 PrintF("Code comment statistics (\" [ comment-txt : size/ " | |
2674 "count (average)\"):\n"); | |
2675 for (int i = 0; i <= CommentStatistic::kMaxComments; i++) { | |
2676 const CommentStatistic& cs = comments_statistics[i]; | |
2677 if (cs.size > 0) { | |
2678 PrintF(" %-30s: %10d/%6d (%d)\n", cs.comment, cs.size, cs.count, | |
2679 cs.size/cs.count); | |
2680 } | |
2681 } | |
2682 PrintF("\n"); | |
2683 } | |
2684 | |
2685 | |
2686 void PagedSpace::ResetCodeStatistics(Isolate* isolate) { | |
2687 CommentStatistic* comments_statistics = | |
2688 isolate->paged_space_comments_statistics(); | |
2689 ClearCodeKindStatistics(isolate->code_kind_statistics()); | |
2690 for (int i = 0; i < CommentStatistic::kMaxComments; i++) { | |
2691 comments_statistics[i].Clear(); | |
2692 } | |
2693 comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown"; | |
2694 comments_statistics[CommentStatistic::kMaxComments].size = 0; | |
2695 comments_statistics[CommentStatistic::kMaxComments].count = 0; | |
2696 } | |
2697 | |
2698 | |
2699 // Adds a comment to the 'comments_statistics' table. Performance is OK as | |
2700 // long as 'kMaxComments' is small. | |
2701 static void EnterComment(Isolate* isolate, const char* comment, int delta) { | |
2702 CommentStatistic* comments_statistics = | |
2703 isolate->paged_space_comments_statistics(); | |
2704 // Do not count empty comments | |
2705 if (delta <= 0) return; | |
2706 CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments]; | |
2707 // Search for a free or matching entry in 'comments_statistics': 'cs' | |
2708 // points to result. | |
2709 for (int i = 0; i < CommentStatistic::kMaxComments; i++) { | |
2710 if (comments_statistics[i].comment == NULL) { | |
2711 cs = &comments_statistics[i]; | |
2712 cs->comment = comment; | |
2713 break; | |
2714 } else if (strcmp(comments_statistics[i].comment, comment) == 0) { | |
2715 cs = &comments_statistics[i]; | |
2716 break; | |
2717 } | |
2718 } | |
2719 // Update entry for 'comment' | |
2720 cs->size += delta; | |
2721 cs->count += 1; | |
2722 } | |
2723 | |
2724 | |
2725 // Call for each nested comment start (start marked with '[ xxx', end marked | |
2726 // with ']'). RelocIterator 'it' must point to a comment reloc info. | |
2727 static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) { | |
2728 DCHECK(!it->done()); | |
2729 DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT); | |
2730 const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data()); | |
2731 if (tmp[0] != '[') { | |
2732 // Not a nested comment; skip | |
2733 return; | |
2734 } | |
2735 | |
2736 // Search for end of nested comment or a new nested comment | |
2737 const char* const comment_txt = | |
2738 reinterpret_cast<const char*>(it->rinfo()->data()); | |
2739 const byte* prev_pc = it->rinfo()->pc(); | |
2740 int flat_delta = 0; | |
2741 it->next(); | |
2742 while (true) { | |
2743     // All nested comments must be terminated properly, so we will | |
2744     // eventually exit this loop. | |
2745 DCHECK(!it->done()); | |
2746 if (it->rinfo()->rmode() == RelocInfo::COMMENT) { | |
2747 const char* const txt = | |
2748 reinterpret_cast<const char*>(it->rinfo()->data()); | |
2749 flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc); | |
2750 if (txt[0] == ']') break; // End of nested comment | |
2751 // A new comment | |
2752 CollectCommentStatistics(isolate, it); | |
2753       // Skip code that was covered by the previous comment. | |
2754 prev_pc = it->rinfo()->pc(); | |
2755 } | |
2756 it->next(); | |
2757 } | |
2758 EnterComment(isolate, comment_txt, flat_delta); | |
2759 } | |
2760 | |
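// Editor's note (worked example with hypothetical comment texts): for code
// annotated with nested assembler comments such as
//
//   [ Outer          ; outer region starts
//     ...code A...
//     [ Inner        ; nested region, handled by the recursive call above
//       ...code B...
//     ]
//     ...code C...
//   ]
//
// the bytes of code B are attributed to "[ Inner" and only the bytes of A
// and C (the flat delta) to "[ Outer", so nested regions are never counted
// twice by EnterComment().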
2761 | |
2762 // Collects code size statistics: | |
2763 // - by code kind | |
2764 // - by code comment | |
2765 void PagedSpace::CollectCodeStatistics() { | |
2766 Isolate* isolate = heap()->isolate(); | |
2767 HeapObjectIterator obj_it(this); | |
2768 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { | |
2769 if (obj->IsCode()) { | |
2770 Code* code = Code::cast(obj); | |
2771 isolate->code_kind_statistics()[code->kind()] += code->Size(); | |
2772 RelocIterator it(code); | |
2773 int delta = 0; | |
2774 const byte* prev_pc = code->instruction_start(); | |
2775 while (!it.done()) { | |
2776 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { | |
2777 delta += static_cast<int>(it.rinfo()->pc() - prev_pc); | |
2778 CollectCommentStatistics(isolate, &it); | |
2779 prev_pc = it.rinfo()->pc(); | |
2780 } | |
2781 it.next(); | |
2782 } | |
2783 | |
2784 DCHECK(code->instruction_start() <= prev_pc && | |
2785 prev_pc <= code->instruction_end()); | |
2786 delta += static_cast<int>(code->instruction_end() - prev_pc); | |
2787 EnterComment(isolate, "NoComment", delta); | |
2788 } | |
2789 } | |
2790 } | |
2791 | |
2792 | |
2793 void PagedSpace::ReportStatistics() { | |
2794 int pct = static_cast<int>(Available() * 100 / Capacity()); | |
2795 PrintF(" capacity: %" V8_PTR_PREFIX "d" | |
2796 ", waste: %" V8_PTR_PREFIX "d" | |
2797 ", available: %" V8_PTR_PREFIX "d, %%%d\n", | |
2798 Capacity(), Waste(), Available(), pct); | |
2799 | |
2800 if (!swept_precisely_) return; | |
2801 ClearHistograms(heap()->isolate()); | |
2802 HeapObjectIterator obj_it(this); | |
2803 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) | |
2804 CollectHistogramInfo(obj); | |
2805 ReportHistogram(heap()->isolate(), true); | |
2806 } | |
2807 #endif | |
2808 | |
2809 | |
2810 // ----------------------------------------------------------------------------- | |
2811 // MapSpace implementation | |
2812 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless | |
2813 // there is at least one non-inlined virtual function. I would prefer to hide | |
2814 // the VerifyObject definition behind VERIFY_HEAP. | |
2815 | |
2816 void MapSpace::VerifyObject(HeapObject* object) { | |
2817 CHECK(object->IsMap()); | |
2818 } | |
2819 | |
2820 | |
2821 // ----------------------------------------------------------------------------- | |
2822 // CellSpace and PropertyCellSpace implementation | |
2823 // TODO(mvstanton): this is weird...the compiler can't make a vtable unless | |
2824 // there is at least one non-inlined virtual function. I would prefer to hide | |
2825 // the VerifyObject definition behind VERIFY_HEAP. | |
2826 | |
2827 void CellSpace::VerifyObject(HeapObject* object) { | |
2828 CHECK(object->IsCell()); | |
2829 } | |
2830 | |
2831 | |
2832 void PropertyCellSpace::VerifyObject(HeapObject* object) { | |
2833 CHECK(object->IsPropertyCell()); | |
2834 } | |
2835 | |
2836 | |
2837 // ----------------------------------------------------------------------------- | |
2838 // LargeObjectIterator | |
2839 | |
2840 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { | |
2841 current_ = space->first_page_; | |
2842 size_func_ = NULL; | |
2843 } | |
2844 | |
2845 | |
2846 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space, | |
2847 HeapObjectCallback size_func) { | |
2848 current_ = space->first_page_; | |
2849 size_func_ = size_func; | |
2850 } | |
2851 | |
2852 | |
2853 HeapObject* LargeObjectIterator::Next() { | |
2854 if (current_ == NULL) return NULL; | |
2855 | |
2856 HeapObject* object = current_->GetObject(); | |
2857 current_ = current_->next_page(); | |
2858 return object; | |
2859 } | |
2860 | |
2861 | |
2862 // ----------------------------------------------------------------------------- | |
2863 // LargeObjectSpace | |
2864 static bool ComparePointers(void* key1, void* key2) { | |
2865 return key1 == key2; | |
2866 } | |
2867 | |
2868 | |
2869 LargeObjectSpace::LargeObjectSpace(Heap* heap, | |
2870 intptr_t max_capacity, | |
2871 AllocationSpace id) | |
2872 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis | |
2873 max_capacity_(max_capacity), | |
2874 first_page_(NULL), | |
2875 size_(0), | |
2876 page_count_(0), | |
2877 objects_size_(0), | |
2878 chunk_map_(ComparePointers, 1024) {} | |
2879 | |
2880 | |
2881 bool LargeObjectSpace::SetUp() { | |
2882 first_page_ = NULL; | |
2883 size_ = 0; | |
2884 maximum_committed_ = 0; | |
2885 page_count_ = 0; | |
2886 objects_size_ = 0; | |
2887 chunk_map_.Clear(); | |
2888 return true; | |
2889 } | |
2890 | |
2891 | |
2892 void LargeObjectSpace::TearDown() { | |
2893 while (first_page_ != NULL) { | |
2894 LargePage* page = first_page_; | |
2895 first_page_ = first_page_->next_page(); | |
2896 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address())); | |
2897 | |
2898 ObjectSpace space = static_cast<ObjectSpace>(1 << identity()); | |
2899 heap()->isolate()->memory_allocator()->PerformAllocationCallback( | |
2900 space, kAllocationActionFree, page->size()); | |
2901 heap()->isolate()->memory_allocator()->Free(page); | |
2902 } | |
2903 SetUp(); | |
2904 } | |
2905 | |
2906 | |
2907 AllocationResult LargeObjectSpace::AllocateRaw(int object_size, | |
2908 Executability executable) { | |
2909 // Check if we want to force a GC before growing the old space further. | |
2910 // If so, fail the allocation. | |
2911 if (!heap()->always_allocate() && | |
2912 heap()->OldGenerationAllocationLimitReached()) { | |
2913 return AllocationResult::Retry(identity()); | |
2914 } | |
2915 | |
2916 if (Size() + object_size > max_capacity_) { | |
2917 return AllocationResult::Retry(identity()); | |
2918 } | |
2919 | |
2920 LargePage* page = heap()->isolate()->memory_allocator()-> | |
2921 AllocateLargePage(object_size, this, executable); | |
2922 if (page == NULL) return AllocationResult::Retry(identity()); | |
2923 DCHECK(page->area_size() >= object_size); | |
2924 | |
2925 size_ += static_cast<int>(page->size()); | |
2926 objects_size_ += object_size; | |
2927 page_count_++; | |
2928 page->set_next_page(first_page_); | |
2929 first_page_ = page; | |
2930 | |
2931 if (size_ > maximum_committed_) { | |
2932 maximum_committed_ = size_; | |
2933 } | |
2934 | |
2935 // Register all MemoryChunk::kAlignment-aligned chunks covered by | |
2936 // this large page in the chunk map. | |
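  // Editor's note (worked example, assuming MemoryChunk::kAlignment == 1 MB):
  // a large page starting at 0x40000000 with size() just over 3 MB yields
  // base == 0x400 and limit == 0x403, so the loop below registers four keys
  // and FindPage() can later map any interior address back to this page with
  // a single lookup on address / kAlignment.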
2937 uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment; | |
2938 uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment; | |
2939 for (uintptr_t key = base; key <= limit; key++) { | |
2940 HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key), | |
2941 static_cast<uint32_t>(key), | |
2942 true); | |
2943 DCHECK(entry != NULL); | |
2944 entry->value = page; | |
2945 } | |
2946 | |
2947 HeapObject* object = page->GetObject(); | |
2948 | |
2949 if (Heap::ShouldZapGarbage()) { | |
2950 // Make the object consistent so the heap can be verified in OldSpaceStep. | |
2951 // We only need to do this in debug builds or if verify_heap is on. | |
2952 reinterpret_cast<Object**>(object->address())[0] = | |
2953 heap()->fixed_array_map(); | |
2954 reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0); | |
2955 } | |
2956 | |
2957 heap()->incremental_marking()->OldSpaceStep(object_size); | |
2958 return object; | |
2959 } | |
2960 | |
2961 | |
2962 size_t LargeObjectSpace::CommittedPhysicalMemory() { | |
2963 if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory(); | |
2964 size_t size = 0; | |
2965 LargePage* current = first_page_; | |
2966 while (current != NULL) { | |
2967 size += current->CommittedPhysicalMemory(); | |
2968 current = current->next_page(); | |
2969 } | |
2970 return size; | |
2971 } | |
2972 | |
2973 | |
2974 // GC support | |
2975 Object* LargeObjectSpace::FindObject(Address a) { | |
2976 LargePage* page = FindPage(a); | |
2977 if (page != NULL) { | |
2978 return page->GetObject(); | |
2979 } | |
2980 return Smi::FromInt(0); // Signaling not found. | |
2981 } | |
2982 | |
2983 | |
2984 LargePage* LargeObjectSpace::FindPage(Address a) { | |
2985 uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment; | |
2986 HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key), | |
2987 static_cast<uint32_t>(key), | |
2988 false); | |
2989 if (e != NULL) { | |
2990 DCHECK(e->value != NULL); | |
2991 LargePage* page = reinterpret_cast<LargePage*>(e->value); | |
2992 DCHECK(page->is_valid()); | |
2993 if (page->Contains(a)) { | |
2994 return page; | |
2995 } | |
2996 } | |
2997 return NULL; | |
2998 } | |
2999 | |
3000 | |
3001 void LargeObjectSpace::FreeUnmarkedObjects() { | |
3002 LargePage* previous = NULL; | |
3003 LargePage* current = first_page_; | |
3004 while (current != NULL) { | |
3005 HeapObject* object = current->GetObject(); | |
3006     // Can this large page contain pointers to non-trivial objects? No other | |
3007 // pointer object is this big. | |
3008 bool is_pointer_object = object->IsFixedArray(); | |
3009 MarkBit mark_bit = Marking::MarkBitFrom(object); | |
3010 if (mark_bit.Get()) { | |
3011 mark_bit.Clear(); | |
3012 Page::FromAddress(object->address())->ResetProgressBar(); | |
3013 Page::FromAddress(object->address())->ResetLiveBytes(); | |
3014 previous = current; | |
3015 current = current->next_page(); | |
3016 } else { | |
3017 LargePage* page = current; | |
3018 // Cut the chunk out from the chunk list. | |
3019 current = current->next_page(); | |
3020 if (previous == NULL) { | |
3021 first_page_ = current; | |
3022 } else { | |
3023 previous->set_next_page(current); | |
3024 } | |
3025 | |
3026 // Free the chunk. | |
3027 heap()->mark_compact_collector()->ReportDeleteIfNeeded( | |
3028 object, heap()->isolate()); | |
3029 size_ -= static_cast<int>(page->size()); | |
3030 objects_size_ -= object->Size(); | |
3031 page_count_--; | |
3032 | |
3033 // Remove entries belonging to this page. | |
3034 // Use variable alignment to help pass length check (<= 80 characters) | |
3035 // of single line in tools/presubmit.py. | |
3036 const intptr_t alignment = MemoryChunk::kAlignment; | |
3037 uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment; | |
3038 uintptr_t limit = base + (page->size()-1)/alignment; | |
3039 for (uintptr_t key = base; key <= limit; key++) { | |
3040 chunk_map_.Remove(reinterpret_cast<void*>(key), | |
3041 static_cast<uint32_t>(key)); | |
3042 } | |
3043 | |
3044 if (is_pointer_object) { | |
3045 heap()->QueueMemoryChunkForFree(page); | |
3046 } else { | |
3047 heap()->isolate()->memory_allocator()->Free(page); | |
3048 } | |
3049 } | |
3050 } | |
3051 heap()->FreeQueuedChunks(); | |
3052 } | |
3053 | |
3054 | |
3055 bool LargeObjectSpace::Contains(HeapObject* object) { | |
3056 Address address = object->address(); | |
3057 MemoryChunk* chunk = MemoryChunk::FromAddress(address); | |
3058 | |
3059 bool owned = (chunk->owner() == this); | |
3060 | |
3061 SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject()); | |
3062 | |
3063 return owned; | |
3064 } | |
3065 | |
3066 | |
3067 #ifdef VERIFY_HEAP | |
3068 // We do not assume that the large object iterator works, because it depends | |
3069 // on the invariants we are checking during verification. | |
3070 void LargeObjectSpace::Verify() { | |
3071 for (LargePage* chunk = first_page_; | |
3072 chunk != NULL; | |
3073 chunk = chunk->next_page()) { | |
3074 // Each chunk contains an object that starts at the large object page's | |
3075 // object area start. | |
3076 HeapObject* object = chunk->GetObject(); | |
3077 Page* page = Page::FromAddress(object->address()); | |
3078 CHECK(object->address() == page->area_start()); | |
3079 | |
3080 // The first word should be a map, and we expect all map pointers to be | |
3081 // in map space. | |
3082 Map* map = object->map(); | |
3083 CHECK(map->IsMap()); | |
3084 CHECK(heap()->map_space()->Contains(map)); | |
3085 | |
3086 // We have only code, sequential strings, external strings | |
3087 // (sequential strings that have been morphed into external | |
3088 // strings), fixed arrays, fixed double arrays, byte arrays, and | |
3089 // constant pool arrays in the large object space. | |
3090 CHECK(object->IsCode() || object->IsSeqString() || | |
3091 object->IsExternalString() || object->IsFixedArray() || | |
3092 object->IsFixedDoubleArray() || object->IsByteArray() || | |
3093 object->IsConstantPoolArray()); | |
3094 | |
3095 // The object itself should look OK. | |
3096 object->ObjectVerify(); | |
3097 | |
3098 // Byte arrays and strings don't have interior pointers. | |
3099 if (object->IsCode()) { | |
3100 VerifyPointersVisitor code_visitor; | |
3101 object->IterateBody(map->instance_type(), | |
3102 object->Size(), | |
3103 &code_visitor); | |
3104 } else if (object->IsFixedArray()) { | |
3105 FixedArray* array = FixedArray::cast(object); | |
3106 for (int j = 0; j < array->length(); j++) { | |
3107 Object* element = array->get(j); | |
3108 if (element->IsHeapObject()) { | |
3109 HeapObject* element_object = HeapObject::cast(element); | |
3110 CHECK(heap()->Contains(element_object)); | |
3111 CHECK(element_object->map()->IsMap()); | |
3112 } | |
3113 } | |
3114 } | |
3115 } | |
3116 } | |
3117 #endif | |
3118 | |
3119 | |
3120 #ifdef DEBUG | |
3121 void LargeObjectSpace::Print() { | |
3122 OFStream os(stdout); | |
3123 LargeObjectIterator it(this); | |
3124 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | |
3125 obj->Print(os); | |
3126 } | |
3127 } | |
3128 | |
3129 | |
3130 void LargeObjectSpace::ReportStatistics() { | |
3131 PrintF(" size: %" V8_PTR_PREFIX "d\n", size_); | |
3132 int num_objects = 0; | |
3133 ClearHistograms(heap()->isolate()); | |
3134 LargeObjectIterator it(this); | |
3135 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | |
3136 num_objects++; | |
3137 CollectHistogramInfo(obj); | |
3138 } | |
3139 | |
3140 PrintF(" number of objects %d, " | |
3141 "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_); | |
3142 if (num_objects > 0) ReportHistogram(heap()->isolate(), false); | |
3143 } | |
3144 | |
3145 | |
3146 void LargeObjectSpace::CollectCodeStatistics() { | |
3147 Isolate* isolate = heap()->isolate(); | |
3148 LargeObjectIterator obj_it(this); | |
3149 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { | |
3150 if (obj->IsCode()) { | |
3151 Code* code = Code::cast(obj); | |
3152 isolate->code_kind_statistics()[code->kind()] += code->Size(); | |
3153 } | |
3154 } | |
3155 } | |
3156 | |
3157 | |
3158 void Page::Print() { | |
3159 // Make a best effort to print the objects in the page. | |
3160 PrintF("Page@%p in %s\n", | |
3161 this->address(), | |
3162 AllocationSpaceName(this->owner()->identity())); | |
3163 printf(" --------------------------------------\n"); | |
3164 HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction()); | |
3165 unsigned mark_size = 0; | |
3166 for (HeapObject* object = objects.Next(); | |
3167 object != NULL; | |
3168 object = objects.Next()) { | |
3169 bool is_marked = Marking::MarkBitFrom(object).Get(); | |
3170 PrintF(" %c ", (is_marked ? '!' : ' ')); // Indent a little. | |
3171 if (is_marked) { | |
3172 mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object); | |
3173 } | |
3174 object->ShortPrint(); | |
3175 PrintF("\n"); | |
3176 } | |
3177 printf(" --------------------------------------\n"); | |
3178 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | |
3179 } | |
3180 | |
3181 #endif // DEBUG | |
3182 | |
3183 } } // namespace v8::internal | |