
Side by Side Diff: src/heap.cc

Issue 8139027: Version 3.6.5 (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: '' Created 9 years, 2 months ago
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 18 matching lines...)
29 29
30 #include "accessors.h" 30 #include "accessors.h"
31 #include "api.h" 31 #include "api.h"
32 #include "bootstrapper.h" 32 #include "bootstrapper.h"
33 #include "codegen.h" 33 #include "codegen.h"
34 #include "compilation-cache.h" 34 #include "compilation-cache.h"
35 #include "debug.h" 35 #include "debug.h"
36 #include "deoptimizer.h" 36 #include "deoptimizer.h"
37 #include "global-handles.h" 37 #include "global-handles.h"
38 #include "heap-profiler.h" 38 #include "heap-profiler.h"
39 #include "incremental-marking.h"
39 #include "liveobjectlist-inl.h" 40 #include "liveobjectlist-inl.h"
40 #include "mark-compact.h" 41 #include "mark-compact.h"
41 #include "natives.h" 42 #include "natives.h"
42 #include "objects-visiting.h" 43 #include "objects-visiting.h"
44 #include "objects-visiting-inl.h"
43 #include "runtime-profiler.h" 45 #include "runtime-profiler.h"
44 #include "scopeinfo.h" 46 #include "scopeinfo.h"
45 #include "snapshot.h" 47 #include "snapshot.h"
48 #include "store-buffer.h"
46 #include "v8threads.h" 49 #include "v8threads.h"
47 #include "vm-state-inl.h" 50 #include "vm-state-inl.h"
48 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP 51 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
49 #include "regexp-macro-assembler.h" 52 #include "regexp-macro-assembler.h"
50 #include "arm/regexp-macro-assembler-arm.h" 53 #include "arm/regexp-macro-assembler-arm.h"
51 #endif 54 #endif
52 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP 55 #if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
53 #include "regexp-macro-assembler.h" 56 #include "regexp-macro-assembler.h"
54 #include "mips/regexp-macro-assembler-mips.h" 57 #include "mips/regexp-macro-assembler-mips.h"
55 #endif 58 #endif
56 59
57 namespace v8 { 60 namespace v8 {
58 namespace internal { 61 namespace internal {
59 62
60 63
61 static const intptr_t kMinimumPromotionLimit = 2 * MB;
62 static const intptr_t kMinimumAllocationLimit = 8 * MB;
63
64
65 static Mutex* gc_initializer_mutex = OS::CreateMutex(); 64 static Mutex* gc_initializer_mutex = OS::CreateMutex();
66 65
67 66
68 Heap::Heap() 67 Heap::Heap()
69 : isolate_(NULL), 68 : isolate_(NULL),
70 // semispace_size_ should be a power of 2 and old_generation_size_ should be 69 // semispace_size_ should be a power of 2 and old_generation_size_ should be
71 // a multiple of Page::kPageSize. 70 // a multiple of Page::kPageSize.
72 #if defined(ANDROID) 71 #if defined(ANDROID)
73 reserved_semispace_size_(2*MB), 72 #define LUMP_OF_MEMORY (128 * KB)
74 max_semispace_size_(2*MB),
75 initial_semispace_size_(128*KB),
76 max_old_generation_size_(192*MB),
77 max_executable_size_(max_old_generation_size_),
78 code_range_size_(0), 73 code_range_size_(0),
79 #elif defined(V8_TARGET_ARCH_X64) 74 #elif defined(V8_TARGET_ARCH_X64)
80 reserved_semispace_size_(16*MB), 75 #define LUMP_OF_MEMORY (2 * MB)
81 max_semispace_size_(16*MB),
82 initial_semispace_size_(1*MB),
83 max_old_generation_size_(1400*MB),
84 max_executable_size_(256*MB),
85 code_range_size_(512*MB), 76 code_range_size_(512*MB),
86 #else 77 #else
87 reserved_semispace_size_(8*MB), 78 #define LUMP_OF_MEMORY MB
88 max_semispace_size_(8*MB),
89 initial_semispace_size_(512*KB),
90 max_old_generation_size_(700*MB),
91 max_executable_size_(128*MB),
92 code_range_size_(0), 79 code_range_size_(0),
93 #endif 80 #endif
81 reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
82 max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
83 initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
84 max_old_generation_size_(700ul * LUMP_OF_MEMORY),
85 max_executable_size_(128l * LUMP_OF_MEMORY),
86
94 // Variables set based on semispace_size_ and old_generation_size_ in 87 // Variables set based on semispace_size_ and old_generation_size_ in
95 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_) 88 // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
96 // Will be 4 * reserved_semispace_size_ to ensure that young 89 // Will be 4 * reserved_semispace_size_ to ensure that young
97 // generation can be aligned to its size. 90 // generation can be aligned to its size.
98 survived_since_last_expansion_(0), 91 survived_since_last_expansion_(0),
99 sweep_generation_(0), 92 sweep_generation_(0),
100 always_allocate_scope_depth_(0), 93 always_allocate_scope_depth_(0),
101 linear_allocation_scope_depth_(0), 94 linear_allocation_scope_depth_(0),
102 contexts_disposed_(0), 95 contexts_disposed_(0),
96 scan_on_scavenge_pages_(0),
103 new_space_(this), 97 new_space_(this),
104 old_pointer_space_(NULL), 98 old_pointer_space_(NULL),
105 old_data_space_(NULL), 99 old_data_space_(NULL),
106 code_space_(NULL), 100 code_space_(NULL),
107 map_space_(NULL), 101 map_space_(NULL),
108 cell_space_(NULL), 102 cell_space_(NULL),
109 lo_space_(NULL), 103 lo_space_(NULL),
110 gc_state_(NOT_IN_GC), 104 gc_state_(NOT_IN_GC),
111 gc_post_processing_depth_(0), 105 gc_post_processing_depth_(0),
112 mc_count_(0),
113 ms_count_(0), 106 ms_count_(0),
114 gc_count_(0), 107 gc_count_(0),
115 unflattened_strings_length_(0), 108 unflattened_strings_length_(0),
116 #ifdef DEBUG 109 #ifdef DEBUG
117 allocation_allowed_(true), 110 allocation_allowed_(true),
118 allocation_timeout_(0), 111 allocation_timeout_(0),
119 disallow_allocation_failure_(false), 112 disallow_allocation_failure_(false),
120 debug_utils_(NULL), 113 debug_utils_(NULL),
121 #endif // DEBUG 114 #endif // DEBUG
122 old_gen_promotion_limit_(kMinimumPromotionLimit), 115 old_gen_promotion_limit_(kMinimumPromotionLimit),
123 old_gen_allocation_limit_(kMinimumAllocationLimit), 116 old_gen_allocation_limit_(kMinimumAllocationLimit),
117 old_gen_limit_factor_(1),
118 size_of_old_gen_at_last_old_space_gc_(0),
124 external_allocation_limit_(0), 119 external_allocation_limit_(0),
125 amount_of_external_allocated_memory_(0), 120 amount_of_external_allocated_memory_(0),
126 amount_of_external_allocated_memory_at_last_global_gc_(0), 121 amount_of_external_allocated_memory_at_last_global_gc_(0),
127 old_gen_exhausted_(false), 122 old_gen_exhausted_(false),
123 store_buffer_rebuilder_(store_buffer()),
128 hidden_symbol_(NULL), 124 hidden_symbol_(NULL),
129 global_gc_prologue_callback_(NULL), 125 global_gc_prologue_callback_(NULL),
130 global_gc_epilogue_callback_(NULL), 126 global_gc_epilogue_callback_(NULL),
131 gc_safe_size_of_old_object_(NULL), 127 gc_safe_size_of_old_object_(NULL),
132 total_regexp_code_generated_(0), 128 total_regexp_code_generated_(0),
133 tracer_(NULL), 129 tracer_(NULL),
134 young_survivors_after_last_gc_(0), 130 young_survivors_after_last_gc_(0),
135 high_survival_rate_period_length_(0), 131 high_survival_rate_period_length_(0),
136 survival_rate_(0), 132 survival_rate_(0),
137 previous_survival_rate_trend_(Heap::STABLE), 133 previous_survival_rate_trend_(Heap::STABLE),
138 survival_rate_trend_(Heap::STABLE), 134 survival_rate_trend_(Heap::STABLE),
139 max_gc_pause_(0), 135 max_gc_pause_(0),
140 max_alive_after_gc_(0), 136 max_alive_after_gc_(0),
141 min_in_mutator_(kMaxInt), 137 min_in_mutator_(kMaxInt),
142 alive_after_last_gc_(0), 138 alive_after_last_gc_(0),
143 last_gc_end_timestamp_(0.0), 139 last_gc_end_timestamp_(0.0),
144 page_watermark_invalidated_mark_(1 << Page::WATERMARK_INVALIDATED), 140 store_buffer_(this),
141 marking_(this),
142 incremental_marking_(this),
145 number_idle_notifications_(0), 143 number_idle_notifications_(0),
146 last_idle_notification_gc_count_(0), 144 last_idle_notification_gc_count_(0),
147 last_idle_notification_gc_count_init_(false), 145 last_idle_notification_gc_count_init_(false),
148 configured_(false), 146 configured_(false),
149 is_safe_to_read_maps_(true) { 147 chunks_queued_for_free_(NULL) {
150 // Allow build-time customization of the max semispace size. Building 148 // Allow build-time customization of the max semispace size. Building
151 // V8 with snapshots and a non-default max semispace size is much 149 // V8 with snapshots and a non-default max semispace size is much
152 // easier if you can define it as part of the build environment. 150 // easier if you can define it as part of the build environment.
153 #if defined(V8_MAX_SEMISPACE_SIZE) 151 #if defined(V8_MAX_SEMISPACE_SIZE)
154 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE; 152 max_semispace_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
155 #endif 153 #endif
156 154
157 intptr_t max_virtual = OS::MaxVirtualMemory(); 155 intptr_t max_virtual = OS::MaxVirtualMemory();
158 156
159 if (max_virtual > 0) { 157 if (max_virtual > 0) {
(...skipping 57 matching lines...)
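Note on the sizing change in the constructor above: the per-platform constants that used to be written out explicitly (reserved/max/initial semispace, old-generation and executable limits) are now derived from a single LUMP_OF_MEMORY macro per platform. The standalone sketch below only evaluates the new expressions so the resulting sizes are easy to compare; kPageSize here is a placeholder constant, not a value taken from this patch, so the printed numbers are illustrative only.

    // Sketch: evaluate the new LUMP_OF_MEMORY-based size expressions from the
    // constructor above. kPageSize is an assumed stand-in for Page::kPageSize.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const int64_t KB = 1024;
      const int64_t MB = 1024 * KB;
      const int64_t kPageSize = 1 * MB;  // assumption for illustration only

      struct Platform { const char* name; int64_t lump; };
      const Platform platforms[] = {
        {"ANDROID", 128 * KB},           // #define LUMP_OF_MEMORY (128 * KB)
        {"V8_TARGET_ARCH_X64", 2 * MB},  // #define LUMP_OF_MEMORY (2 * MB)
        {"other", MB},                   // #define LUMP_OF_MEMORY MB
      };

      for (const Platform& p : platforms) {
        const int64_t semispace = 8 * std::max(p.lump, kPageSize);
        const int64_t initial_semispace = std::max(p.lump, kPageSize);
        const int64_t max_old_generation = 700 * p.lump;
        const int64_t max_executable = 128 * p.lump;
        std::printf("%-20s semispace=%lld MB initial=%lld MB "
                    "old_gen=%lld MB executable=%lld MB\n",
                    p.name,
                    static_cast<long long>(semispace / MB),
                    static_cast<long long>(initial_semispace / MB),
                    static_cast<long long>(max_old_generation / MB),
                    static_cast<long long>(max_executable / MB));
      }
      return 0;
    }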
217 return old_pointer_space_ != NULL && 215 return old_pointer_space_ != NULL &&
218 old_data_space_ != NULL && 216 old_data_space_ != NULL &&
219 code_space_ != NULL && 217 code_space_ != NULL &&
220 map_space_ != NULL && 218 map_space_ != NULL &&
221 cell_space_ != NULL && 219 cell_space_ != NULL &&
222 lo_space_ != NULL; 220 lo_space_ != NULL;
223 } 221 }
224 222
225 223
226 int Heap::GcSafeSizeOfOldObject(HeapObject* object) { 224 int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
227 ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects. 225 if (IntrusiveMarking::IsMarked(object)) {
228 ASSERT(!HEAP->mark_compact_collector()->are_map_pointers_encoded()); 226 return IntrusiveMarking::SizeOfMarkedObject(object);
229 MapWord map_word = object->map_word(); 227 }
230 map_word.ClearMark(); 228 return object->SizeFromMap(object->map());
231 map_word.ClearOverflow();
232 return object->SizeFromMap(map_word.ToMap());
233 } 229 }
234 230
235 231
236 int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
237 ASSERT(!HEAP->InNewSpace(object)); // Code only works for old objects.
238 ASSERT(HEAP->mark_compact_collector()->are_map_pointers_encoded());
239 uint32_t marker = Memory::uint32_at(object->address());
240 if (marker == MarkCompactCollector::kSingleFreeEncoding) {
241 return kIntSize;
242 } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
243 return Memory::int_at(object->address() + kIntSize);
244 } else {
245 MapWord map_word = object->map_word();
246 Address map_address = map_word.DecodeMapAddress(HEAP->map_space());
247 Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
248 return object->SizeFromMap(map);
249 }
250 }
251
252
253 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) { 232 GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
254 // Is global GC requested? 233 // Is global GC requested?
255 if (space != NEW_SPACE || FLAG_gc_global) { 234 if (space != NEW_SPACE || FLAG_gc_global) {
256 isolate_->counters()->gc_compactor_caused_by_request()->Increment(); 235 isolate_->counters()->gc_compactor_caused_by_request()->Increment();
257 return MARK_COMPACTOR; 236 return MARK_COMPACTOR;
258 } 237 }
259 238
260 // Is enough data promoted to justify a global GC? 239 // Is enough data promoted to justify a global GC?
261 if (OldGenerationPromotionLimitReached()) { 240 if (OldGenerationPromotionLimitReached()) {
262 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment(); 241 isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
(...skipping 130 matching lines...)
393 } 372 }
394 373
395 if (FLAG_gc_verbose) Print(); 374 if (FLAG_gc_verbose) Print();
396 #endif // DEBUG 375 #endif // DEBUG
397 376
398 #if defined(DEBUG) 377 #if defined(DEBUG)
399 ReportStatisticsBeforeGC(); 378 ReportStatisticsBeforeGC();
400 #endif // DEBUG 379 #endif // DEBUG
401 380
402 LiveObjectList::GCPrologue(); 381 LiveObjectList::GCPrologue();
382 store_buffer()->GCPrologue();
403 } 383 }
404 384
405 intptr_t Heap::SizeOfObjects() { 385 intptr_t Heap::SizeOfObjects() {
406 intptr_t total = 0; 386 intptr_t total = 0;
407 AllSpaces spaces; 387 AllSpaces spaces;
408 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) { 388 for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
409 total += space->SizeOfObjects(); 389 total += space->SizeOfObjects();
410 } 390 }
411 return total; 391 return total;
412 } 392 }
413 393
414 void Heap::GarbageCollectionEpilogue() { 394 void Heap::GarbageCollectionEpilogue() {
395 store_buffer()->GCEpilogue();
415 LiveObjectList::GCEpilogue(); 396 LiveObjectList::GCEpilogue();
416 #ifdef DEBUG 397 #ifdef DEBUG
417 allow_allocation(true); 398 allow_allocation(true);
418 ZapFromSpace(); 399 ZapFromSpace();
419 400
420 if (FLAG_verify_heap) { 401 if (FLAG_verify_heap) {
421 Verify(); 402 Verify();
422 } 403 }
423 404
424 if (FLAG_print_global_handles) isolate_->global_handles()->Print(); 405 if (FLAG_print_global_handles) isolate_->global_handles()->Print();
(...skipping 11 matching lines...)
436 symbol_table()->NumberOfElements()); 417 symbol_table()->NumberOfElements());
437 #if defined(DEBUG) 418 #if defined(DEBUG)
438 ReportStatisticsAfterGC(); 419 ReportStatisticsAfterGC();
439 #endif // DEBUG 420 #endif // DEBUG
440 #ifdef ENABLE_DEBUGGER_SUPPORT 421 #ifdef ENABLE_DEBUGGER_SUPPORT
441 isolate_->debug()->AfterGarbageCollection(); 422 isolate_->debug()->AfterGarbageCollection();
442 #endif // ENABLE_DEBUGGER_SUPPORT 423 #endif // ENABLE_DEBUGGER_SUPPORT
443 } 424 }
444 425
445 426
446 void Heap::CollectAllGarbage(bool force_compaction) { 427 void Heap::CollectAllGarbage(int flags) {
447 // Since we are ignoring the return value, the exact choice of space does 428 // Since we are ignoring the return value, the exact choice of space does
448 // not matter, so long as we do not specify NEW_SPACE, which would not 429 // not matter, so long as we do not specify NEW_SPACE, which would not
449 // cause a full GC. 430 // cause a full GC.
450 mark_compact_collector_.SetForceCompaction(force_compaction); 431 mark_compact_collector_.SetFlags(flags);
451 CollectGarbage(OLD_POINTER_SPACE); 432 CollectGarbage(OLD_POINTER_SPACE);
452 mark_compact_collector_.SetForceCompaction(false); 433 mark_compact_collector_.SetFlags(kNoGCFlags);
453 } 434 }
454 435
455 436
456 void Heap::CollectAllAvailableGarbage() { 437 void Heap::CollectAllAvailableGarbage() {
457 // Since we are ignoring the return value, the exact choice of space does 438 // Since we are ignoring the return value, the exact choice of space does
458 // not matter, so long as we do not specify NEW_SPACE, which would not 439 // not matter, so long as we do not specify NEW_SPACE, which would not
459 // cause a full GC. 440 // cause a full GC.
460 mark_compact_collector()->SetForceCompaction(true);
461
462 // Major GC would invoke weak handle callbacks on weakly reachable 441 // Major GC would invoke weak handle callbacks on weakly reachable
463 // handles, but won't collect weakly reachable objects until next 442 // handles, but won't collect weakly reachable objects until next
464 // major GC. Therefore if we collect aggressively and weak handle callback 443 // major GC. Therefore if we collect aggressively and weak handle callback
465 // has been invoked, we rerun major GC to release objects which become 444 // has been invoked, we rerun major GC to release objects which become
466 // garbage. 445 // garbage.
467 // Note: as weak callbacks can execute arbitrary code, we cannot 446 // Note: as weak callbacks can execute arbitrary code, we cannot
468 // hope that eventually there will be no weak callback invocations. 447 // hope that eventually there will be no weak callback invocations.
469 // Therefore stop recollecting after several attempts. 448 // Therefore stop recollecting after several attempts.
449 mark_compact_collector()->SetFlags(kMakeHeapIterableMask);
470 const int kMaxNumberOfAttempts = 7; 450 const int kMaxNumberOfAttempts = 7;
471 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) { 451 for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
472 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) { 452 if (!CollectGarbage(OLD_POINTER_SPACE, MARK_COMPACTOR)) {
473 break; 453 break;
474 } 454 }
475 } 455 }
476 mark_compact_collector()->SetForceCompaction(false); 456 mark_compact_collector()->SetFlags(kNoGCFlags);
477 } 457 }
478 458
479 459
480 bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) { 460 bool Heap::CollectGarbage(AllocationSpace space, GarbageCollector collector) {
481 // The VM is in the GC state until exiting this function. 461 // The VM is in the GC state until exiting this function.
482 VMState state(isolate_, GC); 462 VMState state(isolate_, GC);
483 463
484 #ifdef DEBUG 464 #ifdef DEBUG
485 // Reset the allocation timeout to the GC interval, but make sure to 465 // Reset the allocation timeout to the GC interval, but make sure to
486 // allow at least a few allocations after a collection. The reason 466 // allow at least a few allocations after a collection. The reason
487 // for this is that we have a lot of allocation sequences and we 467 // for this is that we have a lot of allocation sequences and we
488 // assume that a garbage collection will allow the subsequent 468 // assume that a garbage collection will allow the subsequent
489 // allocation attempts to go through. 469 // allocation attempts to go through.
490 allocation_timeout_ = Max(6, FLAG_gc_interval); 470 allocation_timeout_ = Max(6, FLAG_gc_interval);
491 #endif 471 #endif
492 472
473 if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
474 if (FLAG_trace_incremental_marking) {
475 PrintF("[IncrementalMarking] Scavenge during marking.\n");
476 }
477 }
478
479 if (collector == MARK_COMPACTOR &&
480 !mark_compact_collector()->PreciseSweepingRequired() &&
481 !incremental_marking()->IsStopped() &&
482 !incremental_marking()->should_hurry() &&
483 FLAG_incremental_marking_steps) {
484 if (FLAG_trace_incremental_marking) {
485 PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
486 }
487 collector = SCAVENGER;
488 }
489
493 bool next_gc_likely_to_collect_more = false; 490 bool next_gc_likely_to_collect_more = false;
494 491
495 { GCTracer tracer(this); 492 { GCTracer tracer(this);
496 GarbageCollectionPrologue(); 493 GarbageCollectionPrologue();
497 // The GC count was incremented in the prologue. Tell the tracer about 494 // The GC count was incremented in the prologue. Tell the tracer about
498 // it. 495 // it.
499 tracer.set_gc_count(gc_count_); 496 tracer.set_gc_count(gc_count_);
500 497
501 // Tell the tracer which collector we've selected. 498 // Tell the tracer which collector we've selected.
502 tracer.set_collector(collector); 499 tracer.set_collector(collector);
503 500
504 HistogramTimer* rate = (collector == SCAVENGER) 501 HistogramTimer* rate = (collector == SCAVENGER)
505 ? isolate_->counters()->gc_scavenger() 502 ? isolate_->counters()->gc_scavenger()
506 : isolate_->counters()->gc_compactor(); 503 : isolate_->counters()->gc_compactor();
507 rate->Start(); 504 rate->Start();
508 next_gc_likely_to_collect_more = 505 next_gc_likely_to_collect_more =
509 PerformGarbageCollection(collector, &tracer); 506 PerformGarbageCollection(collector, &tracer);
510 rate->Stop(); 507 rate->Stop();
511 508
512 GarbageCollectionEpilogue(); 509 GarbageCollectionEpilogue();
513 } 510 }
514 511
512 ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
513 if (incremental_marking()->IsStopped()) {
514 if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
515 incremental_marking()->Start();
516 }
517 }
518
515 return next_gc_likely_to_collect_more; 519 return next_gc_likely_to_collect_more;
516 } 520 }
517 521
518 522
519 void Heap::PerformScavenge() { 523 void Heap::PerformScavenge() {
520 GCTracer tracer(this); 524 GCTracer tracer(this);
521 PerformGarbageCollection(SCAVENGER, &tracer); 525 if (incremental_marking()->IsStopped()) {
526 PerformGarbageCollection(SCAVENGER, &tracer);
527 } else {
528 PerformGarbageCollection(MARK_COMPACTOR, &tracer);
529 }
522 } 530 }
523 531
524 532
525 #ifdef DEBUG 533 #ifdef DEBUG
526 // Helper class for verifying the symbol table. 534 // Helper class for verifying the symbol table.
527 class SymbolTableVerifier : public ObjectVisitor { 535 class SymbolTableVerifier : public ObjectVisitor {
528 public: 536 public:
529 void VisitPointers(Object** start, Object** end) { 537 void VisitPointers(Object** start, Object** end) {
530 // Visit all HeapObject pointers in [start, end). 538 // Visit all HeapObject pointers in [start, end).
531 for (Object** p = start; p < end; p++) { 539 for (Object** p = start; p < end; p++) {
(...skipping 71 matching lines...)
603 } 611 }
604 } 612 }
605 } 613 }
606 614
607 615
608 void Heap::EnsureFromSpaceIsCommitted() { 616 void Heap::EnsureFromSpaceIsCommitted() {
609 if (new_space_.CommitFromSpaceIfNeeded()) return; 617 if (new_space_.CommitFromSpaceIfNeeded()) return;
610 618
611 // Committing memory to from space failed. 619 // Committing memory to from space failed.
612 // Try shrinking and try again. 620 // Try shrinking and try again.
613 PagedSpaces spaces;
614 for (PagedSpace* space = spaces.next();
615 space != NULL;
616 space = spaces.next()) {
617 space->RelinkPageListInChunkOrder(true);
618 }
619
620 Shrink(); 621 Shrink();
621 if (new_space_.CommitFromSpaceIfNeeded()) return; 622 if (new_space_.CommitFromSpaceIfNeeded()) return;
622 623
623 // Committing memory to from space failed again. 624 // Committing memory to from space failed again.
624 // Memory is exhausted and we will die. 625 // Memory is exhausted and we will die.
625 V8::FatalProcessOutOfMemory("Committing semi space failed."); 626 V8::FatalProcessOutOfMemory("Committing semi space failed.");
626 } 627 }
627 628
628 629
629 void Heap::ClearJSFunctionResultCaches() { 630 void Heap::ClearJSFunctionResultCaches() {
(...skipping 10 matching lines...)
640 JSFunctionResultCache::cast(caches->get(i))->Clear(); 641 JSFunctionResultCache::cast(caches->get(i))->Clear();
641 } 642 }
642 // Get the next context: 643 // Get the next context:
643 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); 644 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
644 } 645 }
645 } 646 }
646 647
647 648
648 649
649 void Heap::ClearNormalizedMapCaches() { 650 void Heap::ClearNormalizedMapCaches() {
650 if (isolate_->bootstrapper()->IsActive()) return; 651 if (isolate_->bootstrapper()->IsActive() &&
652 !incremental_marking()->IsMarking()) {
653 return;
654 }
651 655
652 Object* context = global_contexts_list_; 656 Object* context = global_contexts_list_;
653 while (!context->IsUndefined()) { 657 while (!context->IsUndefined()) {
654 Context::cast(context)->normalized_map_cache()->Clear(); 658 Context::cast(context)->normalized_map_cache()->Clear();
655 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK); 659 context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
656 } 660 }
657 } 661 }
658 662
659 663
660 #ifdef DEBUG
661
662 enum PageWatermarkValidity {
663 ALL_VALID,
664 ALL_INVALID
665 };
666
667 static void VerifyPageWatermarkValidity(PagedSpace* space,
668 PageWatermarkValidity validity) {
669 PageIterator it(space, PageIterator::PAGES_IN_USE);
670 bool expected_value = (validity == ALL_VALID);
671 while (it.has_next()) {
672 Page* page = it.next();
673 ASSERT(page->IsWatermarkValid() == expected_value);
674 }
675 }
676 #endif
677
678 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) { 664 void Heap::UpdateSurvivalRateTrend(int start_new_space_size) {
679 double survival_rate = 665 double survival_rate =
680 (static_cast<double>(young_survivors_after_last_gc_) * 100) / 666 (static_cast<double>(young_survivors_after_last_gc_) * 100) /
681 start_new_space_size; 667 start_new_space_size;
682 668
683 if (survival_rate > kYoungSurvivalRateThreshold) { 669 if (survival_rate > kYoungSurvivalRateThreshold) {
684 high_survival_rate_period_length_++; 670 high_survival_rate_period_length_++;
685 } else { 671 } else {
686 high_survival_rate_period_length_ = 0; 672 high_survival_rate_period_length_ = 0;
687 } 673 }
(...skipping 32 matching lines...)
720 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) { 706 for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
721 if (gc_type & gc_prologue_callbacks_[i].gc_type) { 707 if (gc_type & gc_prologue_callbacks_[i].gc_type) {
722 gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags); 708 gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags);
723 } 709 }
724 } 710 }
725 711
726 EnsureFromSpaceIsCommitted(); 712 EnsureFromSpaceIsCommitted();
727 713
728 int start_new_space_size = Heap::new_space()->SizeAsInt(); 714 int start_new_space_size = Heap::new_space()->SizeAsInt();
729 715
716 if (IsHighSurvivalRate()) {
717 // We speed up the incremental marker if it is running so that it
718 // does not fall behind the rate of promotion, which would cause a
719 // constantly growing old space.
720 incremental_marking()->NotifyOfHighPromotionRate();
721 }
722
730 if (collector == MARK_COMPACTOR) { 723 if (collector == MARK_COMPACTOR) {
731 // Perform mark-sweep with optional compaction. 724 // Perform mark-sweep with optional compaction.
732 MarkCompact(tracer); 725 MarkCompact(tracer);
733 sweep_generation_++; 726 sweep_generation_++;
734 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() && 727 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() &&
735 IsStableOrIncreasingSurvivalTrend(); 728 IsStableOrIncreasingSurvivalTrend();
736 729
737 UpdateSurvivalRateTrend(start_new_space_size); 730 UpdateSurvivalRateTrend(start_new_space_size);
738 731
739 intptr_t old_gen_size = PromotedSpaceSize(); 732 size_of_old_gen_at_last_old_space_gc_ = PromotedSpaceSize();
740 old_gen_promotion_limit_ =
741 old_gen_size + Max(kMinimumPromotionLimit, old_gen_size / 3);
742 old_gen_allocation_limit_ =
743 old_gen_size + Max(kMinimumAllocationLimit, old_gen_size / 2);
744 733
745 if (high_survival_rate_during_scavenges && 734 if (high_survival_rate_during_scavenges &&
746 IsStableOrIncreasingSurvivalTrend()) { 735 IsStableOrIncreasingSurvivalTrend()) {
747 // Stable high survival rates of young objects both during partial and 736 // Stable high survival rates of young objects both during partial and
748 // full collection indicate that mutator is either building or modifying 737 // full collection indicate that mutator is either building or modifying
749 // a structure with a long lifetime. 738 // a structure with a long lifetime.
750 // In this case we aggressively raise old generation memory limits to 739 // In this case we aggressively raise old generation memory limits to
751 // postpone subsequent mark-sweep collection and thus trade memory 740 // postpone subsequent mark-sweep collection and thus trade memory
752 // space for the mutation speed. 741 // space for the mutation speed.
753 old_gen_promotion_limit_ *= 2; 742 old_gen_limit_factor_ = 2;
754 old_gen_allocation_limit_ *= 2; 743 } else {
744 old_gen_limit_factor_ = 1;
755 } 745 }
756 746
747 old_gen_promotion_limit_ =
748 OldGenPromotionLimit(size_of_old_gen_at_last_old_space_gc_);
749 old_gen_allocation_limit_ =
750 OldGenAllocationLimit(size_of_old_gen_at_last_old_space_gc_);
751
757 old_gen_exhausted_ = false; 752 old_gen_exhausted_ = false;
758 } else { 753 } else {
759 tracer_ = tracer; 754 tracer_ = tracer;
760 Scavenge(); 755 Scavenge();
761 tracer_ = NULL; 756 tracer_ = NULL;
762 757
763 UpdateSurvivalRateTrend(start_new_space_size); 758 UpdateSurvivalRateTrend(start_new_space_size);
764 } 759 }
765 760
766 isolate_->counters()->objs_since_last_young()->Set(0); 761 isolate_->counters()->objs_since_last_young()->Set(0);
767 762
768 gc_post_processing_depth_++; 763 gc_post_processing_depth_++;
769 { DisableAssertNoAllocation allow_allocation; 764 { DisableAssertNoAllocation allow_allocation;
770 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); 765 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
771 next_gc_likely_to_collect_more = 766 next_gc_likely_to_collect_more =
772 isolate_->global_handles()->PostGarbageCollectionProcessing(collector); 767 isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
773 } 768 }
774 gc_post_processing_depth_--; 769 gc_post_processing_depth_--;
775 770
776 // Update relocatables. 771 // Update relocatables.
777 Relocatable::PostGarbageCollectionProcessing(); 772 Relocatable::PostGarbageCollectionProcessing();
778 773
779 if (collector == MARK_COMPACTOR) { 774 if (collector == MARK_COMPACTOR) {
780 // Register the amount of external allocated memory. 775 // Register the amount of external allocated memory.
781 amount_of_external_allocated_memory_at_last_global_gc_ = 776 amount_of_external_allocated_memory_at_last_global_gc_ =
782 amount_of_external_allocated_memory_; 777 amount_of_external_allocated_memory_;
783 } 778 }
784 779
785 GCCallbackFlags callback_flags = tracer->is_compacting() 780 GCCallbackFlags callback_flags = kNoGCCallbackFlags;
786 ? kGCCallbackFlagCompacted
787 : kNoGCCallbackFlags;
788 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) { 781 for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
789 if (gc_type & gc_epilogue_callbacks_[i].gc_type) { 782 if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
790 gc_epilogue_callbacks_[i].callback(gc_type, callback_flags); 783 gc_epilogue_callbacks_[i].callback(gc_type, callback_flags);
791 } 784 }
792 } 785 }
793 786
794 if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) { 787 if (collector == MARK_COMPACTOR && global_gc_epilogue_callback_) {
795 ASSERT(!allocation_allowed_); 788 ASSERT(!allocation_allowed_);
796 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL); 789 GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
797 global_gc_epilogue_callback_(); 790 global_gc_epilogue_callback_();
798 } 791 }
799 VerifySymbolTable(); 792 VerifySymbolTable();
800 793
801 return next_gc_likely_to_collect_more; 794 return next_gc_likely_to_collect_more;
802 } 795 }
803 796
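For context on the limit changes in the mark-compact branch above: the formulas that used to be computed inline (and doubled in place under a stable high survival rate) are now recorded via size_of_old_gen_at_last_old_space_gc_ and old_gen_limit_factor_, with the actual limits coming from OldGenPromotionLimit()/OldGenAllocationLimit() in heap.h. A minimal sketch of the equivalent arithmetic, assuming the new helpers apply the factor to the old inline formulas (the real helpers may differ in detail):

    // Sketch only: reproduces the removed inline formulas and applies the new
    // old_gen_limit_factor_ the way the deleted "*= 2" did. Hypothetical.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    const int64_t MB = 1024 * 1024;
    const int64_t kMinimumPromotionLimit = 2 * MB;   // constants removed from
    const int64_t kMinimumAllocationLimit = 8 * MB;  // the top of heap.cc

    int64_t PromotionLimit(int64_t old_gen_size, int factor) {
      return factor *
             (old_gen_size + std::max(kMinimumPromotionLimit, old_gen_size / 3));
    }

    int64_t AllocationLimit(int64_t old_gen_size, int factor) {
      return factor *
             (old_gen_size + std::max(kMinimumAllocationLimit, old_gen_size / 2));
    }

    int main() {
      const int64_t old_gen_size = 90 * MB;  // example size after a full GC
      for (int factor = 1; factor <= 2; ++factor) {
        std::printf("factor %d: promotion limit %lld MB, allocation limit %lld MB\n",
                    factor,
                    static_cast<long long>(PromotionLimit(old_gen_size, factor) / MB),
                    static_cast<long long>(AllocationLimit(old_gen_size, factor) / MB));
      }
      return 0;
    }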
804 797
805 void Heap::MarkCompact(GCTracer* tracer) { 798 void Heap::MarkCompact(GCTracer* tracer) {
806 gc_state_ = MARK_COMPACT; 799 gc_state_ = MARK_COMPACT;
807 LOG(isolate_, ResourceEvent("markcompact", "begin")); 800 LOG(isolate_, ResourceEvent("markcompact", "begin"));
808 801
809 mark_compact_collector_.Prepare(tracer); 802 mark_compact_collector_.Prepare(tracer);
810 803
811 bool is_compacting = mark_compact_collector_.IsCompacting(); 804 ms_count_++;
805 tracer->set_full_gc_count(ms_count_);
812 806
813 if (is_compacting) { 807 MarkCompactPrologue();
814 mc_count_++;
815 } else {
816 ms_count_++;
817 }
818 tracer->set_full_gc_count(mc_count_ + ms_count_);
819 808
820 MarkCompactPrologue(is_compacting);
821
822 is_safe_to_read_maps_ = false;
823 mark_compact_collector_.CollectGarbage(); 809 mark_compact_collector_.CollectGarbage();
824 is_safe_to_read_maps_ = true;
825 810
826 LOG(isolate_, ResourceEvent("markcompact", "end")); 811 LOG(isolate_, ResourceEvent("markcompact", "end"));
827 812
828 gc_state_ = NOT_IN_GC; 813 gc_state_ = NOT_IN_GC;
829 814
830 Shrink();
831
832 isolate_->counters()->objs_since_last_full()->Set(0); 815 isolate_->counters()->objs_since_last_full()->Set(0);
833 816
834 contexts_disposed_ = 0; 817 contexts_disposed_ = 0;
835 } 818 }
836 819
837 820
838 void Heap::MarkCompactPrologue(bool is_compacting) { 821 void Heap::MarkCompactPrologue() {
839 // At any old GC clear the keyed lookup cache to enable collection of unused 822 // At any old GC clear the keyed lookup cache to enable collection of unused
840 // maps. 823 // maps.
841 isolate_->keyed_lookup_cache()->Clear(); 824 isolate_->keyed_lookup_cache()->Clear();
842 isolate_->context_slot_cache()->Clear(); 825 isolate_->context_slot_cache()->Clear();
843 isolate_->descriptor_lookup_cache()->Clear(); 826 isolate_->descriptor_lookup_cache()->Clear();
844 StringSplitCache::Clear(string_split_cache()); 827 StringSplitCache::Clear(string_split_cache());
845 828
846 isolate_->compilation_cache()->MarkCompactPrologue(); 829 isolate_->compilation_cache()->MarkCompactPrologue();
847 830
848 CompletelyClearInstanceofCache(); 831 CompletelyClearInstanceofCache();
849 832
850 if (is_compacting) FlushNumberStringCache(); 833 // TODO(1605) select heuristic for flushing NumberString cache with
834 // FlushNumberStringCache
851 if (FLAG_cleanup_code_caches_at_gc) { 835 if (FLAG_cleanup_code_caches_at_gc) {
852 polymorphic_code_cache()->set_cache(undefined_value()); 836 polymorphic_code_cache()->set_cache(undefined_value());
853 } 837 }
854 838
855 ClearNormalizedMapCaches(); 839 ClearNormalizedMapCaches();
856 } 840 }
857 841
858 842
859 Object* Heap::FindCodeObject(Address a) { 843 Object* Heap::FindCodeObject(Address a) {
860 Object* obj = NULL; // Initialization to please compiler. 844 return isolate()->inner_pointer_to_code_cache()->
861 { MaybeObject* maybe_obj = code_space_->FindObject(a); 845 GcSafeFindCodeForInnerPointer(a);
862 if (!maybe_obj->ToObject(&obj)) {
863 obj = lo_space_->FindObject(a)->ToObjectUnchecked();
864 }
865 }
866 return obj;
867 } 846 }
868 847
869 848
870 // Helper class for copying HeapObjects 849 // Helper class for copying HeapObjects
871 class ScavengeVisitor: public ObjectVisitor { 850 class ScavengeVisitor: public ObjectVisitor {
872 public: 851 public:
873 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {} 852 explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
874 853
875 void VisitPointer(Object** p) { ScavengePointer(p); } 854 void VisitPointer(Object** p) { ScavengePointer(p); }
876 855
(...skipping 27 matching lines...)
904 } 883 }
905 } 884 }
906 }; 885 };
907 886
908 887
909 static void VerifyNonPointerSpacePointers() { 888 static void VerifyNonPointerSpacePointers() {
910 // Verify that there are no pointers to new space in spaces where we 889 // Verify that there are no pointers to new space in spaces where we
911 // do not expect them. 890 // do not expect them.
912 VerifyNonPointerSpacePointersVisitor v; 891 VerifyNonPointerSpacePointersVisitor v;
913 HeapObjectIterator code_it(HEAP->code_space()); 892 HeapObjectIterator code_it(HEAP->code_space());
914 for (HeapObject* object = code_it.next(); 893 for (HeapObject* object = code_it.Next();
915 object != NULL; object = code_it.next()) 894 object != NULL; object = code_it.Next())
916 object->Iterate(&v); 895 object->Iterate(&v);
917 896
918 HeapObjectIterator data_it(HEAP->old_data_space()); 897 // The old data space was normally swept conservatively so that the iterator
919 for (HeapObject* object = data_it.next(); 898 // doesn't work, so we normally skip the next bit.
920 object != NULL; object = data_it.next()) 899 if (!HEAP->old_data_space()->was_swept_conservatively()) {
921 object->Iterate(&v); 900 HeapObjectIterator data_it(HEAP->old_data_space());
901 for (HeapObject* object = data_it.Next();
902 object != NULL; object = data_it.Next())
903 object->Iterate(&v);
904 }
922 } 905 }
923 #endif 906 #endif
924 907
925 908
926 void Heap::CheckNewSpaceExpansionCriteria() { 909 void Heap::CheckNewSpaceExpansionCriteria() {
927 if (new_space_.Capacity() < new_space_.MaximumCapacity() && 910 if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
928 survived_since_last_expansion_ > new_space_.Capacity()) { 911 survived_since_last_expansion_ > new_space_.Capacity()) {
929 // Grow the size of new space if there is room to grow and enough 912 // Grow the size of new space if there is room to grow and enough
930 // data has survived scavenge since the last expansion. 913 // data has survived scavenge since the last expansion.
931 new_space_.Grow(); 914 new_space_.Grow();
932 survived_since_last_expansion_ = 0; 915 survived_since_last_expansion_ = 0;
933 } 916 }
934 } 917 }
935 918
936 919
937 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) { 920 static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
938 return heap->InNewSpace(*p) && 921 return heap->InNewSpace(*p) &&
939 !HeapObject::cast(*p)->map_word().IsForwardingAddress(); 922 !HeapObject::cast(*p)->map_word().IsForwardingAddress();
940 } 923 }
941 924
942 925
926 void Heap::ScavengeStoreBufferCallback(
927 Heap* heap,
928 MemoryChunk* page,
929 StoreBufferEvent event) {
930 heap->store_buffer_rebuilder_.Callback(page, event);
931 }
932
933
934 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
935 if (event == kStoreBufferStartScanningPagesEvent) {
936 start_of_current_page_ = NULL;
937 current_page_ = NULL;
938 } else if (event == kStoreBufferScanningPageEvent) {
939 if (current_page_ != NULL) {
940 // If this page already overflowed the store buffer during this iteration.
941 if (current_page_->scan_on_scavenge()) {
942 // Then we should wipe out the entries that have been added for it.
943 store_buffer_->SetTop(start_of_current_page_);
944 } else if (store_buffer_->Top() - start_of_current_page_ >=
945 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
946 // Did we find too many pointers in the previous page? The heuristic is
947 // that no page can take more than 1/5 the remaining slots in the store
948 // buffer.
949 current_page_->set_scan_on_scavenge(true);
950 store_buffer_->SetTop(start_of_current_page_);
951 } else {
952 // In this case the page we scanned took a reasonable number of slots in
953 // the store buffer. It has now been rehabilitated and is no longer
954 // marked scan_on_scavenge.
955 ASSERT(!current_page_->scan_on_scavenge());
956 }
957 }
958 start_of_current_page_ = store_buffer_->Top();
959 current_page_ = page;
960 } else if (event == kStoreBufferFullEvent) {
961 // The current page overflowed the store buffer again. Wipe out its entries
962 // in the store buffer and mark it scan-on-scavenge again. This may happen
963 // several times while scanning.
964 if (current_page_ == NULL) {
965 // Store Buffer overflowed while scanning promoted objects. These are not
966 // in any particular page, though they are likely to be clustered by the
967 // allocation routines.
968 store_buffer_->HandleFullness();
969 } else {
970 // Store Buffer overflowed while scanning a particular old space page for
971 // pointers to new space.
972 ASSERT(current_page_ == page);
973 ASSERT(page != NULL);
974 current_page_->set_scan_on_scavenge(true);
975 ASSERT(start_of_current_page_ != store_buffer_->Top());
976 store_buffer_->SetTop(start_of_current_page_);
977 }
978 } else {
979 UNREACHABLE();
980 }
981 }
982
983
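One point worth spelling out about the heuristic in StoreBufferRebuilder::Callback() above: the code compares the slots consumed by the current page against a quarter of the slots that are still free, while the comment speaks of a fifth of the remaining slots. The two readings agree, because the slots remaining when scanning of the page began equal the slots the page consumed plus the slots still free now. A small self-contained check of that identity (the real code uses a >> 2 shift, which only adds sub-slot rounding):

    // 4 * used >= remaining_now  <=>  5 * used >= used + remaining_now,
    // i.e. the page took at least a fifth of the slots that were remaining
    // when we started scanning it.
    #include <cassert>
    #include <cstdio>

    int main() {
      for (long used = 0; used <= 1000; ++used) {
        for (long remaining_now = 0; remaining_now <= 1000; ++remaining_now) {
          const bool quarter_of_whats_left = 4 * used >= remaining_now;
          const bool fifth_of_what_was_left = 5 * used >= used + remaining_now;
          assert(quarter_of_whats_left == fifth_of_what_was_left);
        }
      }
      std::printf("the two wordings of the heuristic agree\n");
      return 0;
    }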
943 void Heap::Scavenge() { 984 void Heap::Scavenge() {
944 #ifdef DEBUG 985 #ifdef DEBUG
945 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); 986 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
946 #endif 987 #endif
947 988
948 gc_state_ = SCAVENGE; 989 gc_state_ = SCAVENGE;
949 990
950 SwitchScavengingVisitorsTableIfProfilingWasEnabled();
951
952 Page::FlipMeaningOfInvalidatedWatermarkFlag(this);
953 #ifdef DEBUG
954 VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
955 VerifyPageWatermarkValidity(map_space_, ALL_VALID);
956 #endif
957
958 // We do not update an allocation watermark of the top page during linear
959 // allocation to avoid overhead. So to maintain the watermark invariant
960 // we have to manually cache the watermark and mark the top page as having an
961 // invalid watermark. This guarantees that dirty regions iteration will use a
962 // correct watermark even if a linear allocation happens.
963 old_pointer_space_->FlushTopPageWatermark();
964 map_space_->FlushTopPageWatermark();
965
966 // Implements Cheney's copying algorithm 991 // Implements Cheney's copying algorithm
967 LOG(isolate_, ResourceEvent("scavenge", "begin")); 992 LOG(isolate_, ResourceEvent("scavenge", "begin"));
968 993
969 // Clear descriptor cache. 994 // Clear descriptor cache.
970 isolate_->descriptor_lookup_cache()->Clear(); 995 isolate_->descriptor_lookup_cache()->Clear();
971 996
972 // Used for updating survived_since_last_expansion_ at function end. 997 // Used for updating survived_since_last_expansion_ at function end.
973 intptr_t survived_watermark = PromotedSpaceSize(); 998 intptr_t survived_watermark = PromotedSpaceSize();
974 999
975 CheckNewSpaceExpansionCriteria(); 1000 CheckNewSpaceExpansionCriteria();
976 1001
1002 SelectScavengingVisitorsTable();
1003
1004 incremental_marking()->PrepareForScavenge();
1005
1006 old_pointer_space()->AdvanceSweeper(new_space_.Size());
1007 old_data_space()->AdvanceSweeper(new_space_.Size());
1008
977 // Flip the semispaces. After flipping, to space is empty, from space has 1009 // Flip the semispaces. After flipping, to space is empty, from space has
978 // live objects. 1010 // live objects.
979 new_space_.Flip(); 1011 new_space_.Flip();
980 new_space_.ResetAllocationInfo(); 1012 new_space_.ResetAllocationInfo();
981 1013
982 // We need to sweep newly copied objects which can be either in the 1014 // We need to sweep newly copied objects which can be either in the
983 // to space or promoted to the old generation. For to-space 1015 // to space or promoted to the old generation. For to-space
984 // objects, we treat the bottom of the to space as a queue. Newly 1016 // objects, we treat the bottom of the to space as a queue. Newly
985 // copied and unswept objects lie between a 'front' mark and the 1017 // copied and unswept objects lie between a 'front' mark and the
986 // allocation pointer. 1018 // allocation pointer.
987 // 1019 //
988 // Promoted objects can go into various old-generation spaces, and 1020 // Promoted objects can go into various old-generation spaces, and
989 // can be allocated internally in the spaces (from the free list). 1021 // can be allocated internally in the spaces (from the free list).
990 // We treat the top of the to space as a queue of addresses of 1022 // We treat the top of the to space as a queue of addresses of
991 // promoted objects. The addresses of newly promoted and unswept 1023 // promoted objects. The addresses of newly promoted and unswept
992 // objects lie between a 'front' mark and a 'rear' mark that is 1024 // objects lie between a 'front' mark and a 'rear' mark that is
993 // updated as a side effect of promoting an object. 1025 // updated as a side effect of promoting an object.
994 // 1026 //
995 // There is guaranteed to be enough room at the top of the to space 1027 // There is guaranteed to be enough room at the top of the to space
996 // for the addresses of promoted objects: every object promoted 1028 // for the addresses of promoted objects: every object promoted
997 // frees up its size in bytes from the top of the new space, and 1029 // frees up its size in bytes from the top of the new space, and
998 // objects are at least one pointer in size. 1030 // objects are at least one pointer in size.
999 Address new_space_front = new_space_.ToSpaceLow(); 1031 Address new_space_front = new_space_.ToSpaceStart();
1000 promotion_queue_.Initialize(new_space_.ToSpaceHigh()); 1032 promotion_queue_.Initialize(new_space_.ToSpaceEnd());
1001 1033
1002 is_safe_to_read_maps_ = false; 1034 #ifdef DEBUG
1035 store_buffer()->Clean();
1036 #endif
1037
1003 ScavengeVisitor scavenge_visitor(this); 1038 ScavengeVisitor scavenge_visitor(this);
1004 // Copy roots. 1039 // Copy roots.
1005 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE); 1040 IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
1006 1041
1007 // Copy objects reachable from the old generation. By definition, 1042 // Copy objects reachable from the old generation.
1008 // there are no intergenerational pointers in code or data spaces. 1043 {
1009 IterateDirtyRegions(old_pointer_space_, 1044 StoreBufferRebuildScope scope(this,
1010 &Heap::IteratePointersInDirtyRegion, 1045 store_buffer(),
1011 &ScavengePointer, 1046 &ScavengeStoreBufferCallback);
1012 WATERMARK_CAN_BE_INVALID); 1047 store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
1013 1048 }
1014 IterateDirtyRegions(map_space_,
1015 &IteratePointersInDirtyMapsRegion,
1016 &ScavengePointer,
1017 WATERMARK_CAN_BE_INVALID);
1018
1019 lo_space_->IterateDirtyRegions(&ScavengePointer);
1020 1049
1021 // Copy objects reachable from cells by scavenging cell values directly. 1050 // Copy objects reachable from cells by scavenging cell values directly.
1022 HeapObjectIterator cell_iterator(cell_space_); 1051 HeapObjectIterator cell_iterator(cell_space_);
1023 for (HeapObject* cell = cell_iterator.next(); 1052 for (HeapObject* cell = cell_iterator.Next();
1024 cell != NULL; cell = cell_iterator.next()) { 1053 cell != NULL; cell = cell_iterator.Next()) {
1025 if (cell->IsJSGlobalPropertyCell()) { 1054 if (cell->IsJSGlobalPropertyCell()) {
1026 Address value_address = 1055 Address value_address =
1027 reinterpret_cast<Address>(cell) + 1056 reinterpret_cast<Address>(cell) +
1028 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); 1057 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
1029 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address)); 1058 scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
1030 } 1059 }
1031 } 1060 }
1032 1061
1033 // Scavenge object reachable from the global contexts list directly. 1062 // Scavenge object reachable from the global contexts list directly.
1034 scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_)); 1063 scavenge_visitor.VisitPointer(BitCast<Object**>(&global_contexts_list_));
1035 1064
1036 new_space_front = DoScavenge(&scavenge_visitor, new_space_front); 1065 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1037 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles( 1066 isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
1038 &IsUnscavengedHeapObject); 1067 &IsUnscavengedHeapObject);
1039 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots( 1068 isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
1040 &scavenge_visitor); 1069 &scavenge_visitor);
1041 new_space_front = DoScavenge(&scavenge_visitor, new_space_front); 1070 new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
1042 1071
1043 1072
1044 UpdateNewSpaceReferencesInExternalStringTable( 1073 UpdateNewSpaceReferencesInExternalStringTable(
1045 &UpdateNewSpaceReferenceInExternalStringTableEntry); 1074 &UpdateNewSpaceReferenceInExternalStringTableEntry);
1046 1075
1047 LiveObjectList::UpdateReferencesForScavengeGC(); 1076 LiveObjectList::UpdateReferencesForScavengeGC();
1048 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge(); 1077 isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
1078 incremental_marking()->UpdateMarkingDequeAfterScavenge();
1049 1079
1050 ASSERT(new_space_front == new_space_.top()); 1080 ASSERT(new_space_front == new_space_.top());
1051 1081
1052 is_safe_to_read_maps_ = true;
1053
1054 // Set age mark. 1082 // Set age mark.
1055 new_space_.set_age_mark(new_space_.top()); 1083 new_space_.set_age_mark(new_space_.top());
1056 1084
1085 new_space_.LowerInlineAllocationLimit(
1086 new_space_.inline_allocation_limit_step());
1087
1057 // Update how much has survived scavenge. 1088 // Update how much has survived scavenge.
1058 IncrementYoungSurvivorsCounter(static_cast<int>( 1089 IncrementYoungSurvivorsCounter(static_cast<int>(
1059 (PromotedSpaceSize() - survived_watermark) + new_space_.Size())); 1090 (PromotedSpaceSize() - survived_watermark) + new_space_.Size()));
1060 1091
1061 LOG(isolate_, ResourceEvent("scavenge", "end")); 1092 LOG(isolate_, ResourceEvent("scavenge", "end"));
1062 1093
1063 gc_state_ = NOT_IN_GC; 1094 gc_state_ = NOT_IN_GC;
1064 } 1095 }
1065 1096
1066 1097
(...skipping 38 matching lines...)
1105 // String got promoted. Move it to the old string list. 1136 // String got promoted. Move it to the old string list.
1106 external_string_table_.AddOldString(target); 1137 external_string_table_.AddOldString(target);
1107 } 1138 }
1108 } 1139 }
1109 1140
1110 ASSERT(last <= end); 1141 ASSERT(last <= end);
1111 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start)); 1142 external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
1112 } 1143 }
1113 1144
1114 1145
1146 void Heap::UpdateReferencesInExternalStringTable(
1147 ExternalStringTableUpdaterCallback updater_func) {
1148
1149 // Update old space string references.
1150 if (external_string_table_.old_space_strings_.length() > 0) {
1151 Object** start = &external_string_table_.old_space_strings_[0];
1152 Object** end = start + external_string_table_.old_space_strings_.length();
1153 for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
1154 }
1155
1156 UpdateNewSpaceReferencesInExternalStringTable(updater_func);
1157 }
1158
1159
1115 static Object* ProcessFunctionWeakReferences(Heap* heap, 1160 static Object* ProcessFunctionWeakReferences(Heap* heap,
1116 Object* function, 1161 Object* function,
1117 WeakObjectRetainer* retainer) { 1162 WeakObjectRetainer* retainer) {
1118 Object* head = heap->undefined_value(); 1163 Object* undefined = heap->undefined_value();
1164 Object* head = undefined;
1119 JSFunction* tail = NULL; 1165 JSFunction* tail = NULL;
1120 Object* candidate = function; 1166 Object* candidate = function;
1121 while (candidate != heap->undefined_value()) { 1167 while (candidate != undefined) {
1122 // Check whether to keep the candidate in the list. 1168 // Check whether to keep the candidate in the list.
1123 JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate); 1169 JSFunction* candidate_function = reinterpret_cast<JSFunction*>(candidate);
1124 Object* retain = retainer->RetainAs(candidate); 1170 Object* retain = retainer->RetainAs(candidate);
1125 if (retain != NULL) { 1171 if (retain != NULL) {
1126 if (head == heap->undefined_value()) { 1172 if (head == undefined) {
1127 // First element in the list. 1173 // First element in the list.
1128 head = candidate_function; 1174 head = retain;
1129 } else { 1175 } else {
1130 // Subsequent elements in the list. 1176 // Subsequent elements in the list.
1131 ASSERT(tail != NULL); 1177 ASSERT(tail != NULL);
1132 tail->set_next_function_link(candidate_function); 1178 tail->set_next_function_link(retain);
1133 } 1179 }
1134 // Retained function is new tail. 1180 // Retained function is new tail.
1181 candidate_function = reinterpret_cast<JSFunction*>(retain);
1135 tail = candidate_function; 1182 tail = candidate_function;
1183
1184 ASSERT(retain->IsUndefined() || retain->IsJSFunction());
1185
1186 if (retain == undefined) break;
1136 } 1187 }
1188
1137 // Move to next element in the list. 1189 // Move to next element in the list.
1138 candidate = candidate_function->next_function_link(); 1190 candidate = candidate_function->next_function_link();
1139 } 1191 }
1140 1192
1141 // Terminate the list if there is one or more elements. 1193 // Terminate the list if there is one or more elements.
1142 if (tail != NULL) { 1194 if (tail != NULL) {
1143 tail->set_next_function_link(heap->undefined_value()); 1195 tail->set_next_function_link(undefined);
1144 } 1196 }
1145 1197
1146 return head; 1198 return head;
1147 } 1199 }
1148 1200
1149 1201
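The substantive change in ProcessFunctionWeakReferences() above (mirrored in ProcessWeakReferences() below) is that the rebuilt list now links retain, the object returned by the retainer, rather than the original candidate. The toy filter below, using made-up Node and RetainAs() stand-ins, illustrates the pattern: if the retainer is allowed to hand back a substitute object, linking the candidate instead of the returned object would silently drop that substitution.

    // Hypothetical sketch of weak-list filtering. RetainAs() may return the
    // candidate itself, a replacement, or nullptr (meaning: drop it); the list
    // must be relinked through the returned object, as the diff now does.
    #include <cstdio>

    struct Node {
      int id;
      Node* next;
    };

    // Stand-in for WeakObjectRetainer::RetainAs(): keep even ids, drop odd ones.
    Node* RetainAs(Node* candidate) {
      return (candidate->id % 2 == 0) ? candidate : nullptr;
    }

    Node* ProcessWeakList(Node* list) {
      Node* head = nullptr;
      Node* tail = nullptr;
      for (Node* candidate = list; candidate != nullptr;
           candidate = candidate->next) {
        Node* retain = RetainAs(candidate);
        if (retain != nullptr) {
          if (head == nullptr) {
            head = retain;        // first retained element becomes the head
          } else {
            tail->next = retain;  // link what the retainer returned, not candidate
          }
          tail = retain;          // the retained object is the new tail
        }
      }
      if (tail != nullptr) tail->next = nullptr;  // terminate the rebuilt list
      return head;
    }

    int main() {
      Node d = {4, nullptr}, c = {3, &d}, b = {2, &c}, a = {1, &b};
      for (Node* n = ProcessWeakList(&a); n != nullptr; n = n->next) {
        std::printf("%d ", n->id);  // prints: 2 4
      }
      std::printf("\n");
      return 0;
    }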
1150 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) { 1202 void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
1151 Object* head = undefined_value(); 1203 Object* undefined = undefined_value();
1204 Object* head = undefined;
1152 Context* tail = NULL; 1205 Context* tail = NULL;
1153 Object* candidate = global_contexts_list_; 1206 Object* candidate = global_contexts_list_;
1154 while (candidate != undefined_value()) { 1207 while (candidate != undefined) {
1155 // Check whether to keep the candidate in the list. 1208 // Check whether to keep the candidate in the list.
1156 Context* candidate_context = reinterpret_cast<Context*>(candidate); 1209 Context* candidate_context = reinterpret_cast<Context*>(candidate);
1157 Object* retain = retainer->RetainAs(candidate); 1210 Object* retain = retainer->RetainAs(candidate);
1158 if (retain != NULL) { 1211 if (retain != NULL) {
1159 if (head == undefined_value()) { 1212 if (head == undefined) {
1160 // First element in the list. 1213 // First element in the list.
1161 head = candidate_context; 1214 head = retain;
1162 } else { 1215 } else {
1163 // Subsequent elements in the list. 1216 // Subsequent elements in the list.
1164 ASSERT(tail != NULL); 1217 ASSERT(tail != NULL);
1165 tail->set_unchecked(this, 1218 tail->set_unchecked(this,
1166 Context::NEXT_CONTEXT_LINK, 1219 Context::NEXT_CONTEXT_LINK,
1167 candidate_context, 1220 retain,
1168 UPDATE_WRITE_BARRIER); 1221 UPDATE_WRITE_BARRIER);
1169 } 1222 }
1170 // Retained context is new tail. 1223 // Retained context is new tail.
1224 candidate_context = reinterpret_cast<Context*>(retain);
1171 tail = candidate_context; 1225 tail = candidate_context;
1172 1226
1227 if (retain == undefined) break;
1228
1173 // Process the weak list of optimized functions for the context. 1229 // Process the weak list of optimized functions for the context.
1174 Object* function_list_head = 1230 Object* function_list_head =
1175 ProcessFunctionWeakReferences( 1231 ProcessFunctionWeakReferences(
1176 this, 1232 this,
1177 candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST), 1233 candidate_context->get(Context::OPTIMIZED_FUNCTIONS_LIST),
1178 retainer); 1234 retainer);
1179 candidate_context->set_unchecked(this, 1235 candidate_context->set_unchecked(this,
1180 Context::OPTIMIZED_FUNCTIONS_LIST, 1236 Context::OPTIMIZED_FUNCTIONS_LIST,
1181 function_list_head, 1237 function_list_head,
1182 UPDATE_WRITE_BARRIER); 1238 UPDATE_WRITE_BARRIER);
1183 } 1239 }
1240
1184 // Move to next element in the list. 1241 // Move to next element in the list.
1185 candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK); 1242 candidate = candidate_context->get(Context::NEXT_CONTEXT_LINK);
1186 } 1243 }
1187 1244
1188 // Terminate the list if there is one or more elements. 1245 // Terminate the list if there is one or more elements.
1189 if (tail != NULL) { 1246 if (tail != NULL) {
1190 tail->set_unchecked(this, 1247 tail->set_unchecked(this,
1191 Context::NEXT_CONTEXT_LINK, 1248 Context::NEXT_CONTEXT_LINK,
1192 Heap::undefined_value(), 1249 Heap::undefined_value(),
1193 UPDATE_WRITE_BARRIER); 1250 UPDATE_WRITE_BARRIER);
(...skipping 11 matching lines...)
1205 if (!heap->InNewSpace(object)) return; 1262 if (!heap->InNewSpace(object)) return;
1206 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p), 1263 Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
1207 reinterpret_cast<HeapObject*>(object)); 1264 reinterpret_cast<HeapObject*>(object));
1208 } 1265 }
1209 }; 1266 };
1210 1267
1211 1268
1212 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor, 1269 Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
1213 Address new_space_front) { 1270 Address new_space_front) {
1214 do { 1271 do {
1215 ASSERT(new_space_front <= new_space_.top()); 1272 SemiSpace::AssertValidRange(new_space_front, new_space_.top());
1216
1217 // The addresses new_space_front and new_space_.top() define a 1273 // The addresses new_space_front and new_space_.top() define a
1218 // queue of unprocessed copied objects. Process them until the 1274 // queue of unprocessed copied objects. Process them until the
1219 // queue is empty. 1275 // queue is empty.
1220 while (new_space_front < new_space_.top()) { 1276 while (new_space_front != new_space_.top()) {
1221 HeapObject* object = HeapObject::FromAddress(new_space_front); 1277 if (!NewSpacePage::IsAtEnd(new_space_front)) {
1222 new_space_front += NewSpaceScavenger::IterateBody(object->map(), object); 1278 HeapObject* object = HeapObject::FromAddress(new_space_front);
1279 new_space_front +=
1280 NewSpaceScavenger::IterateBody(object->map(), object);
1281 } else {
1282 new_space_front =
1283 NewSpacePage::FromLimit(new_space_front)->next_page()->body();
1284 }
1223 } 1285 }
1224 1286
1225 // Promote and process all the to-be-promoted objects. 1287 // Promote and process all the to-be-promoted objects.
1226 while (!promotion_queue_.is_empty()) { 1288 {
1227 HeapObject* target; 1289 StoreBufferRebuildScope scope(this,
1228 int size; 1290 store_buffer(),
1229 promotion_queue_.remove(&target, &size); 1291 &ScavengeStoreBufferCallback);
1292 while (!promotion_queue()->is_empty()) {
1293 HeapObject* target;
1294 int size;
1295 promotion_queue()->remove(&target, &size);
1230 1296
1231 // Promoted object might already be partially visited 1297 // Promoted object might already be partially visited
1232 // during dirty regions iteration. Thus we search specifically 1298 // during old space pointer iteration. Thus we search specifically
1233 // for pointers to from semispace instead of looking for pointers 1299 // for pointers to from semispace instead of looking for pointers
1234 // to new space. 1300 // to new space.
1235 ASSERT(!target->IsMap()); 1301 ASSERT(!target->IsMap());
1236 IterateAndMarkPointersToFromSpace(target->address(), 1302 IterateAndMarkPointersToFromSpace(target->address(),
1237 target->address() + size, 1303 target->address() + size,
1238 &ScavengePointer); 1304 &ScavengeObject);
1305 }
1239 } 1306 }
1240 1307
1241 // Take another spin if there are now unswept objects in new space 1308 // Take another spin if there are now unswept objects in new space
1242 // (there are currently no more unswept promoted objects). 1309 // (there are currently no more unswept promoted objects).
1243 } while (new_space_front < new_space_.top()); 1310 } while (new_space_front != new_space_.top());
1244 1311
1245 return new_space_front; 1312 return new_space_front;
1246 } 1313 }
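Note: DoScavenge is the classic Cheney loop: the region between new_space_front and new_space_.top() is a queue of objects that were copied but not yet scanned, and scanning an entry may append more copies to the queue. A self-contained sketch of that scan-pointer-as-work-queue idea over a toy object graph, assuming an illustrative Obj type and an explicit forwarding map rather than V8's in-object forwarding:

    #include <cstddef>
    #include <unordered_map>
    #include <vector>

    // Toy heap object: just a list of references to other objects.
    struct Obj {
      std::vector<Obj*> slots;
    };

    // Cheney-style copy: to_space is both the destination and the work queue.
    // Everything between 'scan' and to_space.size() has been copied but not yet
    // scanned -- the role new_space_front and new_space_.top() play above.
    std::vector<Obj*> Scavenge(const std::vector<Obj*>& roots) {
      std::vector<Obj*> to_space;
      std::unordered_map<Obj*, Obj*> forwarded;   // old object -> its copy

      auto copy = [&](Obj* o) -> Obj* {
        auto it = forwarded.find(o);
        if (it != forwarded.end()) return it->second;   // Already evacuated.
        Obj* clone = new Obj(*o);                       // Shallow copy of slots.
        forwarded[o] = clone;
        to_space.push_back(clone);                      // Enqueue for scanning.
        return clone;
      };

      std::vector<Obj*> new_roots;
      for (std::size_t i = 0; i < roots.size(); ++i) {
        new_roots.push_back(copy(roots[i]));
      }

      // Drain the queue of unscanned copies; scanning may grow the queue.
      for (std::size_t scan = 0; scan < to_space.size(); ++scan) {
        std::vector<Obj*>& slots = to_space[scan]->slots;
        for (std::size_t i = 0; i < slots.size(); ++i) {
          slots[i] = copy(slots[i]);   // Redirect the slot into to-space.
        }
      }
      return new_roots;   // From-space objects are left for the caller to free.
    }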
1247 1314
1248 1315
1249 enum LoggingAndProfiling { 1316 enum LoggingAndProfiling {
1250 LOGGING_AND_PROFILING_ENABLED, 1317 LOGGING_AND_PROFILING_ENABLED,
1251 LOGGING_AND_PROFILING_DISABLED 1318 LOGGING_AND_PROFILING_DISABLED
1252 }; 1319 };
1253 1320
1254 1321
1255 typedef void (*ScavengingCallback)(Map* map, 1322 enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
1256 HeapObject** slot,
1257 HeapObject* object);
1258 1323
1259 1324
1260 static Atomic32 scavenging_visitors_table_mode_; 1325 template<MarksHandling marks_handling,
1261 static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_; 1326 LoggingAndProfiling logging_and_profiling_mode>
1262
1263
1264 INLINE(static void DoScavengeObject(Map* map,
1265 HeapObject** slot,
1266 HeapObject* obj));
1267
1268
1269 void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
1270 scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
1271 }
1272
1273
1274 template<LoggingAndProfiling logging_and_profiling_mode>
1275 class ScavengingVisitor : public StaticVisitorBase { 1327 class ScavengingVisitor : public StaticVisitorBase {
1276 public: 1328 public:
1277 static void Initialize() { 1329 static void Initialize() {
1278 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString); 1330 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
1279 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString); 1331 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
1280 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate); 1332 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
1281 table_.Register(kVisitByteArray, &EvacuateByteArray); 1333 table_.Register(kVisitByteArray, &EvacuateByteArray);
1282 table_.Register(kVisitFixedArray, &EvacuateFixedArray); 1334 table_.Register(kVisitFixedArray, &EvacuateFixedArray);
1283 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray); 1335 table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
1284 1336
(...skipping 14 matching lines...)
1299 template VisitSpecialized<SharedFunctionInfo::kSize>); 1351 template VisitSpecialized<SharedFunctionInfo::kSize>);
1300 1352
1301 table_.Register(kVisitJSWeakMap, 1353 table_.Register(kVisitJSWeakMap,
1302 &ObjectEvacuationStrategy<POINTER_OBJECT>:: 1354 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1303 Visit); 1355 Visit);
1304 1356
1305 table_.Register(kVisitJSRegExp, 1357 table_.Register(kVisitJSRegExp,
1306 &ObjectEvacuationStrategy<POINTER_OBJECT>:: 1358 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1307 Visit); 1359 Visit);
1308 1360
1309 table_.Register(kVisitJSFunction, 1361 if (marks_handling == IGNORE_MARKS) {
1310 &ObjectEvacuationStrategy<POINTER_OBJECT>:: 1362 table_.Register(kVisitJSFunction,
1311 template VisitSpecialized<JSFunction::kSize>); 1363 &ObjectEvacuationStrategy<POINTER_OBJECT>::
1364 template VisitSpecialized<JSFunction::kSize>);
1365 } else {
1366 table_.Register(kVisitJSFunction, &EvacuateJSFunction);
1367 }
1312 1368
1313 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>, 1369 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
1314 kVisitDataObject, 1370 kVisitDataObject,
1315 kVisitDataObjectGeneric>(); 1371 kVisitDataObjectGeneric>();
1316 1372
1317 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, 1373 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
1318 kVisitJSObject, 1374 kVisitJSObject,
1319 kVisitJSObjectGeneric>(); 1375 kVisitJSObjectGeneric>();
1320 1376
1321 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, 1377 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
(...skipping 44 matching lines...)
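Note: the registrations above fill a table of scavenging callbacks indexed by a map's visitor id, so the hot path is one array lookup per object. A minimal sketch of that dispatch-table shape, assuming a small fixed id space; VisitorId, ScavengeCallback and DispatchTable are illustrative names:

    enum VisitorId { kVisitDataObject, kVisitPointerObject, kVisitorIdCount };

    struct Object;  // Opaque placeholder for the sketch.
    typedef void (*ScavengeCallback)(Object** slot, Object* object);

    class DispatchTable {
     public:
      DispatchTable() {
        for (int i = 0; i < kVisitorIdCount; ++i) table_[i] = nullptr;
      }
      void Register(VisitorId id, ScavengeCallback callback) {
        table_[id] = callback;
      }
      ScavengeCallback GetVisitor(VisitorId id) const { return table_[id]; }
      // Copying a whole table lets the heap swap behaviours in one step, as the
      // CopyFrom calls later in this file do.
      void CopyFrom(const DispatchTable& other) {
        for (int i = 0; i < kVisitorIdCount; ++i) table_[i] = other.table_[i];
      }
     private:
      ScavengeCallback table_[kVisitorIdCount];
    };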
1366 Isolate* isolate = heap->isolate(); 1422 Isolate* isolate = heap->isolate();
1367 if (isolate->logger()->is_logging() || 1423 if (isolate->logger()->is_logging() ||
1368 CpuProfiler::is_profiling(isolate)) { 1424 CpuProfiler::is_profiling(isolate)) {
1369 if (target->IsSharedFunctionInfo()) { 1425 if (target->IsSharedFunctionInfo()) {
1370 PROFILE(isolate, SharedFunctionInfoMoveEvent( 1426 PROFILE(isolate, SharedFunctionInfoMoveEvent(
1371 source->address(), target->address())); 1427 source->address(), target->address()));
1372 } 1428 }
1373 } 1429 }
1374 } 1430 }
1375 1431
1432 if (marks_handling == TRANSFER_MARKS) {
1433 if (Marking::TransferColor(source, target)) {
1434 MemoryChunk::IncrementLiveBytes(target->address(), size);
1435 }
1436 }
1437
1376 return target; 1438 return target;
1377 } 1439 }
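Note: MigrateObject copies the object to its new location and, when the incremental marker is running, carries the mark bit and live-byte accounting along with it. A small sketch of the usual copy-plus-forwarding step in a copying collector, using an illustrative ToyObject header rather than V8's object layout:

    #include <cstddef>
    #include <cstring>

    struct ToyObject {
      ToyObject* forwarding;   // Points at the new copy once evacuated.
      bool marked;             // Mark bit, meaningful while marking is active.
      std::size_t size;        // Total size in bytes, payload included.
    };

    // Copies 'size' bytes to 'target' (the caller reserved them), leaves a
    // forwarding pointer in the old copy, and transfers the mark plus the
    // destination's live-byte accounting when the marker is running.
    ToyObject* Migrate(ToyObject* source, ToyObject* target, std::size_t size,
                       bool marking_active, std::size_t* live_bytes) {
      std::memcpy(target, source, size);
      source->forwarding = target;
      target->marked = marking_active && source->marked;
      if (target->marked) *live_bytes += size;  // Keep the marker's tally exact.
      return target;
    }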
1378 1440
1379
1380 template<ObjectContents object_contents, SizeRestriction size_restriction> 1441 template<ObjectContents object_contents, SizeRestriction size_restriction>
1381 static inline void EvacuateObject(Map* map, 1442 static inline void EvacuateObject(Map* map,
1382 HeapObject** slot, 1443 HeapObject** slot,
1383 HeapObject* object, 1444 HeapObject* object,
1384 int object_size) { 1445 int object_size) {
1385 ASSERT((size_restriction != SMALL) || 1446 ASSERT((size_restriction != SMALL) ||
1386 (object_size <= Page::kMaxHeapObjectSize)); 1447 (object_size <= Page::kMaxHeapObjectSize));
1387 ASSERT(object->Size() == object_size); 1448 ASSERT(object->Size() == object_size);
1388 1449
1389 Heap* heap = map->heap(); 1450 Heap* heap = map->GetHeap();
1390 if (heap->ShouldBePromoted(object->address(), object_size)) { 1451 if (heap->ShouldBePromoted(object->address(), object_size)) {
1391 MaybeObject* maybe_result; 1452 MaybeObject* maybe_result;
1392 1453
1393 if ((size_restriction != SMALL) && 1454 if ((size_restriction != SMALL) &&
1394 (object_size > Page::kMaxHeapObjectSize)) { 1455 (object_size > Page::kMaxHeapObjectSize)) {
1395 maybe_result = heap->lo_space()->AllocateRawFixedArray(object_size); 1456 maybe_result = heap->lo_space()->AllocateRaw(object_size,
1457 NOT_EXECUTABLE);
1396 } else { 1458 } else {
1397 if (object_contents == DATA_OBJECT) { 1459 if (object_contents == DATA_OBJECT) {
1398 maybe_result = heap->old_data_space()->AllocateRaw(object_size); 1460 maybe_result = heap->old_data_space()->AllocateRaw(object_size);
1399 } else { 1461 } else {
1400 maybe_result = heap->old_pointer_space()->AllocateRaw(object_size); 1462 maybe_result = heap->old_pointer_space()->AllocateRaw(object_size);
1401 } 1463 }
1402 } 1464 }
1403 1465
1404 Object* result = NULL; // Initialization to please compiler. 1466 Object* result = NULL; // Initialization to please compiler.
1405 if (maybe_result->ToObject(&result)) { 1467 if (maybe_result->ToObject(&result)) {
1406 HeapObject* target = HeapObject::cast(result); 1468 HeapObject* target = HeapObject::cast(result);
1407 *slot = MigrateObject(heap, object , target, object_size); 1469 *slot = MigrateObject(heap, object , target, object_size);
1408 1470
1409 if (object_contents == POINTER_OBJECT) { 1471 if (object_contents == POINTER_OBJECT) {
1410 heap->promotion_queue()->insert(target, object_size); 1472 heap->promotion_queue()->insert(target, object_size);
1411 } 1473 }
1412 1474
1413 heap->tracer()->increment_promoted_objects_size(object_size); 1475 heap->tracer()->increment_promoted_objects_size(object_size);
1414 return; 1476 return;
1415 } 1477 }
1416 } 1478 }
1417 Object* result = 1479 MaybeObject* allocation = heap->new_space()->AllocateRaw(object_size);
1418 heap->new_space()->AllocateRaw(object_size)->ToObjectUnchecked(); 1480 Object* result = allocation->ToObjectUnchecked();
1481
1419 *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size); 1482 *slot = MigrateObject(heap, object, HeapObject::cast(result), object_size);
1420 return; 1483 return;
1421 } 1484 }
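Note: EvacuateObject decides where a survivor lands: oversized objects go to the large-object space, promotable objects to an old space chosen by whether they hold pointers, and everything else stays in new space (with a new-space fallback if the old-space allocation fails). A sketch of just that decision, with illustrative parameter names:

    #include <cstddef>

    enum Destination {
      kNewSpace, kOldDataSpace, kOldPointerSpace, kLargeObjectSpace
    };

    // Mirrors the shape of the decision above; the real code additionally falls
    // back to new space when the old-space allocation fails.
    Destination ChooseDestination(std::size_t object_size,
                                  std::size_t max_regular_object_size,
                                  bool should_be_promoted,
                                  bool contains_pointers) {
      if (object_size > max_regular_object_size) return kLargeObjectSpace;
      if (should_be_promoted) {
        return contains_pointers ? kOldPointerSpace : kOldDataSpace;
      }
      return kNewSpace;
    }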
1422 1485
1423 1486
1487 static inline void EvacuateJSFunction(Map* map,
1488 HeapObject** slot,
1489 HeapObject* object) {
1490 ObjectEvacuationStrategy<POINTER_OBJECT>::
1491 template VisitSpecialized<JSFunction::kSize>(map, slot, object);
1492
1493 HeapObject* target = *slot;
1494 MarkBit mark_bit = Marking::MarkBitFrom(target);
1495 if (Marking::IsBlack(mark_bit)) {
1496 // This object is black and it might not be rescanned by marker.
1497 // We should explicitly record code entry slot for compaction because
1498 // promotion queue processing (IterateAndMarkPointersToFromSpace) will
1499 // miss it as it is not HeapObject-tagged.
1500 Address code_entry_slot =
1501 target->address() + JSFunction::kCodeEntryOffset;
1502 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
1503 map->GetHeap()->mark_compact_collector()->
1504 RecordCodeEntrySlot(code_entry_slot, code);
1505 }
1506 }
1507
1508
1424 static inline void EvacuateFixedArray(Map* map, 1509 static inline void EvacuateFixedArray(Map* map,
1425 HeapObject** slot, 1510 HeapObject** slot,
1426 HeapObject* object) { 1511 HeapObject* object) {
1427 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object); 1512 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
1428 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map, 1513 EvacuateObject<POINTER_OBJECT, UNKNOWN_SIZE>(map,
1429 slot, 1514 slot,
1430 object, 1515 object,
1431 object_size); 1516 object_size);
1432 } 1517 }
1433 1518
(...skipping 38 matching lines...)
1472 1557
1473 static inline bool IsShortcutCandidate(int type) { 1558 static inline bool IsShortcutCandidate(int type) {
1474 return ((type & kShortcutTypeMask) == kShortcutTypeTag); 1559 return ((type & kShortcutTypeMask) == kShortcutTypeTag);
1475 } 1560 }
1476 1561
1477 static inline void EvacuateShortcutCandidate(Map* map, 1562 static inline void EvacuateShortcutCandidate(Map* map,
1478 HeapObject** slot, 1563 HeapObject** slot,
1479 HeapObject* object) { 1564 HeapObject* object) {
1480 ASSERT(IsShortcutCandidate(map->instance_type())); 1565 ASSERT(IsShortcutCandidate(map->instance_type()));
1481 1566
1482 if (ConsString::cast(object)->unchecked_second() == 1567 Heap* heap = map->GetHeap();
1483 map->heap()->empty_string()) { 1568
1569 if (marks_handling == IGNORE_MARKS &&
1570 ConsString::cast(object)->unchecked_second() ==
1571 heap->empty_string()) {
1484 HeapObject* first = 1572 HeapObject* first =
1485 HeapObject::cast(ConsString::cast(object)->unchecked_first()); 1573 HeapObject::cast(ConsString::cast(object)->unchecked_first());
1486 1574
1487 *slot = first; 1575 *slot = first;
1488 1576
1489 if (!map->heap()->InNewSpace(first)) { 1577 if (!heap->InNewSpace(first)) {
1490 object->set_map_word(MapWord::FromForwardingAddress(first)); 1578 object->set_map_word(MapWord::FromForwardingAddress(first));
1491 return; 1579 return;
1492 } 1580 }
1493 1581
1494 MapWord first_word = first->map_word(); 1582 MapWord first_word = first->map_word();
1495 if (first_word.IsForwardingAddress()) { 1583 if (first_word.IsForwardingAddress()) {
1496 HeapObject* target = first_word.ToForwardingAddress(); 1584 HeapObject* target = first_word.ToForwardingAddress();
1497 1585
1498 *slot = target; 1586 *slot = target;
1499 object->set_map_word(MapWord::FromForwardingAddress(target)); 1587 object->set_map_word(MapWord::FromForwardingAddress(target));
1500 return; 1588 return;
1501 } 1589 }
1502 1590
1503 DoScavengeObject(first->map(), slot, first); 1591 heap->DoScavengeObject(first->map(), slot, first);
1504 object->set_map_word(MapWord::FromForwardingAddress(*slot)); 1592 object->set_map_word(MapWord::FromForwardingAddress(*slot));
1505 return; 1593 return;
1506 } 1594 }
1507 1595
1508 int object_size = ConsString::kSize; 1596 int object_size = ConsString::kSize;
1509 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size); 1597 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size);
1510 } 1598 }
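Note: EvacuateShortcutCandidate avoids copying a cons string whose second half is the empty string by redirecting the referring slot to the first half instead. A toy sketch of that shortcutting idea; StrNode and ShortcutCons are illustrative, not V8's string representation:

    #include <string>

    struct StrNode {
      std::string flat;           // Characters of a leaf node.
      StrNode* first = nullptr;   // Non-null for a cons (concatenation) node.
      StrNode* second = nullptr;
    };

    // Redirects *slot past degenerate cons nodes whose right half is empty,
    // so the wrapper node never has to be copied at all.
    void ShortcutCons(StrNode** slot) {
      StrNode* node = *slot;
      while (node->first != nullptr && node->second != nullptr &&
             node->second->first == nullptr && node->second->flat.empty()) {
        node = node->first;
      }
      *slot = node;
    }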
1511 1599
1512 template<ObjectContents object_contents> 1600 template<ObjectContents object_contents>
1513 class ObjectEvacuationStrategy { 1601 class ObjectEvacuationStrategy {
(...skipping 10 matching lines...)
1524 HeapObject* object) { 1612 HeapObject* object) {
1525 int object_size = map->instance_size(); 1613 int object_size = map->instance_size();
1526 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size); 1614 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size);
1527 } 1615 }
1528 }; 1616 };
1529 1617
1530 static VisitorDispatchTable<ScavengingCallback> table_; 1618 static VisitorDispatchTable<ScavengingCallback> table_;
1531 }; 1619 };
1532 1620
1533 1621
1534 template<LoggingAndProfiling logging_and_profiling_mode> 1622 template<MarksHandling marks_handling,
1623 LoggingAndProfiling logging_and_profiling_mode>
1535 VisitorDispatchTable<ScavengingCallback> 1624 VisitorDispatchTable<ScavengingCallback>
1536 ScavengingVisitor<logging_and_profiling_mode>::table_; 1625 ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
1537 1626
1538 1627
1539 static void InitializeScavengingVisitorsTables() { 1628 static void InitializeScavengingVisitorsTables() {
1540 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize(); 1629 ScavengingVisitor<TRANSFER_MARKS,
1541 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize(); 1630 LOGGING_AND_PROFILING_DISABLED>::Initialize();
1542 scavenging_visitors_table_.CopyFrom( 1631 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
1543 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable()); 1632 ScavengingVisitor<TRANSFER_MARKS,
1544 scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED; 1633 LOGGING_AND_PROFILING_ENABLED>::Initialize();
1634 ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
1545 } 1635 }
1546 1636
1547 1637
1548 void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() { 1638 void Heap::SelectScavengingVisitorsTable() {
1549 if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) { 1639 bool logging_and_profiling =
1550 // Table was already updated by some isolate. 1640 isolate()->logger()->is_logging() ||
1551 return;
1552 }
1553
1554 if (isolate()->logger()->is_logging() |
1555 CpuProfiler::is_profiling(isolate()) || 1641 CpuProfiler::is_profiling(isolate()) ||
1556 (isolate()->heap_profiler() != NULL && 1642 (isolate()->heap_profiler() != NULL &&
1557 isolate()->heap_profiler()->is_profiling())) { 1643 isolate()->heap_profiler()->is_profiling());
1558 // If one of the isolates is doing scavenge at this moment of time
1559 // it might see this table in an inconsistent state when
1560 // some of the callbacks point to
1561 // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others
1562 // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>.
1563 // However this does not lead to any bugs as such isolate does not have
1564 // profiling enabled and any isolate with enabled profiling is guaranteed
1565 // to see the table in the consistent state.
1566 scavenging_visitors_table_.CopyFrom(
1567 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable());
1568 1644
1569 // We use Release_Store to prevent reordering of this write before writes 1645 if (!incremental_marking()->IsMarking()) {
1570 // to the table. 1646 if (!logging_and_profiling) {
1571 Release_Store(&scavenging_visitors_table_mode_, 1647 scavenging_visitors_table_.CopyFrom(
1572 LOGGING_AND_PROFILING_ENABLED); 1648 ScavengingVisitor<IGNORE_MARKS,
1649 LOGGING_AND_PROFILING_DISABLED>::GetTable());
1650 } else {
1651 scavenging_visitors_table_.CopyFrom(
1652 ScavengingVisitor<IGNORE_MARKS,
1653 LOGGING_AND_PROFILING_ENABLED>::GetTable());
1654 }
1655 } else {
1656 if (!logging_and_profiling) {
1657 scavenging_visitors_table_.CopyFrom(
1658 ScavengingVisitor<TRANSFER_MARKS,
1659 LOGGING_AND_PROFILING_DISABLED>::GetTable());
1660 } else {
1661 scavenging_visitors_table_.CopyFrom(
1662 ScavengingVisitor<TRANSFER_MARKS,
1663 LOGGING_AND_PROFILING_ENABLED>::GetTable());
1664 }
1573 } 1665 }
1574 } 1666 }
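Note: the scavenging visitors are instantiated for every combination of mark handling and logging, and SelectScavengingVisitorsTable picks one of the four pre-built tables from two runtime flags. A minimal sketch of that compile-time-specialization-plus-runtime-selection pattern, with illustrative names:

    enum MarksMode { kIgnoreMarks, kTransferMarks };
    enum LoggingMode { kLoggingDisabled, kLoggingEnabled };

    typedef void (*VisitFn)();

    template <MarksMode marks, LoggingMode logging>
    struct ToyScavengingVisitor {
      static void Visit() {
        // Behaviour is fixed at compile time by the template parameters; each
        // of the four instantiations owns its own copy of this function.
      }
      static VisitFn GetTable() { return &Visit; }
    };

    // Picks one of the four statically built variants from two runtime flags,
    // the same 2x2 choice SelectScavengingVisitorsTable makes above.
    inline VisitFn SelectVisitor(bool incremental_marking, bool logging) {
      if (!incremental_marking) {
        return logging
            ? ToyScavengingVisitor<kIgnoreMarks, kLoggingEnabled>::GetTable()
            : ToyScavengingVisitor<kIgnoreMarks, kLoggingDisabled>::GetTable();
      }
      return logging
          ? ToyScavengingVisitor<kTransferMarks, kLoggingEnabled>::GetTable()
          : ToyScavengingVisitor<kTransferMarks, kLoggingDisabled>::GetTable();
    }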
1575 1667
1576 1668
1577 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { 1669 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
1578 ASSERT(HEAP->InFromSpace(object)); 1670 ASSERT(HEAP->InFromSpace(object));
1579 MapWord first_word = object->map_word(); 1671 MapWord first_word = object->map_word();
1580 ASSERT(!first_word.IsForwardingAddress()); 1672 ASSERT(!first_word.IsForwardingAddress());
1581 Map* map = first_word.ToMap(); 1673 Map* map = first_word.ToMap();
1582 DoScavengeObject(map, p, object); 1674 map->GetHeap()->DoScavengeObject(map, p, object);
1583 } 1675 }
1584 1676
1585 1677
1586 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type, 1678 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type,
1587 int instance_size) { 1679 int instance_size) {
1588 Object* result; 1680 Object* result;
1589 { MaybeObject* maybe_result = AllocateRawMap(); 1681 { MaybeObject* maybe_result = AllocateRawMap();
1590 if (!maybe_result->ToObject(&result)) return maybe_result; 1682 if (!maybe_result->ToObject(&result)) return maybe_result;
1591 } 1683 }
1592 1684
1593 // Map::cast cannot be used due to uninitialized map field. 1685 // Map::cast cannot be used due to uninitialized map field.
1594 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map()); 1686 reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
1595 reinterpret_cast<Map*>(result)->set_instance_type(instance_type); 1687 reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
1596 reinterpret_cast<Map*>(result)->set_instance_size(instance_size); 1688 reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
1597 reinterpret_cast<Map*>(result)->set_visitor_id( 1689 reinterpret_cast<Map*>(result)->set_visitor_id(
1598 StaticVisitorBase::GetVisitorId(instance_type, instance_size)); 1690 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
1599 reinterpret_cast<Map*>(result)->set_inobject_properties(0); 1691 reinterpret_cast<Map*>(result)->set_inobject_properties(0);
1600 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0); 1692 reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
1601 reinterpret_cast<Map*>(result)->set_unused_property_fields(0); 1693 reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
1602 reinterpret_cast<Map*>(result)->set_bit_field(0); 1694 reinterpret_cast<Map*>(result)->set_bit_field(0);
1603 reinterpret_cast<Map*>(result)->set_bit_field2(0); 1695 reinterpret_cast<Map*>(result)->set_bit_field2(0);
1604 return result; 1696 return result;
1605 } 1697 }
1606 1698
1607 1699
1608 MaybeObject* Heap::AllocateMap(InstanceType instance_type, int instance_size) { 1700 MaybeObject* Heap::AllocateMap(InstanceType instance_type,
1701 int instance_size,
1702 ElementsKind elements_kind) {
1609 Object* result; 1703 Object* result;
1610 { MaybeObject* maybe_result = AllocateRawMap(); 1704 { MaybeObject* maybe_result = AllocateRawMap();
1611 if (!maybe_result->ToObject(&result)) return maybe_result; 1705 if (!maybe_result->ToObject(&result)) return maybe_result;
1612 } 1706 }
1613 1707
1614 Map* map = reinterpret_cast<Map*>(result); 1708 Map* map = reinterpret_cast<Map*>(result);
1615 map->set_map(meta_map()); 1709 map->set_map(meta_map());
1616 map->set_instance_type(instance_type); 1710 map->set_instance_type(instance_type);
1617 map->set_visitor_id( 1711 map->set_visitor_id(
1618 StaticVisitorBase::GetVisitorId(instance_type, instance_size)); 1712 StaticVisitorBase::GetVisitorId(instance_type, instance_size));
1619 map->set_prototype(null_value()); 1713 map->set_prototype(null_value());
1620 map->set_constructor(null_value()); 1714 map->set_constructor(null_value());
1621 map->set_instance_size(instance_size); 1715 map->set_instance_size(instance_size);
1622 map->set_inobject_properties(0); 1716 map->set_inobject_properties(0);
1623 map->set_pre_allocated_property_fields(0); 1717 map->set_pre_allocated_property_fields(0);
1624 map->init_instance_descriptors(); 1718 map->init_instance_descriptors();
1625 map->set_code_cache(empty_fixed_array()); 1719 map->set_code_cache(empty_fixed_array());
1626 map->set_prototype_transitions(empty_fixed_array()); 1720 map->set_prototype_transitions(empty_fixed_array());
1627 map->set_unused_property_fields(0); 1721 map->set_unused_property_fields(0);
1628 map->set_bit_field(0); 1722 map->set_bit_field(0);
1629 map->set_bit_field2(1 << Map::kIsExtensible); 1723 map->set_bit_field2(1 << Map::kIsExtensible);
1630 map->set_elements_kind(FAST_ELEMENTS); 1724 map->set_elements_kind(elements_kind);
1631 1725
1632 // If the map object is aligned fill the padding area with Smi 0 objects. 1726 // If the map object is aligned fill the padding area with Smi 0 objects.
1633 if (Map::kPadStart < Map::kSize) { 1727 if (Map::kPadStart < Map::kSize) {
1634 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag, 1728 memset(reinterpret_cast<byte*>(map) + Map::kPadStart - kHeapObjectTag,
1635 0, 1729 0,
1636 Map::kSize - Map::kPadStart); 1730 Map::kSize - Map::kPadStart);
1637 } 1731 }
1638 return map; 1732 return map;
1639 } 1733 }
1640 1734
(...skipping 59 matching lines...)
1700 if (!maybe_obj->ToObject(&obj)) return false; 1794 if (!maybe_obj->ToObject(&obj)) return false;
1701 } 1795 }
1702 set_oddball_map(Map::cast(obj)); 1796 set_oddball_map(Map::cast(obj));
1703 1797
1704 // Allocate the empty array. 1798 // Allocate the empty array.
1705 { MaybeObject* maybe_obj = AllocateEmptyFixedArray(); 1799 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1706 if (!maybe_obj->ToObject(&obj)) return false; 1800 if (!maybe_obj->ToObject(&obj)) return false;
1707 } 1801 }
1708 set_empty_fixed_array(FixedArray::cast(obj)); 1802 set_empty_fixed_array(FixedArray::cast(obj));
1709 1803
1710 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE); 1804 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
1711 if (!maybe_obj->ToObject(&obj)) return false; 1805 if (!maybe_obj->ToObject(&obj)) return false;
1712 } 1806 }
1713 set_null_value(obj); 1807 set_null_value(obj);
1714 Oddball::cast(obj)->set_kind(Oddball::kNull); 1808 Oddball::cast(obj)->set_kind(Oddball::kNull);
1715 1809
1716 // Allocate the empty descriptor array. 1810 // Allocate the empty descriptor array.
1717 { MaybeObject* maybe_obj = AllocateEmptyFixedArray(); 1811 { MaybeObject* maybe_obj = AllocateEmptyFixedArray();
1718 if (!maybe_obj->ToObject(&obj)) return false; 1812 if (!maybe_obj->ToObject(&obj)) return false;
1719 } 1813 }
1720 set_empty_descriptor_array(DescriptorArray::cast(obj)); 1814 set_empty_descriptor_array(DescriptorArray::cast(obj));
(...skipping 70 matching lines...)
1791 if (!maybe_obj->ToObject(&obj)) return false; 1885 if (!maybe_obj->ToObject(&obj)) return false;
1792 } 1886 }
1793 set_fixed_double_array_map(Map::cast(obj)); 1887 set_fixed_double_array_map(Map::cast(obj));
1794 1888
1795 { MaybeObject* maybe_obj = 1889 { MaybeObject* maybe_obj =
1796 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel); 1890 AllocateMap(BYTE_ARRAY_TYPE, kVariableSizeSentinel);
1797 if (!maybe_obj->ToObject(&obj)) return false; 1891 if (!maybe_obj->ToObject(&obj)) return false;
1798 } 1892 }
1799 set_byte_array_map(Map::cast(obj)); 1893 set_byte_array_map(Map::cast(obj));
1800 1894
1895 { MaybeObject* maybe_obj =
1896 AllocateMap(FREE_SPACE_TYPE, kVariableSizeSentinel);
1897 if (!maybe_obj->ToObject(&obj)) return false;
1898 }
1899 set_free_space_map(Map::cast(obj));
1900
1801 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED); 1901 { MaybeObject* maybe_obj = AllocateByteArray(0, TENURED);
1802 if (!maybe_obj->ToObject(&obj)) return false; 1902 if (!maybe_obj->ToObject(&obj)) return false;
1803 } 1903 }
1804 set_empty_byte_array(ByteArray::cast(obj)); 1904 set_empty_byte_array(ByteArray::cast(obj));
1805 1905
1806 { MaybeObject* maybe_obj = 1906 { MaybeObject* maybe_obj =
1807 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize); 1907 AllocateMap(EXTERNAL_PIXEL_ARRAY_TYPE, ExternalArray::kAlignedSize);
1808 if (!maybe_obj->ToObject(&obj)) return false; 1908 if (!maybe_obj->ToObject(&obj)) return false;
1809 } 1909 }
1810 set_external_pixel_array_map(Map::cast(obj)); 1910 set_external_pixel_array_map(Map::cast(obj));
(...skipping 180 matching lines...)
1991 HeapObject::cast(result)->set_map(global_property_cell_map()); 2091 HeapObject::cast(result)->set_map(global_property_cell_map());
1992 JSGlobalPropertyCell::cast(result)->set_value(value); 2092 JSGlobalPropertyCell::cast(result)->set_value(value);
1993 return result; 2093 return result;
1994 } 2094 }
1995 2095
1996 2096
1997 MaybeObject* Heap::CreateOddball(const char* to_string, 2097 MaybeObject* Heap::CreateOddball(const char* to_string,
1998 Object* to_number, 2098 Object* to_number,
1999 byte kind) { 2099 byte kind) {
2000 Object* result; 2100 Object* result;
2001 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_DATA_SPACE); 2101 { MaybeObject* maybe_result = Allocate(oddball_map(), OLD_POINTER_SPACE);
2002 if (!maybe_result->ToObject(&result)) return maybe_result; 2102 if (!maybe_result->ToObject(&result)) return maybe_result;
2003 } 2103 }
2004 return Oddball::cast(result)->Initialize(to_string, to_number, kind); 2104 return Oddball::cast(result)->Initialize(to_string, to_number, kind);
2005 } 2105 }
2006 2106
2007 2107
2008 bool Heap::CreateApiObjects() { 2108 bool Heap::CreateApiObjects() {
2009 Object* obj; 2109 Object* obj;
2010 2110
2011 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize); 2111 { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
2012 if (!maybe_obj->ToObject(&obj)) return false; 2112 if (!maybe_obj->ToObject(&obj)) return false;
2013 } 2113 }
2014 set_neander_map(Map::cast(obj)); 2114 // Don't use Smi-only elements optimizations for objects with the neander
2115 // map. There are too many cases where element values are set directly with a
2116 // bottleneck to trap the Smi-only -> fast elements transition, and there
2117 // appears to be no benefit to optimizing this case.
2118 Map* new_neander_map = Map::cast(obj);
2119 new_neander_map->set_elements_kind(FAST_ELEMENTS);
2120 set_neander_map(new_neander_map);
2015 2121
2016 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map()); 2122 { MaybeObject* maybe_obj = AllocateJSObjectFromMap(neander_map());
2017 if (!maybe_obj->ToObject(&obj)) return false; 2123 if (!maybe_obj->ToObject(&obj)) return false;
2018 } 2124 }
2019 Object* elements; 2125 Object* elements;
2020 { MaybeObject* maybe_elements = AllocateFixedArray(2); 2126 { MaybeObject* maybe_elements = AllocateFixedArray(2);
2021 if (!maybe_elements->ToObject(&elements)) return false; 2127 if (!maybe_elements->ToObject(&elements)) return false;
2022 } 2128 }
2023 FixedArray::cast(elements)->set(0, Smi::FromInt(0)); 2129 FixedArray::cast(elements)->set(0, Smi::FromInt(0));
2024 JSObject::cast(obj)->set_elements(FixedArray::cast(elements)); 2130 JSObject::cast(obj)->set_elements(FixedArray::cast(elements));
(...skipping 24 matching lines...)
2049 // gcc-4.4 has a problem generating correct code for the following snippet: 2155 // gcc-4.4 has a problem generating correct code for the following snippet:
2050 // { JSEntryStub stub; 2156 // { JSEntryStub stub;
2051 // js_entry_code_ = *stub.GetCode(); 2157 // js_entry_code_ = *stub.GetCode();
2052 // } 2158 // }
2053 // { JSConstructEntryStub stub; 2159 // { JSConstructEntryStub stub;
2054 // js_construct_entry_code_ = *stub.GetCode(); 2160 // js_construct_entry_code_ = *stub.GetCode();
2055 // } 2161 // }
2056 // To workaround the problem, make separate functions without inlining. 2162 // To workaround the problem, make separate functions without inlining.
2057 Heap::CreateJSEntryStub(); 2163 Heap::CreateJSEntryStub();
2058 Heap::CreateJSConstructEntryStub(); 2164 Heap::CreateJSConstructEntryStub();
2165
2166 // Create stubs that should be there, so we don't unexpectedly have to
2167 // create them if we need them during the creation of another stub.
2168 // Stub creation mixes raw pointers and handles in an unsafe manner so
2169 // we cannot create stubs while we are creating stubs.
2170 CodeStub::GenerateStubsAheadOfTime();
2059 } 2171 }
2060 2172
2061 2173
2062 bool Heap::CreateInitialObjects() { 2174 bool Heap::CreateInitialObjects() {
2063 Object* obj; 2175 Object* obj;
2064 2176
2065 // The -0 value must be set before NumberFromDouble works. 2177 // The -0 value must be set before NumberFromDouble works.
2066 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED); 2178 { MaybeObject* maybe_obj = AllocateHeapNumber(-0.0, TENURED);
2067 if (!maybe_obj->ToObject(&obj)) return false; 2179 if (!maybe_obj->ToObject(&obj)) return false;
2068 } 2180 }
2069 set_minus_zero_value(obj); 2181 set_minus_zero_value(obj);
2070 ASSERT(signbit(minus_zero_value()->Number()) != 0); 2182 ASSERT(signbit(minus_zero_value()->Number()) != 0);
2071 2183
2072 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED); 2184 { MaybeObject* maybe_obj = AllocateHeapNumber(OS::nan_value(), TENURED);
2073 if (!maybe_obj->ToObject(&obj)) return false; 2185 if (!maybe_obj->ToObject(&obj)) return false;
2074 } 2186 }
2075 set_nan_value(obj); 2187 set_nan_value(obj);
2076 2188
2077 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_DATA_SPACE); 2189 { MaybeObject* maybe_obj = AllocateHeapNumber(V8_INFINITY, TENURED);
2190 if (!maybe_obj->ToObject(&obj)) return false;
2191 }
2192 set_infinity_value(obj);
2193
2194 { MaybeObject* maybe_obj = Allocate(oddball_map(), OLD_POINTER_SPACE);
2078 if (!maybe_obj->ToObject(&obj)) return false; 2195 if (!maybe_obj->ToObject(&obj)) return false;
2079 } 2196 }
2080 set_undefined_value(obj); 2197 set_undefined_value(obj);
2081 Oddball::cast(obj)->set_kind(Oddball::kUndefined); 2198 Oddball::cast(obj)->set_kind(Oddball::kUndefined);
2082 ASSERT(!InNewSpace(undefined_value())); 2199 ASSERT(!InNewSpace(undefined_value()));
2083 2200
2084 // Allocate initial symbol table. 2201 // Allocate initial symbol table.
2085 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize); 2202 { MaybeObject* maybe_obj = SymbolTable::Allocate(kInitialSymbolTableSize);
2086 if (!maybe_obj->ToObject(&obj)) return false; 2203 if (!maybe_obj->ToObject(&obj)) return false;
2087 } 2204 }
(...skipping 31 matching lines...)
2119 set_false_value(obj); 2236 set_false_value(obj);
2120 2237
2121 { MaybeObject* maybe_obj = CreateOddball("hole", 2238 { MaybeObject* maybe_obj = CreateOddball("hole",
2122 Smi::FromInt(-1), 2239 Smi::FromInt(-1),
2123 Oddball::kTheHole); 2240 Oddball::kTheHole);
2124 if (!maybe_obj->ToObject(&obj)) return false; 2241 if (!maybe_obj->ToObject(&obj)) return false;
2125 } 2242 }
2126 set_the_hole_value(obj); 2243 set_the_hole_value(obj);
2127 2244
2128 { MaybeObject* maybe_obj = CreateOddball("arguments_marker", 2245 { MaybeObject* maybe_obj = CreateOddball("arguments_marker",
2129 Smi::FromInt(-4), 2246 Smi::FromInt(-2),
2130 Oddball::kArgumentMarker); 2247 Oddball::kArgumentMarker);
2131 if (!maybe_obj->ToObject(&obj)) return false; 2248 if (!maybe_obj->ToObject(&obj)) return false;
2132 } 2249 }
2133 set_arguments_marker(obj); 2250 set_arguments_marker(obj);
2134 2251
2135 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel", 2252 { MaybeObject* maybe_obj = CreateOddball("no_interceptor_result_sentinel",
2136 Smi::FromInt(-2), 2253 Smi::FromInt(-3),
2137 Oddball::kOther); 2254 Oddball::kOther);
2138 if (!maybe_obj->ToObject(&obj)) return false; 2255 if (!maybe_obj->ToObject(&obj)) return false;
2139 } 2256 }
2140 set_no_interceptor_result_sentinel(obj); 2257 set_no_interceptor_result_sentinel(obj);
2141 2258
2142 { MaybeObject* maybe_obj = CreateOddball("termination_exception", 2259 { MaybeObject* maybe_obj = CreateOddball("termination_exception",
2143 Smi::FromInt(-3), 2260 Smi::FromInt(-4),
2144 Oddball::kOther); 2261 Oddball::kOther);
2145 if (!maybe_obj->ToObject(&obj)) return false; 2262 if (!maybe_obj->ToObject(&obj)) return false;
2146 } 2263 }
2147 set_termination_exception(obj); 2264 set_termination_exception(obj);
2148 2265
2266 { MaybeObject* maybe_obj = CreateOddball("frame_alignment_marker",
2267 Smi::FromInt(-5),
2268 Oddball::kOther);
2269 if (!maybe_obj->ToObject(&obj)) return false;
2270 }
2271 set_frame_alignment_marker(obj);
2272 STATIC_ASSERT(Oddball::kLeastHiddenOddballNumber == -5);
2273
2149 // Allocate the empty string. 2274 // Allocate the empty string.
2150 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED); 2275 { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
2151 if (!maybe_obj->ToObject(&obj)) return false; 2276 if (!maybe_obj->ToObject(&obj)) return false;
2152 } 2277 }
2153 set_empty_string(String::cast(obj)); 2278 set_empty_string(String::cast(obj));
2154 2279
2155 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) { 2280 for (unsigned i = 0; i < ARRAY_SIZE(constant_symbol_table); i++) {
2156 { MaybeObject* maybe_obj = 2281 { MaybeObject* maybe_obj =
2157 LookupAsciiSymbol(constant_symbol_table[i].contents); 2282 LookupAsciiSymbol(constant_symbol_table[i].contents);
2158 if (!maybe_obj->ToObject(&obj)) return false; 2283 if (!maybe_obj->ToObject(&obj)) return false;
(...skipping 256 matching lines...)
2415 2540
2416 Object* js_string; 2541 Object* js_string;
2417 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str)); 2542 MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
2418 if (maybe_js_string->ToObject(&js_string)) { 2543 if (maybe_js_string->ToObject(&js_string)) {
2419 SetNumberStringCache(number, String::cast(js_string)); 2544 SetNumberStringCache(number, String::cast(js_string));
2420 } 2545 }
2421 return maybe_js_string; 2546 return maybe_js_string;
2422 } 2547 }
2423 2548
2424 2549
2550 MaybeObject* Heap::Uint32ToString(uint32_t value,
2551 bool check_number_string_cache) {
2552 Object* number;
2553 MaybeObject* maybe = NumberFromUint32(value);
2554 if (!maybe->To<Object>(&number)) return maybe;
2555 return NumberToString(number, check_number_string_cache);
2556 }
2557
2558
2425 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) { 2559 Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
2426 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]); 2560 return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
2427 } 2561 }
2428 2562
2429 2563
2430 Heap::RootListIndex Heap::RootIndexForExternalArrayType( 2564 Heap::RootListIndex Heap::RootIndexForExternalArrayType(
2431 ExternalArrayType array_type) { 2565 ExternalArrayType array_type) {
2432 switch (array_type) { 2566 switch (array_type) {
2433 case kExternalByteArray: 2567 case kExternalByteArray:
2434 return kExternalByteArrayMapRootIndex; 2568 return kExternalByteArrayMapRootIndex;
(...skipping 295 matching lines...)
2730 // dictionary. Check whether we already have the string in the symbol 2864 // dictionary. Check whether we already have the string in the symbol
2731 // table to prevent creation of many unnecessary strings. 2865 // table to prevent creation of many unnecessary strings.
2732 unsigned c1 = buffer->Get(start); 2866 unsigned c1 = buffer->Get(start);
2733 unsigned c2 = buffer->Get(start + 1); 2867 unsigned c2 = buffer->Get(start + 1);
2734 return MakeOrFindTwoCharacterString(this, c1, c2); 2868 return MakeOrFindTwoCharacterString(this, c1, c2);
2735 } 2869 }
2736 2870
2737 // Make an attempt to flatten the buffer to reduce access time. 2871 // Make an attempt to flatten the buffer to reduce access time.
2738 buffer = buffer->TryFlattenGetString(); 2872 buffer = buffer->TryFlattenGetString();
2739 2873
2740 // TODO(1626): For now slicing external strings is not supported. However,
2741 // a flat cons string can have an external string as first part in some cases.
2742 // Therefore we have to single out this case as well.
2743 if (!FLAG_string_slices || 2874 if (!FLAG_string_slices ||
2744 (buffer->IsConsString() && 2875 !buffer->IsFlat() ||
2745 (!buffer->IsFlat() ||
2746 !ConsString::cast(buffer)->first()->IsSeqString())) ||
2747 buffer->IsExternalString() ||
2748 length < SlicedString::kMinLength || 2876 length < SlicedString::kMinLength ||
2749 pretenure == TENURED) { 2877 pretenure == TENURED) {
2750 Object* result; 2878 Object* result;
2751 { MaybeObject* maybe_result = buffer->IsAsciiRepresentation() 2879 // WriteToFlat takes care of the case when an indirect string has a
2752 ? AllocateRawAsciiString(length, pretenure) 2880 // different encoding from its underlying string. These encodings may
2753 : AllocateRawTwoByteString(length, pretenure); 2881 // differ because of externalization.
2882 bool is_ascii = buffer->IsAsciiRepresentation();
2883 { MaybeObject* maybe_result = is_ascii
2884 ? AllocateRawAsciiString(length, pretenure)
2885 : AllocateRawTwoByteString(length, pretenure);
2754 if (!maybe_result->ToObject(&result)) return maybe_result; 2886 if (!maybe_result->ToObject(&result)) return maybe_result;
2755 } 2887 }
2756 String* string_result = String::cast(result); 2888 String* string_result = String::cast(result);
2757 // Copy the characters into the new object. 2889 // Copy the characters into the new object.
2758 if (buffer->IsAsciiRepresentation()) { 2890 if (is_ascii) {
2759 ASSERT(string_result->IsAsciiRepresentation()); 2891 ASSERT(string_result->IsAsciiRepresentation());
2760 char* dest = SeqAsciiString::cast(string_result)->GetChars(); 2892 char* dest = SeqAsciiString::cast(string_result)->GetChars();
2761 String::WriteToFlat(buffer, dest, start, end); 2893 String::WriteToFlat(buffer, dest, start, end);
2762 } else { 2894 } else {
2763 ASSERT(string_result->IsTwoByteRepresentation()); 2895 ASSERT(string_result->IsTwoByteRepresentation());
2764 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars(); 2896 uc16* dest = SeqTwoByteString::cast(string_result)->GetChars();
2765 String::WriteToFlat(buffer, dest, start, end); 2897 String::WriteToFlat(buffer, dest, start, end);
2766 } 2898 }
2767 return result; 2899 return result;
2768 } 2900 }
2769 2901
2770 ASSERT(buffer->IsFlat()); 2902 ASSERT(buffer->IsFlat());
2771 ASSERT(!buffer->IsExternalString());
2772 #if DEBUG 2903 #if DEBUG
2773 buffer->StringVerify(); 2904 buffer->StringVerify();
2774 #endif 2905 #endif
2775 2906
2776 Object* result; 2907 Object* result;
2908 // When slicing an indirect string we use its encoding for a newly created
2909 // slice and don't check the encoding of the underlying string. This is safe
2910 // even if the encodings are different because of externalization. If an
2911 // indirect ASCII string is pointing to a two-byte string, the two-byte char
2912 // codes of the underlying string must still fit into ASCII (because
2913 // externalization must not change char codes).
2777 { Map* map = buffer->IsAsciiRepresentation() 2914 { Map* map = buffer->IsAsciiRepresentation()
2778 ? sliced_ascii_string_map() 2915 ? sliced_ascii_string_map()
2779 : sliced_string_map(); 2916 : sliced_string_map();
2780 MaybeObject* maybe_result = Allocate(map, NEW_SPACE); 2917 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2781 if (!maybe_result->ToObject(&result)) return maybe_result; 2918 if (!maybe_result->ToObject(&result)) return maybe_result;
2782 } 2919 }
2783 2920
2784 AssertNoAllocation no_gc; 2921 AssertNoAllocation no_gc;
2785 SlicedString* sliced_string = SlicedString::cast(result); 2922 SlicedString* sliced_string = SlicedString::cast(result);
2786 sliced_string->set_length(length); 2923 sliced_string->set_length(length);
2787 sliced_string->set_hash_field(String::kEmptyHashField); 2924 sliced_string->set_hash_field(String::kEmptyHashField);
2788 if (buffer->IsConsString()) { 2925 if (buffer->IsConsString()) {
2789 ConsString* cons = ConsString::cast(buffer); 2926 ConsString* cons = ConsString::cast(buffer);
2790 ASSERT(cons->second()->length() == 0); 2927 ASSERT(cons->second()->length() == 0);
2791 sliced_string->set_parent(cons->first()); 2928 sliced_string->set_parent(cons->first());
2792 sliced_string->set_offset(start); 2929 sliced_string->set_offset(start);
2793 } else if (buffer->IsSlicedString()) { 2930 } else if (buffer->IsSlicedString()) {
2794 // Prevent nesting sliced strings. 2931 // Prevent nesting sliced strings.
2795 SlicedString* parent_slice = SlicedString::cast(buffer); 2932 SlicedString* parent_slice = SlicedString::cast(buffer);
2796 sliced_string->set_parent(parent_slice->parent()); 2933 sliced_string->set_parent(parent_slice->parent());
2797 sliced_string->set_offset(start + parent_slice->offset()); 2934 sliced_string->set_offset(start + parent_slice->offset());
2798 } else { 2935 } else {
2799 sliced_string->set_parent(buffer); 2936 sliced_string->set_parent(buffer);
2800 sliced_string->set_offset(start); 2937 sliced_string->set_offset(start);
2801 } 2938 }
2802 ASSERT(sliced_string->parent()->IsSeqString()); 2939 ASSERT(sliced_string->parent()->IsSeqString() ||
2940 sliced_string->parent()->IsExternalString());
2803 return result; 2941 return result;
2804 } 2942 }
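Note: when the substring is long enough, AllocateSubString builds a SlicedString view instead of copying characters, and it always anchors the slice on the underlying sequential or external string so slices never nest. A small sketch of that re-anchoring rule; Backing, Slice and MakeSlice are illustrative:

    #include <cstddef>
    #include <string>

    struct Backing { std::string chars; };   // Stands in for a flat string.

    struct Slice {
      const Backing* parent;   // Always the flat backing store, never a slice.
      std::size_t offset;
      std::size_t length;
    };

    // Slicing a slice re-anchors on the backing store and adds the offsets,
    // so chains of slices never form.
    Slice MakeSlice(const Slice& source, std::size_t start, std::size_t length) {
      return Slice{source.parent, source.offset + start, length};
    }

    Slice MakeSlice(const Backing& source, std::size_t start, std::size_t length) {
      return Slice{&source, start, length};
    }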
2805 2943
2806 2944
2807 MaybeObject* Heap::AllocateExternalStringFromAscii( 2945 MaybeObject* Heap::AllocateExternalStringFromAscii(
2808 ExternalAsciiString::Resource* resource) { 2946 const ExternalAsciiString::Resource* resource) {
2809 size_t length = resource->length(); 2947 size_t length = resource->length();
2810 if (length > static_cast<size_t>(String::kMaxLength)) { 2948 if (length > static_cast<size_t>(String::kMaxLength)) {
2811 isolate()->context()->mark_out_of_memory(); 2949 isolate()->context()->mark_out_of_memory();
2812 return Failure::OutOfMemoryException(); 2950 return Failure::OutOfMemoryException();
2813 } 2951 }
2814 2952
2815 Map* map = external_ascii_string_map(); 2953 Map* map = external_ascii_string_map();
2816 Object* result; 2954 Object* result;
2817 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE); 2955 { MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
2818 if (!maybe_result->ToObject(&result)) return maybe_result; 2956 if (!maybe_result->ToObject(&result)) return maybe_result;
2819 } 2957 }
2820 2958
2821 ExternalAsciiString* external_string = ExternalAsciiString::cast(result); 2959 ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
2822 external_string->set_length(static_cast<int>(length)); 2960 external_string->set_length(static_cast<int>(length));
2823 external_string->set_hash_field(String::kEmptyHashField); 2961 external_string->set_hash_field(String::kEmptyHashField);
2824 external_string->set_resource(resource); 2962 external_string->set_resource(resource);
2825 2963
2826 return result; 2964 return result;
2827 } 2965 }
2828 2966
2829 2967
2830 MaybeObject* Heap::AllocateExternalStringFromTwoByte( 2968 MaybeObject* Heap::AllocateExternalStringFromTwoByte(
2831 ExternalTwoByteString::Resource* resource) { 2969 const ExternalTwoByteString::Resource* resource) {
2832 size_t length = resource->length(); 2970 size_t length = resource->length();
2833 if (length > static_cast<size_t>(String::kMaxLength)) { 2971 if (length > static_cast<size_t>(String::kMaxLength)) {
2834 isolate()->context()->mark_out_of_memory(); 2972 isolate()->context()->mark_out_of_memory();
2835 return Failure::OutOfMemoryException(); 2973 return Failure::OutOfMemoryException();
2836 } 2974 }
2837 2975
2838 // For small strings we check whether the resource contains only 2976 // For small strings we check whether the resource contains only
2839 // ASCII characters. If yes, we use a different string map. 2977 // ASCII characters. If yes, we use a different string map.
2840 static const size_t kAsciiCheckLengthLimit = 32; 2978 static const size_t kAsciiCheckLengthLimit = 32;
2841 bool is_ascii = length <= kAsciiCheckLengthLimit && 2979 bool is_ascii = length <= kAsciiCheckLengthLimit &&
(...skipping 43 matching lines...)
2885 if (length < 0 || length > ByteArray::kMaxLength) { 3023 if (length < 0 || length > ByteArray::kMaxLength) {
2886 return Failure::OutOfMemoryException(); 3024 return Failure::OutOfMemoryException();
2887 } 3025 }
2888 if (pretenure == NOT_TENURED) { 3026 if (pretenure == NOT_TENURED) {
2889 return AllocateByteArray(length); 3027 return AllocateByteArray(length);
2890 } 3028 }
2891 int size = ByteArray::SizeFor(length); 3029 int size = ByteArray::SizeFor(length);
2892 Object* result; 3030 Object* result;
2893 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace()) 3031 { MaybeObject* maybe_result = (size <= MaxObjectSizeInPagedSpace())
2894 ? old_data_space_->AllocateRaw(size) 3032 ? old_data_space_->AllocateRaw(size)
2895 : lo_space_->AllocateRaw(size); 3033 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
2896 if (!maybe_result->ToObject(&result)) return maybe_result; 3034 if (!maybe_result->ToObject(&result)) return maybe_result;
2897 } 3035 }
2898 3036
2899 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map()); 3037 reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
2900 reinterpret_cast<ByteArray*>(result)->set_length(length); 3038 reinterpret_cast<ByteArray*>(result)->set_length(length);
2901 return result; 3039 return result;
2902 } 3040 }
2903 3041
2904 3042
2905 MaybeObject* Heap::AllocateByteArray(int length) { 3043 MaybeObject* Heap::AllocateByteArray(int length) {
(...skipping 15 matching lines...)
2921 3059
2922 3060
2923 void Heap::CreateFillerObjectAt(Address addr, int size) { 3061 void Heap::CreateFillerObjectAt(Address addr, int size) {
2924 if (size == 0) return; 3062 if (size == 0) return;
2925 HeapObject* filler = HeapObject::FromAddress(addr); 3063 HeapObject* filler = HeapObject::FromAddress(addr);
2926 if (size == kPointerSize) { 3064 if (size == kPointerSize) {
2927 filler->set_map(one_pointer_filler_map()); 3065 filler->set_map(one_pointer_filler_map());
2928 } else if (size == 2 * kPointerSize) { 3066 } else if (size == 2 * kPointerSize) {
2929 filler->set_map(two_pointer_filler_map()); 3067 filler->set_map(two_pointer_filler_map());
2930 } else { 3068 } else {
2931 filler->set_map(byte_array_map()); 3069 filler->set_map(free_space_map());
2932 ByteArray::cast(filler)->set_length(ByteArray::LengthFor(size)); 3070 FreeSpace::cast(filler)->set_size(size);
2933 } 3071 }
2934 } 3072 }
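Note: CreateFillerObjectAt stamps a gap with a one-pointer, two-pointer or free-space filler so a linear heap walk can step over it. A tiny sketch of that size-to-filler mapping with illustrative types:

    #include <cstddef>
    #include <cstdint>

    enum FillerKind { kOnePointerFiller, kTwoPointerFiller, kFreeSpaceFiller };

    struct FillerHeader {
      FillerKind kind;
      std::uint32_t size_in_bytes;   // Only consulted for kFreeSpaceFiller.
    };

    // Small gaps get a fixed-size filler kind; anything larger records its size
    // explicitly so a heap iterator can skip over it.
    FillerHeader MakeFiller(std::size_t size, std::size_t pointer_size) {
      if (size == pointer_size) {
        return FillerHeader{kOnePointerFiller, static_cast<std::uint32_t>(size)};
      }
      if (size == 2 * pointer_size) {
        return FillerHeader{kTwoPointerFiller, static_cast<std::uint32_t>(size)};
      }
      return FillerHeader{kFreeSpaceFiller, static_cast<std::uint32_t>(size)};
    }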
2935 3073
2936 3074
2937 MaybeObject* Heap::AllocateExternalArray(int length, 3075 MaybeObject* Heap::AllocateExternalArray(int length,
2938 ExternalArrayType array_type, 3076 ExternalArrayType array_type,
2939 void* external_pointer, 3077 void* external_pointer,
2940 PretenureFlag pretenure) { 3078 PretenureFlag pretenure) {
2941 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; 3079 AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
2942 Object* result; 3080 Object* result;
(...skipping 25 matching lines...)
2968 } 3106 }
2969 3107
2970 // Compute size. 3108 // Compute size.
2971 int body_size = RoundUp(desc.instr_size, kObjectAlignment); 3109 int body_size = RoundUp(desc.instr_size, kObjectAlignment);
2972 int obj_size = Code::SizeFor(body_size); 3110 int obj_size = Code::SizeFor(body_size);
2973 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment)); 3111 ASSERT(IsAligned(static_cast<intptr_t>(obj_size), kCodeAlignment));
2974 MaybeObject* maybe_result; 3112 MaybeObject* maybe_result;
2975 // Large code objects and code objects which should stay at a fixed address 3113 // Large code objects and code objects which should stay at a fixed address
2976 // are allocated in large object space. 3114 // are allocated in large object space.
2977 if (obj_size > MaxObjectSizeInPagedSpace() || immovable) { 3115 if (obj_size > MaxObjectSizeInPagedSpace() || immovable) {
2978 maybe_result = lo_space_->AllocateRawCode(obj_size); 3116 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
2979 } else { 3117 } else {
2980 maybe_result = code_space_->AllocateRaw(obj_size); 3118 maybe_result = code_space_->AllocateRaw(obj_size);
2981 } 3119 }
2982 3120
2983 Object* result; 3121 Object* result;
2984 if (!maybe_result->ToObject(&result)) return maybe_result; 3122 if (!maybe_result->ToObject(&result)) return maybe_result;
2985 3123
2986 // Initialize the object 3124 // Initialize the object
2987 HeapObject::cast(result)->set_map(code_map()); 3125 HeapObject::cast(result)->set_map(code_map());
2988 Code* code = Code::cast(result); 3126 Code* code = Code::cast(result);
(...skipping 24 matching lines...) Expand all
3013 #endif 3151 #endif
3014 return code; 3152 return code;
3015 } 3153 }
3016 3154
3017 3155
3018 MaybeObject* Heap::CopyCode(Code* code) { 3156 MaybeObject* Heap::CopyCode(Code* code) {
3019 // Allocate an object the same size as the code object. 3157 // Allocate an object the same size as the code object.
3020 int obj_size = code->Size(); 3158 int obj_size = code->Size();
3021 MaybeObject* maybe_result; 3159 MaybeObject* maybe_result;
3022 if (obj_size > MaxObjectSizeInPagedSpace()) { 3160 if (obj_size > MaxObjectSizeInPagedSpace()) {
3023 maybe_result = lo_space_->AllocateRawCode(obj_size); 3161 maybe_result = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
3024 } else { 3162 } else {
3025 maybe_result = code_space_->AllocateRaw(obj_size); 3163 maybe_result = code_space_->AllocateRaw(obj_size);
3026 } 3164 }
3027 3165
3028 Object* result; 3166 Object* result;
3029 if (!maybe_result->ToObject(&result)) return maybe_result; 3167 if (!maybe_result->ToObject(&result)) return maybe_result;
3030 3168
3031 // Copy code object. 3169 // Copy code object.
3032 Address old_addr = code->address(); 3170 Address old_addr = code->address();
3033 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); 3171 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
(...skipping 22 matching lines...)
3056 3194
3057 int new_obj_size = Code::SizeFor(new_body_size); 3195 int new_obj_size = Code::SizeFor(new_body_size);
3058 3196
3059 Address old_addr = code->address(); 3197 Address old_addr = code->address();
3060 3198
3061 size_t relocation_offset = 3199 size_t relocation_offset =
3062 static_cast<size_t>(code->instruction_end() - old_addr); 3200 static_cast<size_t>(code->instruction_end() - old_addr);
3063 3201
3064 MaybeObject* maybe_result; 3202 MaybeObject* maybe_result;
3065 if (new_obj_size > MaxObjectSizeInPagedSpace()) { 3203 if (new_obj_size > MaxObjectSizeInPagedSpace()) {
3066 maybe_result = lo_space_->AllocateRawCode(new_obj_size); 3204 maybe_result = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
3067 } else { 3205 } else {
3068 maybe_result = code_space_->AllocateRaw(new_obj_size); 3206 maybe_result = code_space_->AllocateRaw(new_obj_size);
3069 } 3207 }
3070 3208
3071 Object* result; 3209 Object* result;
3072 if (!maybe_result->ToObject(&result)) return maybe_result; 3210 if (!maybe_result->ToObject(&result)) return maybe_result;
3073 3211
3074 // Copy code object. 3212 // Copy code object.
3075 Address new_addr = reinterpret_cast<HeapObject*>(result)->address(); 3213 Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
3076 3214
(...skipping 28 matching lines...)
3105 Object* result; 3243 Object* result;
3106 { MaybeObject* maybe_result = 3244 { MaybeObject* maybe_result =
3107 AllocateRaw(map->instance_size(), space, retry_space); 3245 AllocateRaw(map->instance_size(), space, retry_space);
3108 if (!maybe_result->ToObject(&result)) return maybe_result; 3246 if (!maybe_result->ToObject(&result)) return maybe_result;
3109 } 3247 }
3110 HeapObject::cast(result)->set_map(map); 3248 HeapObject::cast(result)->set_map(map);
3111 return result; 3249 return result;
3112 } 3250 }
3113 3251
3114 3252
3115 MaybeObject* Heap::InitializeFunction(JSFunction* function, 3253 void Heap::InitializeFunction(JSFunction* function,
3116 SharedFunctionInfo* shared, 3254 SharedFunctionInfo* shared,
3117 Object* prototype) { 3255 Object* prototype) {
3118 ASSERT(!prototype->IsMap()); 3256 ASSERT(!prototype->IsMap());
3119 function->initialize_properties(); 3257 function->initialize_properties();
3120 function->initialize_elements(); 3258 function->initialize_elements();
3121 function->set_shared(shared); 3259 function->set_shared(shared);
3122 function->set_code(shared->code()); 3260 function->set_code(shared->code());
3123 function->set_prototype_or_initial_map(prototype); 3261 function->set_prototype_or_initial_map(prototype);
3124 function->set_context(undefined_value()); 3262 function->set_context(undefined_value());
3125 function->set_literals(empty_fixed_array()); 3263 function->set_literals(empty_fixed_array());
3126 function->set_next_function_link(undefined_value()); 3264 function->set_next_function_link(undefined_value());
3127 return function;
3128 } 3265 }
3129 3266
3130 3267
3131 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) { 3268 MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
3132 // Allocate the prototype. Make sure to use the object function 3269 // Allocate the prototype. Make sure to use the object function
3133 // from the function's context, since the function can be from a 3270 // from the function's context, since the function can be from a
3134 // different context. 3271 // different context.
3135 JSFunction* object_function = 3272 JSFunction* object_function =
3136 function->context()->global_context()->object_function(); 3273 function->context()->global_context()->object_function();
3274
3275 // Each function prototype gets a copy of the object function map.
3276 // This avoids unwanted sharing of maps between prototypes of different
3277 // constructors.
3278 Map* new_map;
3279 ASSERT(object_function->has_initial_map());
3280 { MaybeObject* maybe_map =
3281 object_function->initial_map()->CopyDropTransitions();
3282 if (!maybe_map->To<Map>(&new_map)) return maybe_map;
3283 }
3137 Object* prototype; 3284 Object* prototype;
3138 { MaybeObject* maybe_prototype = AllocateJSObject(object_function); 3285 { MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
3139 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype; 3286 if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
3140 } 3287 }
3141 // When creating the prototype for the function we must set its 3288 // When creating the prototype for the function we must set its
3142 // constructor to the function. 3289 // constructor to the function.
3143 Object* result; 3290 Object* result;
3144 { MaybeObject* maybe_result = 3291 { MaybeObject* maybe_result =
3145 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes( 3292 JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
3146 constructor_symbol(), function, DONT_ENUM); 3293 constructor_symbol(), function, DONT_ENUM);
3147 if (!maybe_result->ToObject(&result)) return maybe_result; 3294 if (!maybe_result->ToObject(&result)) return maybe_result;
3148 } 3295 }
3149 return prototype; 3296 return prototype;
3150 } 3297 }
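Review note: the new code above copies the object function's initial map (CopyDropTransitions) before allocating the prototype from it, so prototypes of different constructors no longer share a map and adding the constructor property stays local. A simplified, standalone sketch of that copy-before-mutate idea; Shape and CopyShape are invented stand-ins for Map and CopyDropTransitions, not V8 types:

#include <cstdio>
#include <map>
#include <memory>
#include <string>

struct Shape { std::map<std::string, int> properties; };

// Analogous to initial_map()->CopyDropTransitions(): give the prototype its
// own copy of the shared shape before mutating it.
std::shared_ptr<Shape> CopyShape(const Shape& source) {
  return std::make_shared<Shape>(source);
}

int main() {
  Shape object_shape;                                   // the shared initial map
  std::shared_ptr<Shape> prototype_shape = CopyShape(object_shape);
  prototype_shape->properties["constructor"] = 1;       // DONT_ENUM-style own property
  std::printf("original: %zu properties, copy: %zu properties\n",
              object_shape.properties.size(), prototype_shape->properties.size());
}

The copy keeps the original shape untouched, which is the whole point of the change.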
3151 3298
3152 3299
3153 MaybeObject* Heap::AllocateFunction(Map* function_map, 3300 MaybeObject* Heap::AllocateFunction(Map* function_map,
3154 SharedFunctionInfo* shared, 3301 SharedFunctionInfo* shared,
3155 Object* prototype, 3302 Object* prototype,
3156 PretenureFlag pretenure) { 3303 PretenureFlag pretenure) {
3157 AllocationSpace space = 3304 AllocationSpace space =
3158 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; 3305 (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
3159 Object* result; 3306 Object* result;
3160 { MaybeObject* maybe_result = Allocate(function_map, space); 3307 { MaybeObject* maybe_result = Allocate(function_map, space);
3161 if (!maybe_result->ToObject(&result)) return maybe_result; 3308 if (!maybe_result->ToObject(&result)) return maybe_result;
3162 } 3309 }
3163 return InitializeFunction(JSFunction::cast(result), shared, prototype); 3310 InitializeFunction(JSFunction::cast(result), shared, prototype);
3311 return result;
3164 } 3312 }
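Review note: InitializeFunction now returns void because it only performs field stores that cannot fail, and AllocateFunction returns the allocated object itself after calling it. A minimal standalone sketch of that split; Function, InitializeFunction and AllocateFunction here are toy stand-ins, not the V8 classes:

#include <cstdio>
#include <optional>

struct Function { int shared; int code; int context; };

// Infallible: only field stores, so there is no error to propagate.
void InitializeFunction(Function* f, int shared) {
  f->shared = shared;
  f->code = shared;    // stand-in for shared->code()
  f->context = 0;      // stand-in for undefined_value()
}

// Fallible: the allocation step can fail, so the result stays optional.
std::optional<Function> AllocateFunction(int shared, bool allocation_ok) {
  if (!allocation_ok) return std::nullopt;   // simulated allocation failure
  Function f{};
  InitializeFunction(&f, shared);            // cannot fail once storage exists
  return f;                                  // caller still checks for failure
}

int main() {
  if (auto f = AllocateFunction(42, true)) std::printf("shared=%d\n", f->shared);
}

Keeping the fallible step (allocation) apart from the infallible step (initialization) avoids threading a failure type through code that has no failure path.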
3165 3313
3166 3314
3167 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) { 3315 MaybeObject* Heap::AllocateArgumentsObject(Object* callee, int length) {
3168 // To get fast allocation and map sharing for arguments objects we 3316 // To get fast allocation and map sharing for arguments objects we
3169 // allocate them based on an arguments boilerplate. 3317 // allocate them based on an arguments boilerplate.
3170 3318
3171 JSObject* boilerplate; 3319 JSObject* boilerplate;
3172 int arguments_object_size; 3320 int arguments_object_size;
3173 bool strict_mode_callee = callee->IsJSFunction() && 3321 bool strict_mode_callee = callee->IsJSFunction() &&
(...skipping 149 matching lines...)
3323 // according to the object's initial map. For example, if the map's 3471 // according to the object's initial map. For example, if the map's
3324 // instance type is JS_ARRAY_TYPE, the length field should be initialized 3472 // instance type is JS_ARRAY_TYPE, the length field should be initialized
3325 // to a number (eg, Smi::FromInt(0)) and the elements initialized to a 3473 // to a number (eg, Smi::FromInt(0)) and the elements initialized to a
3326 // fixed array (eg, Heap::empty_fixed_array()). Currently, the object 3474 // fixed array (eg, Heap::empty_fixed_array()). Currently, the object
3327 // verification code has to cope with (temporarily) invalid objects. See 3475 // verification code has to cope with (temporarily) invalid objects. See
3328 // for example, JSArray::JSArrayVerify). 3476 // for example, JSArray::JSArrayVerify).
3329 Object* filler; 3477 Object* filler;
3330 // We cannot always fill with one_pointer_filler_map because objects 3478 // We cannot always fill with one_pointer_filler_map because objects
3331 // created from API functions expect their internal fields to be initialized 3479 // created from API functions expect their internal fields to be initialized
3332 // with undefined_value. 3480 // with undefined_value.
3481 // Pre-allocated fields need to be initialized with undefined_value as well
3482 // so that object accesses before the constructor completes (e.g. in the
3483 // debugger) will not cause a crash.
3333 if (map->constructor()->IsJSFunction() && 3484 if (map->constructor()->IsJSFunction() &&
3334 JSFunction::cast(map->constructor())->shared()-> 3485 JSFunction::cast(map->constructor())->shared()->
3335 IsInobjectSlackTrackingInProgress()) { 3486 IsInobjectSlackTrackingInProgress()) {
3336 // We might want to shrink the object later. 3487 // We might want to shrink the object later.
3337 ASSERT(obj->GetInternalFieldCount() == 0); 3488 ASSERT(obj->GetInternalFieldCount() == 0);
3338 filler = Heap::one_pointer_filler_map(); 3489 filler = Heap::one_pointer_filler_map();
3339 } else { 3490 } else {
3340 filler = Heap::undefined_value(); 3491 filler = Heap::undefined_value();
3341 } 3492 }
3342 obj->InitializeBody(map->instance_size(), filler); 3493 obj->InitializeBody(map, Heap::undefined_value(), filler);
3343 } 3494 }
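Review note: the filler choice above distinguishes objects that may still be shrunk (in-object slack tracking, one_pointer_filler_map) from objects whose fields must read as undefined right away, and the new three-argument InitializeBody call also forces the pre-allocated fields to undefined so early accesses (e.g. from the debugger) are safe. A hedged, standalone sketch of that selection; the sizes, values and names below are invented for illustration:

#include <cstdint>
#include <cstdio>
#include <vector>

enum Filler : uint32_t { kOnePointerFiller = 0xdead0001u, kUndefinedFiller = 0u };

// Pre-allocated fields always become "undefined"; the remaining fields get the
// shrink filler only while slack tracking is still in progress.
void InitializeBody(std::vector<uint32_t>* body, size_t pre_allocated,
                    bool slack_tracking_in_progress) {
  const uint32_t filler =
      slack_tracking_in_progress ? kOnePointerFiller : kUndefinedFiller;
  for (size_t i = 0; i < body->size(); ++i) {
    (*body)[i] = (i < pre_allocated) ? kUndefinedFiller : filler;
  }
}

int main() {
  std::vector<uint32_t> body(8);
  InitializeBody(&body, 3, true);
  std::printf("field[0]=%08x field[7]=%08x\n",
              static_cast<unsigned>(body[0]), static_cast<unsigned>(body[7]));
}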
3344 3495
3345 3496
3346 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) { 3497 MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
3347 // JSFunctions should be allocated using AllocateFunction to be 3498 // JSFunctions should be allocated using AllocateFunction to be
3348 // properly initialized. 3499 // properly initialized.
3349 ASSERT(map->instance_type() != JS_FUNCTION_TYPE); 3500 ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
3350 3501
3351 // Both types of global objects should be allocated using 3502 // Both types of global objects should be allocated using
3352 // AllocateGlobalObject to be properly initialized. 3503 // AllocateGlobalObject to be properly initialized.
(...skipping 17 matching lines...)
3370 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE; 3521 if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
3371 Object* obj; 3522 Object* obj;
3372 { MaybeObject* maybe_obj = Allocate(map, space); 3523 { MaybeObject* maybe_obj = Allocate(map, space);
3373 if (!maybe_obj->ToObject(&obj)) return maybe_obj; 3524 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3374 } 3525 }
3375 3526
3376 // Initialize the JSObject. 3527 // Initialize the JSObject.
3377 InitializeJSObjectFromMap(JSObject::cast(obj), 3528 InitializeJSObjectFromMap(JSObject::cast(obj),
3378 FixedArray::cast(properties), 3529 FixedArray::cast(properties),
3379 map); 3530 map);
3380 ASSERT(JSObject::cast(obj)->HasFastElements()); 3531 ASSERT(JSObject::cast(obj)->HasFastSmiOnlyElements() ||
3532 JSObject::cast(obj)->HasFastElements());
3381 return obj; 3533 return obj;
3382 } 3534 }
3383 3535
3384 3536
3385 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor, 3537 MaybeObject* Heap::AllocateJSObject(JSFunction* constructor,
3386 PretenureFlag pretenure) { 3538 PretenureFlag pretenure) {
3387 // Allocate the initial map if absent. 3539 // Allocate the initial map if absent.
3388 if (!constructor->has_initial_map()) { 3540 if (!constructor->has_initial_map()) {
3389 Object* initial_map; 3541 Object* initial_map;
3390 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor); 3542 { MaybeObject* maybe_initial_map = AllocateInitialMap(constructor);
(...skipping 22 matching lines...)
3413 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize); 3565 MaybeObject* maybe_map_obj = AllocateMap(JS_PROXY_TYPE, JSProxy::kSize);
3414 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj; 3566 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
3415 map->set_prototype(prototype); 3567 map->set_prototype(prototype);
3416 3568
3417 // Allocate the proxy object. 3569 // Allocate the proxy object.
3418 JSProxy* result; 3570 JSProxy* result;
3419 MaybeObject* maybe_result = Allocate(map, NEW_SPACE); 3571 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3420 if (!maybe_result->To<JSProxy>(&result)) return maybe_result; 3572 if (!maybe_result->To<JSProxy>(&result)) return maybe_result;
3421 result->InitializeBody(map->instance_size(), Smi::FromInt(0)); 3573 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
3422 result->set_handler(handler); 3574 result->set_handler(handler);
3575 result->set_hash(undefined_value());
3423 return result; 3576 return result;
3424 } 3577 }
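Review note: both proxy allocators now initialize the hash field to undefined, which suggests the identity hash is produced lazily the first time it is requested; that lazy step is an assumption here, so the sketch below is only an illustration with invented names (Proxy, GetOrCreateIdentityHash), not the V8 API:

#include <cstdio>
#include <optional>
#include <random>

struct Proxy {
  std::optional<int> hash;                 // starts out "undefined"
  int GetOrCreateIdentityHash() {
    if (!hash) {
      static std::mt19937 rng(1234);       // deterministic for the example
      hash = static_cast<int>(rng() & 0x3fffffff);   // Smi-sized value
    }
    return *hash;                          // stable once created
  }
};

int main() {
  Proxy p;
  std::printf("%d %d\n", p.GetOrCreateIdentityHash(), p.GetOrCreateIdentityHash());
}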
3425 3578
3426 3579
3427 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler, 3580 MaybeObject* Heap::AllocateJSFunctionProxy(Object* handler,
3428 Object* call_trap, 3581 Object* call_trap,
3429 Object* construct_trap, 3582 Object* construct_trap,
3430 Object* prototype) { 3583 Object* prototype) {
3431 // Allocate map. 3584 // Allocate map.
3432 // TODO(rossberg): Once we optimize proxies, think about a scheme to share 3585 // TODO(rossberg): Once we optimize proxies, think about a scheme to share
3433 // maps. Will probably depend on the identity of the handler object, too. 3586 // maps. Will probably depend on the identity of the handler object, too.
3434 Map* map; 3587 Map* map;
3435 MaybeObject* maybe_map_obj = 3588 MaybeObject* maybe_map_obj =
3436 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize); 3589 AllocateMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
3437 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj; 3590 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj;
3438 map->set_prototype(prototype); 3591 map->set_prototype(prototype);
3439 3592
3440 // Allocate the proxy object. 3593 // Allocate the proxy object.
3441 JSFunctionProxy* result; 3594 JSFunctionProxy* result;
3442 MaybeObject* maybe_result = Allocate(map, NEW_SPACE); 3595 MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
3443 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result; 3596 if (!maybe_result->To<JSFunctionProxy>(&result)) return maybe_result;
3444 result->InitializeBody(map->instance_size(), Smi::FromInt(0)); 3597 result->InitializeBody(map->instance_size(), Smi::FromInt(0));
3445 result->set_handler(handler); 3598 result->set_handler(handler);
3599 result->set_hash(undefined_value());
3446 result->set_call_trap(call_trap); 3600 result->set_call_trap(call_trap);
3447 result->set_construct_trap(construct_trap); 3601 result->set_construct_trap(construct_trap);
3448 return result; 3602 return result;
3449 } 3603 }
3450 3604
3451 3605
3452 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) { 3606 MaybeObject* Heap::AllocateGlobalObject(JSFunction* constructor) {
3453 ASSERT(constructor->has_initial_map()); 3607 ASSERT(constructor->has_initial_map());
3454 Map* map = constructor->initial_map(); 3608 Map* map = constructor->initial_map();
3455 3609
(...skipping 96 matching lines...)
3552 if (!maybe_clone->ToObject(&clone)) return maybe_clone; 3706 if (!maybe_clone->ToObject(&clone)) return maybe_clone;
3553 } 3707 }
3554 ASSERT(InNewSpace(clone)); 3708 ASSERT(InNewSpace(clone));
3555 // Since we know the clone is allocated in new space, we can copy 3709 // Since we know the clone is allocated in new space, we can copy
3556 // the contents without worrying about updating the write barrier. 3710 // the contents without worrying about updating the write barrier.
3557 CopyBlock(HeapObject::cast(clone)->address(), 3711 CopyBlock(HeapObject::cast(clone)->address(),
3558 source->address(), 3712 source->address(),
3559 object_size); 3713 object_size);
3560 } 3714 }
3561 3715
3716 ASSERT(JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
3562 FixedArrayBase* elements = FixedArrayBase::cast(source->elements()); 3717 FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
3563 FixedArray* properties = FixedArray::cast(source->properties()); 3718 FixedArray* properties = FixedArray::cast(source->properties());
3564 // Update elements if necessary. 3719 // Update elements if necessary.
3565 if (elements->length() > 0) { 3720 if (elements->length() > 0) {
3566 Object* elem; 3721 Object* elem;
3567 { MaybeObject* maybe_elem; 3722 { MaybeObject* maybe_elem;
3568 if (elements->map() == fixed_cow_array_map()) { 3723 if (elements->map() == fixed_cow_array_map()) {
3569 maybe_elem = FixedArray::cast(elements); 3724 maybe_elem = FixedArray::cast(elements);
3570 } else if (source->HasFastDoubleElements()) { 3725 } else if (source->HasFastDoubleElements()) {
3571 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements)); 3726 maybe_elem = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
(...skipping 12 matching lines...)
3584 } 3739 }
3585 JSObject::cast(clone)->set_properties(FixedArray::cast(prop)); 3740 JSObject::cast(clone)->set_properties(FixedArray::cast(prop));
3586 } 3741 }
3587 // Return the new clone. 3742 // Return the new clone.
3588 return clone; 3743 return clone;
3589 } 3744 }
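Review note: when cloning, the elements backing store is reused as-is if it is a copy-on-write fixed array, deep-copied if it holds doubles, and copied as a plain fixed array otherwise; the new assert also pins down that the clone keeps the source's elements kind. A simplified two-case model of the share-vs-copy decision; Elements and CloneElements are invented for the example:

#include <cstdio>
#include <memory>
#include <vector>

struct Elements {
  bool copy_on_write;
  std::vector<double> values;
};

// Share a copy-on-write store, deep-copy anything else.
std::shared_ptr<Elements> CloneElements(const std::shared_ptr<Elements>& source) {
  if (source->copy_on_write) return source;
  return std::make_shared<Elements>(*source);
}

int main() {
  auto cow = std::make_shared<Elements>(Elements{true, {1, 2, 3}});
  auto plain = std::make_shared<Elements>(Elements{false, {1, 2, 3}});
  std::printf("cow shared: %d, plain shared: %d\n",
              static_cast<int>(CloneElements(cow) == cow),
              static_cast<int>(CloneElements(plain) == plain));
}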
3590 3745
3591 3746
3592 MaybeObject* Heap::ReinitializeJSReceiver( 3747 MaybeObject* Heap::ReinitializeJSReceiver(
3593 JSReceiver* object, InstanceType type, int size) { 3748 JSReceiver* object, InstanceType type, int size) {
3594 ASSERT(type >= FIRST_JS_RECEIVER_TYPE); 3749 ASSERT(type >= FIRST_JS_OBJECT_TYPE);
3595 3750
3596 // Allocate fresh map. 3751 // Allocate fresh map.
3597 // TODO(rossberg): Once we optimize proxies, cache these maps. 3752 // TODO(rossberg): Once we optimize proxies, cache these maps.
3598 Map* map; 3753 Map* map;
3599 MaybeObject* maybe_map_obj = AllocateMap(type, size); 3754 MaybeObject* maybe = AllocateMap(type, size);
3600 if (!maybe_map_obj->To<Map>(&map)) return maybe_map_obj; 3755 if (!maybe->To<Map>(&map)) return maybe;
3601 3756
3602 // Check that the receiver has at least the size of the fresh object. 3757 // Check that the receiver has at least the size of the fresh object.
3603 int size_difference = object->map()->instance_size() - map->instance_size(); 3758 int size_difference = object->map()->instance_size() - map->instance_size();
3604 ASSERT(size_difference >= 0); 3759 ASSERT(size_difference >= 0);
3605 3760
3606 map->set_prototype(object->map()->prototype()); 3761 map->set_prototype(object->map()->prototype());
3607 3762
3608 // Allocate the backing storage for the properties. 3763 // Allocate the backing storage for the properties.
3609 int prop_size = map->unused_property_fields() - map->inobject_properties(); 3764 int prop_size = map->unused_property_fields() - map->inobject_properties();
3610 Object* properties; 3765 Object* properties;
3611 { MaybeObject* maybe_properties = AllocateFixedArray(prop_size, TENURED); 3766 maybe = AllocateFixedArray(prop_size, TENURED);
3612 if (!maybe_properties->ToObject(&properties)) return maybe_properties; 3767 if (!maybe->ToObject(&properties)) return maybe;
3768
3769 // Functions require some allocation, which might fail here.
3770 SharedFunctionInfo* shared = NULL;
3771 if (type == JS_FUNCTION_TYPE) {
3772 String* name;
3773 maybe = LookupAsciiSymbol("<freezing call trap>");
3774 if (!maybe->To<String>(&name)) return maybe;
3775 maybe = AllocateSharedFunctionInfo(name);
3776 if (!maybe->To<SharedFunctionInfo>(&shared)) return maybe;
3613 } 3777 }
3614 3778
3779 // Because of possible retries of this function after failure,
3780 // we must NOT fail after this point, where we have changed the type!
3781
3615 // Reset the map for the object. 3782 // Reset the map for the object.
3616 object->set_map(map); 3783 object->set_map(map);
3784 JSObject* jsobj = JSObject::cast(object);
3617 3785
3618 // Reinitialize the object from the constructor map. 3786 // Reinitialize the object from the constructor map.
3619 InitializeJSObjectFromMap(JSObject::cast(object), 3787 InitializeJSObjectFromMap(jsobj, FixedArray::cast(properties), map);
3620 FixedArray::cast(properties), map);
3621 3788
3622 // Functions require some minimal initialization. 3789 // Functions require some minimal initialization.
3623 if (type == JS_FUNCTION_TYPE) { 3790 if (type == JS_FUNCTION_TYPE) {
3624 String* name; 3791 map->set_function_with_prototype(true);
3625 MaybeObject* maybe_name = LookupAsciiSymbol("<freezing call trap>"); 3792 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
3626 if (!maybe_name->To<String>(&name)) return maybe_name; 3793 JSFunction::cast(object)->set_context(
3627 SharedFunctionInfo* shared; 3794 isolate()->context()->global_context());
3628 MaybeObject* maybe_shared = AllocateSharedFunctionInfo(name);
3629 if (!maybe_shared->To<SharedFunctionInfo>(&shared)) return maybe_shared;
3630 JSFunction* func;
3631 MaybeObject* maybe_func =
3632 InitializeFunction(JSFunction::cast(object), shared, the_hole_value());
3633 if (!maybe_func->To<JSFunction>(&func)) return maybe_func;
3634 func->set_context(isolate()->context()->global_context());
3635 } 3795 }
3636 3796
3637 // Put in filler if the new object is smaller than the old. 3797 // Put in filler if the new object is smaller than the old.
3638 if (size_difference > 0) { 3798 if (size_difference > 0) {
3639 CreateFillerObjectAt( 3799 CreateFillerObjectAt(
3640 object->address() + map->instance_size(), size_difference); 3800 object->address() + map->instance_size(), size_difference);
3641 } 3801 }
3642 3802
3643 return object; 3803 return object;
3644 } 3804 }
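Review note: the reordering above moves every fallible allocation (properties array, symbol name, SharedFunctionInfo) in front of the set_map call, because once the instance type changes a retry after failure must never observe a half-converted object. A standalone sketch of that point-of-no-return pattern, using invented toy types:

#include <cstdio>
#include <optional>
#include <string>

struct Object { std::string type; std::string name; };

std::optional<std::string> AllocateName(bool allocation_ok) {
  if (!allocation_ok) return std::nullopt;     // simulated allocation failure
  return std::string("<freezing call trap>");
}

bool ReinitializeAsFunction(Object* obj, bool allocation_ok) {
  std::optional<std::string> name = AllocateName(allocation_ok);
  if (!name) return false;                     // fail early: obj is untouched
  // Point of no return: after the type changes, nothing below may fail.
  obj->type = "JS_FUNCTION_TYPE";
  obj->name = *name;
  return true;
}

int main() {
  Object o{"JS_PROXY_TYPE", ""};
  std::printf("%d type=%s\n", ReinitializeAsFunction(&o, false), o.type.c_str());
  std::printf("%d type=%s\n", ReinitializeAsFunction(&o, true), o.type.c_str());
}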
(...skipping 162 matching lines...)
3807 if (chars > SeqTwoByteString::kMaxLength) { 3967 if (chars > SeqTwoByteString::kMaxLength) {
3808 return Failure::OutOfMemoryException(); 3968 return Failure::OutOfMemoryException();
3809 } 3969 }
3810 map = symbol_map(); 3970 map = symbol_map();
3811 size = SeqTwoByteString::SizeFor(chars); 3971 size = SeqTwoByteString::SizeFor(chars);
3812 } 3972 }
3813 3973
3814 // Allocate string. 3974 // Allocate string.
3815 Object* result; 3975 Object* result;
3816 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace()) 3976 { MaybeObject* maybe_result = (size > MaxObjectSizeInPagedSpace())
3817 ? lo_space_->AllocateRaw(size) 3977 ? lo_space_->AllocateRaw(size, NOT_EXECUTABLE)
3818 : old_data_space_->AllocateRaw(size); 3978 : old_data_space_->AllocateRaw(size);
3819 if (!maybe_result->ToObject(&result)) return maybe_result; 3979 if (!maybe_result->ToObject(&result)) return maybe_result;
3820 } 3980 }
3821 3981
3822 reinterpret_cast<HeapObject*>(result)->set_map(map); 3982 reinterpret_cast<HeapObject*>(result)->set_map(map);
3823 // Set length and hash fields of the allocated string. 3983 // Set length and hash fields of the allocated string.
3824 String* answer = String::cast(result); 3984 String* answer = String::cast(result);
3825 answer->set_length(chars); 3985 answer->set_length(chars);
3826 answer->set_hash_field(hash_field); 3986 answer->set_hash_field(hash_field);
3827 3987
(...skipping 96 matching lines...)
3924 if (length < 0 || length > FixedArray::kMaxLength) { 4084 if (length < 0 || length > FixedArray::kMaxLength) {
3925 return Failure::OutOfMemoryException(); 4085 return Failure::OutOfMemoryException();
3926 } 4086 }
3927 ASSERT(length > 0); 4087 ASSERT(length > 0);
3928 // Use the general function if we're forced to always allocate. 4088 // Use the general function if we're forced to always allocate.
3929 if (always_allocate()) return AllocateFixedArray(length, TENURED); 4089 if (always_allocate()) return AllocateFixedArray(length, TENURED);
3930 // Allocate the raw data for a fixed array. 4090 // Allocate the raw data for a fixed array.
3931 int size = FixedArray::SizeFor(length); 4091 int size = FixedArray::SizeFor(length);
3932 return size <= kMaxObjectSizeInNewSpace 4092 return size <= kMaxObjectSizeInNewSpace
3933 ? new_space_.AllocateRaw(size) 4093 ? new_space_.AllocateRaw(size)
3934 : lo_space_->AllocateRawFixedArray(size); 4094 : lo_space_->AllocateRaw(size, NOT_EXECUTABLE);
3935 } 4095 }
3936 4096
3937 4097
3938 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) { 4098 MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
3939 int len = src->length(); 4099 int len = src->length();
3940 Object* obj; 4100 Object* obj;
3941 { MaybeObject* maybe_obj = AllocateRawFixedArray(len); 4101 { MaybeObject* maybe_obj = AllocateRawFixedArray(len);
3942 if (!maybe_obj->ToObject(&obj)) return maybe_obj; 4102 if (!maybe_obj->ToObject(&obj)) return maybe_obj;
3943 } 4103 }
3944 if (InNewSpace(obj)) { 4104 if (InNewSpace(obj)) {
(...skipping 310 matching lines...)
4255 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE; 4415 (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
4256 Object* result; 4416 Object* result;
4257 { MaybeObject* maybe_result = Allocate(map, space); 4417 { MaybeObject* maybe_result = Allocate(map, space);
4258 if (!maybe_result->ToObject(&result)) return maybe_result; 4418 if (!maybe_result->ToObject(&result)) return maybe_result;
4259 } 4419 }
4260 Struct::cast(result)->InitializeBody(size); 4420 Struct::cast(result)->InitializeBody(size);
4261 return result; 4421 return result;
4262 } 4422 }
4263 4423
4264 4424
4425 bool Heap::IsHeapIterable() {
4426 return (!old_pointer_space()->was_swept_conservatively() &&
4427 !old_data_space()->was_swept_conservatively());
4428 }
4429
4430
4431 void Heap::EnsureHeapIsIterable() {
4432 ASSERT(IsAllocationAllowed());
4433 if (!IsHeapIterable()) {
4434 CollectAllGarbage(kMakeHeapIterableMask);
4435 }
4436 ASSERT(IsHeapIterable());
4437 }
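Review note: the new EnsureHeapIsIterable falls back to a full collection (kMakeHeapIterableMask) whenever conservative sweeping has left the old spaces non-iterable. A toy model of the same idiom with invented names and a stubbed collector:

#include <cassert>
#include <cstdio>

struct Space { bool swept_conservatively; };

struct Heap {
  Space old_pointer_space;
  Space old_data_space;
  bool IsIterable() const {
    return !old_pointer_space.swept_conservatively &&
           !old_data_space.swept_conservatively;
  }
  void CollectAllGarbage() {                 // stands in for kMakeHeapIterableMask
    old_pointer_space.swept_conservatively = false;
    old_data_space.swept_conservatively = false;
  }
  void EnsureIterable() {
    if (!IsIterable()) CollectAllGarbage();  // only pay for the full pass on demand
    assert(IsIterable());
  }
};

int main() {
  Heap heap{{true}, {false}};
  heap.EnsureIterable();
  std::printf("iterable: %d\n", heap.IsIterable());
}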
4438
4439
4265 bool Heap::IdleNotification() { 4440 bool Heap::IdleNotification() {
4266 static const int kIdlesBeforeScavenge = 4; 4441 static const int kIdlesBeforeScavenge = 4;
4267 static const int kIdlesBeforeMarkSweep = 7; 4442 static const int kIdlesBeforeMarkSweep = 7;
4268 static const int kIdlesBeforeMarkCompact = 8; 4443 static const int kIdlesBeforeMarkCompact = 8;
4269 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1; 4444 static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
4270 static const unsigned int kGCsBetweenCleanup = 4; 4445 static const unsigned int kGCsBetweenCleanup = 4;
4271 4446
4272 if (!last_idle_notification_gc_count_init_) { 4447 if (!last_idle_notification_gc_count_init_) {
4273 last_idle_notification_gc_count_ = gc_count_; 4448 last_idle_notification_gc_count_ = gc_count_;
4274 last_idle_notification_gc_count_init_ = true; 4449 last_idle_notification_gc_count_init_ = true;
(...skipping 10 matching lines...)
4285 number_idle_notifications_ = 4460 number_idle_notifications_ =
4286 Min(number_idle_notifications_ + 1, kMaxIdleCount); 4461 Min(number_idle_notifications_ + 1, kMaxIdleCount);
4287 } else { 4462 } else {
4288 number_idle_notifications_ = 0; 4463 number_idle_notifications_ = 0;
4289 last_idle_notification_gc_count_ = gc_count_; 4464 last_idle_notification_gc_count_ = gc_count_;
4290 } 4465 }
4291 4466
4292 if (number_idle_notifications_ == kIdlesBeforeScavenge) { 4467 if (number_idle_notifications_ == kIdlesBeforeScavenge) {
4293 if (contexts_disposed_ > 0) { 4468 if (contexts_disposed_ > 0) {
4294 HistogramTimerScope scope(isolate_->counters()->gc_context()); 4469 HistogramTimerScope scope(isolate_->counters()->gc_context());
4295 CollectAllGarbage(false); 4470 CollectAllGarbage(kNoGCFlags);
4296 } else { 4471 } else {
4297 CollectGarbage(NEW_SPACE); 4472 CollectGarbage(NEW_SPACE);
4298 } 4473 }
4299 new_space_.Shrink(); 4474 new_space_.Shrink();
4300 last_idle_notification_gc_count_ = gc_count_; 4475 last_idle_notification_gc_count_ = gc_count_;
4301 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) { 4476 } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
4302 // Before doing the mark-sweep collections we clear the 4477 // Before doing the mark-sweep collections we clear the
4303 // compilation cache to avoid hanging on to source code and 4478 // compilation cache to avoid hanging on to source code and
4304 // generated code for cached functions. 4479 // generated code for cached functions.
4305 isolate_->compilation_cache()->Clear(); 4480 isolate_->compilation_cache()->Clear();
4306 4481
4307 CollectAllGarbage(false); 4482 CollectAllGarbage(kNoGCFlags);
4308 new_space_.Shrink(); 4483 new_space_.Shrink();
4309 last_idle_notification_gc_count_ = gc_count_; 4484 last_idle_notification_gc_count_ = gc_count_;
4310 4485
4311 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) { 4486 } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
4312 CollectAllGarbage(true); 4487 CollectAllGarbage(kNoGCFlags);
4313 new_space_.Shrink(); 4488 new_space_.Shrink();
4314 last_idle_notification_gc_count_ = gc_count_; 4489 last_idle_notification_gc_count_ = gc_count_;
4315 number_idle_notifications_ = 0; 4490 number_idle_notifications_ = 0;
4316 finished = true; 4491 finished = true;
4317 } else if (contexts_disposed_ > 0) { 4492 } else if (contexts_disposed_ > 0) {
4318 if (FLAG_expose_gc) { 4493 if (FLAG_expose_gc) {
4319 contexts_disposed_ = 0; 4494 contexts_disposed_ = 0;
4320 } else { 4495 } else {
4321 HistogramTimerScope scope(isolate_->counters()->gc_context()); 4496 HistogramTimerScope scope(isolate_->counters()->gc_context());
4322 CollectAllGarbage(false); 4497 CollectAllGarbage(kNoGCFlags);
4323 last_idle_notification_gc_count_ = gc_count_; 4498 last_idle_notification_gc_count_ = gc_count_;
4324 } 4499 }
4325 // If this is the first idle notification, we reset the 4500 // If this is the first idle notification, we reset the
4326 // notification count to avoid letting idle notifications for 4501 // notification count to avoid letting idle notifications for
4327 // context disposal garbage collections start a potentially too 4502 // context disposal garbage collections start a potentially too
4328 // aggressive idle GC cycle. 4503 // aggressive idle GC cycle.
4329 if (number_idle_notifications_ <= 1) { 4504 if (number_idle_notifications_ <= 1) {
4330 number_idle_notifications_ = 0; 4505 number_idle_notifications_ = 0;
4331 uncommit = false; 4506 uncommit = false;
4332 } 4507 }
4333 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) { 4508 } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
4334 // If we have received more than kIdlesBeforeMarkCompact idle 4509 // If we have received more than kIdlesBeforeMarkCompact idle
4335 // notifications we do not perform any cleanup because we don't 4510 // notifications we do not perform any cleanup because we don't
4336 // expect to gain much by doing so. 4511 // expect to gain much by doing so.
4337 finished = true; 4512 finished = true;
4338 } 4513 }
4339 4514
4340 // Make sure that we have no pending context disposals and 4515 // Make sure that we have no pending context disposals and
4341 // conditionally uncommit from space. 4516 // conditionally uncommit from space.
4342 ASSERT(contexts_disposed_ == 0); 4517 // Take into account that we might have decided to delay full collection
4518 // because incremental marking is in progress.
4519 ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
4343 if (uncommit) UncommitFromSpace(); 4520 if (uncommit) UncommitFromSpace();
4521
4344 return finished; 4522 return finished;
4345 } 4523 }
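Review note: the idle-notification handler escalates to progressively heavier work as notifications accumulate, and the updated version always passes kNoGCFlags since incremental marking may be in progress. A small sketch that reuses the thresholds from the constants above but stubs out the collector calls; IdleAction and NextIdleAction are invented names:

#include <cstdio>

enum class IdleAction { kNone, kScavenge, kFullCollection, kFinalCollection };

// Thresholds copied from IdleNotification; the actions are stubs, not real GCs.
IdleAction NextIdleAction(int idle_notifications) {
  const int kIdlesBeforeScavenge = 4;
  const int kIdlesBeforeMarkSweep = 7;
  const int kIdlesBeforeMarkCompact = 8;
  if (idle_notifications == kIdlesBeforeScavenge) return IdleAction::kScavenge;
  if (idle_notifications == kIdlesBeforeMarkSweep) return IdleAction::kFullCollection;
  if (idle_notifications == kIdlesBeforeMarkCompact) return IdleAction::kFinalCollection;
  return IdleAction::kNone;
}

int main() {
  for (int i = 1; i <= 9; ++i) {
    std::printf("idle #%d -> action %d\n", i, static_cast<int>(NextIdleAction(i)));
  }
}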
4346 4524
4347 4525
4348 #ifdef DEBUG 4526 #ifdef DEBUG
4349 4527
4350 void Heap::Print() { 4528 void Heap::Print() {
4351 if (!HasBeenSetup()) return; 4529 if (!HasBeenSetup()) return;
4352 isolate()->PrintStack(); 4530 isolate()->PrintStack();
4353 AllSpaces spaces; 4531 AllSpaces spaces;
(...skipping 13 matching lines...)
4367 } 4545 }
4368 4546
4369 4547
4370 // This function expects that NewSpace's allocated objects histogram is 4548 // This function expects that NewSpace's allocated objects histogram is
4371 // populated (via a call to CollectStatistics or else as a side effect of a 4549 // populated (via a call to CollectStatistics or else as a side effect of a
4372 // just-completed scavenge collection). 4550 // just-completed scavenge collection).
4373 void Heap::ReportHeapStatistics(const char* title) { 4551 void Heap::ReportHeapStatistics(const char* title) {
4374 USE(title); 4552 USE(title);
4375 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", 4553 PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
4376 title, gc_count_); 4554 title, gc_count_);
4377 PrintF("mark-compact GC : %d\n", mc_count_);
4378 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n", 4555 PrintF("old_gen_promotion_limit_ %" V8_PTR_PREFIX "d\n",
4379 old_gen_promotion_limit_); 4556 old_gen_promotion_limit_);
4380 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n", 4557 PrintF("old_gen_allocation_limit_ %" V8_PTR_PREFIX "d\n",
4381 old_gen_allocation_limit_); 4558 old_gen_allocation_limit_);
4559 PrintF("old_gen_limit_factor_ %d\n", old_gen_limit_factor_);
4382 4560
4383 PrintF("\n"); 4561 PrintF("\n");
4384 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles()); 4562 PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles());
4385 isolate_->global_handles()->PrintStats(); 4563 isolate_->global_handles()->PrintStats();
4386 PrintF("\n"); 4564 PrintF("\n");
4387 4565
4388 PrintF("Heap statistics : "); 4566 PrintF("Heap statistics : ");
4389 isolate_->memory_allocator()->ReportStatistics(); 4567 isolate_->memory_allocator()->ReportStatistics();
4390 PrintF("To space : "); 4568 PrintF("To space : ");
4391 new_space_.ReportStatistics(); 4569 new_space_.ReportStatistics();
(...skipping 56 matching lines...)
4448 return cell_space_->Contains(addr); 4626 return cell_space_->Contains(addr);
4449 case LO_SPACE: 4627 case LO_SPACE:
4450 return lo_space_->SlowContains(addr); 4628 return lo_space_->SlowContains(addr);
4451 } 4629 }
4452 4630
4453 return false; 4631 return false;
4454 } 4632 }
4455 4633
4456 4634
4457 #ifdef DEBUG 4635 #ifdef DEBUG
4458 static void DummyScavengePointer(HeapObject** p) {
4459 }
4460
4461
4462 static void VerifyPointersUnderWatermark(
4463 PagedSpace* space,
4464 DirtyRegionCallback visit_dirty_region) {
4465 PageIterator it(space, PageIterator::PAGES_IN_USE);
4466
4467 while (it.has_next()) {
4468 Page* page = it.next();
4469 Address start = page->ObjectAreaStart();
4470 Address end = page->AllocationWatermark();
4471
4472 HEAP->IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
4473 start,
4474 end,
4475 visit_dirty_region,
4476 &DummyScavengePointer);
4477 }
4478 }
4479
4480
4481 static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
4482 LargeObjectIterator it(space);
4483 for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
4484 if (object->IsFixedArray()) {
4485 Address slot_address = object->address();
4486 Address end = object->address() + object->Size();
4487
4488 while (slot_address < end) {
4489 HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
4490 // When we are not in GC the Heap::InNewSpace() predicate
4491 // checks that pointers which satisfy predicate point into
4492 // the active semispace.
4493 HEAP->InNewSpace(*slot);
4494 slot_address += kPointerSize;
4495 }
4496 }
4497 }
4498 }
4499
4500
4501 void Heap::Verify() { 4636 void Heap::Verify() {
4502 ASSERT(HasBeenSetup()); 4637 ASSERT(HasBeenSetup());
4503 4638
4639 store_buffer()->Verify();
4640
4504 VerifyPointersVisitor visitor; 4641 VerifyPointersVisitor visitor;
4505 IterateRoots(&visitor, VISIT_ONLY_STRONG); 4642 IterateRoots(&visitor, VISIT_ONLY_STRONG);
4506 4643
4507 new_space_.Verify(); 4644 new_space_.Verify();
4508 4645
4509 VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor; 4646 old_pointer_space_->Verify(&visitor);
4510 old_pointer_space_->Verify(&dirty_regions_visitor); 4647 map_space_->Verify(&visitor);
4511 map_space_->Verify(&dirty_regions_visitor);
4512
4513 VerifyPointersUnderWatermark(old_pointer_space_,
4514 &IteratePointersInDirtyRegion);
4515 VerifyPointersUnderWatermark(map_space_,
4516 &IteratePointersInDirtyMapsRegion);
4517 VerifyPointersUnderWatermark(lo_space_);
4518
4519 VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
4520 VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
4521 4648
4522 VerifyPointersVisitor no_dirty_regions_visitor; 4649 VerifyPointersVisitor no_dirty_regions_visitor;
4523 old_data_space_->Verify(&no_dirty_regions_visitor); 4650 old_data_space_->Verify(&no_dirty_regions_visitor);
4524 code_space_->Verify(&no_dirty_regions_visitor); 4651 code_space_->Verify(&no_dirty_regions_visitor);
4525 cell_space_->Verify(&no_dirty_regions_visitor); 4652 cell_space_->Verify(&no_dirty_regions_visitor);
4526 4653
4527 lo_space_->Verify(); 4654 lo_space_->Verify();
4528 } 4655 }
4656
4529 #endif // DEBUG 4657 #endif // DEBUG
4530 4658
4531 4659
4532 MaybeObject* Heap::LookupSymbol(Vector<const char> string) { 4660 MaybeObject* Heap::LookupSymbol(Vector<const char> string) {
4533 Object* symbol = NULL; 4661 Object* symbol = NULL;
4534 Object* new_table; 4662 Object* new_table;
4535 { MaybeObject* maybe_new_table = 4663 { MaybeObject* maybe_new_table =
4536 symbol_table()->LookupSymbol(string, &symbol); 4664 symbol_table()->LookupSymbol(string, &symbol);
4537 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table; 4665 if (!maybe_new_table->ToObject(&new_table)) return maybe_new_table;
4538 } 4666 }
(...skipping 75 matching lines...)
4614 if (string->IsSymbol()) { 4742 if (string->IsSymbol()) {
4615 *symbol = string; 4743 *symbol = string;
4616 return true; 4744 return true;
4617 } 4745 }
4618 return symbol_table()->LookupSymbolIfExists(string, symbol); 4746 return symbol_table()->LookupSymbolIfExists(string, symbol);
4619 } 4747 }
4620 4748
4621 4749
4622 #ifdef DEBUG 4750 #ifdef DEBUG
4623 void Heap::ZapFromSpace() { 4751 void Heap::ZapFromSpace() {
4624 ASSERT(reinterpret_cast<Object*>(kFromSpaceZapValue)->IsFailure()); 4752 NewSpacePageIterator it(new_space_.FromSpaceStart(),
4625 for (Address a = new_space_.FromSpaceLow(); 4753 new_space_.FromSpaceEnd());
4626 a < new_space_.FromSpaceHigh(); 4754 while (it.has_next()) {
4627 a += kPointerSize) { 4755 NewSpacePage* page = it.next();
4628 Memory::Address_at(a) = kFromSpaceZapValue; 4756 for (Address cursor = page->body(), limit = page->body_limit();
4757 cursor < limit;
4758 cursor += kPointerSize) {
4759 Memory::Address_at(cursor) = kFromSpaceZapValue;
4760 }
4629 } 4761 }
4630 } 4762 }
4631 #endif // DEBUG 4763 #endif // DEBUG
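Review note: ZapFromSpace now walks from-space page by page (NewSpacePageIterator) instead of treating it as one contiguous range, writing kFromSpaceZapValue into every pointer-sized slot so stale references trap early. A standalone illustration of the zapping loop over pages; the page layout and zap constant below are made up:

#include <cstdint>
#include <cstdio>
#include <vector>

constexpr uintptr_t kZapValue = 0xdeadbeefu;   // invented stand-in for kFromSpaceZapValue

// Overwrite every pointer-sized slot of a dead page so stale reads fail loudly.
void ZapPage(std::vector<uintptr_t>* page_body) {
  for (uintptr_t& slot : *page_body) slot = kZapValue;
}

int main() {
  // Two tiny "pages" of four slots each, standing in for from-space pages.
  std::vector<std::vector<uintptr_t>> from_space(2, std::vector<uintptr_t>(4));
  for (std::vector<uintptr_t>& page : from_space) ZapPage(&page);
  std::printf("%lx\n", static_cast<unsigned long>(from_space[1][3]));
}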
4632 4764
4633 4765
4634 bool Heap::IteratePointersInDirtyRegion(Heap* heap,
4635 Address start,
4636 Address end,
4637 ObjectSlotCallback copy_object_func) {
4638 Address slot_address = start;
4639 bool pointers_to_new_space_found = false;
4640
4641 while (slot_address < end) {
4642 Object** slot = reinterpret_cast<Object**>(slot_address);
4643 if (heap->InNewSpace(*slot)) {
4644 ASSERT((*slot)->IsHeapObject());
4645 copy_object_func(reinterpret_cast<HeapObject**>(slot));
4646 if (heap->InNewSpace(*slot)) {
4647 ASSERT((*slot)->IsHeapObject());
4648 pointers_to_new_space_found = true;
4649 }
4650 }
4651 slot_address += kPointerSize;
4652 }
4653 return pointers_to_new_space_found;
4654 }
4655
4656
4657 // Compute start address of the first map following given addr.
4658 static inline Address MapStartAlign(Address addr) {
4659 Address page = Page::FromAddress(addr)->ObjectAreaStart();
4660 return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
4661 }
4662
4663
4664 // Compute end address of the first map preceding given addr.
4665 static inline Address MapEndAlign(Address addr) {
4666 Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
4667 return page + ((addr - page) / Map::kSize * Map::kSize);
4668 }
4669
4670
4671 static bool IteratePointersInDirtyMaps(Address start,
4672 Address end,
4673 ObjectSlotCallback copy_object_func) {
4674 ASSERT(MapStartAlign(start) == start);
4675 ASSERT(MapEndAlign(end) == end);
4676
4677 Address map_address = start;
4678 bool pointers_to_new_space_found = false;
4679
4680 Heap* heap = HEAP;
4681 while (map_address < end) {
4682 ASSERT(!heap->InNewSpace(Memory::Object_at(map_address)));
4683 ASSERT(Memory::Object_at(map_address)->IsMap());
4684
4685 Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
4686 Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
4687
4688 if (Heap::IteratePointersInDirtyRegion(heap,
4689 pointer_fields_start,
4690 pointer_fields_end,
4691 copy_object_func)) {
4692 pointers_to_new_space_found = true;
4693 }
4694
4695 map_address += Map::kSize;
4696 }
4697
4698 return pointers_to_new_space_found;
4699 }
4700
4701
4702 bool Heap::IteratePointersInDirtyMapsRegion(
4703 Heap* heap,
4704 Address start,
4705 Address end,
4706 ObjectSlotCallback copy_object_func) {
4707 Address map_aligned_start = MapStartAlign(start);
4708 Address map_aligned_end = MapEndAlign(end);
4709
4710 bool contains_pointers_to_new_space = false;
4711
4712 if (map_aligned_start != start) {
4713 Address prev_map = map_aligned_start - Map::kSize;
4714 ASSERT(Memory::Object_at(prev_map)->IsMap());
4715
4716 Address pointer_fields_start =
4717 Max(start, prev_map + Map::kPointerFieldsBeginOffset);
4718
4719 Address pointer_fields_end =
4720 Min(prev_map + Map::kPointerFieldsEndOffset, end);
4721
4722 contains_pointers_to_new_space =
4723 IteratePointersInDirtyRegion(heap,
4724 pointer_fields_start,
4725 pointer_fields_end,
4726 copy_object_func)
4727 || contains_pointers_to_new_space;
4728 }
4729
4730 contains_pointers_to_new_space =
4731 IteratePointersInDirtyMaps(map_aligned_start,
4732 map_aligned_end,
4733 copy_object_func)
4734 || contains_pointers_to_new_space;
4735
4736 if (map_aligned_end != end) {
4737 ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
4738
4739 Address pointer_fields_start =
4740 map_aligned_end + Map::kPointerFieldsBeginOffset;
4741
4742 Address pointer_fields_end =
4743 Min(end, map_aligned_end + Map::kPointerFieldsEndOffset);
4744
4745 contains_pointers_to_new_space =
4746 IteratePointersInDirtyRegion(heap,
4747 pointer_fields_start,
4748 pointer_fields_end,
4749 copy_object_func)
4750 || contains_pointers_to_new_space;
4751 }
4752
4753 return contains_pointers_to_new_space;
4754 }
4755
4756
4757 void Heap::IterateAndMarkPointersToFromSpace(Address start, 4766 void Heap::IterateAndMarkPointersToFromSpace(Address start,
4758 Address end, 4767 Address end,
4759 ObjectSlotCallback callback) { 4768 ObjectSlotCallback callback) {
4760 Address slot_address = start; 4769 Address slot_address = start;
4761 Page* page = Page::FromAddress(start); 4770
4762 4771 // We are not collecting slots on new space objects during mutation
4763 uint32_t marks = page->GetRegionMarks(); 4772 // thus we have to scan for pointers to evacuation candidates when we
4773 // promote objects. But we should not record any slots in non-black
4774 // objects. A grey object's slots would be rescanned anyway.
4775 // A white object might not survive until the end of the collection, so
4776 // it would be a violation of the invariant to record its slots.
4777 bool record_slots = false;
4778 if (incremental_marking()->IsCompacting()) {
4779 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
4780 record_slots = Marking::IsBlack(mark_bit);
4781 }
4764 4782
4765 while (slot_address < end) { 4783 while (slot_address < end) {
4766 Object** slot = reinterpret_cast<Object**>(slot_address); 4784 Object** slot = reinterpret_cast<Object**>(slot_address);
4767 if (InFromSpace(*slot)) { 4785 Object* object = *slot;
4768 ASSERT((*slot)->IsHeapObject()); 4786 // If the store buffer becomes overfull we mark pages as being exempt from
4769 callback(reinterpret_cast<HeapObject**>(slot)); 4787 // the store buffer. These pages are scanned to find pointers that point
4770 if (InNewSpace(*slot)) { 4788 // to the new space. In that case we may hit newly promoted objects and
4771 ASSERT((*slot)->IsHeapObject()); 4789 // fix the pointers before the promotion queue gets to them. Thus the 'if'.
4772 marks |= page->GetRegionMaskForAddress(slot_address); 4790 if (object->IsHeapObject()) {
4791 if (Heap::InFromSpace(object)) {
4792 callback(reinterpret_cast<HeapObject**>(slot),
4793 HeapObject::cast(object));
4794 Object* new_object = *slot;
4795 if (InNewSpace(new_object)) {
4796 ASSERT(Heap::InToSpace(new_object));
4797 ASSERT(new_object->IsHeapObject());
4798 store_buffer_.EnterDirectlyIntoStoreBuffer(
4799 reinterpret_cast<Address>(slot));
4800 }
4801 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
4802 } else if (record_slots &&
4803 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
4804 mark_compact_collector()->RecordSlot(slot, slot, object);
4773 } 4805 }
4774 } 4806 }
4775 slot_address += kPointerSize; 4807 slot_address += kPointerSize;
4776 } 4808 }
4777 4809 }
4778 page->SetRegionMarks(marks); 4810
4779 } 4811
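Review note: the new version records old-to-new pointers found while fixing up promoted objects directly into the store buffer, and only records evacuation-candidate slots for holders that are already marked black. The sketch below is a loose, simplified model of that gating; Slot, RecordPromotedObjectSlots and the black flag are invented, and the real code also rewrites each slot through the callback first:

#include <cstdio>
#include <vector>

struct Slot { bool points_to_new_space; };

// Slots still pointing into new space go back into the remembered set; other
// interesting slots are recorded for compaction only if the holder is black.
void RecordPromotedObjectSlots(std::vector<Slot>* slots, bool holder_is_black,
                               std::vector<Slot*>* store_buffer,
                               std::vector<Slot*>* compaction_slots) {
  for (Slot& slot : *slots) {
    if (slot.points_to_new_space) {
      store_buffer->push_back(&slot);
    } else if (holder_is_black) {
      compaction_slots->push_back(&slot);
    }
  }
}

int main() {
  std::vector<Slot> slots = {{true}, {false}, {true}};
  std::vector<Slot*> store_buffer, compaction_slots;
  RecordPromotedObjectSlots(&slots, /*holder_is_black=*/true,
                            &store_buffer, &compaction_slots);
  std::printf("store buffer: %zu, compaction slots: %zu\n",
              store_buffer.size(), compaction_slots.size());
}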
4780 4812 #ifdef DEBUG
4781 4813 typedef bool (*CheckStoreBufferFilter)(Object** addr);
4782 uint32_t Heap::IterateDirtyRegions( 4814
4783 uint32_t marks, 4815
4784 Address area_start, 4816 bool IsAMapPointerAddress(Object** addr) {
4785 Address area_end, 4817 uintptr_t a = reinterpret_cast<uintptr_t>(addr);
4786 DirtyRegionCallback visit_dirty_region, 4818 int mod = a % Map::kSize;
4787 ObjectSlotCallback copy_object_func) { 4819 return mod >= Map::kPointerFieldsBeginOffset &&
4788 uint32_t newmarks = 0; 4820 mod < Map::kPointerFieldsEndOffset;
4789 uint32_t mask = 1; 4821 }
4790 4822
4791 if (area_start >= area_end) { 4823
4792 return newmarks; 4824 bool EverythingsAPointer(Object** addr) {
4793 } 4825 return true;
4794 4826 }
4795 Address region_start = area_start; 4827
4796 4828
4797 // area_start does not necessarily coincide with start of the first region. 4829 static void CheckStoreBuffer(Heap* heap,
4798 // Thus to calculate the beginning of the next region we have to align 4830 Object** current,
4799 // area_start by Page::kRegionSize. 4831 Object** limit,
4800 Address second_region = 4832 Object**** store_buffer_position,
4801 reinterpret_cast<Address>( 4833 Object*** store_buffer_top,
4802 reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) & 4834 CheckStoreBufferFilter filter,
4803 ~Page::kRegionAlignmentMask); 4835 Address special_garbage_start,
4804 4836 Address special_garbage_end) {
4805 // Next region might be beyond area_end. 4837 Map* free_space_map = heap->free_space_map();
4806 Address region_end = Min(second_region, area_end); 4838 for ( ; current < limit; current++) {
4807 4839 Object* o = *current;
4808 if (marks & mask) { 4840 Address current_address = reinterpret_cast<Address>(current);
4809 if (visit_dirty_region(this, region_start, region_end, copy_object_func)) { 4841 // Skip free space.
4810 newmarks |= mask; 4842 if (o == free_space_map) {
4811 } 4843 Address current_address = reinterpret_cast<Address>(current);
4812 } 4844 FreeSpace* free_space =
4813 mask <<= 1; 4845 FreeSpace::cast(HeapObject::FromAddress(current_address));
4814 4846 int skip = free_space->Size();
4815 // Iterate subsequent regions which fully lay inside [area_start, area_end[. 4847 ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
4816 region_start = region_end; 4848 ASSERT(skip > 0);
4817 region_end = region_start + Page::kRegionSize; 4849 current_address += skip - kPointerSize;
4818 4850 current = reinterpret_cast<Object**>(current_address);
4819 while (region_end <= area_end) { 4851 continue;
4820 if (marks & mask) { 4852 }
4821 if (visit_dirty_region(this, 4853 // Skip the current linear allocation space between top and limit which is
4822 region_start, 4854 // unmarked with the free space map, but can contain junk.
4823 region_end, 4855 if (current_address == special_garbage_start &&
4824 copy_object_func)) { 4856 special_garbage_end != special_garbage_start) {
4825 newmarks |= mask; 4857 current_address = special_garbage_end - kPointerSize;
4826 } 4858 current = reinterpret_cast<Object**>(current_address);
4827 } 4859 continue;
4828 4860 }
4829 region_start = region_end; 4861 if (!(*filter)(current)) continue;
4830 region_end = region_start + Page::kRegionSize; 4862 ASSERT(current_address < special_garbage_start ||
4831 4863 current_address >= special_garbage_end);
4832 mask <<= 1; 4864 ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
4833 } 4865 // We have to check that the pointer does not point into new space
4834 4866 // without trying to cast it to a heap object since the hash field of
4835 if (region_start != area_end) { 4867 // a string can contain values like 1 and 3 which are tagged null
4836 // A small piece of area left uniterated because area_end does not coincide 4868 // pointers.
4837 // with region end. Check whether region covering last part of area is 4869 if (!heap->InNewSpace(o)) continue;
4838 // dirty. 4870 while (**store_buffer_position < current &&
4839 if (marks & mask) { 4871 *store_buffer_position < store_buffer_top) {
4840 if (visit_dirty_region(this, region_start, area_end, copy_object_func)) { 4872 (*store_buffer_position)++;
4841 newmarks |= mask; 4873 }
4842 } 4874 if (**store_buffer_position != current ||
4843 } 4875 *store_buffer_position == store_buffer_top) {
4844 } 4876 Object** obj_start = current;
4845 4877 while (!(*obj_start)->IsMap()) obj_start--;
4846 return newmarks; 4878 UNREACHABLE();
4847 } 4879 }
4848 4880 }
4849 4881 }
4850 4882
4851 void Heap::IterateDirtyRegions( 4883
4852 PagedSpace* space, 4884 // Check that the store buffer contains all intergenerational pointers by
4853 DirtyRegionCallback visit_dirty_region, 4885 // scanning a page and ensuring that all pointers to young space are in the
4854 ObjectSlotCallback copy_object_func, 4886 // store buffer.
4855 ExpectedPageWatermarkState expected_page_watermark_state) { 4887 void Heap::OldPointerSpaceCheckStoreBuffer() {
4856 4888 OldSpace* space = old_pointer_space();
4857 PageIterator it(space, PageIterator::PAGES_IN_USE); 4889 PageIterator pages(space);
4858 4890
4859 while (it.has_next()) { 4891 store_buffer()->SortUniq();
4860 Page* page = it.next(); 4892
4861 uint32_t marks = page->GetRegionMarks(); 4893 while (pages.has_next()) {
4862 4894 Page* page = pages.next();
4863 if (marks != Page::kAllRegionsCleanMarks) { 4895 Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
4864 Address start = page->ObjectAreaStart(); 4896
4865 4897 Address end = page->ObjectAreaEnd();
4866 // Do not try to visit pointers beyond page allocation watermark. 4898
4867 // Page can contain garbage pointers there. 4899 Object*** store_buffer_position = store_buffer()->Start();
4868 Address end; 4900 Object*** store_buffer_top = store_buffer()->Top();
4869 4901
4870 if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) || 4902 Object** limit = reinterpret_cast<Object**>(end);
4871 page->IsWatermarkValid()) { 4903 CheckStoreBuffer(this,
4872 end = page->AllocationWatermark(); 4904 current,
4873 } else { 4905 limit,
4874 end = page->CachedAllocationWatermark(); 4906 &store_buffer_position,
4875 } 4907 store_buffer_top,
4876 4908 &EverythingsAPointer,
4877 ASSERT(space == old_pointer_space_ || 4909 space->top(),
4878 (space == map_space_ && 4910 space->limit());
4879 ((page->ObjectAreaStart() - end) % Map::kSize == 0))); 4911 }
4880 4912 }
4881 page->SetRegionMarks(IterateDirtyRegions(marks, 4913
4882 start, 4914
4883 end, 4915 void Heap::MapSpaceCheckStoreBuffer() {
4884 visit_dirty_region, 4916 MapSpace* space = map_space();
4885 copy_object_func)); 4917 PageIterator pages(space);
4886 } 4918
4887 4919 store_buffer()->SortUniq();
4888 // Mark page watermark as invalid to maintain watermark validity invariant. 4920
4889 // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details. 4921 while (pages.has_next()) {
4890 page->InvalidateWatermark(true); 4922 Page* page = pages.next();
4891 } 4923 Object** current = reinterpret_cast<Object**>(page->ObjectAreaStart());
4892 } 4924
4925 Address end = page->ObjectAreaEnd();
4926
4927 Object*** store_buffer_position = store_buffer()->Start();
4928 Object*** store_buffer_top = store_buffer()->Top();
4929
4930 Object** limit = reinterpret_cast<Object**>(end);
4931 CheckStoreBuffer(this,
4932 current,
4933 limit,
4934 &store_buffer_position,
4935 store_buffer_top,
4936 &IsAMapPointerAddress,
4937 space->top(),
4938 space->limit());
4939 }
4940 }
4941
4942
4943 void Heap::LargeObjectSpaceCheckStoreBuffer() {
4944 LargeObjectIterator it(lo_space());
4945 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
4946 // We only have code, sequential strings, or fixed arrays in large
4947 // object space, and only fixed arrays can possibly contain pointers to
4948 // the young generation.
4949 if (object->IsFixedArray()) {
4950 Object*** store_buffer_position = store_buffer()->Start();
4951 Object*** store_buffer_top = store_buffer()->Top();
4952 Object** current = reinterpret_cast<Object**>(object->address());
4953 Object** limit =
4954 reinterpret_cast<Object**>(object->address() + object->Size());
4955 CheckStoreBuffer(this,
4956 current,
4957 limit,
4958 &store_buffer_position,
4959 store_buffer_top,
4960 &EverythingsAPointer,
4961 NULL,
4962 NULL);
4963 }
4964 }
4965 }
4966 #endif
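Review note: the debug-only checks above sort and deduplicate the store buffer, then scan each space and require every pointer into new space to be present in it. A hedged, standalone model of that invariant check, using a plain sorted vector of integers where the real code walks addresses and the store buffer:

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <vector>

// Every old-to-new pointer found by scanning must already be in the
// remembered set (a sorted, deduplicated vector stands in for the store buffer).
void CheckRememberedSet(const std::vector<int>& old_to_new_slots,
                        std::vector<int> store_buffer) {
  std::sort(store_buffer.begin(), store_buffer.end());             // SortUniq()
  store_buffer.erase(std::unique(store_buffer.begin(), store_buffer.end()),
                     store_buffer.end());
  for (int slot : old_to_new_slots) {
    assert(std::binary_search(store_buffer.begin(), store_buffer.end(), slot));
  }
}

int main() {
  CheckRememberedSet({3, 7}, {7, 3, 3, 9});
  std::printf("store buffer covers all old-to-new slots\n");
}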
4893 4967
4894 4968
4895 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) { 4969 void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
4896 IterateStrongRoots(v, mode); 4970 IterateStrongRoots(v, mode);
4897 IterateWeakRoots(v, mode); 4971 IterateWeakRoots(v, mode);
4898 } 4972 }
4899 4973
4900 4974
4901 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) { 4975 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
4902 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex])); 4976 v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
(...skipping 31 matching lines...)
4934 isolate_->compilation_cache()->Iterate(v); 5008 isolate_->compilation_cache()->Iterate(v);
4935 v->Synchronize("compilationcache"); 5009 v->Synchronize("compilationcache");
4936 5010
4937 // Iterate over local handles in handle scopes. 5011 // Iterate over local handles in handle scopes.
4938 isolate_->handle_scope_implementer()->Iterate(v); 5012 isolate_->handle_scope_implementer()->Iterate(v);
4939 v->Synchronize("handlescope"); 5013 v->Synchronize("handlescope");
4940 5014
4941 // Iterate over the builtin code objects and code stubs in the 5015 // Iterate over the builtin code objects and code stubs in the
4942 // heap. Note that it is not necessary to iterate over code objects 5016 // heap. Note that it is not necessary to iterate over code objects
4943 // on scavenge collections. 5017 // on scavenge collections.
4944 if (mode != VISIT_ALL_IN_SCAVENGE && 5018 if (mode != VISIT_ALL_IN_SCAVENGE) {
4945 mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
4946 isolate_->builtins()->IterateBuiltins(v); 5019 isolate_->builtins()->IterateBuiltins(v);
4947 } 5020 }
4948 v->Synchronize("builtins"); 5021 v->Synchronize("builtins");
4949 5022
4950 // Iterate over global handles. 5023 // Iterate over global handles.
4951 switch (mode) { 5024 switch (mode) {
4952 case VISIT_ONLY_STRONG: 5025 case VISIT_ONLY_STRONG:
4953 isolate_->global_handles()->IterateStrongRoots(v); 5026 isolate_->global_handles()->IterateStrongRoots(v);
4954 break; 5027 break;
4955 case VISIT_ALL_IN_SCAVENGE: 5028 case VISIT_ALL_IN_SCAVENGE:
(...skipping 23 matching lines...)
4979 // output a flag to the snapshot. However at this point the serializer and 5052 // output a flag to the snapshot. However at this point the serializer and
4980 // deserializer are deliberately a little unsynchronized (see above) so the 5053 // deserializer are deliberately a little unsynchronized (see above) so the
4981 // checking of the sync flag in the snapshot would fail. 5054 // checking of the sync flag in the snapshot would fail.
4982 } 5055 }
4983 5056
4984 5057
4985 // TODO(1236194): Since the heap size is configurable on the command line 5058 // TODO(1236194): Since the heap size is configurable on the command line
4986 // and through the API, we should gracefully handle the case that the heap 5059 // and through the API, we should gracefully handle the case that the heap
4987 // size is not big enough to fit all the initial objects. 5060 // size is not big enough to fit all the initial objects.
4988 bool Heap::ConfigureHeap(int max_semispace_size, 5061 bool Heap::ConfigureHeap(int max_semispace_size,
4989 int max_old_gen_size, 5062 intptr_t max_old_gen_size,
4990 int max_executable_size) { 5063 intptr_t max_executable_size) {
4991 if (HasBeenSetup()) return false; 5064 if (HasBeenSetup()) return false;
4992 5065
4993 if (max_semispace_size > 0) max_semispace_size_ = max_semispace_size; 5066 if (max_semispace_size > 0) {
5067 if (max_semispace_size < Page::kPageSize) {
5068 max_semispace_size = Page::kPageSize;
5069 if (FLAG_trace_gc) {
5070 PrintF("Max semispace size cannot be less than %dkbytes",
5071 Page::kPageSize >> 10);
5072 }
5073 }
5074 max_semispace_size_ = max_semispace_size;
5075 }
4994 5076
4995 if (Snapshot::IsEnabled()) { 5077 if (Snapshot::IsEnabled()) {
4996 // If we are using a snapshot we always reserve the default amount 5078 // If we are using a snapshot we always reserve the default amount
4997 // of memory for each semispace because code in the snapshot has 5079 // of memory for each semispace because code in the snapshot has
4998 // write-barrier code that relies on the size and alignment of new 5080 // write-barrier code that relies on the size and alignment of new
4999 // space. We therefore cannot use a larger max semispace size 5081 // space. We therefore cannot use a larger max semispace size
5000 // than the default reserved semispace size. 5082 // than the default reserved semispace size.
5001 if (max_semispace_size_ > reserved_semispace_size_) { 5083 if (max_semispace_size_ > reserved_semispace_size_) {
5002 max_semispace_size_ = reserved_semispace_size_; 5084 max_semispace_size_ = reserved_semispace_size_;
5085 if (FLAG_trace_gc) {
5086 PrintF("Max semispace size cannot be more than %dkbytes",
5087 reserved_semispace_size_ >> 10);
5088 }
5003 } 5089 }
5004 } else { 5090 } else {
5005 // If we are not using snapshots we reserve space for the actual 5091 // If we are not using snapshots we reserve space for the actual
5006 // max semispace size. 5092 // max semispace size.
5007 reserved_semispace_size_ = max_semispace_size_; 5093 reserved_semispace_size_ = max_semispace_size_;
5008 } 5094 }
5009 5095
5010 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size; 5096 if (max_old_gen_size > 0) max_old_generation_size_ = max_old_gen_size;
5011 if (max_executable_size > 0) { 5097 if (max_executable_size > 0) {
5012 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize); 5098 max_executable_size_ = RoundUp(max_executable_size, Page::kPageSize);
5013 } 5099 }
5014 5100
5015 // The max executable size must be less than or equal to the max old 5101 // The max executable size must be less than or equal to the max old
5016 // generation size. 5102 // generation size.
5017 if (max_executable_size_ > max_old_generation_size_) { 5103 if (max_executable_size_ > max_old_generation_size_) {
5018 max_executable_size_ = max_old_generation_size_; 5104 max_executable_size_ = max_old_generation_size_;
5019 } 5105 }
5020 5106
5021 // The new space size must be a power of two to support single-bit testing 5107 // The new space size must be a power of two to support single-bit testing
5022 // for containment. 5108 // for containment.
5023 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_); 5109 max_semispace_size_ = RoundUpToPowerOf2(max_semispace_size_);
5024 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_); 5110 reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
5025 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_); 5111 initial_semispace_size_ = Min(initial_semispace_size_, max_semispace_size_);
5026 external_allocation_limit_ = 10 * max_semispace_size_; 5112 external_allocation_limit_ = 10 * max_semispace_size_;
5027 5113
5028 // The old generation is paged. 5114 // The old generation is paged and needs at least one page for each space.
5029 max_old_generation_size_ = RoundUp(max_old_generation_size_, Page::kPageSize); 5115 int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
5116 max_old_generation_size_ = Max(static_cast<intptr_t>(paged_space_count *
5117 Page::kPageSize),
5118 RoundUp(max_old_generation_size_,
5119 Page::kPageSize));
5030 5120
5031 configured_ = true; 5121 configured_ = true;
5032 return true; 5122 return true;
5033 } 5123 }
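The clamping above leans on two helpers, RoundUp and RoundUpToPowerOf2, and on the invariant that a power-of-two semispace size with a size-aligned base turns containment checks into a single mask. A minimal, self-contained sketch of that arithmetic; RoundUpTo and RoundUpToPowerOf2 below are stand-ins written for this note, and kSemispaceBase plus the sample sizes are arbitrary, not values V8 uses.

#include <cassert>
#include <cstdint>
#include <cstdio>

// Stand-in for V8's RoundUp: round x up to a multiple of a power-of-two value.
static uintptr_t RoundUpTo(uintptr_t x, uintptr_t multiple) {
  return (x + multiple - 1) & ~(multiple - 1);
}

// Stand-in for V8's RoundUpToPowerOf2.
static uintptr_t RoundUpToPowerOf2(uintptr_t x) {
  uintptr_t result = 1;
  while (result < x) result <<= 1;
  return result;
}

int main() {
  // A requested semispace of 300 KB gets rounded up to 512 KB.
  const uintptr_t kSemispaceSize = RoundUpToPowerOf2(300 * 1024);
  // Pretend the semispace starts at a base aligned to its own size.
  const uintptr_t kSemispaceBase = 16u * 1024 * 1024;

  // "Single-bit testing for containment": with a power-of-two size and an
  // aligned base, masking off the low bits of any address recovers the base.
  uintptr_t inside = kSemispaceBase + 12345;
  uintptr_t outside = kSemispaceBase + kSemispaceSize + 1;
  assert((inside & ~(kSemispaceSize - 1)) == kSemispaceBase);
  assert((outside & ~(kSemispaceSize - 1)) != kSemispaceBase);

  // Old-generation limits are rounded up to whole pages, as in the diff above.
  printf("rounded: %lu\n",
         static_cast<unsigned long>(RoundUpTo(37 * 1024 * 1024 + 1, 1 << 20)));
  return 0;
}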
5034 5124
5035 5125
5036 bool Heap::ConfigureHeapDefault() { 5126 bool Heap::ConfigureHeapDefault() {
5037 return ConfigureHeap(FLAG_max_new_space_size / 2 * KB, 5127 return ConfigureHeap(static_cast<intptr_t>(FLAG_max_new_space_size / 2) * KB,
5038 FLAG_max_old_space_size * MB, 5128 static_cast<intptr_t>(FLAG_max_old_space_size) * MB,
5039 FLAG_max_executable_size * MB); 5129 static_cast<intptr_t>(FLAG_max_executable_size) * MB);
5040 } 5130 }
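ConfigureHeapDefault pulls its numbers from the --max_new_space_size, --max_old_space_size and --max_executable_size flags referenced in the code above. Embedders of this V8 generation would typically reach the same ConfigureHeap path through the public ResourceConstraints API instead; the sketch below is a rough illustration assuming the pre-isolate v8.h of that era, and the setter names and units should be verified against the actual header.

#include <v8.h>

// Hedged sketch only: request custom heap limits before creating any context.
// The setters and their units (bytes here) are assumptions about the 3.6-era
// v8.h, not verified against this exact revision.
int main() {
  v8::ResourceConstraints constraints;
  constraints.set_max_young_space_size(2 * 1024 * 1024);
  constraints.set_max_old_space_size(128 * 1024 * 1024);
  constraints.set_max_executable_size(64 * 1024 * 1024);
  v8::SetResourceConstraints(&constraints);

  v8::HandleScope scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  // ... run scripts under the configured limits ...
  context.Dispose();
  return 0;
}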
5041 5131
5042 5132
5043 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) { 5133 void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
5044 *stats->start_marker = HeapStats::kStartMarker; 5134 *stats->start_marker = HeapStats::kStartMarker;
5045 *stats->end_marker = HeapStats::kEndMarker; 5135 *stats->end_marker = HeapStats::kEndMarker;
5046 *stats->new_space_size = new_space_.SizeAsInt(); 5136 *stats->new_space_size = new_space_.SizeAsInt();
5047 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity()); 5137 *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
5048 *stats->old_pointer_space_size = old_pointer_space_->Size(); 5138 *stats->old_pointer_space_size = old_pointer_space_->Size();
5049 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity(); 5139 *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
5050 *stats->old_data_space_size = old_data_space_->Size(); 5140 *stats->old_data_space_size = old_data_space_->Size();
5051 *stats->old_data_space_capacity = old_data_space_->Capacity(); 5141 *stats->old_data_space_capacity = old_data_space_->Capacity();
5052 *stats->code_space_size = code_space_->Size(); 5142 *stats->code_space_size = code_space_->Size();
5053 *stats->code_space_capacity = code_space_->Capacity(); 5143 *stats->code_space_capacity = code_space_->Capacity();
5054 *stats->map_space_size = map_space_->Size(); 5144 *stats->map_space_size = map_space_->Size();
5055 *stats->map_space_capacity = map_space_->Capacity(); 5145 *stats->map_space_capacity = map_space_->Capacity();
5056 *stats->cell_space_size = cell_space_->Size(); 5146 *stats->cell_space_size = cell_space_->Size();
5057 *stats->cell_space_capacity = cell_space_->Capacity(); 5147 *stats->cell_space_capacity = cell_space_->Capacity();
5058 *stats->lo_space_size = lo_space_->Size(); 5148 *stats->lo_space_size = lo_space_->Size();
5059 isolate_->global_handles()->RecordStats(stats); 5149 isolate_->global_handles()->RecordStats(stats);
5060 *stats->memory_allocator_size = isolate()->memory_allocator()->Size(); 5150 *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
5061 *stats->memory_allocator_capacity = 5151 *stats->memory_allocator_capacity =
5062 isolate()->memory_allocator()->Size() + 5152 isolate()->memory_allocator()->Size() +
5063 isolate()->memory_allocator()->Available(); 5153 isolate()->memory_allocator()->Available();
5064 *stats->os_error = OS::GetLastError(); 5154 *stats->os_error = OS::GetLastError();
5065 isolate()->memory_allocator()->Available(); 5155 isolate()->memory_allocator()->Available();
5066 if (take_snapshot) { 5156 if (take_snapshot) {
5067 HeapIterator iterator(HeapIterator::kFilterFreeListNodes); 5157 HeapIterator iterator;
5068 for (HeapObject* obj = iterator.next(); 5158 for (HeapObject* obj = iterator.next();
5069 obj != NULL; 5159 obj != NULL;
5070 obj = iterator.next()) { 5160 obj = iterator.next()) {
5071 InstanceType type = obj->map()->instance_type(); 5161 InstanceType type = obj->map()->instance_type();
5072 ASSERT(0 <= type && type <= LAST_TYPE); 5162 ASSERT(0 <= type && type <= LAST_TYPE);
5073 stats->objects_per_type[type]++; 5163 stats->objects_per_type[type]++;
5074 stats->size_per_type[type] += obj->Size(); 5164 stats->size_per_type[type] += obj->Size();
5075 } 5165 }
5076 } 5166 }
5077 } 5167 }
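When take_snapshot is set, RecordStats also fills per-InstanceType histograms (objects_per_type and size_per_type) by walking the whole heap. A toy consumer of that kind of histogram, using a placeholder struct rather than V8's actual HeapStats layout:

#include <cstddef>
#include <cstdio>

// Placeholder mirroring the shape of HeapStats' per-type histograms.
struct ToyHeapStats {
  static const int kTypeCount = 8;  // stand-in for LAST_TYPE + 1
  int objects_per_type[kTypeCount];
  size_t size_per_type[kTypeCount];
};

// Report the instance type that accounts for the most bytes.
static void ReportLargestType(const ToyHeapStats& stats) {
  int worst = 0;
  for (int type = 1; type < ToyHeapStats::kTypeCount; type++) {
    if (stats.size_per_type[type] > stats.size_per_type[worst]) worst = type;
  }
  std::printf("type %d dominates: %d objects, %lu bytes\n", worst,
              stats.objects_per_type[worst],
              static_cast<unsigned long>(stats.size_per_type[worst]));
}

int main() {
  ToyHeapStats stats = {};           // zero both histograms
  stats.objects_per_type[3] = 42;    // pretend: 42 objects of type 3
  stats.size_per_type[3] = 42 * 64;  // totalling 2688 bytes
  ReportLargestType(stats);
  return 0;
}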
(...skipping 195 matching lines...)
5273 // Configuration is based on the flags new-space-size (really the semispace 5363 // Configuration is based on the flags new-space-size (really the semispace
5274 // size) and old-space-size if set or the initial values of semispace_size_ 5364 // size) and old-space-size if set or the initial values of semispace_size_
5275 // and old_generation_size_ otherwise. 5365 // and old_generation_size_ otherwise.
5276 if (!configured_) { 5366 if (!configured_) {
5277 if (!ConfigureHeapDefault()) return false; 5367 if (!ConfigureHeapDefault()) return false;
5278 } 5368 }
5279 5369
5280 gc_initializer_mutex->Lock(); 5370 gc_initializer_mutex->Lock();
5281 static bool initialized_gc = false; 5371 static bool initialized_gc = false;
5282 if (!initialized_gc) { 5372 if (!initialized_gc) {
5283 initialized_gc = true; 5373 initialized_gc = true;
5284 InitializeScavengingVisitorsTables(); 5374 InitializeScavengingVisitorsTables();
5285 NewSpaceScavenger::Initialize(); 5375 NewSpaceScavenger::Initialize();
5286 MarkCompactCollector::Initialize(); 5376 MarkCompactCollector::Initialize();
5287 } 5377 }
5288 gc_initializer_mutex->Unlock(); 5378 gc_initializer_mutex->Unlock();
5289 5379
5290 MarkMapPointersAsEncoded(false); 5380 MarkMapPointersAsEncoded(false);
5291 5381
5292 // Setup memory allocator and reserve a chunk of memory for new 5382 // Setup memory allocator.
5293 // space. The chunk is double the size of the requested reserved
5294 // new space size to ensure that we can find a pair of semispaces that
5295 // are contiguous and aligned to their size.
5296 if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize())) 5383 if (!isolate_->memory_allocator()->Setup(MaxReserved(), MaxExecutableSize()))
5297 return false; 5384 return false;
5298 void* chunk =
5299 isolate_->memory_allocator()->ReserveInitialChunk(
5300 4 * reserved_semispace_size_);
5301 if (chunk == NULL) return false;
5302 5385
5303 // Align the pair of semispaces to their size, which must be a power 5386 // Setup new space.
5304 // of 2. 5387 if (!new_space_.Setup(reserved_semispace_size_, max_semispace_size_)) {
5305 Address new_space_start =
5306 RoundUp(reinterpret_cast<byte*>(chunk), 2 * reserved_semispace_size_);
5307 if (!new_space_.Setup(new_space_start, 2 * reserved_semispace_size_)) {
5308 return false; 5388 return false;
5309 } 5389 }
5310 5390
5311 // Initialize old pointer space. 5391 // Initialize old pointer space.
5312 old_pointer_space_ = 5392 old_pointer_space_ =
5313 new OldSpace(this, 5393 new OldSpace(this,
5314 max_old_generation_size_, 5394 max_old_generation_size_,
5315 OLD_POINTER_SPACE, 5395 OLD_POINTER_SPACE,
5316 NOT_EXECUTABLE); 5396 NOT_EXECUTABLE);
5317 if (old_pointer_space_ == NULL) return false; 5397 if (old_pointer_space_ == NULL) return false;
5318 if (!old_pointer_space_->Setup(NULL, 0)) return false; 5398 if (!old_pointer_space_->Setup()) return false;
5319 5399
5320 // Initialize old data space. 5400 // Initialize old data space.
5321 old_data_space_ = 5401 old_data_space_ =
5322 new OldSpace(this, 5402 new OldSpace(this,
5323 max_old_generation_size_, 5403 max_old_generation_size_,
5324 OLD_DATA_SPACE, 5404 OLD_DATA_SPACE,
5325 NOT_EXECUTABLE); 5405 NOT_EXECUTABLE);
5326 if (old_data_space_ == NULL) return false; 5406 if (old_data_space_ == NULL) return false;
5327 if (!old_data_space_->Setup(NULL, 0)) return false; 5407 if (!old_data_space_->Setup()) return false;
5328 5408
5329 // Initialize the code space, set its maximum capacity to the old 5409 // Initialize the code space, set its maximum capacity to the old
5330 // generation size. It needs executable memory. 5410 // generation size. It needs executable memory.
5331 // On 64-bit platform(s), we put all code objects in a 2 GB range of 5411 // On 64-bit platform(s), we put all code objects in a 2 GB range of
5332 // virtual address space, so that they can call each other with near calls. 5412 // virtual address space, so that they can call each other with near calls.
5333 if (code_range_size_ > 0) { 5413 if (code_range_size_ > 0) {
5334 if (!isolate_->code_range()->Setup(code_range_size_)) { 5414 if (!isolate_->code_range()->Setup(code_range_size_)) {
5335 return false; 5415 return false;
5336 } 5416 }
5337 } 5417 }
5338 5418
5339 code_space_ = 5419 code_space_ =
5340 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE); 5420 new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
5341 if (code_space_ == NULL) return false; 5421 if (code_space_ == NULL) return false;
5342 if (!code_space_->Setup(NULL, 0)) return false; 5422 if (!code_space_->Setup()) return false;
5343 5423
5344 // Initialize map space. 5424 // Initialize map space.
5345 map_space_ = new MapSpace(this, FLAG_use_big_map_space 5425 map_space_ = new MapSpace(this,
5346 ? max_old_generation_size_ 5426 max_old_generation_size_,
5347 : MapSpace::kMaxMapPageIndex * Page::kPageSize, 5427 FLAG_max_map_space_pages,
5348 FLAG_max_map_space_pages, 5428 MAP_SPACE);
5349 MAP_SPACE);
5350 if (map_space_ == NULL) return false; 5429 if (map_space_ == NULL) return false;
5351 if (!map_space_->Setup(NULL, 0)) return false; 5430 if (!map_space_->Setup()) return false;
5352 5431
5353 // Initialize global property cell space. 5432 // Initialize global property cell space.
5354 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE); 5433 cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
5355 if (cell_space_ == NULL) return false; 5434 if (cell_space_ == NULL) return false;
5356 if (!cell_space_->Setup(NULL, 0)) return false; 5435 if (!cell_space_->Setup()) return false;
5357 5436
5358 // The large object code space may contain code or data. We set the memory 5437 // The large object code space may contain code or data. We set the memory
5359 // to be non-executable here for safety, but this means we need to enable it 5438 // to be non-executable here for safety, but this means we need to enable it
5360 // explicitly when allocating large code objects. 5439 // explicitly when allocating large code objects.
5361 lo_space_ = new LargeObjectSpace(this, LO_SPACE); 5440 lo_space_ = new LargeObjectSpace(this, LO_SPACE);
5362 if (lo_space_ == NULL) return false; 5441 if (lo_space_ == NULL) return false;
5363 if (!lo_space_->Setup()) return false; 5442 if (!lo_space_->Setup()) return false;
5364
5365 if (create_heap_objects) { 5443 if (create_heap_objects) {
5366 // Create initial maps. 5444 // Create initial maps.
5367 if (!CreateInitialMaps()) return false; 5445 if (!CreateInitialMaps()) return false;
5368 if (!CreateApiObjects()) return false; 5446 if (!CreateApiObjects()) return false;
5369 5447
5370 // Create initial objects 5448 // Create initial objects
5371 if (!CreateInitialObjects()) return false; 5449 if (!CreateInitialObjects()) return false;
5372 5450
5373 global_contexts_list_ = undefined_value(); 5451 global_contexts_list_ = undefined_value();
5374 } 5452 }
5375 5453
5376 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity())); 5454 LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
5377 LOG(isolate_, IntPtrTEvent("heap-available", Available())); 5455 LOG(isolate_, IntPtrTEvent("heap-available", Available()));
5378 5456
5457 store_buffer()->Setup();
5458
5379 return true; 5459 return true;
5380 } 5460 }
5381 5461
5382 5462
5383 void Heap::SetStackLimits() { 5463 void Heap::SetStackLimits() {
5384 ASSERT(isolate_ != NULL); 5464 ASSERT(isolate_ != NULL);
5385 ASSERT(isolate_ == isolate()); 5465 ASSERT(isolate_ == isolate());
5386 // On 64 bit machines, pointers are generally out of range of Smis. We write 5466 // On 64 bit machines, pointers are generally out of range of Smis. We write
5387 // something that looks like an out of range Smi to the GC. 5467 // something that looks like an out of range Smi to the GC.
5388 5468
5389 // Set up the special root array entries containing the stack limits. 5469 // Set up the special root array entries containing the stack limits.
5390 // These are actually addresses, but the tag makes the GC ignore them. 5470 // These are actually addresses, but the tag makes the GC ignore them.
5391 roots_[kStackLimitRootIndex] = 5471 roots_[kStackLimitRootIndex] =
5392 reinterpret_cast<Object*>( 5472 reinterpret_cast<Object*>(
5393 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag); 5473 (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
5394 roots_[kRealStackLimitRootIndex] = 5474 roots_[kRealStackLimitRootIndex] =
5395 reinterpret_cast<Object*>( 5475 reinterpret_cast<Object*>(
5396 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag); 5476 (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
5397 } 5477 }
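The stack-limit roots hold raw addresses disguised so a tag check sees a small integer instead of a heap pointer, which keeps the GC from trying to follow them. A standalone sketch of that masking trick; kSmiTag and kSmiTagMask below restate the conventional V8 values as an assumption rather than pulling them from this tree.

#include <cassert>
#include <cstdint>

// Conventional V8-style tagging constants (assumed here, not taken from v8).
const uintptr_t kSmiTag = 0;
const uintptr_t kSmiTagMask = 1;

// Disguise an arbitrary address as a smi-looking word: clear the low bit so a
// tag check sees "small integer" and the GC never tries to follow it.
uintptr_t DisguiseAsSmi(uintptr_t address) {
  return (address & ~kSmiTagMask) | kSmiTag;
}

int main() {
  int on_stack = 0;
  uintptr_t limit = reinterpret_cast<uintptr_t>(&on_stack);
  uintptr_t disguised = DisguiseAsSmi(limit);
  assert((disguised & kSmiTagMask) == kSmiTag);  // looks like a smi to the GC
  assert((disguised | 1) == (limit | 1));        // still (almost) the address
  return 0;
}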
5398 5478
5399 5479
5400 void Heap::TearDown() { 5480 void Heap::TearDown() {
5401 if (FLAG_print_cumulative_gc_stat) { 5481 if (FLAG_print_cumulative_gc_stat) {
5402 PrintF("\n\n"); 5482 PrintF("\n\n");
5403 PrintF("gc_count=%d ", gc_count_); 5483 PrintF("gc_count=%d ", gc_count_);
5404 PrintF("mark_sweep_count=%d ", ms_count_); 5484 PrintF("mark_sweep_count=%d ", ms_count_);
5405 PrintF("mark_compact_count=%d ", mc_count_);
5406 PrintF("max_gc_pause=%d ", get_max_gc_pause()); 5485 PrintF("max_gc_pause=%d ", get_max_gc_pause());
5407 PrintF("min_in_mutator=%d ", get_min_in_mutator()); 5486 PrintF("min_in_mutator=%d ", get_min_in_mutator());
5408 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", 5487 PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
5409 get_max_alive_after_gc()); 5488 get_max_alive_after_gc());
5410 PrintF("\n\n"); 5489 PrintF("\n\n");
5411 } 5490 }
5412 5491
5413 isolate_->global_handles()->TearDown(); 5492 isolate_->global_handles()->TearDown();
5414 5493
5415 external_string_table_.TearDown(); 5494 external_string_table_.TearDown();
(...skipping 29 matching lines...)
5445 delete cell_space_; 5524 delete cell_space_;
5446 cell_space_ = NULL; 5525 cell_space_ = NULL;
5447 } 5526 }
5448 5527
5449 if (lo_space_ != NULL) { 5528 if (lo_space_ != NULL) {
5450 lo_space_->TearDown(); 5529 lo_space_->TearDown();
5451 delete lo_space_; 5530 delete lo_space_;
5452 lo_space_ = NULL; 5531 lo_space_ = NULL;
5453 } 5532 }
5454 5533
5534 store_buffer()->TearDown();
5535 incremental_marking()->TearDown();
5536
5455 isolate_->memory_allocator()->TearDown(); 5537 isolate_->memory_allocator()->TearDown();
5456 5538
5457 #ifdef DEBUG 5539 #ifdef DEBUG
5458 delete debug_utils_; 5540 delete debug_utils_;
5459 debug_utils_ = NULL; 5541 debug_utils_ = NULL;
5460 #endif 5542 #endif
5461 } 5543 }
5462 5544
5463 5545
5464 void Heap::Shrink() { 5546 void Heap::Shrink() {
5465 // Try to shrink all paged spaces. 5547 // Try to shrink all paged spaces.
5466 PagedSpaces spaces; 5548 PagedSpaces spaces;
5467 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next()) 5549 for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
5468 space->Shrink(); 5550 space->ReleaseAllUnusedPages();
5469 } 5551 }
5470 5552
5471 5553
5472 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) { 5554 void Heap::AddGCPrologueCallback(GCPrologueCallback callback, GCType gc_type) {
5473 ASSERT(callback != NULL); 5555 ASSERT(callback != NULL);
5474 GCPrologueCallbackPair pair(callback, gc_type); 5556 GCPrologueCallbackPair pair(callback, gc_type);
5475 ASSERT(!gc_prologue_callbacks_.Contains(pair)); 5557 ASSERT(!gc_prologue_callbacks_.Contains(pair));
5476 return gc_prologue_callbacks_.Add(pair); 5558 return gc_prologue_callbacks_.Add(pair);
5477 } 5559 }
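These prologue callbacks are normally registered through the public API rather than on Heap directly. A hedged example against the v8.h of this era; the callback signature and enum names are quoted from memory and should be checked against the header.

#include <v8.h>
#include <cstdio>

// Called just before a GC of the requested type(s) starts.
static void OnGCPrologue(v8::GCType type, v8::GCCallbackFlags flags) {
  std::printf("GC prologue: type=%d flags=%d\n",
              static_cast<int>(type), static_cast<int>(flags));
}

int main() {
  // Filter to full mark-sweep collections only; kGCTypeAll is also available.
  v8::V8::AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeMarkSweepCompact);
  // ... run scripts; the callback fires before each matching GC ...
  v8::V8::RemoveGCPrologueCallback(OnGCPrologue);
  return 0;
}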
5478 5560
(...skipping 182 matching lines...)
5661 } 5743 }
5662 5744
5663 5745
5664 class HeapObjectsFilter { 5746 class HeapObjectsFilter {
5665 public: 5747 public:
5666 virtual ~HeapObjectsFilter() {} 5748 virtual ~HeapObjectsFilter() {}
5667 virtual bool SkipObject(HeapObject* object) = 0; 5749 virtual bool SkipObject(HeapObject* object) = 0;
5668 }; 5750 };
5669 5751
5670 5752
5671 class FreeListNodesFilter : public HeapObjectsFilter {
5672 public:
5673 FreeListNodesFilter() {
5674 MarkFreeListNodes();
5675 }
5676
5677 bool SkipObject(HeapObject* object) {
5678 if (object->IsMarked()) {
5679 object->ClearMark();
5680 return true;
5681 } else {
5682 return false;
5683 }
5684 }
5685
5686 private:
5687 void MarkFreeListNodes() {
5688 Heap* heap = HEAP;
5689 heap->old_pointer_space()->MarkFreeListNodes();
5690 heap->old_data_space()->MarkFreeListNodes();
5691 MarkCodeSpaceFreeListNodes(heap);
5692 heap->map_space()->MarkFreeListNodes();
5693 heap->cell_space()->MarkFreeListNodes();
5694 }
5695
5696 void MarkCodeSpaceFreeListNodes(Heap* heap) {
5697 // For code space, using FreeListNode::IsFreeListNode is OK.
5698 HeapObjectIterator iter(heap->code_space());
5699 for (HeapObject* obj = iter.next_object();
5700 obj != NULL;
5701 obj = iter.next_object()) {
5702 if (FreeListNode::IsFreeListNode(obj)) obj->SetMark();
5703 }
5704 }
5705
5706 AssertNoAllocation no_alloc;
5707 };
5708
5709
5710 class UnreachableObjectsFilter : public HeapObjectsFilter { 5753 class UnreachableObjectsFilter : public HeapObjectsFilter {
5711 public: 5754 public:
5712 UnreachableObjectsFilter() { 5755 UnreachableObjectsFilter() {
5713 MarkUnreachableObjects(); 5756 MarkUnreachableObjects();
5714 } 5757 }
5715 5758
5716 bool SkipObject(HeapObject* object) { 5759 bool SkipObject(HeapObject* object) {
5717 if (object->IsMarked()) { 5760 if (IntrusiveMarking::IsMarked(object)) {
5718 object->ClearMark(); 5761 IntrusiveMarking::ClearMark(object);
5719 return true; 5762 return true;
5720 } else { 5763 } else {
5721 return false; 5764 return false;
5722 } 5765 }
5723 } 5766 }
5724 5767
5725 private: 5768 private:
5726 class UnmarkingVisitor : public ObjectVisitor { 5769 class UnmarkingVisitor : public ObjectVisitor {
5727 public: 5770 public:
5728 UnmarkingVisitor() : list_(10) {} 5771 UnmarkingVisitor() : list_(10) {}
5729 5772
5730 void VisitPointers(Object** start, Object** end) { 5773 void VisitPointers(Object** start, Object** end) {
5731 for (Object** p = start; p < end; p++) { 5774 for (Object** p = start; p < end; p++) {
5732 if (!(*p)->IsHeapObject()) continue; 5775 if (!(*p)->IsHeapObject()) continue;
5733 HeapObject* obj = HeapObject::cast(*p); 5776 HeapObject* obj = HeapObject::cast(*p);
5734 if (obj->IsMarked()) { 5777 if (IntrusiveMarking::IsMarked(obj)) {
5735 obj->ClearMark(); 5778 IntrusiveMarking::ClearMark(obj);
5736 list_.Add(obj); 5779 list_.Add(obj);
5737 } 5780 }
5738 } 5781 }
5739 } 5782 }
5740 5783
5741 bool can_process() { return !list_.is_empty(); } 5784 bool can_process() { return !list_.is_empty(); }
5742 5785
5743 void ProcessNext() { 5786 void ProcessNext() {
5744 HeapObject* obj = list_.RemoveLast(); 5787 HeapObject* obj = list_.RemoveLast();
5745 obj->Iterate(this); 5788 obj->Iterate(this);
5746 } 5789 }
5747 5790
5748 private: 5791 private:
5749 List<HeapObject*> list_; 5792 List<HeapObject*> list_;
5750 }; 5793 };
5751 5794
5752 void MarkUnreachableObjects() { 5795 void MarkUnreachableObjects() {
5753 HeapIterator iterator; 5796 HeapIterator iterator;
5754 for (HeapObject* obj = iterator.next(); 5797 for (HeapObject* obj = iterator.next();
5755 obj != NULL; 5798 obj != NULL;
5756 obj = iterator.next()) { 5799 obj = iterator.next()) {
5757 obj->SetMark(); 5800 IntrusiveMarking::SetMark(obj);
5758 } 5801 }
5759 UnmarkingVisitor visitor; 5802 UnmarkingVisitor visitor;
5760 HEAP->IterateRoots(&visitor, VISIT_ALL); 5803 HEAP->IterateRoots(&visitor, VISIT_ALL);
5761 while (visitor.can_process()) 5804 while (visitor.can_process())
5762 visitor.ProcessNext(); 5805 visitor.ProcessNext();
5763 } 5806 }
5764 5807
5765 AssertNoAllocation no_alloc; 5808 AssertNoAllocation no_alloc;
5766 }; 5809 };
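The filter works by intrusively marking every heap object, then clearing the mark on everything reachable from the roots; whatever stays marked is unreachable, and SkipObject hides it from the iterator. A minimal sketch of that mark-then-unmark idea over a toy object graph, with no V8 types involved:

#include <cstddef>
#include <cstdio>
#include <vector>

struct Node {
  bool marked;
  std::vector<Node*> children;
};

// Clear marks on everything reachable from the given node (depth-first).
static void UnmarkReachable(Node* node) {
  if (node == NULL || !node->marked) return;  // absent or already visited
  node->marked = false;
  for (size_t i = 0; i < node->children.size(); i++) {
    UnmarkReachable(node->children[i]);
  }
}

int main() {
  // Step 1: "mark" every object up front (the initializers do it here).
  Node a = {true}, b = {true}, c = {true}, garbage = {true};
  a.children.push_back(&b);
  b.children.push_back(&c);

  // Step 2: unmark everything reachable from the root set ({a} here).
  UnmarkReachable(&a);

  // Step 3: objects still marked are unreachable; a filter would skip them.
  Node* all[] = {&a, &b, &c, &garbage};
  for (int i = 0; i < 4; i++) {
    std::printf("node %d: %s\n", i,
                all[i]->marked ? "unreachable (skipped)" : "reachable");
  }
  return 0;
}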
5767 5810
(...skipping 13 matching lines...)
5781 5824
5782 5825
5783 HeapIterator::~HeapIterator() { 5826 HeapIterator::~HeapIterator() {
5784 Shutdown(); 5827 Shutdown();
5785 } 5828 }
5786 5829
5787 5830
5788 void HeapIterator::Init() { 5831 void HeapIterator::Init() {
5789 // Start the iteration. 5832 // Start the iteration.
5790 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator : 5833 space_iterator_ = filtering_ == kNoFiltering ? new SpaceIterator :
5791 new SpaceIterator(MarkCompactCollector::SizeOfMarkedObject); 5834 new SpaceIterator(Isolate::Current()->heap()->
5835 GcSafeSizeOfOldObjectFunction());
5792 switch (filtering_) { 5836 switch (filtering_) {
5793 case kFilterFreeListNodes: 5837 case kFilterFreeListNodes:
5794 filter_ = new FreeListNodesFilter; 5838 // TODO(gc): Not handled.
5795 break; 5839 break;
5796 case kFilterUnreachable: 5840 case kFilterUnreachable:
5797 filter_ = new UnreachableObjectsFilter; 5841 filter_ = new UnreachableObjectsFilter;
5798 break; 5842 break;
5799 default: 5843 default:
5800 break; 5844 break;
5801 } 5845 }
5802 object_iterator_ = space_iterator_->next(); 5846 object_iterator_ = space_iterator_->next();
5803 } 5847 }
5804 5848
(...skipping 116 matching lines...)
5921 MarkVisitor mark_visitor(this); 5965 MarkVisitor mark_visitor(this);
5922 MarkRecursively(root, &mark_visitor); 5966 MarkRecursively(root, &mark_visitor);
5923 5967
5924 UnmarkVisitor unmark_visitor(this); 5968 UnmarkVisitor unmark_visitor(this);
5925 UnmarkRecursively(root, &unmark_visitor); 5969 UnmarkRecursively(root, &unmark_visitor);
5926 5970
5927 ProcessResults(); 5971 ProcessResults();
5928 } 5972 }
5929 5973
5930 5974
5975 static bool SafeIsGlobalContext(HeapObject* obj) {
5976 return obj->map() == obj->GetHeap()->raw_unchecked_global_context_map();
5977 }
5978
5979
5931 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) { 5980 void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
5932 if (!(*p)->IsHeapObject()) return; 5981 if (!(*p)->IsHeapObject()) return;
5933 5982
5934 HeapObject* obj = HeapObject::cast(*p); 5983 HeapObject* obj = HeapObject::cast(*p);
5935 5984
5936 Object* map = obj->map(); 5985 Object* map = obj->map();
5937 5986
5938 if (!map->IsHeapObject()) return; // visited before 5987 if (!map->IsHeapObject()) return; // visited before
5939 5988
5940 if (found_target_in_trace_) return; // stop if target found 5989 if (found_target_in_trace_) return; // stop if target found
5941 object_stack_.Add(obj); 5990 object_stack_.Add(obj);
5942 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) || 5991 if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
5943 (obj == search_target_)) { 5992 (obj == search_target_)) {
5944 found_target_in_trace_ = true; 5993 found_target_in_trace_ = true;
5945 found_target_ = true; 5994 found_target_ = true;
5946 return; 5995 return;
5947 } 5996 }
5948 5997
5949 bool is_global_context = obj->IsGlobalContext(); 5998 bool is_global_context = SafeIsGlobalContext(obj);
5950 5999
5951 // not visited yet 6000 // not visited yet
5952 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map)); 6001 Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
5953 6002
5954 Address map_addr = map_p->address(); 6003 Address map_addr = map_p->address();
5955 6004
5956 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag)); 6005 obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
5957 6006
5958 // Scan the object body. 6007 // Scan the object body.
5959 if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) { 6008 if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
(...skipping 87 matching lines...)
6047 } 6096 }
6048 #endif 6097 #endif
6049 6098
6050 6099
6051 static intptr_t CountTotalHolesSize() { 6100 static intptr_t CountTotalHolesSize() {
6052 intptr_t holes_size = 0; 6101 intptr_t holes_size = 0;
6053 OldSpaces spaces; 6102 OldSpaces spaces;
6054 for (OldSpace* space = spaces.next(); 6103 for (OldSpace* space = spaces.next();
6055 space != NULL; 6104 space != NULL;
6056 space = spaces.next()) { 6105 space = spaces.next()) {
6057 holes_size += space->Waste() + space->AvailableFree(); 6106 holes_size += space->Waste() + space->Available();
6058 } 6107 }
6059 return holes_size; 6108 return holes_size;
6060 } 6109 }
6061 6110
6062 6111
6063 GCTracer::GCTracer(Heap* heap) 6112 GCTracer::GCTracer(Heap* heap)
6064 : start_time_(0.0), 6113 : start_time_(0.0),
6065 start_size_(0), 6114 start_size_(0),
6066 gc_count_(0), 6115 gc_count_(0),
6067 full_gc_count_(0), 6116 full_gc_count_(0),
6068 is_compacting_(false),
6069 marked_count_(0),
6070 allocated_since_last_gc_(0), 6117 allocated_since_last_gc_(0),
6071 spent_in_mutator_(0), 6118 spent_in_mutator_(0),
6072 promoted_objects_size_(0), 6119 promoted_objects_size_(0),
6073 heap_(heap) { 6120 heap_(heap) {
6074 // These two fields reflect the state of the previous full collection.
6075 // Set them before they are changed by the collector.
6076 previous_has_compacted_ = heap_->mark_compact_collector_.HasCompacted();
6077 previous_marked_count_ =
6078 heap_->mark_compact_collector_.previous_marked_count();
6079 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return; 6121 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6080 start_time_ = OS::TimeCurrentMillis(); 6122 start_time_ = OS::TimeCurrentMillis();
6081 start_size_ = heap_->SizeOfObjects(); 6123 start_size_ = heap_->SizeOfObjects();
6082 6124
6083 for (int i = 0; i < Scope::kNumberOfScopes; i++) { 6125 for (int i = 0; i < Scope::kNumberOfScopes; i++) {
6084 scopes_[i] = 0; 6126 scopes_[i] = 0;
6085 } 6127 }
6086 6128
6087 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(); 6129 in_free_list_or_wasted_before_gc_ = CountTotalHolesSize();
6088 6130
6089 allocated_since_last_gc_ = 6131 allocated_since_last_gc_ =
6090 heap_->SizeOfObjects() - heap_->alive_after_last_gc_; 6132 heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
6091 6133
6092 if (heap_->last_gc_end_timestamp_ > 0) { 6134 if (heap_->last_gc_end_timestamp_ > 0) {
6093 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0); 6135 spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
6094 } 6136 }
6137
6138 steps_count_ = heap_->incremental_marking()->steps_count();
6139 steps_took_ = heap_->incremental_marking()->steps_took();
6140 longest_step_ = heap_->incremental_marking()->longest_step();
6141 steps_count_since_last_gc_ =
6142 heap_->incremental_marking()->steps_count_since_last_gc();
6143 steps_took_since_last_gc_ =
6144 heap_->incremental_marking()->steps_took_since_last_gc();
6095 } 6145 }
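Several of the numbers gathered here, notably spent_in_mutator_ and allocated_since_last_gc_, are plain deltas against state saved when the previous collection finished. A small sketch of that bookkeeping pattern; the names and units are illustrative, not V8's:

#include <cstdio>

// Illustrative tracer state: what the previous collection left behind.
struct TracerState {
  double last_gc_end_ms;     // wall-clock time when the last GC finished
  long alive_after_last_gc;  // live bytes measured right after that GC
};

// At the start of a new GC, derive the "between GCs" numbers as deltas.
static void OnGCStart(const TracerState& prev, double now_ms, long live_bytes) {
  double spent_in_mutator =
      prev.last_gc_end_ms > 0 ? now_ms - prev.last_gc_end_ms : 0;
  long allocated_since_last_gc = live_bytes - prev.alive_after_last_gc;
  std::printf("mutator=%.0fms allocated=%ld bytes\n",
              spent_in_mutator, allocated_since_last_gc);
}

int main() {
  TracerState prev = {1000.0, 4 * 1024 * 1024};
  OnGCStart(prev, 1180.0, 5 * 1024 * 1024);  // 180 ms later, 1 MB allocated
  return 0;
}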
6096 6146
6097 6147
6098 GCTracer::~GCTracer() { 6148 GCTracer::~GCTracer() {
6099 // Printf ONE line iff flag is set. 6149 // Printf ONE line iff flag is set.
6100 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return; 6150 if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
6101 6151
6102 bool first_gc = (heap_->last_gc_end_timestamp_ == 0); 6152 bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
6103 6153
6104 heap_->alive_after_last_gc_ = heap_->SizeOfObjects(); 6154 heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
(...skipping 14 matching lines...)
6119 6169
6120 if (!FLAG_trace_gc_nvp) { 6170 if (!FLAG_trace_gc_nvp) {
6121 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]); 6171 int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
6122 6172
6123 PrintF("%s %.1f -> %.1f MB, ", 6173 PrintF("%s %.1f -> %.1f MB, ",
6124 CollectorString(), 6174 CollectorString(),
6125 static_cast<double>(start_size_) / MB, 6175 static_cast<double>(start_size_) / MB,
6126 SizeOfHeapObjects()); 6176 SizeOfHeapObjects());
6127 6177
6128 if (external_time > 0) PrintF("%d / ", external_time); 6178 if (external_time > 0) PrintF("%d / ", external_time);
6129 PrintF("%d ms.\n", time); 6179 PrintF("%d ms", time);
6180 if (steps_count_ > 0) {
6181 if (collector_ == SCAVENGER) {
6182 PrintF(" (+ %d ms in %d steps since last GC)",
6183 static_cast<int>(steps_took_since_last_gc_),
6184 steps_count_since_last_gc_);
6185 } else {
6186 PrintF(" (+ %d ms in %d steps since start of marking, "
6187 "biggest step %f ms)",
6188 static_cast<int>(steps_took_),
6189 steps_count_,
6190 longest_step_);
6191 }
6192 }
6193 PrintF(".\n");
6130 } else { 6194 } else {
6131 PrintF("pause=%d ", time); 6195 PrintF("pause=%d ", time);
6132 PrintF("mutator=%d ", 6196 PrintF("mutator=%d ",
6133 static_cast<int>(spent_in_mutator_)); 6197 static_cast<int>(spent_in_mutator_));
6134 6198
6135 PrintF("gc="); 6199 PrintF("gc=");
6136 switch (collector_) { 6200 switch (collector_) {
6137 case SCAVENGER: 6201 case SCAVENGER:
6138 PrintF("s"); 6202 PrintF("s");
6139 break; 6203 break;
6140 case MARK_COMPACTOR: 6204 case MARK_COMPACTOR:
6141 PrintF("%s", 6205 PrintF("ms");
6142 heap_->mark_compact_collector_.HasCompacted() ? "mc" : "ms");
6143 break; 6206 break;
6144 default: 6207 default:
6145 UNREACHABLE(); 6208 UNREACHABLE();
6146 } 6209 }
6147 PrintF(" "); 6210 PrintF(" ");
6148 6211
6149 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL])); 6212 PrintF("external=%d ", static_cast<int>(scopes_[Scope::EXTERNAL]));
6150 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK])); 6213 PrintF("mark=%d ", static_cast<int>(scopes_[Scope::MC_MARK]));
6151 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP])); 6214 PrintF("sweep=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP]));
6152 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE])); 6215 PrintF("sweepns=%d ", static_cast<int>(scopes_[Scope::MC_SWEEP_NEWSPACE]));
6153 PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT])); 6216 PrintF("compact=%d ", static_cast<int>(scopes_[Scope::MC_COMPACT]));
6154 6217
6155 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_); 6218 PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_size_);
6156 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects()); 6219 PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
6157 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ", 6220 PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
6158 in_free_list_or_wasted_before_gc_); 6221 in_free_list_or_wasted_before_gc_);
6159 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize()); 6222 PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize());
6160 6223
6161 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_); 6224 PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
6162 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_); 6225 PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
6163 6226
6227 if (collector_ == SCAVENGER) {
6228 PrintF("stepscount=%d ", steps_count_since_last_gc_);
6229 PrintF("stepstook=%d ", static_cast<int>(steps_took_since_last_gc_));
6230 } else {
6231 PrintF("stepscount=%d ", steps_count_);
6232 PrintF("stepstook=%d ", static_cast<int>(steps_took_));
6233 }
6234
6164 PrintF("\n"); 6235 PrintF("\n");
6165 } 6236 }
6166 6237
6167 heap_->PrintShortHeapStatistics(); 6238 heap_->PrintShortHeapStatistics();
6168 } 6239 }
6169 6240
6170 6241
6171 const char* GCTracer::CollectorString() { 6242 const char* GCTracer::CollectorString() {
6172 switch (collector_) { 6243 switch (collector_) {
6173 case SCAVENGER: 6244 case SCAVENGER:
6174 return "Scavenge"; 6245 return "Scavenge";
6175 case MARK_COMPACTOR: 6246 case MARK_COMPACTOR:
6176 return heap_->mark_compact_collector_.HasCompacted() ? "Mark-compact" 6247 return "Mark-sweep";
6177 : "Mark-sweep";
6178 } 6248 }
6179 return "Unknown GC"; 6249 return "Unknown GC";
6180 } 6250 }
6181 6251
6182 6252
6183 int KeyedLookupCache::Hash(Map* map, String* name) { 6253 int KeyedLookupCache::Hash(Map* map, String* name) {
6184 // Uses only lower 32 bits if pointers are larger. 6254 // Uses only lower 32 bits if pointers are larger.
6185 uintptr_t addr_hash = 6255 uintptr_t addr_hash =
6186 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift; 6256 static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> kMapHashShift;
6187 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask); 6257 return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
(...skipping 86 matching lines...)
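The KeyedLookupCache::Hash function just above mixes the map's address bits with the string's hash, then masks down to a power-of-two table capacity. A self-contained sketch of the same shape; the shift, capacity, and the pointer/name stand-ins are arbitrary choices for this note, and the FNV-1a helper merely stands in for String::Hash.

#include <cstdint>
#include <cstdio>

static const int kMapHashShift = 2;    // skip always-zero alignment bits
static const uint32_t kCapacity = 64;  // table size: must be a power of two
static const uint32_t kCapacityMask = kCapacity - 1;

// Stand-in for String::Hash(): FNV-1a over the characters.
static uint32_t NameHash(const char* s) {
  uint32_t h = 2166136261u;
  for (; *s != '\0'; s++) h = (h ^ static_cast<uint8_t>(*s)) * 16777619u;
  return h;
}

// Same shape as KeyedLookupCache::Hash: pointer bits XOR name hash, masked.
static uint32_t CacheHash(const void* map_like, const char* name) {
  uint32_t addr_hash =
      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map_like)) >>
      kMapHashShift;
  return (addr_hash ^ NameHash(name)) & kCapacityMask;
}

int main() {
  int dummy_map = 0;  // any aligned object works as a fake "map" address
  std::printf("slot=%u\n", CacheHash(&dummy_map, "length"));
  return 0;
}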
6274 Verify(); 6344 Verify();
6275 } 6345 }
6276 6346
6277 6347
6278 void ExternalStringTable::TearDown() { 6348 void ExternalStringTable::TearDown() {
6279 new_space_strings_.Free(); 6349 new_space_strings_.Free();
6280 old_space_strings_.Free(); 6350 old_space_strings_.Free();
6281 } 6351 }
6282 6352
6283 6353
6354 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
6355 chunk->set_next_chunk(chunks_queued_for_free_);
6356 chunks_queued_for_free_ = chunk;
6357 }
6358
6359
6360 void Heap::FreeQueuedChunks() {
6361 if (chunks_queued_for_free_ == NULL) return;
6362 MemoryChunk* next;
6363 MemoryChunk* chunk;
6364 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6365 next = chunk->next_chunk();
6366 chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6367
6368 if (chunk->owner()->identity() == LO_SPACE) {
6369 // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
6370 // If FromAnyPointerAddress encounters a slot that belongs to a large
6371 // chunk queued for deletion it will fail to find the chunk because
6372 // it tries to perform a search in the list of pages owned by the large
6373 // object space and queued chunks were detached from that list.
6374 // To work around this we split the large chunk into normal kPageSize-aligned
6375 // pieces and initialize the owner field and flags of every piece.
6376 // If FromAnyPointerAddress encounters a slot that belongs to one of
6377 // these smaller pieces it will treat it as a slot on a normal Page.
6378 MemoryChunk* inner = MemoryChunk::FromAddress(
6379 chunk->address() + Page::kPageSize);
6380 MemoryChunk* inner_last = MemoryChunk::FromAddress(
6381 chunk->address() + chunk->size() - 1);
6382 while (inner <= inner_last) {
6383 // Size of a large chunk is always a multiple of
6384 // OS::AllocationAlignment() so there is always
6385 // enough space for a fake MemoryChunk header.
6386 inner->set_owner(lo_space());
6387 inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
6388 inner = MemoryChunk::FromAddress(
6389 inner->address() + Page::kPageSize);
6390 }
6391 }
6392 }
6393 isolate_->heap()->store_buffer()->Compact();
6394 isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
6395 for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
6396 next = chunk->next_chunk();
6397 isolate_->memory_allocator()->Free(chunk);
6398 }
6399 chunks_queued_for_free_ = NULL;
6400 }
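The workaround spelled out in the comments above amounts to walking a queued large chunk in page-sized strides and stamping a small header at each stride, so that rounding any interior address down to a page boundary still finds an owner and the ABOUT_TO_BE_FREED flag. A toy version of that address arithmetic; kPageSize, FakeHeader, and the owner id are placeholders rather than V8's MemoryChunk layout.

#include <cassert>
#include <cstdint>
#include <cstdlib>

static const size_t kPageSize = 4096;  // placeholder page size
static const uintptr_t kPageMask = ~static_cast<uintptr_t>(kPageSize - 1);

struct FakeHeader {
  int owner_id;
  bool about_to_be_freed;
};

// Mimic MemoryChunk::FromAddress: round an interior pointer down to the page
// start, where the per-page header lives.
static FakeHeader* HeaderFromAddress(const void* addr) {
  return reinterpret_cast<FakeHeader*>(
      reinterpret_cast<uintptr_t>(addr) & kPageMask);
}

int main() {
  // Allocate a "large chunk" spanning several pages and align it manually.
  const size_t kChunkSize = 4 * kPageSize;
  char* raw = static_cast<char*>(malloc(kChunkSize + kPageSize));
  if (raw == NULL) return 1;
  char* chunk = reinterpret_cast<char*>(
      (reinterpret_cast<uintptr_t>(raw) + kPageSize - 1) & kPageMask);

  // Stamp a header at every page boundary inside the chunk, much like the
  // loop above does for the inner pieces of a queued large chunk.
  for (size_t offset = 0; offset < kChunkSize; offset += kPageSize) {
    FakeHeader* piece = reinterpret_cast<FakeHeader*>(chunk + offset);
    piece->owner_id = 1;              // stand-in for "owned by LO_SPACE"
    piece->about_to_be_freed = true;  // stand-in for the ABOUT_TO_BE_FREED flag
  }

  // Any interior address now resolves to a header carrying the flag.
  const char* interior = chunk + 2 * kPageSize + 123;
  assert(HeaderFromAddress(interior)->about_to_be_freed);

  free(raw);
  return 0;
}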
6401
6284 } } // namespace v8::internal 6402 } } // namespace v8::internal