Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 19971002: Rip out infrastructure for deferred stack trace formatting. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 5 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 565 matching lines...)
 #undef UPDATE_COUNTERS_FOR_SPACE
 #undef UPDATE_FRAGMENTATION_FOR_SPACE
 #undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE

 #if defined(DEBUG)
   ReportStatisticsAfterGC();
 #endif  // DEBUG
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate_->debug()->AfterGarbageCollection();
 #endif  // ENABLE_DEBUGGER_SUPPORT
-
-  error_object_list_.DeferredFormatStackTrace(isolate());
 }


 void Heap::CollectAllGarbage(int flags, const char* gc_reason) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   mark_compact_collector_.SetFlags(flags);
   CollectGarbage(OLD_POINTER_SPACE, gc_reason);
   mark_compact_collector_.SetFlags(kNoGCFlags);
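Note: CollectGarbage chooses the collector from the space argument: NEW_SPACE triggers only a scavenge of the young generation, while any old-generation space runs the full mark-compact collector. That is why the comment above rules out NEW_SPACE. A minimal standalone sketch of the dispatch, with toy types rather than V8's actual API:

    #include <cstdio>

    // Toy stand-ins; V8's real Heap::CollectGarbage takes more parameters.
    enum Space { NEW_SPACE, OLD_POINTER_SPACE, OLD_DATA_SPACE };

    static void Scavenge()    { std::puts("scavenge: young generation only"); }
    static void MarkCompact() { std::puts("mark-compact: full heap"); }

    static void CollectGarbage(Space space) {
      // Hypothetical dispatch mirroring the comment above: only NEW_SPACE
      // selects the scavenger; every other space forces a full collection.
      if (space == NEW_SPACE) {
        Scavenge();
      } else {
        MarkCompact();
      }
    }

    int main() {
      CollectGarbage(OLD_POINTER_SPACE);  // what CollectAllGarbage effectively does
      return 0;
    }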
(...skipping 823 matching lines...)

   isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
       &IsUnscavengedHeapObject);
   isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
       &scavenge_visitor);
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);

   UpdateNewSpaceReferencesInExternalStringTable(
       &UpdateNewSpaceReferenceInExternalStringTableEntry);

-  error_object_list_.UpdateReferencesInNewSpace(this);
-
   promotion_queue_.Destroy();

   if (!FLAG_watch_ic_patching) {
     isolate()->runtime_profiler()->UpdateSamplesAfterScavenge();
   }
   incremental_marking()->UpdateMarkingDequeAfterScavenge();

   ScavengeWeakObjectRetainer weak_object_retainer(this);
   ProcessWeakReferences(&weak_object_retainer);

(...skipping 5125 matching lines...)
 }


 void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
   v->Synchronize(VisitorSynchronization::kStringTable);
   if (mode != VISIT_ALL_IN_SCAVENGE &&
       mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
     // Scavenge collections have special processing for this.
     external_string_table_.Iterate(v);
-    error_object_list_.Iterate(v);
   }
   v->Synchronize(VisitorSynchronization::kExternalStringsTable);
 }


 void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
   v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
   v->Synchronize(VisitorSynchronization::kStrongRootList);

   v->VisitPointer(BitCast<Object**>(&hidden_string_));
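Note: IterateWeakRoots hands each root slot to an ObjectVisitor, so one traversal serves marking, scavenging, and verification alike; the deleted error_object_list_.Iterate(v) call simply unregisters the list as a weak root. A self-contained sketch of the visitor idiom, with simplified types rather than V8's:

    #include <cstdio>
    #include <vector>

    struct Object {};  // opaque heap-object stand-in

    // Simplified version of the visitor interface used above.
    struct ObjectVisitor {
      virtual ~ObjectVisitor() {}
      virtual void VisitPointer(Object** p) = 0;
    };

    // A root-owning table exposes its slots via Iterate(), exactly so
    // that different GC phases can reuse one traversal.
    struct RootTable {
      std::vector<Object*> slots;
      void Iterate(ObjectVisitor* v) {
        for (size_t i = 0; i < slots.size(); ++i) v->VisitPointer(&slots[i]);
      }
    };

    struct CountingVisitor : ObjectVisitor {
      int seen = 0;
      void VisitPointer(Object** /*p*/) override { ++seen; }
    };

    int main() {
      Object a, b;
      RootTable table;
      table.slots = { &a, &b };
      CountingVisitor v;
      table.Iterate(&v);
      std::printf("visited %d root slots\n", v.seen);
      return 0;
    }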
(...skipping 379 matching lines...)
     PrintF("total_sweeping_time=%.1f ", sweeping_time());
     PrintF("\n\n");
   }

   TearDownArrayBuffers();

   isolate_->global_handles()->TearDown();

   external_string_table_.TearDown();

-  error_object_list_.TearDown();
-
   new_space_.TearDown();

   if (old_pointer_space_ != NULL) {
     old_pointer_space_->TearDown();
     delete old_pointer_space_;
     old_pointer_space_ = NULL;
   }

   if (old_data_space_ != NULL) {
     old_data_space_->TearDown();
(...skipping 932 matching lines...)
 #endif
 }


 void ExternalStringTable::TearDown() {
   new_space_strings_.Free();
   old_space_strings_.Free();
 }

-// Update all references.
-void ErrorObjectList::UpdateReferences() {
-  for (int i = 0; i < list_.length(); i++) {
-    HeapObject* object = HeapObject::cast(list_[i]);
-    MapWord first_word = object->map_word();
-    if (first_word.IsForwardingAddress()) {
-      list_[i] = first_word.ToForwardingAddress();
-    }
-  }
-}
-
-
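Note: UpdateReferences relies on the forwarding-address protocol of a moving collector: when an object is relocated, its new address is written into the old object's map word, and fixup passes chase that pointer. A standalone sketch of the idiom, using a toy header field instead of V8's MapWord encoding:

    #include <cassert>
    #include <vector>

    // Toy object header: a moving GC would pack this into the map word.
    struct HeapObject {
      HeapObject* forwarding = nullptr;  // non-null once the object has moved
      bool IsForwardingAddress() const { return forwarding != nullptr; }
    };

    // Same shape as the deleted loop: chase forwarding pointers in place.
    static void UpdateReferences(std::vector<HeapObject*>* list) {
      for (size_t i = 0; i < list->size(); ++i) {
        if ((*list)[i]->IsForwardingAddress()) {
          (*list)[i] = (*list)[i]->forwarding;
        }
      }
    }

    int main() {
      HeapObject old_location, new_location;
      old_location.forwarding = &new_location;  // pretend the GC moved it
      std::vector<HeapObject*> list = { &old_location };
      UpdateReferences(&list);
      assert(list[0] == &new_location);
      return 0;
    }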
-// Unforwarded objects in new space are dead and removed from the list.
-void ErrorObjectList::UpdateReferencesInNewSpace(Heap* heap) {
-  if (list_.is_empty()) return;
-  if (!nested_) {
-    int write_index = 0;
-    for (int i = 0; i < list_.length(); i++) {
-      MapWord first_word = HeapObject::cast(list_[i])->map_word();
-      if (first_word.IsForwardingAddress()) {
-        list_[write_index++] = first_word.ToForwardingAddress();
-      }
-    }
-    list_.Rewind(write_index);
-  } else {
-    // If a GC is triggered during DeferredFormatStackTrace, we do not move
-    // objects in the list, but only remove dead ones, so as not to confuse
-    // the loop in DeferredFormatStackTrace.
-    for (int i = 0; i < list_.length(); i++) {
-      MapWord first_word = HeapObject::cast(list_[i])->map_word();
-      list_[i] = first_word.IsForwardingAddress()
-                     ? first_word.ToForwardingAddress()
-                     : heap->the_hole_value();
-    }
-  }
-}
-
-
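Note: the function above picks between two cleanup strategies. The normal path compacts live entries with a write index and rewinds the list; the nested path instead overwrites dead entries with a hole value, so that indices held by an in-progress DeferredFormatStackTrace loop stay valid. A sketch of both strategies under toy types, with nullptr standing in for the_hole_value():

    #include <cstdio>
    #include <vector>

    typedef int* Entry;
    static const Entry kHole = nullptr;  // stand-in for heap->the_hole_value()

    // Nested path: replace dead entries in place; indices do not shift.
    static void PunchHoles(std::vector<Entry>* list) {
      for (size_t i = 0; i < list->size(); ++i) {
        if ((*list)[i] != kHole && *(*list)[i] == 0) (*list)[i] = kHole;
      }
    }

    // Normal path: squeeze out holes with a write index, then shrink,
    // analogous to list_.Rewind(write_index).
    static void Compact(std::vector<Entry>* list) {
      size_t write_index = 0;
      for (size_t i = 0; i < list->size(); ++i) {
        if ((*list)[i] != kHole) (*list)[write_index++] = (*list)[i];
      }
      list->resize(write_index);
    }

    int main() {
      int live = 1, dead = 0;
      std::vector<Entry> list = { &live, &dead, &live };
      PunchHoles(&list);  // safe while another loop still holds indices
      Compact(&list);     // later, reclaim the slack
      std::printf("%zu live entries\n", list.size());
      return 0;
    }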
-void ErrorObjectList::DeferredFormatStackTrace(Isolate* isolate) {
-  // If formatting the stack trace causes a GC, this method will be
-  // recursively called. In that case, skip the recursive call, since
-  // the loop modifies the list while iterating over it.
-  if (nested_ || list_.is_empty() || isolate->has_pending_exception()) return;
-  nested_ = true;
-  HandleScope scope(isolate);
-  Handle<String> stack_key = isolate->factory()->stack_string();
-  int write_index = 0;
-  int budget = kBudgetPerGC;
-  for (int i = 0; i < list_.length(); i++) {
-    Object* object = list_[i];
-    JSFunction* getter_fun;
-
-    { DisallowHeapAllocation no_gc;
-      // Skip possible holes in the list.
-      if (object->IsTheHole()) continue;
-      if (isolate->heap()->InNewSpace(object) || budget == 0) {
-        list_[write_index++] = object;
-        continue;
-      }
-
-      // Check whether the stack property is backed by the original getter.
-      LookupResult lookup(isolate);
-      JSObject::cast(object)->LocalLookupRealNamedProperty(*stack_key, &lookup);
-      if (!lookup.IsFound() || lookup.type() != CALLBACKS) continue;
-      Object* callback = lookup.GetCallbackObject();
-      if (!callback->IsAccessorPair()) continue;
-      Object* getter_obj = AccessorPair::cast(callback)->getter();
-      if (!getter_obj->IsJSFunction()) continue;
-      getter_fun = JSFunction::cast(getter_obj);
-      String* key = isolate->heap()->hidden_stack_trace_string();
-      Object* value = getter_fun->GetHiddenProperty(key);
-      if (key != value) continue;
-    }
-
-    budget--;
-    HandleScope scope(isolate);
-    bool has_exception = false;
-#ifdef DEBUG
-    Handle<Map> map(HeapObject::cast(object)->map(), isolate);
-#endif
-    Handle<Object> object_handle(object, isolate);
-    Handle<Object> getter_handle(getter_fun, isolate);
-    Execution::Call(getter_handle, object_handle, 0, NULL, &has_exception);
-    ASSERT(*map == HeapObject::cast(*object_handle)->map());
-    if (has_exception) {
-      // Hit an exception (most likely a stack overflow).
-      // Wrap up this pass and retry after another GC.
-      isolate->clear_pending_exception();
-      // We use the handle since calling the getter might have caused a GC.
-      list_[write_index++] = *object_handle;
-      budget = 0;
-    }
-  }
-  list_.Rewind(write_index);
-  list_.Trim();
-  nested_ = false;
-}
-
-
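Note: two defensive patterns carry the function above. The nested_ flag turns recursive invocations into no-ops (the stack-trace getter can itself allocate and trigger a GC), and kBudgetPerGC bounds how many expensive getter calls one GC epilogue pays for; unprocessed entries are retained for the next pass. A stripped-down sketch of the guard-plus-budget pattern, with hypothetical work items in place of error objects:

    #include <cstdio>
    #include <vector>

    static const int kBudgetPerGC = 16;  // same idea as the deleted constant

    class DeferredWorkList {
     public:
      void Add(int item) { list_.push_back(item); }

      // Process at most kBudgetPerGC items; keep the rest for the next pass.
      void RunSome() {
        if (nested_ || list_.empty()) return;  // re-entrant calls are no-ops
        nested_ = true;
        int budget = kBudgetPerGC;
        size_t write_index = 0;
        for (size_t i = 0; i < list_.size(); ++i) {
          if (budget == 0) {
            list_[write_index++] = list_[i];  // out of budget: retain item
            continue;
          }
          budget--;
          Process(list_[i]);  // may call RunSome() again; the guard stops it
        }
        list_.resize(write_index);
        nested_ = false;
      }

     private:
      void Process(int item) { std::printf("processed %d\n", item); }
      std::vector<int> list_;
      bool nested_ = false;
    };

    int main() {
      DeferredWorkList work;
      for (int i = 0; i < 20; ++i) work.Add(i);
      work.RunSome();  // handles 16 items, keeps 4
      work.RunSome();  // finishes the remainder
      return 0;
    }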
-void ErrorObjectList::RemoveUnmarked(Heap* heap) {
-  for (int i = 0; i < list_.length(); i++) {
-    HeapObject* object = HeapObject::cast(list_[i]);
-    if (!Marking::MarkBitFrom(object).Get()) {
-      list_[i] = heap->the_hole_value();
-    }
-  }
-}
-
-
-void ErrorObjectList::TearDown() {
-  list_.Free();
-}
-
-
 void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
   chunk->set_next_chunk(chunks_queued_for_free_);
   chunks_queued_for_free_ = chunk;
 }
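Note: QueueMemoryChunkForFree is a plain intrusive-list push: each chunk carries its own next pointer, so queueing a chunk for deferred freeing requires no allocation, which matters on a GC path. A toy sketch with a simplified MemoryChunk, not V8's:

    #include <cstdio>

    struct MemoryChunk {
      MemoryChunk* next_chunk = nullptr;
      void set_next_chunk(MemoryChunk* next) { next_chunk = next; }
    };

    struct Heap {
      MemoryChunk* chunks_queued_for_free_ = nullptr;

      // Push onto the intrusive singly linked list; O(1), allocation-free.
      void QueueMemoryChunkForFree(MemoryChunk* chunk) {
        chunk->set_next_chunk(chunks_queued_for_free_);
        chunks_queued_for_free_ = chunk;
      }
    };

    int main() {
      Heap heap;
      MemoryChunk a, b;
      heap.QueueMemoryChunkForFree(&a);
      heap.QueueMemoryChunkForFree(&b);
      std::printf("head is b: %d\n", heap.chunks_queued_for_free_ == &b);
      return 0;
    }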


 void Heap::FreeQueuedChunks() {
   if (chunks_queued_for_free_ == NULL) return;
   MemoryChunk* next;
   MemoryChunk* chunk;
(...skipping 119 matching lines...)
   if (FLAG_parallel_recompilation) {
     heap_->relocation_mutex_->Lock();
 #ifdef DEBUG
     heap_->relocation_mutex_locked_by_optimizer_thread_ =
         heap_->isolate()->optimizing_compiler_thread()->IsOptimizerThread();
 #endif  // DEBUG
   }
 }

 } }  // namespace v8::internal