| OLD | NEW |
| 1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 619 matching lines...) |
| 630 if (gc_type & gc_prologue_callbacks_[i].gc_type) { | 630 if (gc_type & gc_prologue_callbacks_[i].gc_type) { |
| 631 gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags); | 631 gc_prologue_callbacks_[i].callback(gc_type, kNoGCCallbackFlags); |
| 632 } | 632 } |
| 633 } | 633 } |
| 634 | 634 |
| 635 EnsureFromSpaceIsCommitted(); | 635 EnsureFromSpaceIsCommitted(); |
| 636 | 636 |
| 637 int start_new_space_size = Heap::new_space()->Size(); | 637 int start_new_space_size = Heap::new_space()->Size(); |
| 638 | 638 |
| 639 if (collector == MARK_COMPACTOR) { | 639 if (collector == MARK_COMPACTOR) { |
| 640 if (FLAG_flush_code) { | |
| 641 // Flush all potentially unused code. | |
| 642 GCTracer::Scope gc_scope(tracer, GCTracer::Scope::MC_FLUSH_CODE); | |
| 643 FlushCode(); | |
| 644 } | |
| 645 | |
| 646 // Perform mark-sweep with optional compaction. | 640 // Perform mark-sweep with optional compaction. |
| 647 MarkCompact(tracer); | 641 MarkCompact(tracer); |
| 648 | 642 |
| 649 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() && | 643 bool high_survival_rate_during_scavenges = IsHighSurvivalRate() && |
| 650 IsStableOrIncreasingSurvivalTrend(); | 644 IsStableOrIncreasingSurvivalTrend(); |
| 651 | 645 |
| 652 UpdateSurvivalRateTrend(start_new_space_size); | 646 UpdateSurvivalRateTrend(start_new_space_size); |
| 653 | 647 |
| 654 int old_gen_size = PromotedSpaceSize(); | 648 int old_gen_size = PromotedSpaceSize(); |
| 655 old_gen_promotion_limit_ = | 649 old_gen_promotion_limit_ = |
| (...skipping 437 matching lines...) |
| 1093 typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject; | 1087 typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject; |
| 1094 | 1088 |
| 1095 table_.Register(kVisitConsString, | 1089 table_.Register(kVisitConsString, |
| 1096 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | 1090 &ObjectEvacuationStrategy<POINTER_OBJECT>:: |
| 1097 VisitSpecialized<ConsString::kSize>); | 1091 VisitSpecialized<ConsString::kSize>); |
| 1098 | 1092 |
| 1099 table_.Register(kVisitSharedFunctionInfo, | 1093 table_.Register(kVisitSharedFunctionInfo, |
| 1100 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | 1094 &ObjectEvacuationStrategy<POINTER_OBJECT>:: |
| 1101 VisitSpecialized<SharedFunctionInfo::kSize>); | 1095 VisitSpecialized<SharedFunctionInfo::kSize>); |
| 1102 | 1096 |
| | 1097 table_.Register(kVisitJSFunction, |
| | 1098 &ObjectEvacuationStrategy<POINTER_OBJECT>:: |
| | 1099 VisitSpecialized<JSFunction::kSize>); |
| | 1100 |
| 1103 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>, | 1101 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>, |
| 1104 kVisitDataObject, | 1102 kVisitDataObject, |
| 1105 kVisitDataObjectGeneric>(); | 1103 kVisitDataObjectGeneric>(); |
| 1106 | 1104 |
| 1107 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, | 1105 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, |
| 1108 kVisitJSObject, | 1106 kVisitJSObject, |
| 1109 kVisitJSObjectGeneric>(); | 1107 kVisitJSObjectGeneric>(); |
| 1110 | 1108 |
| 1111 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, | 1109 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, |
| 1112 kVisitStruct, | 1110 kVisitStruct, |
| (...skipping 1239 matching lines...) |
| 2352 reinterpret_cast<ExternalArray*>(result)->set_map( | 2350 reinterpret_cast<ExternalArray*>(result)->set_map( |
| 2353 MapForExternalArrayType(array_type)); | 2351 MapForExternalArrayType(array_type)); |
| 2354 reinterpret_cast<ExternalArray*>(result)->set_length(length); | 2352 reinterpret_cast<ExternalArray*>(result)->set_length(length); |
| 2355 reinterpret_cast<ExternalArray*>(result)->set_external_pointer( | 2353 reinterpret_cast<ExternalArray*>(result)->set_external_pointer( |
| 2356 external_pointer); | 2354 external_pointer); |
| 2357 | 2355 |
| 2358 return result; | 2356 return result; |
| 2359 } | 2357 } |
| 2360 | 2358 |
| 2361 | 2359 |
| 2362 // FlushingStackVisitor traverses all the archived threads to see if any | |
| 2363 // of their stacks contain an activation of the given code object. | |
| 2364 class FlushingStackVisitor : public ThreadVisitor { | |
| 2365 public: | |
| 2366 explicit FlushingStackVisitor(Code* code) : found_(false), code_(code) {} | |
| 2367 | |
| 2368 void VisitThread(ThreadLocalTop* top) { | |
| 2369 // If we already found the code in a previously traversed thread, we return. | |
| 2370 if (found_) return; | |
| 2371 | |
| 2372 for (StackFrameIterator it(top); !it.done(); it.Advance()) { | |
| 2373 if (code_->contains(it.frame()->pc())) { | |
| 2374 found_ = true; | |
| 2375 return; | |
| 2376 } | |
| 2377 } | |
| 2378 } | |
| 2379 bool FoundCode() { return found_; } | |
| 2380 | |
| 2381 private: | |
| 2382 bool found_; | |
| 2383 Code* code_; | |
| 2384 }; | |
| 2385 | |
| 2386 | |
| 2387 static bool CodeIsActive(Code* code) { | |
| 2388 // Check whether the code is referenced from the current stack. | |
| 2389 for (StackFrameIterator it; !it.done(); it.Advance()) { | |
| 2390 if (code->contains(it.frame()->pc())) return true; | |
| 2391 } | |
| 2392 // Iterate the archived stacks in all threads to check if | |
| 2393 // the code is referenced. | |
| 2394 FlushingStackVisitor threadvisitor(code); | |
| 2395 ThreadManager::IterateArchivedThreads(&threadvisitor); | |
| 2396 if (threadvisitor.FoundCode()) return true; | |
| 2397 return false; | |
| 2398 } | |
| 2399 | |
| 2400 | |
| 2401 static void FlushCodeForFunction(JSFunction* function) { | |
| 2402 SharedFunctionInfo* shared_info = function->shared(); | |
| 2403 | |
| 2404 // Special handling if the function and shared info objects | |
| 2405 // have different code objects. | |
| 2406 if (function->code() != shared_info->code()) { | |
| 2407 // If the shared function info has been flushed but the function has not, | |
| 2408 // we flush the function if possible. | |
| 2409 if (!shared_info->is_compiled() && function->is_compiled() && | |
| 2410 !CodeIsActive(function->code())) { | |
| 2411 function->set_code(shared_info->code()); | |
| 2412 } | |
| 2413 return; | |
| 2414 } | |
| 2415 | |
| 2416 // The function must be compiled and have its source code available | |
| 2417 // so that it can be recompiled in case it is needed again. | |
| 2418 if (!(shared_info->is_compiled() && shared_info->HasSourceCode())) return; | |
| 2419 | |
| 2420 // We never flush code for Api functions. | |
| 2421 if (shared_info->IsApiFunction()) return; | |
| 2422 | |
| 2423 // Only flush code for functions. | |
| 2424 if (shared_info->code()->kind() != Code::FUNCTION) return; | |
| 2425 | |
| 2426 // The function must allow lazy compilation. | |
| 2427 if (!shared_info->allows_lazy_compilation()) return; | |
| 2428 | |
| 2429 // If this is a full script wrapped in a function, we do not flush the code. | |
| 2430 if (shared_info->is_toplevel()) return; | |
| 2431 | |
| 2432 // If this function is in the compilation cache we do not flush the code. | |
| 2433 if (CompilationCache::HasFunction(shared_info)) return; | |
| 2434 | |
| 2435 // Check stack and archived threads for the code. | |
| 2436 if (CodeIsActive(shared_info->code())) return; | |
| 2437 | |
| 2438 // Compute the lazy compilable version of the code. | |
| 2439 Code* code = Builtins::builtin(Builtins::LazyCompile); | |
| 2440 shared_info->set_code(code); | |
| 2441 function->set_code(code); | |
| 2442 } | |
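The flush itself is only a pointer swap: both the shared function info and the function are redirected to the LazyCompile builtin, and because the shared info keeps the source, the next invocation recompiles on demand. Here is a toy model of that idea; `Code`, `SharedFunctionInfo`, and `JSFunction` below are hypothetical mocks, not V8's real objects:

```cpp
// Toy model of code flushing; all types are mocks, not V8 declarations.
#include <cassert>
#include <string>

struct Code { std::string body; };
Code kLazyCompileStub{"<lazy-compile trampoline>"};  // stands in for Builtins::LazyCompile

struct SharedFunctionInfo {
  Code* code;
  std::string source;  // retained so the function can be recompiled
};

struct JSFunction {
  SharedFunctionInfo* shared;
  Code* code;
};

// "Flushing" discards compiled code by pointing both objects at the
// lazy-compile stub; the source stays on the shared info.
void FlushCodeForFunction(JSFunction* f) {
  f->shared->code = &kLazyCompileStub;
  f->code = &kLazyCompileStub;
}

int main() {
  Code compiled{"<machine code>"};
  SharedFunctionInfo shared{&compiled, "function f() { return 1; }"};
  JSFunction fn{&shared, &compiled};
  FlushCodeForFunction(&fn);
  assert(fn.code == &kLazyCompileStub);
  assert(shared.code == &kLazyCompileStub);
}
```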
| 2443 | |
| 2444 | |
| 2445 void Heap::FlushCode() { | |
| 2446 #ifdef ENABLE_DEBUGGER_SUPPORT | |
| 2447 // Do not flush code if the debugger is loaded or there are breakpoints. | |
| 2448 if (Debug::IsLoaded() || Debug::has_break_points()) return; | |
| 2449 #endif | |
| 2450 HeapObjectIterator it(old_pointer_space()); | |
| 2451 for (HeapObject* obj = it.next(); obj != NULL; obj = it.next()) { | |
| 2452 if (obj->IsJSFunction()) { | |
| 2453 JSFunction* function = JSFunction::cast(obj); | |
| 2454 | |
| 2455 // The function must have a valid context and not be a builtin. | |
| 2456 if (function->unchecked_context()->IsContext() && | |
| 2457 !function->IsBuiltin()) { | |
| 2458 FlushCodeForFunction(function); | |
| 2459 } | |
| 2460 } | |
| 2461 } | |
| 2462 } | |
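Heap::FlushCode is then a filtered walk over the old pointer space: pick out the JSFunctions that have a valid context and are not builtins, and flush each one. A simplified sketch of that loop follows, with hypothetical stand-ins for the heap iterator and object types (the debugger guard above is omitted):

```cpp
// Toy model of the flushing pass; the "heap" and object types are mocks.
#include <vector>

struct HeapObject { virtual ~HeapObject() = default; };

struct JSFunction : HeapObject {
  bool has_valid_context = false;
  bool is_builtin = false;
};

// Stub for the per-function flush sketched earlier.
void FlushCodeForFunction(JSFunction* /*function*/) { /* pointer swap */ }

void FlushCode(const std::vector<HeapObject*>& old_pointer_space) {
  for (HeapObject* obj : old_pointer_space) {
    if (auto* f = dynamic_cast<JSFunction*>(obj)) {
      // Only flush functions with a valid context that are not builtins.
      if (f->has_valid_context && !f->is_builtin) FlushCodeForFunction(f);
    }
  }
}
```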
| 2463 | |
| 2464 | |
| 2465 Object* Heap::CreateCode(const CodeDesc& desc, | 2360 Object* Heap::CreateCode(const CodeDesc& desc, |
| 2466 Code::Flags flags, | 2361 Code::Flags flags, |
| 2467 Handle<Object> self_reference) { | 2362 Handle<Object> self_reference) { |
| 2468 // Allocate ByteArray before the Code object, so that we do not risk | 2363 // Allocate ByteArray before the Code object, so that we do not risk |
| 2469 // leaving uninitialized Code object (and breaking the heap). | 2364 // leaving uninitialized Code object (and breaking the heap). |
| 2470 Object* reloc_info = AllocateByteArray(desc.reloc_size, TENURED); | 2365 Object* reloc_info = AllocateByteArray(desc.reloc_size, TENURED); |
| 2471 if (reloc_info->IsFailure()) return reloc_info; | 2366 if (reloc_info->IsFailure()) return reloc_info; |
| 2472 | 2367 |
| 2473 // Compute size | 2368 // Compute size |
| 2474 int body_size = RoundUp(desc.instr_size, kObjectAlignment); | 2369 int body_size = RoundUp(desc.instr_size, kObjectAlignment); |
| (...skipping 2490 matching lines...) |
| 4965 void ExternalStringTable::TearDown() { | 4860 void ExternalStringTable::TearDown() { |
| 4966 new_space_strings_.Free(); | 4861 new_space_strings_.Free(); |
| 4967 old_space_strings_.Free(); | 4862 old_space_strings_.Free(); |
| 4968 } | 4863 } |
| 4969 | 4864 |
| 4970 | 4865 |
| 4971 List<Object*> ExternalStringTable::new_space_strings_; | 4866 List<Object*> ExternalStringTable::new_space_strings_; |
| 4972 List<Object*> ExternalStringTable::old_space_strings_; | 4867 List<Object*> ExternalStringTable::old_space_strings_; |
| 4973 | 4868 |
| 4974 } } // namespace v8::internal | 4869 } } // namespace v8::internal |