| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 32 matching lines...) |
| 43 static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) { | 43 static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) { |
| 44 return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(), | 44 return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(), |
| 45 OS::CommitPageSize(), | 45 OS::CommitPageSize(), |
| 46 EXECUTABLE, | 46 EXECUTABLE, |
| 47 NULL); | 47 NULL); |
| 48 } | 48 } |
| 49 | 49 |
| 50 | 50 |
| 51 DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator) | 51 DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator) |
| 52 : allocator_(allocator), | 52 : allocator_(allocator), |
| 53 eager_deoptimization_entry_code_entries_(-1), | |
| 54 lazy_deoptimization_entry_code_entries_(-1), | |
| 55 eager_deoptimization_entry_code_(AllocateCodeChunk(allocator)), | |
| 56 lazy_deoptimization_entry_code_(AllocateCodeChunk(allocator)), | |
| 57 current_(NULL), | 53 current_(NULL), |
| 58 #ifdef ENABLE_DEBUGGER_SUPPORT | 54 #ifdef ENABLE_DEBUGGER_SUPPORT |
| 59 deoptimized_frame_info_(NULL), | 55 deoptimized_frame_info_(NULL), |
| 60 #endif | 56 #endif |
| 61 deoptimizing_code_list_(NULL) { } | 57 deoptimizing_code_list_(NULL) { |
| | 58 for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) { |
| | 59 deopt_entry_code_entries_[i] = -1; |
| | 60 deopt_entry_code_[i] = AllocateCodeChunk(allocator); |
| | 61 } |
| | 62 } |
| 62 | 63 |
| 63 | 64 |
| 64 DeoptimizerData::~DeoptimizerData() { | 65 DeoptimizerData::~DeoptimizerData() { |
| 65 allocator_->Free(eager_deoptimization_entry_code_); | 66 for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) { |
| 66 eager_deoptimization_entry_code_ = NULL; | 67 allocator_->Free(deopt_entry_code_[i]); |
| 67 allocator_->Free(lazy_deoptimization_entry_code_); | 68 deopt_entry_code_[i] = NULL; |
| 68 lazy_deoptimization_entry_code_ = NULL; | 69 } |
| 69 | 70 |
| 70 DeoptimizingCodeListNode* current = deoptimizing_code_list_; | 71 DeoptimizingCodeListNode* current = deoptimizing_code_list_; |
| 71 while (current != NULL) { | 72 while (current != NULL) { |
| 72 DeoptimizingCodeListNode* prev = current; | 73 DeoptimizingCodeListNode* prev = current; |
| 73 current = current->next(); | 74 current = current->next(); |
| 74 delete prev; | 75 delete prev; |
| 75 } | 76 } |
| 76 deoptimizing_code_list_ = NULL; | 77 deoptimizing_code_list_ = NULL; |
| 77 } | 78 } |
| 78 | 79 |
| (...skipping 402 matching lines...) |
| 481 | 482 |
| 482 void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) { | 483 void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) { |
| 483 deoptimizer->DoComputeOutputFrames(); | 484 deoptimizer->DoComputeOutputFrames(); |
| 484 } | 485 } |
| 485 | 486 |
| 486 | 487 |
| 487 bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type, | 488 bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type, |
| 488 StackFrame::Type frame_type) { | 489 StackFrame::Type frame_type) { |
| 489 switch (deopt_type) { | 490 switch (deopt_type) { |
| 490 case EAGER: | 491 case EAGER: |
| | 492 case SOFT: |
| 491 case LAZY: | 493 case LAZY: |
| 492 case DEBUGGER: | 494 case DEBUGGER: |
| 493 return (frame_type == StackFrame::STUB) | 495 return (frame_type == StackFrame::STUB) |
| 494 ? FLAG_trace_stub_failures | 496 ? FLAG_trace_stub_failures |
| 495 : FLAG_trace_deopt; | 497 : FLAG_trace_deopt; |
| 496 case OSR: | 498 case OSR: |
| 497 return FLAG_trace_osr; | 499 return FLAG_trace_osr; |
| 498 } | 500 } |
| 499 UNREACHABLE(); | 501 UNREACHABLE(); |
| 500 return false; | 502 return false; |
| 501 } | 503 } |
| 502 | 504 |
| 503 | 505 |
| 504 const char* Deoptimizer::MessageFor(BailoutType type) { | 506 const char* Deoptimizer::MessageFor(BailoutType type) { |
| 505 switch (type) { | 507 switch (type) { |
| 506 case EAGER: | 508 case EAGER: |
| | 509 case SOFT: |
| 507 case LAZY: | 510 case LAZY: |
| 508 return "DEOPT"; | 511 return "DEOPT"; |
| 509 case DEBUGGER: | 512 case DEBUGGER: |
| 510 return "DEOPT FOR DEBUGGER"; | 513 return "DEOPT FOR DEBUGGER"; |
| 511 case OSR: | 514 case OSR: |
| 512 return "OSR"; | 515 return "OSR"; |
| 513 } | 516 } |
| 514 UNREACHABLE(); | 517 UNREACHABLE(); |
| 515 return NULL; | 518 return NULL; |
| 516 } | 519 } |
| (...skipping 21 matching lines...) |
| 538 deferred_arguments_objects_(0), | 541 deferred_arguments_objects_(0), |
| 539 deferred_heap_numbers_(0), | 542 deferred_heap_numbers_(0), |
| 540 trace_(false) { | 543 trace_(false) { |
| 541 // For COMPILED_STUBs called from builtins, the function pointer is a SMI | 544 // For COMPILED_STUBs called from builtins, the function pointer is a SMI |
| 542 // indicating an internal frame. | 545 // indicating an internal frame. |
| 543 if (function->IsSmi()) { | 546 if (function->IsSmi()) { |
| 544 function = NULL; | 547 function = NULL; |
| 545 } | 548 } |
| 546 if (function != NULL && function->IsOptimized()) { | 549 if (function != NULL && function->IsOptimized()) { |
| 547 function->shared()->increment_deopt_count(); | 550 function->shared()->increment_deopt_count(); |
| | 551 if (bailout_type_ == Deoptimizer::SOFT) { |
| | 552 // Soft deopts shouldn't count against the overall re-optimization count |
| | 553 // that can eventually lead to disabling optimization for a function. |
| | 554 int opt_count = function->shared()->opt_count(); |
| | 555 if (opt_count > 0) opt_count--; |
| | 556 function->shared()->set_opt_count(opt_count); |
| | 557 } |
| 548 } | 558 } |
| 549 compiled_code_ = FindOptimizedCode(function, optimized_code); | 559 compiled_code_ = FindOptimizedCode(function, optimized_code); |
| 550 StackFrame::Type frame_type = function == NULL | 560 StackFrame::Type frame_type = function == NULL |
| 551 ? StackFrame::STUB | 561 ? StackFrame::STUB |
| 552 : StackFrame::JAVA_SCRIPT; | 562 : StackFrame::JAVA_SCRIPT; |
| 553 trace_ = TraceEnabledFor(type, frame_type); | 563 trace_ = TraceEnabledFor(type, frame_type); |
| 554 if (trace_) Trace(); | 564 if (trace_) Trace(); |
| 555 ASSERT(HEAP->allow_allocation(false)); | 565 ASSERT(HEAP->allow_allocation(false)); |
| 556 unsigned size = ComputeInputFrameSize(); | 566 unsigned size = ComputeInputFrameSize(); |
| 557 input_ = new(size) FrameDescription(size, function); | 567 input_ = new(size) FrameDescription(size, function); |
| 558 input_->SetFrameType(frame_type); | 568 input_->SetFrameType(frame_type); |
| 559 } | 569 } |
| 560 | 570 |
| 561 | 571 |
| 562 Code* Deoptimizer::FindOptimizedCode(JSFunction* function, | 572 Code* Deoptimizer::FindOptimizedCode(JSFunction* function, |
| 563 Code* optimized_code) { | 573 Code* optimized_code) { |
| 564 switch (bailout_type_) { | 574 switch (bailout_type_) { |
| | 575 case Deoptimizer::SOFT: |
| 565 case Deoptimizer::EAGER: | 576 case Deoptimizer::EAGER: |
| 566 ASSERT(from_ == NULL); | 577 ASSERT(from_ == NULL); |
| 567 return function->code(); | 578 return function->code(); |
| 568 case Deoptimizer::LAZY: { | 579 case Deoptimizer::LAZY: { |
| 569 Code* compiled_code = | 580 Code* compiled_code = |
| 570 isolate_->deoptimizer_data()->FindDeoptimizingCode(from_); | 581 isolate_->deoptimizer_data()->FindDeoptimizingCode(from_); |
| 571 return (compiled_code == NULL) | 582 return (compiled_code == NULL) |
| 572 ? static_cast<Code*>(isolate_->heap()->FindCodeObject(from_)) | 583 ? static_cast<Code*>(isolate_->heap()->FindCodeObject(from_)) |
| 573 : compiled_code; | 584 : compiled_code; |
| 574 } | 585 } |
| (...skipping 15 matching lines...) |
| 590 } | 601 } |
| 591 | 602 |
| 592 | 603 |
| 593 void Deoptimizer::Trace() { | 604 void Deoptimizer::Trace() { |
| 594 PrintF("**** %s: ", Deoptimizer::MessageFor(bailout_type_)); | 605 PrintF("**** %s: ", Deoptimizer::MessageFor(bailout_type_)); |
| 595 PrintFunctionName(); | 606 PrintFunctionName(); |
| 596 PrintF(" at id #%u, address 0x%" V8PRIxPTR ", frame size %d\n", | 607 PrintF(" at id #%u, address 0x%" V8PRIxPTR ", frame size %d\n", |
| 597 bailout_id_, | 608 bailout_id_, |
| 598 reinterpret_cast<intptr_t>(from_), | 609 reinterpret_cast<intptr_t>(from_), |
| 599 fp_to_sp_delta_ - (2 * kPointerSize)); | 610 fp_to_sp_delta_ - (2 * kPointerSize)); |
| 600 if (bailout_type_ == EAGER) compiled_code_->PrintDeoptLocation(bailout_id_); | 611 if (bailout_type_ == EAGER || bailout_type_ == SOFT) { |
| | 612 compiled_code_->PrintDeoptLocation(bailout_id_); |
| | 613 } |
| 601 } | 614 } |
| 602 | 615 |
| 603 | 616 |
| 604 void Deoptimizer::PrintFunctionName() { | 617 void Deoptimizer::PrintFunctionName() { |
| 605 if (function_->IsJSFunction()) { | 618 if (function_->IsJSFunction()) { |
| 606 function_->PrintName(); | 619 function_->PrintName(); |
| 607 } else { | 620 } else { |
| 608 PrintF("%s", Code::Kind2String(compiled_code_->kind())); | 621 PrintF("%s", Code::Kind2String(compiled_code_->kind())); |
| 609 } | 622 } |
| 610 } | 623 } |
| (...skipping 21 matching lines...) |
| 632 BailoutType type, | 645 BailoutType type, |
| 633 GetEntryMode mode) { | 646 GetEntryMode mode) { |
| 634 ASSERT(id >= 0); | 647 ASSERT(id >= 0); |
| 635 if (id >= kMaxNumberOfEntries) return NULL; | 648 if (id >= kMaxNumberOfEntries) return NULL; |
| 636 if (mode == ENSURE_ENTRY_CODE) { | 649 if (mode == ENSURE_ENTRY_CODE) { |
| 637 EnsureCodeForDeoptimizationEntry(isolate, type, id); | 650 EnsureCodeForDeoptimizationEntry(isolate, type, id); |
| 638 } else { | 651 } else { |
| 639 ASSERT(mode == CALCULATE_ENTRY_ADDRESS); | 652 ASSERT(mode == CALCULATE_ENTRY_ADDRESS); |
| 640 } | 653 } |
| 641 DeoptimizerData* data = isolate->deoptimizer_data(); | 654 DeoptimizerData* data = isolate->deoptimizer_data(); |
| 642 MemoryChunk* base = (type == EAGER) | 655 ASSERT(type < kBailoutTypesWithCodeEntry); |
| 643 ? data->eager_deoptimization_entry_code_ | 656 MemoryChunk* base = data->deopt_entry_code_[type]; |
| 644 : data->lazy_deoptimization_entry_code_; | |
| 645 return base->area_start() + (id * table_entry_size_); | 657 return base->area_start() + (id * table_entry_size_); |
| 646 } | 658 } |
| 647 | 659 |
| 648 | 660 |
| 649 int Deoptimizer::GetDeoptimizationId(Isolate* isolate, | 661 int Deoptimizer::GetDeoptimizationId(Isolate* isolate, |
| 650 Address addr, | 662 Address addr, |
| 651 BailoutType type) { | 663 BailoutType type) { |
| 652 DeoptimizerData* data = isolate->deoptimizer_data(); | 664 DeoptimizerData* data = isolate->deoptimizer_data(); |
| 653 MemoryChunk* base = (type == EAGER) | 665 MemoryChunk* base = data->deopt_entry_code_[type]; |
| 654 ? data->eager_deoptimization_entry_code_ | |
| 655 : data->lazy_deoptimization_entry_code_; | |
| 656 Address start = base->area_start(); | 666 Address start = base->area_start(); |
| 657 if (base == NULL || | 667 if (base == NULL || |
| 658 addr < start || | 668 addr < start || |
| 659 addr >= start + (kMaxNumberOfEntries * table_entry_size_)) { | 669 addr >= start + (kMaxNumberOfEntries * table_entry_size_)) { |
| 660 return kNotDeoptimizationEntry; | 670 return kNotDeoptimizationEntry; |
| 661 } | 671 } |
| 662 ASSERT_EQ(0, | 672 ASSERT_EQ(0, |
| 663 static_cast<int>(addr - start) % table_entry_size_); | 673 static_cast<int>(addr - start) % table_entry_size_); |
| 664 return static_cast<int>(addr - start) / table_entry_size_; | 674 return static_cast<int>(addr - start) / table_entry_size_; |
| 665 } | 675 } |
| (...skipping 1533 matching lines...) |
| 2199 } | 2209 } |
| 2200 | 2210 |
| 2201 | 2211 |
| 2202 void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate, | 2212 void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate, |
| 2203 BailoutType type, | 2213 BailoutType type, |
| 2204 int max_entry_id) { | 2214 int max_entry_id) { |
| 2205 // We cannot run this if the serializer is enabled because this will | 2215 // We cannot run this if the serializer is enabled because this will |
| 2206 // cause us to emit relocation information for the external | 2216 // cause us to emit relocation information for the external |
| 2207 // references. This is fine because the deoptimizer's code section | 2217 // references. This is fine because the deoptimizer's code section |
| 2208 // isn't meant to be serialized at all. | 2218 // isn't meant to be serialized at all. |
| 2209 ASSERT(type == EAGER || type == LAZY); | 2219 ASSERT(type == EAGER || type == SOFT || type == LAZY); |
| 2210 DeoptimizerData* data = isolate->deoptimizer_data(); | 2220 DeoptimizerData* data = isolate->deoptimizer_data(); |
| 2211 int entry_count = (type == EAGER) | 2221 int entry_count = data->deopt_entry_code_entries_[type]; |
| 2212 ? data->eager_deoptimization_entry_code_entries_ | |
| 2213 : data->lazy_deoptimization_entry_code_entries_; | |
| 2214 if (max_entry_id < entry_count) return; | 2222 if (max_entry_id < entry_count) return; |
| 2215 entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries); | 2223 entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries); |
| 2216 while (max_entry_id >= entry_count) entry_count *= 2; | 2224 while (max_entry_id >= entry_count) entry_count *= 2; |
| 2217 ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries); | 2225 ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries); |
| 2218 | 2226 |
| 2219 MacroAssembler masm(isolate, NULL, 16 * KB); | 2227 MacroAssembler masm(isolate, NULL, 16 * KB); |
| 2220 masm.set_emit_debug_code(false); | 2228 masm.set_emit_debug_code(false); |
| 2221 GenerateDeoptimizationEntries(&masm, entry_count, type); | 2229 GenerateDeoptimizationEntries(&masm, entry_count, type); |
| 2222 CodeDesc desc; | 2230 CodeDesc desc; |
| 2223 masm.GetCode(&desc); | 2231 masm.GetCode(&desc); |
| 2224 ASSERT(!RelocInfo::RequiresRelocation(desc)); | 2232 ASSERT(!RelocInfo::RequiresRelocation(desc)); |
| 2225 | 2233 |
| 2226 MemoryChunk* chunk = (type == EAGER) | 2234 MemoryChunk* chunk = data->deopt_entry_code_[type]; |
| 2227 ? data->eager_deoptimization_entry_code_ | |
| 2228 : data->lazy_deoptimization_entry_code_; | |
| 2229 ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >= | 2235 ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >= |
| 2230 desc.instr_size); | 2236 desc.instr_size); |
| 2231 chunk->CommitArea(desc.instr_size); | 2237 chunk->CommitArea(desc.instr_size); |
| 2232 CopyBytes(chunk->area_start(), desc.buffer, | 2238 CopyBytes(chunk->area_start(), desc.buffer, |
| 2233 static_cast<size_t>(desc.instr_size)); | 2239 static_cast<size_t>(desc.instr_size)); |
| 2234 CPU::FlushICache(chunk->area_start(), desc.instr_size); | 2240 CPU::FlushICache(chunk->area_start(), desc.instr_size); |
| 2235 | 2241 |
| 2236 if (type == EAGER) { | 2242 data->deopt_entry_code_entries_[type] = entry_count; |
| 2237 data->eager_deoptimization_entry_code_entries_ = entry_count; | |
| 2238 } else { | |
| 2239 data->lazy_deoptimization_entry_code_entries_ = entry_count; | |
| 2240 } | |
| 2241 } | 2243 } |
| 2242 | 2244 |
| 2243 | 2245 |
| 2244 void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function, | 2246 void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function, |
| 2245 Code* code) { | 2247 Code* code) { |
| 2246 SharedFunctionInfo* shared = function->shared(); | 2248 SharedFunctionInfo* shared = function->shared(); |
| 2247 Object* undefined = function->GetHeap()->undefined_value(); | 2249 Object* undefined = function->GetHeap()->undefined_value(); |
| 2248 Object* current = function; | 2250 Object* current = function; |
| 2249 | 2251 |
| 2250 while (current != undefined) { | 2252 while (current != undefined) { |
| (...skipping 522 matching lines...) |
| 2773 | 2775 |
| 2774 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { | 2776 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { |
| 2775 v->VisitPointer(BitCast<Object**>(&function_)); | 2777 v->VisitPointer(BitCast<Object**>(&function_)); |
| 2776 v->VisitPointers(parameters_, parameters_ + parameters_count_); | 2778 v->VisitPointers(parameters_, parameters_ + parameters_count_); |
| 2777 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); | 2779 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); |
| 2778 } | 2780 } |
| 2779 | 2781 |
| 2780 #endif // ENABLE_DEBUGGER_SUPPORT | 2782 #endif // ENABLE_DEBUGGER_SUPPORT |
| 2781 | 2783 |
| 2782 } } // namespace v8::internal | 2784 } } // namespace v8::internal |
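Taken together, the hunks above apply one mechanical refactoring: the parallel `eager_deoptimization_entry_code_*` / `lazy_deoptimization_entry_code_*` members become arrays indexed by `BailoutType` and sized by the new `Deoptimizer::kBailoutTypesWithCodeEntry` constant, so the added SOFT bailout type needs no new fields and no new branches at the call sites. Below is a minimal standalone sketch of that pattern; the names (`TableData`, `Chunk`, `chunk()`, `entry_count()`) are hypothetical stand-ins, not V8 API, and only the enum-indexed-array idea mirrors the CL:

```cpp
#include <cstdio>

// Hypothetical stand-ins for the V8 types; only the indexing pattern
// mirrors the CL under review.
enum BailoutType { EAGER, SOFT, LAZY, kBailoutTypesWithCodeEntry };

struct Chunk { int committed_size; };

class TableData {
 public:
  TableData() {
    // One loop replaces a pair of hand-written initializers; adding a
    // bailout type only requires growing kBailoutTypesWithCodeEntry.
    for (int i = 0; i < kBailoutTypesWithCodeEntry; ++i) {
      entries_[i] = -1;               // no entry table generated yet
      chunks_[i].committed_size = 0;  // stand-in for AllocateCodeChunk()
    }
  }

  // Before the CL, every caller branched:
  //   (type == EAGER) ? eager_chunk_ : lazy_chunk_
  // After, each lookup is a single indexed access that works for any
  // number of bailout types.
  Chunk* chunk(BailoutType type) { return &chunks_[type]; }
  int& entry_count(BailoutType type) { return entries_[type]; }

 private:
  int entries_[kBailoutTypesWithCodeEntry];
  Chunk chunks_[kBailoutTypesWithCodeEntry];
};

int main() {
  TableData data;
  data.entry_count(SOFT) = 64;  // grow the SOFT table; EAGER/LAZY untouched
  std::printf("SOFT entries: %d, committed: %d\n",
              data.entry_count(SOFT), data.chunk(SOFT)->committed_size);
  return 0;
}
```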