OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 32 matching lines...)
43 static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) { | 43 static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) { |
44 return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(), | 44 return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(), |
45 OS::CommitPageSize(), | 45 OS::CommitPageSize(), |
46 EXECUTABLE, | 46 EXECUTABLE, |
47 NULL); | 47 NULL); |
48 } | 48 } |
49 | 49 |
50 | 50 |
51 DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator) | 51 DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator) |
52 : allocator_(allocator), | 52 : allocator_(allocator), |
53 eager_deoptimization_entry_code_entries_(-1), | |
54 lazy_deoptimization_entry_code_entries_(-1), | |
55 eager_deoptimization_entry_code_(AllocateCodeChunk(allocator)), | |
56 lazy_deoptimization_entry_code_(AllocateCodeChunk(allocator)), | |
57 current_(NULL), | 53 current_(NULL), |
58 #ifdef ENABLE_DEBUGGER_SUPPORT | 54 #ifdef ENABLE_DEBUGGER_SUPPORT |
59 deoptimized_frame_info_(NULL), | 55 deoptimized_frame_info_(NULL), |
60 #endif | 56 #endif |
61 deoptimizing_code_list_(NULL) { } | 57 deoptimizing_code_list_(NULL) { |
| 58 for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) { |
| 59 deopt_entry_code_entries_[i] = -1; |
| 60 deopt_entry_code_[i] = AllocateCodeChunk(allocator); |
| 61 } |
| 62 } |
62 | 63 |
63 | 64 |
64 DeoptimizerData::~DeoptimizerData() { | 65 DeoptimizerData::~DeoptimizerData() { |
65 allocator_->Free(eager_deoptimization_entry_code_); | 66 for (int i = 0; i < Deoptimizer::kBailoutTypesWithCodeEntry; ++i) { |
66 eager_deoptimization_entry_code_ = NULL; | 67 allocator_->Free(deopt_entry_code_[i]); |
67 allocator_->Free(lazy_deoptimization_entry_code_); | 68 deopt_entry_code_[i] = NULL; |
68 lazy_deoptimization_entry_code_ = NULL; | 69 } |
69 | 70 |
70 DeoptimizingCodeListNode* current = deoptimizing_code_list_; | 71 DeoptimizingCodeListNode* current = deoptimizing_code_list_; |
71 while (current != NULL) { | 72 while (current != NULL) { |
72 DeoptimizingCodeListNode* prev = current; | 73 DeoptimizingCodeListNode* prev = current; |
73 current = current->next(); | 74 current = current->next(); |
74 delete prev; | 75 delete prev; |
75 } | 76 } |
76 deoptimizing_code_list_ = NULL; | 77 deoptimizing_code_list_ = NULL; |
77 } | 78 } |
78 | 79 |
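
[Editor's note] The constructor/destructor hunk above collapses the separate eager/lazy fields into arrays indexed by bailout type, so the new SOFT type needs no extra members. Below is a minimal standalone sketch of that pattern; Chunk, DeoptData, kTypesWithCodeEntry and the other names are illustrative stand-ins, not V8's actual declarations.

    // Sketch: parallel per-type fields replaced by arrays indexed by an
    // enum, so adding a new bailout type (e.g. SOFT) only grows the enum.
    #include <cstddef>

    struct Chunk { };  // stands in for v8::internal::MemoryChunk

    enum BailoutType { EAGER, SOFT, LAZY, kTypesWithCodeEntry };

    class DeoptData {
     public:
      DeoptData() {
        for (int i = 0; i < kTypesWithCodeEntry; ++i) {
          entry_counts_[i] = -1;         // no entries generated yet
          entry_code_[i] = new Chunk();  // one code chunk per bailout type
        }
      }
      ~DeoptData() {
        for (int i = 0; i < kTypesWithCodeEntry; ++i) {
          delete entry_code_[i];
          entry_code_[i] = NULL;
        }
      }
     private:
      int entry_counts_[kTypesWithCodeEntry];
      Chunk* entry_code_[kTypesWithCodeEntry];
    };
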
(...skipping 402 matching lines...)
481 | 482 |
482 void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) { | 483 void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) { |
483 deoptimizer->DoComputeOutputFrames(); | 484 deoptimizer->DoComputeOutputFrames(); |
484 } | 485 } |
485 | 486 |
486 | 487 |
487 bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type, | 488 bool Deoptimizer::TraceEnabledFor(BailoutType deopt_type, |
488 StackFrame::Type frame_type) { | 489 StackFrame::Type frame_type) { |
489 switch (deopt_type) { | 490 switch (deopt_type) { |
490 case EAGER: | 491 case EAGER: |
| 492 case SOFT: |
491 case LAZY: | 493 case LAZY: |
492 case DEBUGGER: | 494 case DEBUGGER: |
493 return (frame_type == StackFrame::STUB) | 495 return (frame_type == StackFrame::STUB) |
494 ? FLAG_trace_stub_failures | 496 ? FLAG_trace_stub_failures |
495 : FLAG_trace_deopt; | 497 : FLAG_trace_deopt; |
496 case OSR: | 498 case OSR: |
497 return FLAG_trace_osr; | 499 return FLAG_trace_osr; |
498 } | 500 } |
499 UNREACHABLE(); | 501 UNREACHABLE(); |
500 return false; | 502 return false; |
501 } | 503 } |
502 | 504 |
503 | 505 |
504 const char* Deoptimizer::MessageFor(BailoutType type) { | 506 const char* Deoptimizer::MessageFor(BailoutType type) { |
505 switch (type) { | 507 switch (type) { |
506 case EAGER: | 508 case EAGER: |
| 509 case SOFT: |
507 case LAZY: | 510 case LAZY: |
508 return "DEOPT"; | 511 return "DEOPT"; |
509 case DEBUGGER: | 512 case DEBUGGER: |
510 return "DEOPT FOR DEBUGGER"; | 513 return "DEOPT FOR DEBUGGER"; |
511 case OSR: | 514 case OSR: |
512 return "OSR"; | 515 return "OSR"; |
513 } | 516 } |
514 UNREACHABLE(); | 517 UNREACHABLE(); |
515 return NULL; | 518 return NULL; |
516 } | 519 } |
(...skipping 38 matching lines...)
555 ASSERT(HEAP->allow_allocation(false)); | 558 ASSERT(HEAP->allow_allocation(false)); |
556 unsigned size = ComputeInputFrameSize(); | 559 unsigned size = ComputeInputFrameSize(); |
557 input_ = new(size) FrameDescription(size, function); | 560 input_ = new(size) FrameDescription(size, function); |
558 input_->SetFrameType(frame_type); | 561 input_->SetFrameType(frame_type); |
559 } | 562 } |
560 | 563 |
561 | 564 |
562 Code* Deoptimizer::FindOptimizedCode(JSFunction* function, | 565 Code* Deoptimizer::FindOptimizedCode(JSFunction* function, |
563 Code* optimized_code) { | 566 Code* optimized_code) { |
564 switch (bailout_type_) { | 567 switch (bailout_type_) { |
| 568 case Deoptimizer::SOFT: |
565 case Deoptimizer::EAGER: | 569 case Deoptimizer::EAGER: |
566 ASSERT(from_ == NULL); | 570 ASSERT(from_ == NULL); |
567 return function->code(); | 571 return function->code(); |
568 case Deoptimizer::LAZY: { | 572 case Deoptimizer::LAZY: { |
569 Code* compiled_code = | 573 Code* compiled_code = |
570 isolate_->deoptimizer_data()->FindDeoptimizingCode(from_); | 574 isolate_->deoptimizer_data()->FindDeoptimizingCode(from_); |
571 return (compiled_code == NULL) | 575 return (compiled_code == NULL) |
572 ? static_cast<Code*>(isolate_->heap()->FindCodeObject(from_)) | 576 ? static_cast<Code*>(isolate_->heap()->FindCodeObject(from_)) |
573 : compiled_code; | 577 : compiled_code; |
574 } | 578 } |
(...skipping 15 matching lines...)
590 } | 594 } |
591 | 595 |
592 | 596 |
593 void Deoptimizer::Trace() { | 597 void Deoptimizer::Trace() { |
594 PrintF("**** %s: ", Deoptimizer::MessageFor(bailout_type_)); | 598 PrintF("**** %s: ", Deoptimizer::MessageFor(bailout_type_)); |
595 PrintFunctionName(); | 599 PrintFunctionName(); |
596 PrintF(" at id #%u, address 0x%" V8PRIxPTR ", frame size %d\n", | 600 PrintF(" at id #%u, address 0x%" V8PRIxPTR ", frame size %d\n", |
597 bailout_id_, | 601 bailout_id_, |
598 reinterpret_cast<intptr_t>(from_), | 602 reinterpret_cast<intptr_t>(from_), |
599 fp_to_sp_delta_ - (2 * kPointerSize)); | 603 fp_to_sp_delta_ - (2 * kPointerSize)); |
600 if (bailout_type_ == EAGER) compiled_code_->PrintDeoptLocation(bailout_id_); | 604 if (bailout_type_ == EAGER || bailout_type_ == SOFT) { |
| 605 compiled_code_->PrintDeoptLocation(bailout_id_); |
| 606 } |
601 } | 607 } |
602 | 608 |
603 | 609 |
604 void Deoptimizer::PrintFunctionName() { | 610 void Deoptimizer::PrintFunctionName() { |
605 if (function_->IsJSFunction()) { | 611 if (function_->IsJSFunction()) { |
606 function_->PrintName(); | 612 function_->PrintName(); |
607 } else { | 613 } else { |
608 PrintF("%s", Code::Kind2String(compiled_code_->kind())); | 614 PrintF("%s", Code::Kind2String(compiled_code_->kind())); |
609 } | 615 } |
610 } | 616 } |
(...skipping 21 matching lines...)
632 BailoutType type, | 638 BailoutType type, |
633 GetEntryMode mode) { | 639 GetEntryMode mode) { |
634 ASSERT(id >= 0); | 640 ASSERT(id >= 0); |
635 if (id >= kMaxNumberOfEntries) return NULL; | 641 if (id >= kMaxNumberOfEntries) return NULL; |
636 if (mode == ENSURE_ENTRY_CODE) { | 642 if (mode == ENSURE_ENTRY_CODE) { |
637 EnsureCodeForDeoptimizationEntry(isolate, type, id); | 643 EnsureCodeForDeoptimizationEntry(isolate, type, id); |
638 } else { | 644 } else { |
639 ASSERT(mode == CALCULATE_ENTRY_ADDRESS); | 645 ASSERT(mode == CALCULATE_ENTRY_ADDRESS); |
640 } | 646 } |
641 DeoptimizerData* data = isolate->deoptimizer_data(); | 647 DeoptimizerData* data = isolate->deoptimizer_data(); |
642 MemoryChunk* base = (type == EAGER) | 648 ASSERT(type < kBailoutTypesWithCodeEntry); |
643 ? data->eager_deoptimization_entry_code_ | 649 MemoryChunk* base = data->deopt_entry_code_[type]; |
644 : data->lazy_deoptimization_entry_code_; | |
645 return base->area_start() + (id * table_entry_size_); | 650 return base->area_start() + (id * table_entry_size_); |
646 } | 651 } |
647 | 652 |
648 | 653 |
649 int Deoptimizer::GetDeoptimizationId(Isolate* isolate, | 654 int Deoptimizer::GetDeoptimizationId(Isolate* isolate, |
650 Address addr, | 655 Address addr, |
651 BailoutType type) { | 656 BailoutType type) { |
652 DeoptimizerData* data = isolate->deoptimizer_data(); | 657 DeoptimizerData* data = isolate->deoptimizer_data(); |
653 MemoryChunk* base = (type == EAGER) | 658 MemoryChunk* base = data->deopt_entry_code_[type]; |
654 ? data->eager_deoptimization_entry_code_ | |
655 : data->lazy_deoptimization_entry_code_; | |
656 Address start = base->area_start(); | 659 Address start = base->area_start(); |
657 if (base == NULL || | 660 if (base == NULL || |
658 addr < start || | 661 addr < start || |
659 addr >= start + (kMaxNumberOfEntries * table_entry_size_)) { | 662 addr >= start + (kMaxNumberOfEntries * table_entry_size_)) { |
660 return kNotDeoptimizationEntry; | 663 return kNotDeoptimizationEntry; |
661 } | 664 } |
662 ASSERT_EQ(0, | 665 ASSERT_EQ(0, |
663 static_cast<int>(addr - start) % table_entry_size_); | 666 static_cast<int>(addr - start) % table_entry_size_); |
664 return static_cast<int>(addr - start) / table_entry_size_; | 667 return static_cast<int>(addr - start) / table_entry_size_; |
665 } | 668 } |
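
[Editor's note] GetDeoptimizationEntry and GetDeoptimizationId above treat the per-type code chunk as a dense table of fixed-size entries, so converting between an entry id and its address is plain pointer arithmetic. A minimal sketch of that math follows; kEntrySize, kMaxEntries, EntryAddress and EntryId are illustrative placeholders for table_entry_size_, kMaxNumberOfEntries and the real methods. Unlike the listing above, the sketch rejects a missing table before reading its start address.

    // Sketch: address <-> id conversion over a dense table of
    // fixed-size deopt entries.
    #include <cassert>
    #include <cstddef>

    typedef unsigned char* Address;

    const int kEntrySize = 12;      // placeholder for table_entry_size_
    const int kMaxEntries = 16384;  // placeholder for kMaxNumberOfEntries
    const int kNotAnEntry = -1;

    Address EntryAddress(Address table_start, int id) {
      assert(id >= 0 && id < kMaxEntries);
      return table_start + id * kEntrySize;  // dense, fixed-size slots
    }

    int EntryId(Address table_start, Address addr) {
      if (table_start == NULL ||
          addr < table_start ||
          addr >= table_start + kMaxEntries * kEntrySize) {
        return kNotAnEntry;  // not inside this type's table
      }
      assert((addr - table_start) % kEntrySize == 0);
      return static_cast<int>((addr - table_start) / kEntrySize);
    }
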
(...skipping 1533 matching lines...)
2199 } | 2202 } |
2200 | 2203 |
2201 | 2204 |
2202 void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate, | 2205 void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate, |
2203 BailoutType type, | 2206 BailoutType type, |
2204 int max_entry_id) { | 2207 int max_entry_id) { |
2205 // We cannot run this if the serializer is enabled because this will | 2208 // We cannot run this if the serializer is enabled because this will |
2206 // cause us to emit relocation information for the external | 2209 // cause us to emit relocation information for the external |
2207 // references. This is fine because the deoptimizer's code section | 2210 // references. This is fine because the deoptimizer's code section |
2208 // isn't meant to be serialized at all. | 2211 // isn't meant to be serialized at all. |
2209 ASSERT(type == EAGER || type == LAZY); | 2212 ASSERT(type == EAGER || type == SOFT || type == LAZY); |
2210 DeoptimizerData* data = isolate->deoptimizer_data(); | 2213 DeoptimizerData* data = isolate->deoptimizer_data(); |
2211 int entry_count = (type == EAGER) | 2214 int entry_count = data->deopt_entry_code_entries_[type]; |
2212 ? data->eager_deoptimization_entry_code_entries_ | |
2213 : data->lazy_deoptimization_entry_code_entries_; | |
2214 if (max_entry_id < entry_count) return; | 2215 if (max_entry_id < entry_count) return; |
2215 entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries); | 2216 entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries); |
2216 while (max_entry_id >= entry_count) entry_count *= 2; | 2217 while (max_entry_id >= entry_count) entry_count *= 2; |
2217 ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries); | 2218 ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries); |
2218 | 2219 |
2219 MacroAssembler masm(isolate, NULL, 16 * KB); | 2220 MacroAssembler masm(isolate, NULL, 16 * KB); |
2220 masm.set_emit_debug_code(false); | 2221 masm.set_emit_debug_code(false); |
2221 GenerateDeoptimizationEntries(&masm, entry_count, type); | 2222 GenerateDeoptimizationEntries(&masm, entry_count, type); |
2222 CodeDesc desc; | 2223 CodeDesc desc; |
2223 masm.GetCode(&desc); | 2224 masm.GetCode(&desc); |
2224 ASSERT(!RelocInfo::RequiresRelocation(desc)); | 2225 ASSERT(!RelocInfo::RequiresRelocation(desc)); |
2225 | 2226 |
2226 MemoryChunk* chunk = (type == EAGER) | 2227 MemoryChunk* chunk = data->deopt_entry_code_[type]; |
2227 ? data->eager_deoptimization_entry_code_ | |
2228 : data->lazy_deoptimization_entry_code_; | |
2229 ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >= | 2228 ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >= |
2230 desc.instr_size); | 2229 desc.instr_size); |
2231 chunk->CommitArea(desc.instr_size); | 2230 chunk->CommitArea(desc.instr_size); |
2232 CopyBytes(chunk->area_start(), desc.buffer, | 2231 CopyBytes(chunk->area_start(), desc.buffer, |
2233 static_cast<size_t>(desc.instr_size)); | 2232 static_cast<size_t>(desc.instr_size)); |
2234 CPU::FlushICache(chunk->area_start(), desc.instr_size); | 2233 CPU::FlushICache(chunk->area_start(), desc.instr_size); |
2235 | 2234 |
2236 if (type == EAGER) { | 2235 data->deopt_entry_code_entries_[type] = entry_count; |
2237 data->eager_deoptimization_entry_code_entries_ = entry_count; | |
2238 } else { | |
2239 data->lazy_deoptimization_entry_code_entries_ = entry_count; | |
2240 } | |
2241 } | 2236 } |
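
[Editor's note] EnsureCodeForDeoptimizationEntry above keeps its table-growth policy unchanged: regenerate entry code only when the requested id does not fit, and grow by doubling from a floor of kMinNumberOfEntries, so regeneration happens at most a logarithmic number of times. A small sketch of that size computation, using a hypothetical GrownEntryCount helper and an illustrative kMinEntries constant:

    // Sketch: doubling growth policy for the deopt entry table.
    #include <algorithm>

    const int kMinEntries = 64;  // placeholder for kMinNumberOfEntries

    // Returns the table size needed to cover max_entry_id, or the
    // current size if the table already fits it.
    int GrownEntryCount(int current_count, int max_entry_id) {
      if (max_entry_id < current_count) return current_count;  // fits
      int count = std::max(current_count, kMinEntries);
      while (max_entry_id >= count) count *= 2;  // double until it fits
      return count;
    }

    // e.g. GrownEntryCount(-1, 70) == 128: the sentinel -1 ("no table
    // yet") is first clamped to kMinEntries, then doubled once.
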
2242 | 2237 |
2243 | 2238 |
2244 void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function, | 2239 void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function, |
2245 Code* code) { | 2240 Code* code) { |
2246 SharedFunctionInfo* shared = function->shared(); | 2241 SharedFunctionInfo* shared = function->shared(); |
2247 Object* undefined = function->GetHeap()->undefined_value(); | 2242 Object* undefined = function->GetHeap()->undefined_value(); |
2248 Object* current = function; | 2243 Object* current = function; |
2249 | 2244 |
2250 while (current != undefined) { | 2245 while (current != undefined) { |
(...skipping 522 matching lines...)
2773 | 2768 |
2774 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { | 2769 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { |
2775 v->VisitPointer(BitCast<Object**>(&function_)); | 2770 v->VisitPointer(BitCast<Object**>(&function_)); |
2776 v->VisitPointers(parameters_, parameters_ + parameters_count_); | 2771 v->VisitPointers(parameters_, parameters_ + parameters_count_); |
2777 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); | 2772 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); |
2778 } | 2773 } |
2779 | 2774 |
2780 #endif // ENABLE_DEBUGGER_SUPPORT | 2775 #endif // ENABLE_DEBUGGER_SUPPORT |
2781 | 2776 |
2782 } } // namespace v8::internal | 2777 } } // namespace v8::internal |