| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 26 matching lines...) |
| 37 #include "prettyprinter.h" | 37 #include "prettyprinter.h" |
| 38 | 38 |
| 39 | 39 |
| 40 namespace v8 { | 40 namespace v8 { |
| 41 namespace internal { | 41 namespace internal { |
| 42 | 42 |
| 43 DeoptimizerData::DeoptimizerData() { | 43 DeoptimizerData::DeoptimizerData() { |
| 44 eager_deoptimization_entry_code_entries_ = -1; | 44 eager_deoptimization_entry_code_entries_ = -1; |
| 45 lazy_deoptimization_entry_code_entries_ = -1; | 45 lazy_deoptimization_entry_code_entries_ = -1; |
| 46 size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize(); | 46 size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize(); |
| 47 eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size); | 47 MemoryAllocator* allocator = Isolate::Current()->memory_allocator(); |
| 48 lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size); | 48 size_t initial_commit_size = OS::CommitPageSize(); |
| | 49 eager_deoptimization_entry_code_ = |
| | 50 allocator->AllocateChunk(deopt_table_size, |
| | 51 initial_commit_size, |
| | 52 EXECUTABLE, |
| | 53 NULL); |
| | 54 lazy_deoptimization_entry_code_ = |
| | 55 allocator->AllocateChunk(deopt_table_size, |
| | 56 initial_commit_size, |
| | 57 EXECUTABLE, |
| | 58 NULL); |
| 49 current_ = NULL; | 59 current_ = NULL; |
| 50 deoptimizing_code_list_ = NULL; | 60 deoptimizing_code_list_ = NULL; |
| 51 #ifdef ENABLE_DEBUGGER_SUPPORT | 61 #ifdef ENABLE_DEBUGGER_SUPPORT |
| 52 deoptimized_frame_info_ = NULL; | 62 deoptimized_frame_info_ = NULL; |
| 53 #endif | 63 #endif |
| 54 } | 64 } |
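Note on the constructor hunk above: the patch replaces the two raw VirtualMemory reservations with executable MemoryChunks from the isolate's MemoryAllocator, reserving the full GetMaxDeoptTableSize() but committing only OS::CommitPageSize() bytes up front (the second AllocateChunk argument); the rest is committed later when entries are actually generated. Below is a minimal standalone model of that reserve-vs-commit split, not V8 code: every constant is an illustrative assumption standing in for Deoptimizer::table_entry_size_, kMaxNumberOfEntries and OS::CommitPageSize().

    #include <cstddef>
    #include <cstdio>

    // Standalone model, not V8 code; all constants below are assumptions.
    const size_t kEntrySize = 12;      // stand-in for Deoptimizer::table_entry_size_
    const size_t kMaxEntries = 16384;  // stand-in for kMaxNumberOfEntries
    const size_t kCommitPage = 4096;   // stand-in for OS::CommitPageSize()

    // Full reservation: room for every possible entry, rounded up to a page.
    size_t MaxDeoptTableSize() {
      size_t raw = kEntrySize * kMaxEntries;
      return ((raw + kCommitPage - 1) / kCommitPage) * kCommitPage;
    }

    int main() {
      // Mirrors AllocateChunk(deopt_table_size, initial_commit_size, EXECUTABLE, NULL):
      // reserve the whole table, commit just the first page for now.
      printf("reserve %zu bytes, commit %zu bytes initially\n",
             MaxDeoptTableSize(), kCommitPage);
      return 0;
    }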
| 55 | 65 |
| 56 | 66 |
| 57 DeoptimizerData::~DeoptimizerData() { | 67 DeoptimizerData::~DeoptimizerData() { |
| 58 delete eager_deoptimization_entry_code_; | 68 Isolate::Current()->memory_allocator()->Free( |
| | 69 eager_deoptimization_entry_code_); |
| 59 eager_deoptimization_entry_code_ = NULL; | 70 eager_deoptimization_entry_code_ = NULL; |
| 60 delete lazy_deoptimization_entry_code_; | 71 Isolate::Current()->memory_allocator()->Free( |
| | 72 lazy_deoptimization_entry_code_); |
| 61 lazy_deoptimization_entry_code_ = NULL; | 73 lazy_deoptimization_entry_code_ = NULL; |
| 62 | 74 |
| 63 DeoptimizingCodeListNode* current = deoptimizing_code_list_; | 75 DeoptimizingCodeListNode* current = deoptimizing_code_list_; |
| 64 while (current != NULL) { | 76 while (current != NULL) { |
| 65 DeoptimizingCodeListNode* prev = current; | 77 DeoptimizingCodeListNode* prev = current; |
| 66 current = current->next(); | 78 current = current->next(); |
| 67 delete prev; | 79 delete prev; |
| 68 } | 80 } |
| 69 deoptimizing_code_list_ = NULL; | 81 deoptimizing_code_list_ = NULL; |
| 70 } | 82 } |
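The destructor now returns both chunks to the same MemoryAllocator that handed them out, and still tears down the deoptimizing-code list with the usual "advance before delete" idiom. A tiny standalone model of that idiom, not V8 code (Node and DeleteList are hypothetical names):

    #include <cstddef>

    struct Node {
      Node* next;
    };

    void DeleteList(Node* current) {
      while (current != NULL) {
        Node* prev = current;     // remember the node about to be freed
        current = current->next;  // advance first...
        delete prev;              // ...then delete, so freed memory is never read
      }
    }

    int main() {
      Node* head = NULL;
      for (int i = 0; i < 3; ++i) {   // build a short list
        Node* n = new Node;
        n->next = head;
        head = n;
      }
      DeleteList(head);
      return 0;
    }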
| (...skipping 541 matching lines...) |
| 612 output_ = NULL; | 624 output_ = NULL; |
| 613 ASSERT(!HEAP->allow_allocation(true)); | 625 ASSERT(!HEAP->allow_allocation(true)); |
| 614 } | 626 } |
| 615 | 627 |
| 616 | 628 |
| 617 Address Deoptimizer::GetDeoptimizationEntry(int id, | 629 Address Deoptimizer::GetDeoptimizationEntry(int id, |
| 618 BailoutType type, | 630 BailoutType type, |
| 619 GetEntryMode mode) { | 631 GetEntryMode mode) { |
| 620 ASSERT(id >= 0); | 632 ASSERT(id >= 0); |
| 621 if (id >= kMaxNumberOfEntries) return NULL; | 633 if (id >= kMaxNumberOfEntries) return NULL; |
| 622 VirtualMemory* base = NULL; | 634 MemoryChunk* base = NULL; |
| 623 if (mode == ENSURE_ENTRY_CODE) { | 635 if (mode == ENSURE_ENTRY_CODE) { |
| 624 EnsureCodeForDeoptimizationEntry(type, id); | 636 EnsureCodeForDeoptimizationEntry(type, id); |
| 625 } else { | 637 } else { |
| 626 ASSERT(mode == CALCULATE_ENTRY_ADDRESS); | 638 ASSERT(mode == CALCULATE_ENTRY_ADDRESS); |
| 627 } | 639 } |
| 628 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); | 640 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); |
| 629 if (type == EAGER) { | 641 if (type == EAGER) { |
| 630 base = data->eager_deoptimization_entry_code_; | 642 base = data->eager_deoptimization_entry_code_; |
| 631 } else { | 643 } else { |
| 632 base = data->lazy_deoptimization_entry_code_; | 644 base = data->lazy_deoptimization_entry_code_; |
| 633 } | 645 } |
| 634 return | 646 return base->area_start() + (id * table_entry_size_); |
| 635 static_cast<Address>(base->address()) + (id * table_entry_size_); | |
| 636 } | 647 } |
| 637 | 648 |
| 638 | 649 |
| 639 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) { | 650 int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) { |
| 640 VirtualMemory* base = NULL; | 651 MemoryChunk* base = NULL; |
| 641 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); | 652 DeoptimizerData* data = Isolate::Current()->deoptimizer_data(); |
| 642 if (type == EAGER) { | 653 if (type == EAGER) { |
| 643 base = data->eager_deoptimization_entry_code_; | 654 base = data->eager_deoptimization_entry_code_; |
| 644 } else { | 655 } else { |
| 645 base = data->lazy_deoptimization_entry_code_; | 656 base = data->lazy_deoptimization_entry_code_; |
| 646 } | 657 } |
| 647 Address base_casted = reinterpret_cast<Address>(base->address()); | 658 if (base == NULL) return kNotDeoptimizationEntry; |
| 648 if (base == NULL || | 659 Address start = base->area_start(); |
| 649 addr < base->address() || | 660 if (addr < start || |
| 650 addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) { | 661 addr >= start + (kMaxNumberOfEntries * table_entry_size_)) { |
| 651 return kNotDeoptimizationEntry; | 662 return kNotDeoptimizationEntry; |
| 652 } | 663 } |
| 653 ASSERT_EQ(0, | 664 ASSERT_EQ(0, |
| 654 static_cast<int>(addr - base_casted) % table_entry_size_); | 665 static_cast<int>(addr - start) % table_entry_size_); |
| 655 return static_cast<int>(addr - base_casted) / table_entry_size_; | 666 return static_cast<int>(addr - start) / table_entry_size_; |
| 656 } | 667 } |
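These two hunks are inverses of each other: GetDeoptimizationEntry computes an entry's code address as area_start() + id * table_entry_size_, and GetDeoptimizationId maps an address back to its id after a bounds and alignment check. The switch from VirtualMemory::address() to MemoryChunk::area_start() matters because a chunk's usable (here executable) area begins past the chunk's own header. Below is a standalone sketch of the forward and inverse mapping plus a round-trip check, not V8 code; the constants are assumptions standing in for table_entry_size_ and kMaxNumberOfEntries.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Standalone model, not V8 code; constants are illustrative assumptions.
    const int kEntrySize = 12;
    const int kMaxEntries = 16384;
    const int kNotDeoptimizationEntry = -1;

    uint8_t* EntryAddress(uint8_t* area_start, int id) {
      return area_start + id * kEntrySize;            // fixed-size slots
    }

    int EntryId(uint8_t* area_start, uint8_t* addr) {
      if (area_start == NULL) return kNotDeoptimizationEntry;
      if (addr < area_start ||
          addr >= area_start + kMaxEntries * kEntrySize) {
        return kNotDeoptimizationEntry;               // outside the table
      }
      ptrdiff_t offset = addr - area_start;
      assert(offset % kEntrySize == 0);               // must be an entry boundary
      return static_cast<int>(offset / kEntrySize);
    }

    int main() {
      static uint8_t table[kMaxEntries * kEntrySize];
      for (int id = 0; id < kMaxEntries; id += 1000) {
        assert(EntryId(table, EntryAddress(table, id)) == id);  // round trip
      }
      return 0;
    }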
| 657 | 668 |
| 658 | 669 |
| 659 int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data, | 670 int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data, |
| 660 BailoutId id, | 671 BailoutId id, |
| 661 SharedFunctionInfo* shared) { | 672 SharedFunctionInfo* shared) { |
| 662 // TODO(kasperl): For now, we do a simple linear search for the PC | 673 // TODO(kasperl): For now, we do a simple linear search for the PC |
| 663 // offset associated with the given node id. This should probably be | 674 // offset associated with the given node id. This should probably be |
| 664 // changed to a binary search. | 675 // changed to a binary search. |
| 665 int length = data->DeoptPoints(); | 676 int length = data->DeoptPoints(); |
| (...skipping 898 matching lines...) |
| 1564 entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries); | 1575 entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries); |
| 1565 while (max_entry_id >= entry_count) entry_count *= 2; | 1576 while (max_entry_id >= entry_count) entry_count *= 2; |
| 1566 ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries); | 1577 ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries); |
| 1567 | 1578 |
| 1568 MacroAssembler masm(Isolate::Current(), NULL, 16 * KB); | 1579 MacroAssembler masm(Isolate::Current(), NULL, 16 * KB); |
| 1569 masm.set_emit_debug_code(false); | 1580 masm.set_emit_debug_code(false); |
| 1570 GenerateDeoptimizationEntries(&masm, entry_count, type); | 1581 GenerateDeoptimizationEntries(&masm, entry_count, type); |
| 1571 CodeDesc desc; | 1582 CodeDesc desc; |
| 1572 masm.GetCode(&desc); | 1583 masm.GetCode(&desc); |
| 1573 | 1584 |
| 1574 VirtualMemory* memory = type == EAGER | 1585 MemoryChunk* chunk = type == EAGER |
| 1575 ? data->eager_deoptimization_entry_code_ | 1586 ? data->eager_deoptimization_entry_code_ |
| 1576 : data->lazy_deoptimization_entry_code_; | 1587 : data->lazy_deoptimization_entry_code_; |
| 1577 size_t table_size = Deoptimizer::GetMaxDeoptTableSize(); | 1588 ASSERT(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >= |
| 1578 ASSERT(static_cast<int>(table_size) >= desc.instr_size); | 1589 desc.instr_size); |
| 1579 memory->Commit(memory->address(), table_size, true); | 1590 chunk->CommitArea(desc.instr_size); |
| 1580 memcpy(memory->address(), desc.buffer, desc.instr_size); | 1591 memcpy(chunk->area_start(), desc.buffer, desc.instr_size); |
| 1581 CPU::FlushICache(memory->address(), desc.instr_size); | 1592 CPU::FlushICache(chunk->area_start(), desc.instr_size); |
| 1582 | 1593 |
| 1583 if (type == EAGER) { | 1594 if (type == EAGER) { |
| 1584 data->eager_deoptimization_entry_code_entries_ = entry_count; | 1595 data->eager_deoptimization_entry_code_entries_ = entry_count; |
| 1585 } else { | 1596 } else { |
| 1586 data->lazy_deoptimization_entry_code_entries_ = entry_count; | 1597 data->lazy_deoptimization_entry_code_entries_ = entry_count; |
| 1587 } | 1598 } |
| 1588 } | 1599 } |
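In this hunk, EnsureCodeForDeoptimizationEntry grows the entry count to the next power of two that covers max_entry_id (never below kMinNumberOfEntries), assembles that many entries, and then commits only the bytes the generated code needs via chunk->CommitArea(desc.instr_size) before copying into area_start() and flushing the instruction cache, instead of committing the whole maximum table as the old code did. A standalone model of the sizing arithmetic, not V8 code; the constants are assumptions for illustration (the real instruction size comes from the assembler's CodeDesc).

    #include <cstdio>

    // Standalone model, not V8 code; constants are illustrative assumptions.
    const int kMinEntries = 64;    // stand-in for kMinNumberOfEntries
    const int kEntrySize = 12;     // stand-in for table_entry_size_
    const int kCommitPage = 4096;  // stand-in for the commit page size

    int GrowEntryCount(int current, int max_entry_id) {
      int count = current < kMinEntries ? kMinEntries : current;
      while (max_entry_id >= count) count *= 2;   // next power-of-two coverage
      return count;
    }

    int main() {
      int count = GrowEntryCount(0, 70);          // 64 -> 128
      int code_bytes = count * kEntrySize;        // rough size of the generated table
      int committed = ((code_bytes + kCommitPage - 1) / kCommitPage) * kCommitPage;
      printf("entries=%d, code=%d bytes, committed~%d bytes\n",
             count, code_bytes, committed);
      return 0;
    }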
| 1589 | 1600 |
| 1590 | 1601 |
| 1591 void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function, | 1602 void Deoptimizer::ReplaceCodeForRelatedFunctions(JSFunction* function, |
| (...skipping 522 matching lines...) |
| 2114 | 2125 |
| 2115 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { | 2126 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) { |
| 2116 v->VisitPointer(BitCast<Object**>(&function_)); | 2127 v->VisitPointer(BitCast<Object**>(&function_)); |
| 2117 v->VisitPointers(parameters_, parameters_ + parameters_count_); | 2128 v->VisitPointers(parameters_, parameters_ + parameters_count_); |
| 2118 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); | 2129 v->VisitPointers(expression_stack_, expression_stack_ + expression_count_); |
| 2119 } | 2130 } |
| 2120 | 2131 |
| 2121 #endif // ENABLE_DEBUGGER_SUPPORT | 2132 #endif // ENABLE_DEBUGGER_SUPPORT |
| 2122 | 2133 |
| 2123 } } // namespace v8::internal | 2134 } } // namespace v8::internal |