OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 30 matching lines...) Expand all Loading... |
41 int Deoptimizer::table_entry_size_ = 10; | 41 int Deoptimizer::table_entry_size_ = 10; |
42 | 42 |
43 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { | 43 void Deoptimizer::DeoptimizeFunction(JSFunction* function) { |
44 AssertNoAllocation no_allocation; | 44 AssertNoAllocation no_allocation; |
45 | 45 |
46 if (!function->IsOptimized()) return; | 46 if (!function->IsOptimized()) return; |
47 | 47 |
48 // Get the optimized code. | 48 // Get the optimized code. |
49 Code* code = function->code(); | 49 Code* code = function->code(); |
50 | 50 |
51 // Invalidate the relocation information, as it will become invalid by the | |
52 // code patching below, and is not needed any more. | |
53 code->InvalidateRelocation(); | |
54 | |
55 // For each return after a safepoint insert an absolute call to the | 51 // For each return after a safepoint insert an absolute call to the |
56 // corresponding deoptimization entry. | 52 // corresponding deoptimization entry. |
57 unsigned last_pc_offset = 0; | 53 unsigned last_pc_offset = 0; |
58 SafepointTable table(function->code()); | 54 SafepointTable table(function->code()); |
| 55 |
| 56 // We will overwrite the code's relocation info in-place. Relocation info |
| 57 // is written backward. The relocation info is the payload of a byte array. |
| 58 // Later on we will align this at the start of the byte array and create |
| 59 // a trash byte array of the remaining space. |
| 60 ByteArray* reloc_info = code->relocation_info(); |
| 61 Address end_address = reloc_info->address() + reloc_info->Size(); |
| 62 RelocInfoWriter reloc_info_writer(end_address, code->instruction_start()); |
| 63 |
59 for (unsigned i = 0; i < table.length(); i++) { | 64 for (unsigned i = 0; i < table.length(); i++) { |
60 unsigned pc_offset = table.GetPcOffset(i); | 65 unsigned pc_offset = table.GetPcOffset(i); |
61 SafepointEntry safepoint_entry = table.GetEntry(i); | 66 SafepointEntry safepoint_entry = table.GetEntry(i); |
62 int deoptimization_index = safepoint_entry.deoptimization_index(); | 67 int deoptimization_index = safepoint_entry.deoptimization_index(); |
63 int gap_code_size = safepoint_entry.gap_code_size(); | 68 int gap_code_size = safepoint_entry.gap_code_size(); |
64 #ifdef DEBUG | 69 #ifdef DEBUG |
65 // Destroy the code which is not supposed to run again. | 70 // Destroy the code which is not supposed to run again. |
66 unsigned instructions = pc_offset - last_pc_offset; | 71 unsigned instructions = pc_offset - last_pc_offset; |
67 CodePatcher destroyer(code->instruction_start() + last_pc_offset, | 72 CodePatcher destroyer(code->instruction_start() + last_pc_offset, |
68 instructions); | 73 instructions); |
69 for (unsigned i = 0; i < instructions; i++) { | 74 for (unsigned i = 0; i < instructions; i++) { |
70 destroyer.masm()->int3(); | 75 destroyer.masm()->int3(); |
71 } | 76 } |
72 #endif | 77 #endif |
73 last_pc_offset = pc_offset; | 78 last_pc_offset = pc_offset; |
74 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { | 79 if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) { |
75 CodePatcher patcher( | 80 Address call_pc = code->instruction_start() + pc_offset + gap_code_size; |
76 code->instruction_start() + pc_offset + gap_code_size, | 81 CodePatcher patcher(call_pc, Assembler::kCallInstructionLength); |
77 Assembler::kCallInstructionLength); | 82 Address entry = GetDeoptimizationEntry(deoptimization_index, LAZY); |
78 patcher.masm()->call(GetDeoptimizationEntry(deoptimization_index, LAZY), | 83 patcher.masm()->call(entry, RelocInfo::NONE); |
79 RelocInfo::NONE); | |
80 last_pc_offset += gap_code_size + Assembler::kCallInstructionLength; | 84 last_pc_offset += gap_code_size + Assembler::kCallInstructionLength; |
| 85 RelocInfo rinfo(call_pc + 1, RelocInfo::RUNTIME_ENTRY, |
| 86 reinterpret_cast<intptr_t>(entry)); |
| 87 reloc_info_writer.Write(&rinfo); |
81 } | 88 } |
82 } | 89 } |
83 #ifdef DEBUG | 90 #ifdef DEBUG |
84 // Destroy the code which is not supposed to run again. | 91 // Destroy the code which is not supposed to run again. |
85 unsigned instructions = code->safepoint_table_start() - last_pc_offset; | 92 unsigned instructions = code->safepoint_table_start() - last_pc_offset; |
86 CodePatcher destroyer(code->instruction_start() + last_pc_offset, | 93 CodePatcher destroyer(code->instruction_start() + last_pc_offset, |
87 instructions); | 94 instructions); |
88 for (unsigned i = 0; i < instructions; i++) { | 95 for (unsigned i = 0; i < instructions; i++) { |
89 destroyer.masm()->int3(); | 96 destroyer.masm()->int3(); |
90 } | 97 } |
91 #endif | 98 #endif |
92 | 99 |
| 100 // Move the relocation info to the beginning. |
| 101 int reloc_size = end_address - reloc_info_writer.pos(); |
| 102 memmove(code->relocation_start(), reloc_info_writer.pos(), reloc_size); |
| 103 |
| 104 // The relocation info is in place, update the size. |
| 105 reloc_info->set_length(reloc_size); |
| 106 |
| 107 // Handle the junk part after the new relocation info. We will create |
| 108 // a non-live object in the extra space at the end of the former reloc info. |
| 109 Address junk = reloc_info->address() + reloc_info->Size(); |
| 110 ASSERT(junk <= end_address); |
| 111 |
| 112 if (end_address - junk <= ByteArray::kHeaderSize) { |
| 113 // We get in here if there is not enough space for a ByteArray. |
| 114 |
| 115 // Both addresses are kPointerSize aligned. |
| 116 CHECK_EQ((end_address - junk) % 4, 0); |
| 117 Map* filler_map = Heap::one_pointer_filler_map(); |
| 118 while (junk < end_address) { |
| 119 HeapObject::FromAddress(junk)->set_map(filler_map); |
| 120 junk += kPointerSize; |
| 121 } |
| 122 } else { |
| 123 int size = end_address - junk; |
| 124 // Since the reloc_end address and junk are both aligned, we should |
| 125 // never have junk which is not a multiple of kPointerSize. |
| 126 CHECK_EQ(size % kPointerSize, 0); |
| 127 CHECK_GT(size, 0); |
| 128 HeapObject* junk_object = HeapObject::FromAddress(junk); |
| 129 junk_object->set_map(Heap::byte_array_map()); |
| 130 int length = ByteArray::LengthFor(end_address - junk); |
| 131 ByteArray::cast(junk_object)->set_length(length); |
| 132 } |
| 133 |
93 // Add the deoptimizing code to the list. | 134 // Add the deoptimizing code to the list. |
94 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); | 135 DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code); |
95 node->set_next(deoptimizing_code_list_); | 136 node->set_next(deoptimizing_code_list_); |
96 deoptimizing_code_list_ = node; | 137 deoptimizing_code_list_ = node; |
97 | 138 |
98 // Set the code for the function to non-optimized version. | 139 // Set the code for the function to non-optimized version. |
99 function->ReplaceCode(function->shared()->code()); | 140 function->ReplaceCode(function->shared()->code()); |
100 | 141 |
101 if (FLAG_trace_deopt) { | 142 if (FLAG_trace_deopt) { |
102 PrintF("[forced deoptimization: "); | 143 PrintF("[forced deoptimization: "); |
(...skipping 538 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
641 } | 682 } |
642 __ bind(&done); | 683 __ bind(&done); |
643 } | 684 } |
644 | 685 |
645 #undef __ | 686 #undef __ |
646 | 687 |
647 | 688 |
648 } } // namespace v8::internal | 689 } } // namespace v8::internal |
649 | 690 |
650 #endif // V8_TARGET_ARCH_IA32 | 691 #endif // V8_TARGET_ARCH_IA32 |
OLD | NEW |