Chromium Code Reviews

Unified Diff: src/ia32/deoptimizer-ia32.cc

Issue 6334083: Streamline the code for patching optimized code for lazy deopt. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge/build/ia32
Patch Set: Rewrite the incorrect comment mentioning absolute calls. Created 9 years, 10 months ago
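The patch consolidates the per-safepoint patching and the debug-only code zapping into a ZapCodeRange helper plus a single loop that writes a call to the corresponding lazy deoptimization entry right after each safepoint, recording fresh relocation info because the call uses a relative encoding. The sketch below is a standalone model of that relative-call patching on ia32, not V8 code; the buffer, offset, and entry address are hypothetical stand-ins for what Code, SafepointTable, and GetDeoptimizationEntry provide in the real change.

// Standalone sketch, NOT V8 code: models how a 5-byte ia32 relative call
// (opcode 0xE8 + rel32) is written over the return site that follows a
// safepoint. Buffer, offset, and entry address are made-up placeholders.
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

const int kCallInstructionLength = 5;  // 0xE8 opcode + 4-byte displacement

void PatchLazyDeoptCall(uint8_t* code, size_t call_offset,
                        uintptr_t deopt_entry) {
  uint8_t* call_pc = code + call_offset;
  // The displacement is relative to the address of the next instruction,
  // i.e. the end of the 5-byte call.
  int32_t rel32 = static_cast<int32_t>(
      deopt_entry - (reinterpret_cast<uintptr_t>(call_pc) +
                     kCallInstructionLength));
  call_pc[0] = 0xE8;                    // call rel32
  std::memcpy(call_pc + 1, &rel32, 4);  // 4-byte relative displacement
}

int main() {
  std::vector<uint8_t> code(64, 0x90);  // fake code buffer filled with nops
  uintptr_t fake_deopt_entry = 0x1000;  // hypothetical deopt entry address
  PatchLazyDeoptCall(code.data(), 16, fake_deopt_entry);
  std::printf("patched opcode: 0x%02X\n",
              static_cast<unsigned>(code[16]));  // prints 0xE8
  return 0;
}

Because the displacement is relative to the patched address, a target that can move needs relocation info; that is why the loop in the patch writes a RUNTIME_ENTRY reloc entry for each inserted call.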
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 27 matching lines...)
 namespace internal {

 int Deoptimizer::table_entry_size_ = 10;


 int Deoptimizer::patch_size() {
   return Assembler::kCallInstructionLength;
 }


+static void ZapCodeRange(Address start, Address end) {
+#ifdef DEBUG
+  ASSERT(start <= end);
+  int size = end - start;
+  CodePatcher destroyer(start, size);
+  while (size-- > 0) destroyer.masm()->int3();
+#endif
+}
+
+
 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
   AssertNoAllocation no_allocation;

   if (!function->IsOptimized()) return;

   // Get the optimized code.
   Code* code = function->code();
-
-  // For each return after a safepoint insert a absolute call to the
-  // corresponding deoptimization entry.
-  unsigned last_pc_offset = 0;
-  SafepointTable table(function->code());
+  Address code_start_address = code->instruction_start();

   // We will overwrite the code's relocation info in-place. Relocation info
-  // is written backward. The relocation info is the payload of a byte array.
-  // Later on we will align this at the start of the byte array and create
-  // a trash byte array of the remaining space.
+  // is written backward. The relocation info is the payload of a byte
+  // array. Later on we will slide this to the start of the byte array and
+  // create a filler object in the remaining space.
   ByteArray* reloc_info = code->relocation_info();
-  Address end_address = reloc_info->address() + reloc_info->Size();
-  RelocInfoWriter reloc_info_writer(end_address, code->instruction_start());
+  Address reloc_end_address = reloc_info->address() + reloc_info->Size();
+  RelocInfoWriter reloc_info_writer(reloc_end_address, code_start_address);

-  for (unsigned i = 0; i < table.length(); i++) {
-    unsigned pc_offset = table.GetPcOffset(i);
+  // For each return after a safepoint insert a call to the corresponding
+  // deoptimization entry. Since the call is a relative encoding, write new
+  // reloc info. We do not need any of the existing reloc info because the
+  // existing code will not be used again (we zap it in debug builds).
+  SafepointTable table(code);
+  Address prev_address = code_start_address;
+  for (unsigned i = 0; i < table.length(); ++i) {
+    Address curr_address = code_start_address + table.GetPcOffset(i);
+    ZapCodeRange(prev_address, curr_address);
+
     SafepointEntry safepoint_entry = table.GetEntry(i);
     int deoptimization_index = safepoint_entry.deoptimization_index();
-    int gap_code_size = safepoint_entry.gap_code_size();
-#ifdef DEBUG
-    // Destroy the code which is not supposed to run again.
-    unsigned instructions = pc_offset - last_pc_offset;
-    CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                          instructions);
-    for (unsigned i = 0; i < instructions; i++) {
-      destroyer.masm()->int3();
-    }
-#endif
-    last_pc_offset = pc_offset;
     if (deoptimization_index != Safepoint::kNoDeoptimizationIndex) {
-      last_pc_offset += gap_code_size;
-      Address call_pc = code->instruction_start() + last_pc_offset;
-      CodePatcher patcher(call_pc, patch_size());
-      Address entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
-      patcher.masm()->call(entry, RelocInfo::NONE);
-      last_pc_offset += patch_size();
-      RelocInfo rinfo(call_pc + 1, RelocInfo::RUNTIME_ENTRY,
-                      reinterpret_cast<intptr_t>(entry));
-      reloc_info_writer.Write(&rinfo);
+      // The gap code is needed to get to the state expected at the bailout.
+      curr_address += safepoint_entry.gap_code_size();
+
+      CodePatcher patcher(curr_address, patch_size());
+      Address deopt_entry = GetDeoptimizationEntry(deoptimization_index, LAZY);
+      patcher.masm()->call(deopt_entry, RelocInfo::NONE);
+
+      RelocInfo rinfo(curr_address + 1,  // address is 1 after the call opcode
+                      RelocInfo::RUNTIME_ENTRY,
+                      reinterpret_cast<intptr_t>(deopt_entry));
+      reloc_info_writer.Write(&rinfo);
+
+      curr_address += patch_size();
     }
+    prev_address = curr_address;
   }
-#ifdef DEBUG
-  // Destroy the code which is not supposed to run again.
-  unsigned instructions = code->safepoint_table_start() - last_pc_offset;
-  CodePatcher destroyer(code->instruction_start() + last_pc_offset,
-                        instructions);
-  for (unsigned i = 0; i < instructions; i++) {
-    destroyer.masm()->int3();
-  }
-#endif
+  ZapCodeRange(prev_address,
+               code_start_address + code->safepoint_table_offset());

   // Move the relocation info to the beginning of the byte array.
-  int reloc_size = end_address - reloc_info_writer.pos();
-  memmove(code->relocation_start(), reloc_info_writer.pos(), reloc_size);
+  int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
+  memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);

   // The relocation info is in place, update the size.
-  reloc_info->set_length(reloc_size);
+  reloc_info->set_length(new_reloc_size);

   // Handle the junk part after the new relocation info. We will create
   // a non-live object in the extra space at the end of the former reloc info.
-  Address junk = reloc_info->address() + reloc_info->Size();
-  ASSERT(junk <= end_address);
-
-  if (end_address - junk <= ByteArray::kHeaderSize) {
-    // We get in here if there is not enough space for a ByteArray.
-
-    // Both addresses are kPointerSize alligned.
-    CHECK_EQ((end_address - junk) % 4, 0);
-    Map* filler_map = Heap::one_pointer_filler_map();
-    while (junk < end_address) {
-      HeapObject::FromAddress(junk)->set_map(filler_map);
-      junk += kPointerSize;
-    }
-  } else {
-    int size = end_address - junk;
-    // Since the reloc_end address and junk are both alligned, we shouild,
-    // never have junk which is not a multipla of kPointerSize.
-    CHECK_EQ(size % kPointerSize, 0);
-    CHECK_GT(size, 0);
-    HeapObject* junk_object = HeapObject::FromAddress(junk);
-    junk_object->set_map(Heap::byte_array_map());
-    int length = ByteArray::LengthFor(end_address - junk);
-    ByteArray::cast(junk_object)->set_length(length);
-  }
+  Address junk_address = reloc_info->address() + reloc_info->Size();
+  ASSERT(junk_address <= reloc_end_address);
+  Heap::CreateFillerObjectAt(junk_address, reloc_end_address - junk_address);

   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
   node->set_next(deoptimizing_code_list_);
   deoptimizing_code_list_ = node;

   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());

   if (FLAG_trace_deopt) {
(...skipping 521 matching lines...)
   }
   __ bind(&done);
 }

 #undef __


 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_IA32
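A second detail worth noting from the review: the replacement relocation info is written backward from the end of the code object's existing reloc ByteArray, slid to the front with memmove, and the leftover tail is handed to Heap::CreateFillerObjectAt, replacing the old hand-rolled one-pointer-filler / byte-array logic. Below is a minimal standalone model of the write-backward-then-slide step; it uses a plain byte buffer and fake entries rather than V8 types.

// Standalone sketch, NOT V8 code: write entries backward from the end of a
// fixed buffer, slide them to the front, and report the leftover tail (in
// V8 that leftover becomes a filler object via Heap::CreateFillerObjectAt).
// The buffer size and entry values are made up for illustration.
#include <cstdio>
#include <cstring>
#include <vector>

int main() {
  std::vector<unsigned char> buffer(32, 0);   // stands in for the ByteArray payload
  unsigned char* end = buffer.data() + buffer.size();
  unsigned char* pos = end;                   // the writer moves backward

  const unsigned char entries[] = {1, 2, 3};  // three fake reloc entries
  for (unsigned char entry : entries) {
    *--pos = entry;
  }

  size_t used = static_cast<size_t>(end - pos);  // bytes actually written
  std::memmove(buffer.data(), pos, used);        // slide to the front
  size_t leftover = buffer.size() - used;        // tail space for the filler

  std::printf("reloc info uses %zu bytes, %zu bytes left for the filler\n",
              used, leftover);
  return 0;
}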
