OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/deoptimizer.h" | 8 #include "src/deoptimizer.h" |
9 #include "src/full-codegen.h" | 9 #include "src/full-codegen.h" |
10 #include "src/safepoint-table.h" | 10 #include "src/safepoint-table.h" |
11 | 11 |
12 namespace v8 { | 12 namespace v8 { |
13 namespace internal { | 13 namespace internal { |
14 | 14 |
15 const int Deoptimizer::table_entry_size_ = 8; | 15 const int Deoptimizer::table_entry_size_ = 8; |
16 | 16 |
17 | 17 |
18 int Deoptimizer::patch_size() { | 18 int Deoptimizer::patch_size() { |
19 const int kCallInstructionSizeInWords = 3; | 19 #if V8_TARGET_ARCH_PPC64 |
| 20 const int kCallInstructionSizeInWords = 7; |
| 21 #else |
| 22 const int kCallInstructionSizeInWords = 4; |
| 23 #endif |
20 return kCallInstructionSizeInWords * Assembler::kInstrSize; | 24 return kCallInstructionSizeInWords * Assembler::kInstrSize; |
21 } | 25 } |
22 | 26 |
23 | 27 |
24 void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { | 28 void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { |
25 Address code_start_address = code->instruction_start(); | 29 Address code_start_address = code->instruction_start(); |
| 30 |
26 // Invalidate the relocation information, as it will become invalid by the | 31 // Invalidate the relocation information, as it will become invalid by the |
27 // code patching below, and is not needed any more. | 32 // code patching below, and is not needed any more. |
28 code->InvalidateRelocation(); | 33 code->InvalidateRelocation(); |
29 | 34 |
30 if (FLAG_zap_code_space) { | 35 if (FLAG_zap_code_space) { |
31 // Fail hard and early if we enter this code object again. | 36 // Fail hard and early if we enter this code object again. |
32 byte* pointer = code->FindCodeAgeSequence(); | 37 byte* pointer = code->FindCodeAgeSequence(); |
33 if (pointer != NULL) { | 38 if (pointer != NULL) { |
34 pointer += kNoCodeAgeSequenceLength; | 39 pointer += kNoCodeAgeSequenceLength; |
35 } else { | 40 } else { |
(... 17 unchanged lines skipped between OLD line 35 and OLD line 53 ...) |
53 Address prev_call_address = NULL; | 58 Address prev_call_address = NULL; |
54 #endif | 59 #endif |
55 // For each LLazyBailout instruction insert a call to the corresponding | 60 // For each LLazyBailout instruction insert a call to the corresponding |
56 // deoptimization entry. | 61 // deoptimization entry. |
57 for (int i = 0; i < deopt_data->DeoptCount(); i++) { | 62 for (int i = 0; i < deopt_data->DeoptCount(); i++) { |
58 if (deopt_data->Pc(i)->value() == -1) continue; | 63 if (deopt_data->Pc(i)->value() == -1) continue; |
59 Address call_address = code_start_address + deopt_data->Pc(i)->value(); | 64 Address call_address = code_start_address + deopt_data->Pc(i)->value(); |
60 Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); | 65 Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); |
61 // We need calls to have a predictable size in the unoptimized code, but | 66 // We need calls to have a predictable size in the unoptimized code, but |
62 // this is optimized code, so we don't have to have a predictable size. | 67 // this is optimized code, so we don't have to have a predictable size. |
63 int call_size_in_bytes = | 68 int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize( |
64 MacroAssembler::CallSizeNotPredictableCodeSize(isolate, | 69 deopt_entry, kRelocInfo_NONEPTR); |
65 deopt_entry, | |
66 RelocInfo::NONE32); | |
67 int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; | 70 int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; |
68 DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0); | 71 DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0); |
69 DCHECK(call_size_in_bytes <= patch_size()); | 72 DCHECK(call_size_in_bytes <= patch_size()); |
70 CodePatcher patcher(call_address, call_size_in_words); | 73 CodePatcher patcher(call_address, call_size_in_words); |
71 patcher.masm()->Call(deopt_entry, RelocInfo::NONE32); | 74 patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR); |
72 DCHECK(prev_call_address == NULL || | 75 DCHECK(prev_call_address == NULL || |
73 call_address >= prev_call_address + patch_size()); | 76 call_address >= prev_call_address + patch_size()); |
74 DCHECK(call_address + patch_size() <= code->instruction_end()); | 77 DCHECK(call_address + patch_size() <= code->instruction_end()); |
75 #ifdef DEBUG | 78 #ifdef DEBUG |
76 prev_call_address = call_address; | 79 prev_call_address = call_address; |
77 #endif | 80 #endif |
78 } | 81 } |
79 } | 82 } |
80 | 83 |
81 | 84 |
82 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { | 85 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { |
83 // Set the register values. The values are not important as there are no | 86 // Set the register values. The values are not important as there are no |
84 // callee saved registers in JavaScript frames, so all registers are | 87 // callee saved registers in JavaScript frames, so all registers are |
85 // spilled. Registers fp and sp are set to the correct values though. | 88 // spilled. Registers fp and sp are set to the correct values though. |
86 | 89 |
87 for (int i = 0; i < Register::kNumRegisters; i++) { | 90 for (int i = 0; i < Register::kNumRegisters; i++) { |
88 input_->SetRegister(i, i * 4); | 91 input_->SetRegister(i, i * 4); |
89 } | 92 } |
90 input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp())); | 93 input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp())); |
91 input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp())); | 94 input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp())); |
92 for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { | 95 for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { |
93 input_->SetDoubleRegister(i, 0.0); | 96 input_->SetDoubleRegister(i, 0.0); |
94 } | 97 } |
95 | 98 |
96 // Fill the frame content from the actual data on the frame. | 99 // Fill the frame content from the actual data on the frame. |
97 for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) { | 100 for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) { |
98 input_->SetFrameSlot(i, Memory::uint32_at(tos + i)); | 101 input_->SetFrameSlot( |
| 102 i, reinterpret_cast<intptr_t>(Memory::Address_at(tos + i))); |
99 } | 103 } |
100 } | 104 } |
101 | 105 |
102 | 106 |
103 void Deoptimizer::SetPlatformCompiledStubRegisters( | 107 void Deoptimizer::SetPlatformCompiledStubRegisters( |
104 FrameDescription* output_frame, CodeStubDescriptor* descriptor) { | 108 FrameDescription* output_frame, CodeStubDescriptor* descriptor) { |
105 ApiFunction function(descriptor->deoptimization_handler()); | 109 ApiFunction function(descriptor->deoptimization_handler()); |
106 ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); | 110 ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); |
107 intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); | 111 intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); |
108 int params = descriptor->GetHandlerParameterCount(); | 112 int params = descriptor->GetHandlerParameterCount(); |
109 output_frame->SetRegister(r0.code(), params); | 113 output_frame->SetRegister(r3.code(), params); |
110 output_frame->SetRegister(r1.code(), handler); | 114 output_frame->SetRegister(r4.code(), handler); |
111 } | 115 } |
112 | 116 |
113 | 117 |
114 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { | 118 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { |
115 for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) { | 119 for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) { |
116 double double_value = input_->GetDoubleRegister(i); | 120 double double_value = input_->GetDoubleRegister(i); |
117 output_frame->SetDoubleRegister(i, double_value); | 121 output_frame->SetDoubleRegister(i, double_value); |
118 } | 122 } |
119 } | 123 } |
120 | 124 |
121 | 125 |
122 bool Deoptimizer::HasAlignmentPadding(JSFunction* function) { | 126 bool Deoptimizer::HasAlignmentPadding(JSFunction* function) { |
123 // There is no dynamic alignment padding on ARM in the input frame. | 127 // There is no dynamic alignment padding on PPC in the input frame. |
124 return false; | 128 return false; |
125 } | 129 } |
126 | 130 |
127 | 131 |
128 #define __ masm()-> | 132 #define __ masm()-> |
129 | 133 |
130 // This code tries to be close to ia32 code so that any changes can be | 134 // This code tries to be close to ia32 code so that any changes can be |
131 // easily ported. | 135 // easily ported. |
132 void Deoptimizer::EntryGenerator::Generate() { | 136 void Deoptimizer::EntryGenerator::Generate() { |
133 GeneratePrologue(); | 137 GeneratePrologue(); |
134 | 138 |
135 // Save all general purpose registers before messing with them. | 139 // Unlike on ARM we don't save all the registers, just the useful ones. |
| 140 // For the rest, there are gaps on the stack, so the offsets remain the same. |
136 const int kNumberOfRegisters = Register::kNumRegisters; | 141 const int kNumberOfRegisters = Register::kNumRegisters; |
137 | 142 |
138 // Everything but pc, lr and ip which will be saved but not restored. | 143 RegList restored_regs = kJSCallerSaved | kCalleeSaved; |
139 RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit(); | 144 RegList saved_regs = restored_regs | sp.bit(); |
140 | 145 |
141 const int kDoubleRegsSize = | 146 const int kDoubleRegsSize = |
142 kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters; | 147 kDoubleSize * DoubleRegister::kMaxNumAllocatableRegisters; |
143 | 148 |
144 // Save all allocatable VFP registers before messing with them. | 149 // Save all FPU registers before messing with them. |
145 DCHECK(kDoubleRegZero.code() == 14); | 150 __ subi(sp, sp, Operand(kDoubleRegsSize)); |
146 DCHECK(kScratchDoubleReg.code() == 15); | 151 for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) { |
| 152 DoubleRegister fpu_reg = DoubleRegister::FromAllocationIndex(i); |
| 153 int offset = i * kDoubleSize; |
| 154 __ stfd(fpu_reg, MemOperand(sp, offset)); |
| 155 } |
147 | 156 |
148 // Check CPU flags for number of registers, setting the Z condition flag. | 157 // Push saved_regs (needed to populate FrameDescription::registers_). |
149 __ CheckFor32DRegs(ip); | 158 // Leave gaps for other registers. |
150 | 159 __ subi(sp, sp, Operand(kNumberOfRegisters * kPointerSize)); |
151 // Push registers d0-d13, and possibly d16-d31, on the stack. | 160 for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) { |
152 // If d16-d31 are not pushed, decrease the stack pointer instead. | 161 if ((saved_regs & (1 << i)) != 0) { |
153 __ vstm(db_w, sp, d16, d31, ne); | 162 __ StoreP(ToRegister(i), MemOperand(sp, kPointerSize * i)); |
154 __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); | 163 } |
155 __ vstm(db_w, sp, d0, d13); | 164 } |
156 | |
157 // Push all 16 registers (needed to populate FrameDescription::registers_). | |
158 // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps | |
159 // handle this a bit differently. | |
160 __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit()); | |
161 | 165 |
162 const int kSavedRegistersAreaSize = | 166 const int kSavedRegistersAreaSize = |
163 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; | 167 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; |
164 | 168 |
165 // Get the bailout id from the stack. | 169 // Get the bailout id from the stack. |
166 __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize)); | 170 __ LoadP(r5, MemOperand(sp, kSavedRegistersAreaSize)); |
167 | 171 |
168 // Get the address of the location in the code object (r3) (return | 172 // Get the address of the location in the code object (r6) (return |
169 // address for lazy deoptimization) and compute the fp-to-sp delta in | 173 // address for lazy deoptimization) and compute the fp-to-sp delta in |
170 // register r4. | 174 // register r7. |
171 __ mov(r3, lr); | 175 __ mflr(r6); |
172 // Correct one word for bailout id. | 176 // Correct one word for bailout id. |
173 __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | 177 __ addi(r7, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); |
174 __ sub(r4, fp, r4); | 178 __ sub(r7, fp, r7); |
175 | 179 |
176 // Allocate a new deoptimizer object. | 180 // Allocate a new deoptimizer object. |
177 // Pass four arguments in r0 to r3 and fifth argument on stack. | 181 // Pass six arguments in r3 to r8. |
178 __ PrepareCallCFunction(6, r5); | 182 __ PrepareCallCFunction(6, r8); |
179 __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 183 __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
180 __ mov(r1, Operand(type())); // bailout type, | 184 __ li(r4, Operand(type())); // bailout type, |
181 // r2: bailout id already loaded. | 185 // r5: bailout id already loaded. |
182 // r3: code address or 0 already loaded. | 186 // r6: code address or 0 already loaded. |
183 __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta. | 187 // r7: Fp-to-sp delta. |
184 __ mov(r5, Operand(ExternalReference::isolate_address(isolate()))); | 188 __ mov(r8, Operand(ExternalReference::isolate_address(isolate()))); |
185 __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate. | |
186 // Call Deoptimizer::New(). | 189 // Call Deoptimizer::New(). |
187 { | 190 { |
188 AllowExternalCallThatCantCauseGC scope(masm()); | 191 AllowExternalCallThatCantCauseGC scope(masm()); |
189 __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); | 192 __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); |
190 } | 193 } |
191 | 194 |
192 // Preserve "deoptimizer" object in register r0 and get the input | 195 // Preserve "deoptimizer" object in register r3 and get the input |
193 // frame descriptor pointer to r1 (deoptimizer->input_); | 196 // frame descriptor pointer to r4 (deoptimizer->input_); |
194 __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset())); | 197 __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset())); |
195 | 198 |
196 // Copy core registers into FrameDescription::registers_[kNumRegisters]. | 199 // Copy core registers into FrameDescription::registers_[kNumRegisters]. |
197 DCHECK(Register::kNumRegisters == kNumberOfRegisters); | 200 DCHECK(Register::kNumRegisters == kNumberOfRegisters); |
198 for (int i = 0; i < kNumberOfRegisters; i++) { | 201 for (int i = 0; i < kNumberOfRegisters; i++) { |
199 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); | 202 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); |
200 __ ldr(r2, MemOperand(sp, i * kPointerSize)); | 203 __ LoadP(r5, MemOperand(sp, i * kPointerSize)); |
201 __ str(r2, MemOperand(r1, offset)); | 204 __ StoreP(r5, MemOperand(r4, offset)); |
202 } | 205 } |
203 | 206 |
| 207 int double_regs_offset = FrameDescription::double_registers_offset(); |
204 // Copy VFP registers to | 208 // Copy VFP registers to |
205 // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters] | 209 // double_registers_[DoubleRegister::kNumAllocatableRegisters] |
206 int double_regs_offset = FrameDescription::double_registers_offset(); | 210 for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { |
207 for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) { | |
208 int dst_offset = i * kDoubleSize + double_regs_offset; | 211 int dst_offset = i * kDoubleSize + double_regs_offset; |
209 int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; | 212 int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; |
210 __ vldr(d0, sp, src_offset); | 213 __ lfd(d0, MemOperand(sp, src_offset)); |
211 __ vstr(d0, r1, dst_offset); | 214 __ stfd(d0, MemOperand(r4, dst_offset)); |
212 } | 215 } |
213 | 216 |
214 // Remove the bailout id and the saved registers from the stack. | 217 // Remove the bailout id and the saved registers from the stack. |
215 __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | 218 __ addi(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); |
216 | 219 |
217 // Compute a pointer to the unwinding limit in register r2; that is | 220 // Compute a pointer to the unwinding limit in register r5; that is |
218 // the first stack slot not part of the input frame. | 221 // the first stack slot not part of the input frame. |
219 __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset())); | 222 __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset())); |
220 __ add(r2, r2, sp); | 223 __ add(r5, r5, sp); |
221 | 224 |
222 // Unwind the stack down to - but not including - the unwinding | 225 // Unwind the stack down to - but not including - the unwinding |
223 // limit and copy the contents of the activation frame to the input | 226 // limit and copy the contents of the activation frame to the input |
224 // frame description. | 227 // frame description. |
225 __ add(r3, r1, Operand(FrameDescription::frame_content_offset())); | 228 __ addi(r6, r4, Operand(FrameDescription::frame_content_offset())); |
226 Label pop_loop; | 229 Label pop_loop; |
227 Label pop_loop_header; | 230 Label pop_loop_header; |
228 __ b(&pop_loop_header); | 231 __ b(&pop_loop_header); |
229 __ bind(&pop_loop); | 232 __ bind(&pop_loop); |
230 __ pop(r4); | 233 __ pop(r7); |
231 __ str(r4, MemOperand(r3, 0)); | 234 __ StoreP(r7, MemOperand(r6, 0)); |
232 __ add(r3, r3, Operand(sizeof(uint32_t))); | 235 __ addi(r6, r6, Operand(kPointerSize)); |
233 __ bind(&pop_loop_header); | 236 __ bind(&pop_loop_header); |
234 __ cmp(r2, sp); | 237 __ cmp(r5, sp); |
235 __ b(ne, &pop_loop); | 238 __ bne(&pop_loop); |
236 | 239 |
237 // Compute the output frame in the deoptimizer. | 240 // Compute the output frame in the deoptimizer. |
238 __ push(r0); // Preserve deoptimizer object across call. | 241 __ push(r3); // Preserve deoptimizer object across call. |
239 // r0: deoptimizer object; r1: scratch. | 242 // r3: deoptimizer object; r4: scratch. |
240 __ PrepareCallCFunction(1, r1); | 243 __ PrepareCallCFunction(1, r4); |
241 // Call Deoptimizer::ComputeOutputFrames(). | 244 // Call Deoptimizer::ComputeOutputFrames(). |
242 { | 245 { |
243 AllowExternalCallThatCantCauseGC scope(masm()); | 246 AllowExternalCallThatCantCauseGC scope(masm()); |
244 __ CallCFunction( | 247 __ CallCFunction( |
245 ExternalReference::compute_output_frames_function(isolate()), 1); | 248 ExternalReference::compute_output_frames_function(isolate()), 1); |
246 } | 249 } |
247 __ pop(r0); // Restore deoptimizer object (class Deoptimizer). | 250 __ pop(r3); // Restore deoptimizer object (class Deoptimizer). |
248 | 251 |
249 // Replace the current (input) frame with the output frames. | 252 // Replace the current (input) frame with the output frames. |
250 Label outer_push_loop, inner_push_loop, | 253 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; |
251 outer_loop_header, inner_loop_header; | 254 // Outer loop state: r7 = current "FrameDescription** output_", |
252 // Outer loop state: r4 = current "FrameDescription** output_", | 255 // r4 = one past the last FrameDescription**. |
253 // r1 = one past the last FrameDescription**. | 256 __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset())); |
254 __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset())); | 257 __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_. |
255 __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_. | 258 __ ShiftLeftImm(r4, r4, Operand(kPointerSizeLog2)); |
256 __ add(r1, r4, Operand(r1, LSL, 2)); | 259 __ add(r4, r7, r4); |
257 __ jmp(&outer_loop_header); | 260 __ b(&outer_loop_header); |
| 261 |
258 __ bind(&outer_push_loop); | 262 __ bind(&outer_push_loop); |
259 // Inner loop state: r2 = current FrameDescription*, r3 = loop index. | 263 // Inner loop state: r5 = current FrameDescription*, r6 = loop index. |
260 __ ldr(r2, MemOperand(r4, 0)); // output_[ix] | 264 __ LoadP(r5, MemOperand(r7, 0)); // output_[ix] |
261 __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset())); | 265 __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset())); |
262 __ jmp(&inner_loop_header); | 266 __ b(&inner_loop_header); |
| 267 |
263 __ bind(&inner_push_loop); | 268 __ bind(&inner_push_loop); |
264 __ sub(r3, r3, Operand(sizeof(uint32_t))); | 269 __ addi(r6, r6, Operand(-sizeof(intptr_t))); |
265 __ add(r6, r2, Operand(r3)); | 270 __ add(r9, r5, r6); |
266 __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset())); | 271 __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset())); |
267 __ push(r6); | 272 __ push(r9); |
| 273 |
268 __ bind(&inner_loop_header); | 274 __ bind(&inner_loop_header); |
269 __ cmp(r3, Operand::Zero()); | 275 __ cmpi(r6, Operand::Zero()); |
270 __ b(ne, &inner_push_loop); // test for gt? | 276 __ bne(&inner_push_loop); // test for gt? |
271 __ add(r4, r4, Operand(kPointerSize)); | 277 |
| 278 __ addi(r7, r7, Operand(kPointerSize)); |
272 __ bind(&outer_loop_header); | 279 __ bind(&outer_loop_header); |
273 __ cmp(r4, r1); | 280 __ cmp(r7, r4); |
274 __ b(lt, &outer_push_loop); | 281 __ blt(&outer_push_loop); |
275 | 282 |
276 // Check CPU flags for number of registers, setting the Z condition flag. | 283 __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset())); |
277 __ CheckFor32DRegs(ip); | 284 for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) { |
278 | 285 const DoubleRegister dreg = DoubleRegister::FromAllocationIndex(i); |
279 __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset())); | 286 int src_offset = i * kDoubleSize + double_regs_offset; |
280 int src_offset = FrameDescription::double_registers_offset(); | 287 __ lfd(dreg, MemOperand(r4, src_offset)); |
281 for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) { | |
282 if (i == kDoubleRegZero.code()) continue; | |
283 if (i == kScratchDoubleReg.code()) continue; | |
284 | |
285 const DwVfpRegister reg = DwVfpRegister::from_code(i); | |
286 __ vldr(reg, r1, src_offset, i < 16 ? al : ne); | |
287 src_offset += kDoubleSize; | |
288 } | 288 } |
289 | 289 |
290 // Push state, pc, and continuation from the last output frame. | 290 // Push state, pc, and continuation from the last output frame. |
291 __ ldr(r6, MemOperand(r2, FrameDescription::state_offset())); | 291 __ LoadP(r9, MemOperand(r5, FrameDescription::state_offset())); |
292 __ push(r6); | 292 __ push(r9); |
293 __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset())); | 293 __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset())); |
294 __ push(r6); | 294 __ push(r9); |
295 __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset())); | 295 __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset())); |
296 __ push(r6); | 296 __ push(r9); |
297 | 297 |
298 // Push the registers from the last output frame. | 298 // Restore the registers from the last output frame. |
| 299 DCHECK(!(ip.bit() & restored_regs)); |
| 300 __ mr(ip, r5); |
299 for (int i = kNumberOfRegisters - 1; i >= 0; i--) { | 301 for (int i = kNumberOfRegisters - 1; i >= 0; i--) { |
300 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); | 302 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); |
301 __ ldr(r6, MemOperand(r2, offset)); | 303 if ((restored_regs & (1 << i)) != 0) { |
302 __ push(r6); | 304 __ LoadP(ToRegister(i), MemOperand(ip, offset)); |
| 305 } |
303 } | 306 } |
304 | 307 |
305 // Restore the registers from the stack. | |
306 __ ldm(ia_w, sp, restored_regs); // all but pc registers. | |
307 __ pop(ip); // remove sp | |
308 __ pop(ip); // remove lr | |
309 | |
310 __ InitializeRootRegister(); | 308 __ InitializeRootRegister(); |
311 | 309 |
312 __ pop(ip); // remove pc | |
313 __ pop(ip); // get continuation, leave pc on stack | 310 __ pop(ip); // get continuation, leave pc on stack |
314 __ pop(lr); | 311 __ pop(r0); |
| 312 __ mtlr(r0); |
315 __ Jump(ip); | 313 __ Jump(ip); |
316 __ stop("Unreachable."); | 314 __ stop("Unreachable."); |
317 } | 315 } |
318 | 316 |
319 | 317 |
320 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { | 318 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { |
| 319 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); |
| 320 |
321 // Create a sequence of deoptimization entries. | 321 // Create a sequence of deoptimization entries. |
322 // Note that registers are still live when jumping to an entry. | 322 // Note that registers are still live when jumping to an entry. |
323 Label done; | 323 Label done; |
324 for (int i = 0; i < count(); i++) { | 324 for (int i = 0; i < count(); i++) { |
325 int start = masm()->pc_offset(); | 325 int start = masm()->pc_offset(); |
326 USE(start); | 326 USE(start); |
327 __ mov(ip, Operand(i)); | 327 __ li(ip, Operand(i)); |
328 __ b(&done); | 328 __ b(&done); |
329 DCHECK(masm()->pc_offset() - start == table_entry_size_); | 329 DCHECK(masm()->pc_offset() - start == table_entry_size_); |
330 } | 330 } |
331 __ bind(&done); | 331 __ bind(&done); |
332 __ push(ip); | 332 __ push(ip); |
333 } | 333 } |
334 | 334 |
335 | 335 |
336 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { | 336 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { |
337 SetFrameSlot(offset, value); | 337 SetFrameSlot(offset, value); |
338 } | 338 } |
339 | 339 |
340 | 340 |
341 void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { | 341 void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { |
342 SetFrameSlot(offset, value); | 342 SetFrameSlot(offset, value); |
343 } | 343 } |
344 | 344 |
345 | 345 |
346 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { | 346 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { |
| 347 #if V8_OOL_CONSTANT_POOL |
347 DCHECK(FLAG_enable_ool_constant_pool); | 348 DCHECK(FLAG_enable_ool_constant_pool); |
348 SetFrameSlot(offset, value); | 349 SetFrameSlot(offset, value); |
| 350 #else |
| 351 // No out-of-line constant pool support. |
| 352 UNREACHABLE(); |
| 353 #endif |
349 } | 354 } |
350 | 355 |
351 | 356 |
352 #undef __ | 357 #undef __ |
353 | 358 } |
354 } } // namespace v8::internal | 359 } // namespace v8::internal |
OLD | NEW |