| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // |
| 3 // Copyright IBM Corp. 2012, 2013. All rights reserved. |
| 4 // |
| 2 // Use of this source code is governed by a BSD-style license that can be | 5 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 6 // found in the LICENSE file. |
| 4 | 7 |
| 5 #include "src/v8.h" | 8 #include "src/v8.h" |
| 6 | 9 |
| 7 #include "src/codegen.h" | 10 #include "src/codegen.h" |
| 8 #include "src/deoptimizer.h" | 11 #include "src/deoptimizer.h" |
| 9 #include "src/full-codegen.h" | 12 #include "src/full-codegen.h" |
| 10 #include "src/safepoint-table.h" | 13 #include "src/safepoint-table.h" |
| 11 | 14 |
| 12 namespace v8 { | 15 namespace v8 { |
| 13 namespace internal { | 16 namespace internal { |
| 14 | 17 |
| 15 const int Deoptimizer::table_entry_size_ = 8; | 18 const int Deoptimizer::table_entry_size_ = 8; |
| 16 | 19 |
| 17 | 20 |
| 18 int Deoptimizer::patch_size() { | 21 int Deoptimizer::patch_size() { |
| 19 const int kCallInstructionSizeInWords = 3; | 22 #if V8_TARGET_ARCH_PPC64 |
| 23 const int kCallInstructionSizeInWords = 7; |
| 24 #else |
| 25 const int kCallInstructionSizeInWords = 4; |
| 26 #endif |
| 20 return kCallInstructionSizeInWords * Assembler::kInstrSize; | 27 return kCallInstructionSizeInWords * Assembler::kInstrSize; |
| 21 } | 28 } |
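
A note on the new patch_size(): every PowerPC instruction is 4 bytes (Assembler::kInstrSize == 4), and materializing a 64-bit call target typically takes a 5-instruction immediate-load sequence plus mtctr/bctrl, versus lis/ori plus mtctr/bctrl for a 32-bit target. A minimal standalone sketch of the same arithmetic (the instruction counts come from the diff; everything else here is illustrative):

    #include <cstdio>

    constexpr int kInstrSize = 4;  // all PowerPC instructions are 4 bytes

    constexpr int PatchSize(bool is_ppc64) {
      // PPC64: 5-instruction 64-bit address materialization + mtctr + bctrl.
      // PPC32: lis/ori address materialization + mtctr + bctrl.
      const int call_instructions = is_ppc64 ? 7 : 4;
      return call_instructions * kInstrSize;
    }

    int main() {
      std::printf("PPC64 patch area: %d bytes\n", PatchSize(true));   // 28
      std::printf("PPC32 patch area: %d bytes\n", PatchSize(false));  // 16
    }
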
| 22 | 29 |
| 23 | 30 |
| 24 void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { | 31 void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { |
| 25 Address code_start_address = code->instruction_start(); | 32 Address code_start_address = code->instruction_start(); |
| 33 |
| 26 // Invalidate the relocation information, as it will become stale after | 34 // Invalidate the relocation information, as it will become stale after |
| 27 // the code patching below and is no longer needed. | 35 // the code patching below and is no longer needed. |
| 28 code->InvalidateRelocation(); | 36 code->InvalidateRelocation(); |
| 29 | 37 |
| 30 if (FLAG_zap_code_space) { | 38 if (FLAG_zap_code_space) { |
| 31 // Fail hard and early if we enter this code object again. | 39 // Fail hard and early if we enter this code object again. |
| 32 byte* pointer = code->FindCodeAgeSequence(); | 40 byte* pointer = code->FindCodeAgeSequence(); |
| 33 if (pointer != NULL) { | 41 if (pointer != NULL) { |
| 34 pointer += kNoCodeAgeSequenceLength; | 42 pointer += kNoCodeAgeSequenceLength; |
| 35 } else { | 43 } else { |
| (...skipping 17 matching lines...) |
| 53 Address prev_call_address = NULL; | 61 Address prev_call_address = NULL; |
| 54 #endif | 62 #endif |
| 55 // For each LLazyBailout instruction, insert a call to the corresponding | 63 // For each LLazyBailout instruction, insert a call to the corresponding |
| 56 // deoptimization entry. | 64 // deoptimization entry. |
| 57 for (int i = 0; i < deopt_data->DeoptCount(); i++) { | 65 for (int i = 0; i < deopt_data->DeoptCount(); i++) { |
| 58 if (deopt_data->Pc(i)->value() == -1) continue; | 66 if (deopt_data->Pc(i)->value() == -1) continue; |
| 59 Address call_address = code_start_address + deopt_data->Pc(i)->value(); | 67 Address call_address = code_start_address + deopt_data->Pc(i)->value(); |
| 60 Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); | 68 Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); |
| 61 // We need calls to have a predictable size in the unoptimized code, but | 69 // We need calls to have a predictable size in the unoptimized code, but |
| 62 // this is optimized code, so we don't have to have a predictable size. | 70 // this is optimized code, so we don't have to have a predictable size. |
| 63 int call_size_in_bytes = | 71 int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize( |
| 64 MacroAssembler::CallSizeNotPredictableCodeSize(isolate, | 72 deopt_entry, kRelocInfo_NONEPTR); |
| 65 deopt_entry, | |
| 66 RelocInfo::NONE32); | |
| 67 int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; | 73 int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; |
| 68 DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0); | 74 DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0); |
| 69 DCHECK(call_size_in_bytes <= patch_size()); | 75 DCHECK(call_size_in_bytes <= patch_size()); |
| 70 CodePatcher patcher(call_address, call_size_in_words); | 76 CodePatcher patcher(call_address, call_size_in_words); |
| 71 patcher.masm()->Call(deopt_entry, RelocInfo::NONE32); | 77 patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR); |
| 72 DCHECK(prev_call_address == NULL || | 78 DCHECK(prev_call_address == NULL || |
| 73 call_address >= prev_call_address + patch_size()); | 79 call_address >= prev_call_address + patch_size()); |
| 74 DCHECK(call_address + patch_size() <= code->instruction_end()); | 80 DCHECK(call_address + patch_size() <= code->instruction_end()); |
| 75 #ifdef DEBUG | 81 #ifdef DEBUG |
| 76 prev_call_address = call_address; | 82 prev_call_address = call_address; |
| 77 #endif | 83 #endif |
| 78 } | 84 } |
| 79 } | 85 } |
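
The three DCHECKs above encode an invariant worth stating explicitly: lazy-bailout call sites appear in ascending pc order, each patch must be at least patch_size() past the previous one, and the last patch must still end inside the code object. A hypothetical standalone checker for the same property (names are illustrative, not V8 API):

    #include <cstdint>
    #include <vector>

    // Returns true if no patch site overlaps another and every site fits
    // inside the instruction area; pc_offsets is assumed ascending, as the
    // deopt data's Pc() entries are.
    bool PatchSitesAreValid(const std::vector<uint32_t>& pc_offsets,
                            uint32_t patch_size, uint32_t code_size) {
      uint32_t prev_end = 0;
      for (uint32_t pc : pc_offsets) {
        if (pc < prev_end) return false;                // overlapping patches
        if (pc + patch_size > code_size) return false;  // runs off the end
        prev_end = pc + patch_size;
      }
      return true;
    }
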
| 80 | 86 |
| 81 | 87 |
| 82 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { | 88 void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) { |
| 83 // Set the register values. The values are not important as there are no | 89 // Set the register values. The values are not important as there are no |
| 84 // callee-saved registers in JavaScript frames, so all registers are | 90 // callee-saved registers in JavaScript frames, so all registers are |
| 85 // spilled. Registers fp and sp are set to the correct values, though. | 91 // spilled. Registers fp and sp are set to the correct values, though. |
| 86 | 92 |
| 87 for (int i = 0; i < Register::kNumRegisters; i++) { | 93 for (int i = 0; i < Register::kNumRegisters; i++) { |
| 88 input_->SetRegister(i, i * 4); | 94 input_->SetRegister(i, i * 4); |
| 89 } | 95 } |
| 90 input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp())); | 96 input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp())); |
| 91 input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp())); | 97 input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp())); |
| 92 for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { | 98 for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) { |
| 93 input_->SetDoubleRegister(i, 0.0); | 99 input_->SetDoubleRegister(i, 0.0); |
| 94 } | 100 } |
| 95 | 101 |
| 96 // Fill the frame content from the actual data on the frame. | 102 // Fill the frame content from the actual data on the frame. |
| 97 for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) { | 103 for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) { |
| 98 input_->SetFrameSlot(i, Memory::uint32_at(tos + i)); | 104 input_->SetFrameSlot( |
| 105 i, reinterpret_cast<intptr_t>(Memory::Address_at(tos + i))); |
| 99 } | 106 } |
| 100 } | 107 } |
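
The change from Memory::uint32_at to Memory::Address_at in FillInputFrame is the essential 64-bit fix: frame slots are pointer-sized, so on PPC64 a 32-bit read would capture only half of each slot. A self-contained sketch of the pointer-width copy (CopyFrameSlots and its parameters are hypothetical stand-ins):

    #include <cstdint>
    #include <cstring>

    constexpr unsigned kPointerSize = sizeof(intptr_t);  // 8 on PPC64, 4 on PPC32

    // Copy a frame's contents one pointer-sized slot at a time, the analogue
    // of the Memory::Address_at loop above.
    void CopyFrameSlots(const uint8_t* tos, intptr_t* slots,
                        unsigned frame_size) {
      for (unsigned i = 0; i < frame_size; i += kPointerSize) {
        std::memcpy(&slots[i / kPointerSize], tos + i, kPointerSize);
      }
    }
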
| 101 | 108 |
| 102 | 109 |
| 103 void Deoptimizer::SetPlatformCompiledStubRegisters( | 110 void Deoptimizer::SetPlatformCompiledStubRegisters( |
| 104 FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { | 111 FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) { |
| 105 ApiFunction function(descriptor->deoptimization_handler()); | 112 ApiFunction function(descriptor->deoptimization_handler()); |
| 106 ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); | 113 ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); |
| 107 intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); | 114 intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); |
| 108 int params = descriptor->GetHandlerParameterCount(); | 115 int params = descriptor->GetHandlerParameterCount(); |
| 109 output_frame->SetRegister(r0.code(), params); | 116 output_frame->SetRegister(r3.code(), params); |
| 110 output_frame->SetRegister(r1.code(), handler); | 117 output_frame->SetRegister(r4.code(), handler); |
| 111 } | 118 } |
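
The register renumbering here (r0/r1 on ARM becoming r3/r4 on PPC) follows the standard PPC ELF ABI, where r3 is the first argument/return register and r0 is special-cased in several addressing contexts and so never carries arguments. A small sanity sketch of the assumed mapping:

    // Assumption based on the PPC ELF ABI: ARM argument registers r0..r3
    // map onto PPC r3..r6 (PPC arguments occupy r3..r10).
    constexpr int ArmToPpcArgReg(int arm_reg) { return arm_reg + 3; }
    static_assert(ArmToPpcArgReg(0) == 3, "parameter count register");
    static_assert(ArmToPpcArgReg(1) == 4, "handler address register");
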
| 112 | 119 |
| 113 | 120 |
| 114 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { | 121 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { |
| 115 for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) { | 122 for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) { |
| 116 double double_value = input_->GetDoubleRegister(i); | 123 double double_value = input_->GetDoubleRegister(i); |
| 117 output_frame->SetDoubleRegister(i, double_value); | 124 output_frame->SetDoubleRegister(i, double_value); |
| 118 } | 125 } |
| 119 } | 126 } |
| 120 | 127 |
| 121 | 128 |
| 122 bool Deoptimizer::HasAlignmentPadding(JSFunction* function) { | 129 bool Deoptimizer::HasAlignmentPadding(JSFunction* function) { |
| 123 // There is no dynamic alignment padding on ARM in the input frame. | 130 // There is no dynamic alignment padding on PPC in the input frame. |
| 124 return false; | 131 return false; |
| 125 } | 132 } |
| 126 | 133 |
| 127 | 134 |
| 128 #define __ masm()-> | 135 #define __ masm()-> |
| 129 | 136 |
| 130 // This code tries to be close to ia32 code so that any changes can be | 137 // This code tries to be close to ia32 code so that any changes can be |
| 131 // easily ported. | 138 // easily ported. |
| 132 void Deoptimizer::EntryGenerator::Generate() { | 139 void Deoptimizer::EntryGenerator::Generate() { |
| 133 GeneratePrologue(); | 140 GeneratePrologue(); |
| 134 | 141 |
| 135 // Save all general purpose registers before messing with them. | 142 // Unlike on ARM we don't save all the registers, just the useful ones. |
| 143 // For the rest, there are gaps on the stack, so the offsets remain the same. |
| 136 const int kNumberOfRegisters = Register::kNumRegisters; | 144 const int kNumberOfRegisters = Register::kNumRegisters; |
| 137 | 145 |
| 138 // Everything but pc, lr and ip which will be saved but not restored. | 146 RegList restored_regs = kJSCallerSaved | kCalleeSaved; |
| 139 RegList restored_regs = kJSCallerSaved | kCalleeSaved | ip.bit(); | 147 RegList saved_regs = restored_regs | sp.bit(); |
| 140 | 148 |
| 141 const int kDoubleRegsSize = | 149 const int kDoubleRegsSize = |
| 142 kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters; | 150 kDoubleSize * DoubleRegister::kMaxNumAllocatableRegisters; |
| 143 | 151 |
| 144 // Save all allocatable VFP registers before messing with them. | 152 // Save all FPU registers before messing with them. |
| 145 DCHECK(kDoubleRegZero.code() == 14); | 153 __ subi(sp, sp, Operand(kDoubleRegsSize)); |
| 146 DCHECK(kScratchDoubleReg.code() == 15); | 154 for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) { |
| 155 DoubleRegister fpu_reg = DoubleRegister::FromAllocationIndex(i); |
| 156 int offset = i * kDoubleSize; |
| 157 __ stfd(fpu_reg, MemOperand(sp, offset)); |
| 158 } |
| 147 | 159 |
| 148 // Check CPU flags for number of registers, setting the Z condition flag. | 160 // Push saved_regs (needed to populate FrameDescription::registers_). |
| 149 __ CheckFor32DRegs(ip); | 161 // Leave gaps for other registers. |
| 150 | 162 __ subi(sp, sp, Operand(kNumberOfRegisters * kPointerSize)); |
| 151 // Push registers d0-d13, and possibly d16-d31, on the stack. | 163 for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) { |
| 152 // If d16-d31 are not pushed, decrease the stack pointer instead. | 164 if ((saved_regs & (1 << i)) != 0) { |
| 153 __ vstm(db_w, sp, d16, d31, ne); | 165 __ StoreP(ToRegister(i), MemOperand(sp, kPointerSize * i)); |
| 154 __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq); | 166 } |
| 155 __ vstm(db_w, sp, d0, d13); | 167 } |
| 156 | |
| 157 // Push all 16 registers (needed to populate FrameDescription::registers_). | |
| 158 // TODO(1588) Note that using pc with stm is deprecated, so we should perhaps | |
| 159 // handle this a bit differently. | |
| 160 __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit()); | |
| 161 | 168 |
| 162 const int kSavedRegistersAreaSize = | 169 const int kSavedRegistersAreaSize = |
| 163 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; | 170 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; |
| 164 | 171 |
| 165 // Get the bailout id from the stack. | 172 // Get the bailout id from the stack. |
| 166 __ ldr(r2, MemOperand(sp, kSavedRegistersAreaSize)); | 173 __ LoadP(r5, MemOperand(sp, kSavedRegistersAreaSize)); |
| 167 | 174 |
| 168 // Get the address of the location in the code object (r3) (return | 175 // Get the address of the location in the code object (r6) (return |
| 169 // address for lazy deoptimization) and compute the fp-to-sp delta in | 176 // address for lazy deoptimization) and compute the fp-to-sp delta in |
| 170 // register r4. | 177 // register r7. |
| 171 __ mov(r3, lr); | 178 __ mflr(r6); |
| 172 // Correct one word for bailout id. | 179 // Correct one word for bailout id. |
| 173 __ add(r4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | 180 __ addi(r7, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); |
| 174 __ sub(r4, fp, r4); | 181 __ sub(r7, fp, r7); |
| 175 | 182 |
| 176 // Allocate a new deoptimizer object. | 183 // Allocate a new deoptimizer object. |
| 177 // Pass four arguments in r0 to r3 and fifth argument on stack. | 184 // Pass six arguments in r3 to r8. |
| 178 __ PrepareCallCFunction(6, r5); | 185 __ PrepareCallCFunction(6, r8); |
| 179 __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 186 __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
| 180 __ mov(r1, Operand(type())); // bailout type, | 187 __ li(r4, Operand(type())); // bailout type, |
| 181 // r2: bailout id already loaded. | 188 // r5: bailout id already loaded. |
| 182 // r3: code address or 0 already loaded. | 189 // r6: code address or 0 already loaded. |
| 183 __ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta. | 190 // r7: Fp-to-sp delta. |
| 184 __ mov(r5, Operand(ExternalReference::isolate_address(isolate()))); | 191 __ mov(r8, Operand(ExternalReference::isolate_address(isolate()))); |
| 185 __ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate. | |
| 186 // Call Deoptimizer::New(). | 192 // Call Deoptimizer::New(). |
| 187 { | 193 { |
| 188 AllowExternalCallThatCantCauseGC scope(masm()); | 194 AllowExternalCallThatCantCauseGC scope(masm()); |
| 189 __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); | 195 __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); |
| 190 } | 196 } |
| 191 | 197 |
| 192 // Preserve "deoptimizer" object in register r0 and get the input | 198 // Preserve "deoptimizer" object in register r3 and get the input |
| 193 // frame descriptor pointer to r1 (deoptimizer->input_); | 199 // frame descriptor pointer to r4 (deoptimizer->input_); |
| 194 __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset())); | 200 __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset())); |
| 195 | 201 |
| 196 // Copy core registers into FrameDescription::registers_[kNumRegisters]. | 202 // Copy core registers into FrameDescription::registers_[kNumRegisters]. |
| 197 DCHECK(Register::kNumRegisters == kNumberOfRegisters); | 203 DCHECK(Register::kNumRegisters == kNumberOfRegisters); |
| 198 for (int i = 0; i < kNumberOfRegisters; i++) { | 204 for (int i = 0; i < kNumberOfRegisters; i++) { |
| 199 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); | 205 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); |
| 200 __ ldr(r2, MemOperand(sp, i * kPointerSize)); | 206 __ LoadP(r5, MemOperand(sp, i * kPointerSize)); |
| 201 __ str(r2, MemOperand(r1, offset)); | 207 __ StoreP(r5, MemOperand(r4, offset)); |
| 202 } | 208 } |
| 203 | 209 |
| 210 int double_regs_offset = FrameDescription::double_registers_offset(); |
| 204 // Copy VFP registers to | 211 // Copy double registers to |
| 205 // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters] | 212 // double_registers_[DoubleRegister::kNumAllocatableRegisters] |
| 206 int double_regs_offset = FrameDescription::double_registers_offset(); | 213 for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) { |
| 207 for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) { | |
| 208 int dst_offset = i * kDoubleSize + double_regs_offset; | 214 int dst_offset = i * kDoubleSize + double_regs_offset; |
| 209 int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; | 215 int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize; |
| 210 __ vldr(d0, sp, src_offset); | 216 __ lfd(d0, MemOperand(sp, src_offset)); |
| 211 __ vstr(d0, r1, dst_offset); | 217 __ stfd(d0, MemOperand(r4, dst_offset)); |
| 212 } | 218 } |
| 213 | 219 |
| 214 // Remove the bailout id and the saved registers from the stack. | 220 // Remove the bailout id and the saved registers from the stack. |
| 215 __ add(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | 221 __ addi(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); |
| 216 | 222 |
| 217 // Compute a pointer to the unwinding limit in register r2; that is | 223 // Compute a pointer to the unwinding limit in register r5; that is |
| 218 // the first stack slot not part of the input frame. | 224 // the first stack slot not part of the input frame. |
| 219 __ ldr(r2, MemOperand(r1, FrameDescription::frame_size_offset())); | 225 __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset())); |
| 220 __ add(r2, r2, sp); | 226 __ add(r5, r5, sp); |
| 221 | 227 |
| 222 // Unwind the stack down to - but not including - the unwinding | 228 // Unwind the stack down to - but not including - the unwinding |
| 223 // limit and copy the contents of the activation frame to the input | 229 // limit and copy the contents of the activation frame to the input |
| 224 // frame description. | 230 // frame description. |
| 225 __ add(r3, r1, Operand(FrameDescription::frame_content_offset())); | 231 __ addi(r6, r4, Operand(FrameDescription::frame_content_offset())); |
| 226 Label pop_loop; | 232 Label pop_loop; |
| 227 Label pop_loop_header; | 233 Label pop_loop_header; |
| 228 __ b(&pop_loop_header); | 234 __ b(&pop_loop_header); |
| 229 __ bind(&pop_loop); | 235 __ bind(&pop_loop); |
| 230 __ pop(r4); | 236 __ pop(r7); |
| 231 __ str(r4, MemOperand(r3, 0)); | 237 __ StoreP(r7, MemOperand(r6, 0)); |
| 232 __ add(r3, r3, Operand(sizeof(uint32_t))); | 238 __ addi(r6, r6, Operand(kPointerSize)); |
| 233 __ bind(&pop_loop_header); | 239 __ bind(&pop_loop_header); |
| 234 __ cmp(r2, sp); | 240 __ cmp(r5, sp); |
| 235 __ b(ne, &pop_loop); | 241 __ bne(&pop_loop); |
| 236 | 242 |
| 237 // Compute the output frame in the deoptimizer. | 243 // Compute the output frame in the deoptimizer. |
| 238 __ push(r0); // Preserve deoptimizer object across call. | 244 __ push(r3); // Preserve deoptimizer object across call. |
| 239 // r0: deoptimizer object; r1: scratch. | 245 // r3: deoptimizer object; r4: scratch. |
| 240 __ PrepareCallCFunction(1, r1); | 246 __ PrepareCallCFunction(1, r4); |
| 241 // Call Deoptimizer::ComputeOutputFrames(). | 247 // Call Deoptimizer::ComputeOutputFrames(). |
| 242 { | 248 { |
| 243 AllowExternalCallThatCantCauseGC scope(masm()); | 249 AllowExternalCallThatCantCauseGC scope(masm()); |
| 244 __ CallCFunction( | 250 __ CallCFunction( |
| 245 ExternalReference::compute_output_frames_function(isolate()), 1); | 251 ExternalReference::compute_output_frames_function(isolate()), 1); |
| 246 } | 252 } |
| 247 __ pop(r0); // Restore deoptimizer object (class Deoptimizer). | 253 __ pop(r3); // Restore deoptimizer object (class Deoptimizer). |
| 248 | 254 |
| 249 // Replace the current (input) frame with the output frames. | 255 // Replace the current (input) frame with the output frames. |
| 250 Label outer_push_loop, inner_push_loop, | 256 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; |
| 251 outer_loop_header, inner_loop_header; | 257 // Outer loop state: r7 = current "FrameDescription** output_", |
| 252 // Outer loop state: r4 = current "FrameDescription** output_", | 258 // r4 = one past the last FrameDescription**. |
| 253 // r1 = one past the last FrameDescription**. | 259 __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset())); |
| 254 __ ldr(r1, MemOperand(r0, Deoptimizer::output_count_offset())); | 260 __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_. |
| 255 __ ldr(r4, MemOperand(r0, Deoptimizer::output_offset())); // r4 is output_. | 261 __ ShiftLeftImm(r4, r4, Operand(kPointerSizeLog2)); |
| 256 __ add(r1, r4, Operand(r1, LSL, 2)); | 262 __ add(r4, r7, r4); |
| 257 __ jmp(&outer_loop_header); | 263 __ b(&outer_loop_header); |
| 264 |
| 258 __ bind(&outer_push_loop); | 265 __ bind(&outer_push_loop); |
| 259 // Inner loop state: r2 = current FrameDescription*, r3 = loop index. | 266 // Inner loop state: r5 = current FrameDescription*, r6 = loop index. |
| 260 __ ldr(r2, MemOperand(r4, 0)); // output_[ix] | 267 __ LoadP(r5, MemOperand(r7, 0)); // output_[ix] |
| 261 __ ldr(r3, MemOperand(r2, FrameDescription::frame_size_offset())); | 268 __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset())); |
| 262 __ jmp(&inner_loop_header); | 269 __ b(&inner_loop_header); |
| 270 |
| 263 __ bind(&inner_push_loop); | 271 __ bind(&inner_push_loop); |
| 264 __ sub(r3, r3, Operand(sizeof(uint32_t))); | 272 __ addi(r6, r6, Operand(-static_cast<intptr_t>(sizeof(intptr_t)))); |
| 265 __ add(r6, r2, Operand(r3)); | 273 __ add(r9, r5, r6); |
| 266 __ ldr(r6, MemOperand(r6, FrameDescription::frame_content_offset())); | 274 __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset())); |
| 267 __ push(r6); | 275 __ push(r9); |
| 276 |
| 268 __ bind(&inner_loop_header); | 277 __ bind(&inner_loop_header); |
| 269 __ cmp(r3, Operand::Zero()); | 278 __ cmpi(r6, Operand::Zero()); |
| 270 __ b(ne, &inner_push_loop); // test for gt? | 279 __ bne(&inner_push_loop); // test for gt? |
| 271 __ add(r4, r4, Operand(kPointerSize)); | 280 |
| 281 __ addi(r7, r7, Operand(kPointerSize)); |
| 272 __ bind(&outer_loop_header); | 282 __ bind(&outer_loop_header); |
| 273 __ cmp(r4, r1); | 283 __ cmp(r7, r4); |
| 274 __ b(lt, &outer_push_loop); | 284 __ blt(&outer_push_loop); |
| 275 | 285 |
| 276 // Check CPU flags for number of registers, setting the Z condition flag. | 286 __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset())); |
| 277 __ CheckFor32DRegs(ip); | 287 for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) { |
| 278 | 288 const DoubleRegister dreg = DoubleRegister::FromAllocationIndex(i); |
| 279 __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset())); | 289 int src_offset = i * kDoubleSize + double_regs_offset; |
| 280 int src_offset = FrameDescription::double_registers_offset(); | 290 __ lfd(dreg, MemOperand(r4, src_offset)); |
| 281 for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) { | |
| 282 if (i == kDoubleRegZero.code()) continue; | |
| 283 if (i == kScratchDoubleReg.code()) continue; | |
| 284 | |
| 285 const DwVfpRegister reg = DwVfpRegister::from_code(i); | |
| 286 __ vldr(reg, r1, src_offset, i < 16 ? al : ne); | |
| 287 src_offset += kDoubleSize; | |
| 288 } | 291 } |
| 289 | 292 |
| 290 // Push state, pc, and continuation from the last output frame. | 293 // Push state, pc, and continuation from the last output frame. |
| 291 __ ldr(r6, MemOperand(r2, FrameDescription::state_offset())); | 294 __ LoadP(r9, MemOperand(r5, FrameDescription::state_offset())); |
| 292 __ push(r6); | 295 __ push(r9); |
| 293 __ ldr(r6, MemOperand(r2, FrameDescription::pc_offset())); | 296 __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset())); |
| 294 __ push(r6); | 297 __ push(r9); |
| 295 __ ldr(r6, MemOperand(r2, FrameDescription::continuation_offset())); | 298 __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset())); |
| 296 __ push(r6); | 299 __ push(r9); |
| 297 | 300 |
| 298 // Push the registers from the last output frame. | 301 // Restore the registers from the last output frame. |
| 302 DCHECK(!(ip.bit() & restored_regs)); |
| 303 __ mr(ip, r5); |
| 299 for (int i = kNumberOfRegisters - 1; i >= 0; i--) { | 304 for (int i = kNumberOfRegisters - 1; i >= 0; i--) { |
| 300 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); | 305 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); |
| 301 __ ldr(r6, MemOperand(r2, offset)); | 306 if ((restored_regs & (1 << i)) != 0) { |
| 302 __ push(r6); | 307 __ LoadP(ToRegister(i), MemOperand(ip, offset)); |
| 308 } |
| 303 } | 309 } |
| 304 | 310 |
| 305 // Restore the registers from the stack. | |
| 306 __ ldm(ia_w, sp, restored_regs); // all but pc registers. | |
| 307 __ pop(ip); // remove sp | |
| 308 __ pop(ip); // remove lr | |
| 309 | |
| 310 __ InitializeRootRegister(); | 311 __ InitializeRootRegister(); |
| 311 | 312 |
| 312 __ pop(ip); // remove pc | |
| 313 __ pop(ip); // get continuation, leave pc on stack | 313 __ pop(ip); // get continuation, leave pc on stack |
| 314 __ pop(lr); | 314 __ pop(r0); |
| 315 __ mtlr(r0); |
| 315 __ Jump(ip); | 316 __ Jump(ip); |
| 316 __ stop("Unreachable."); | 317 __ stop("Unreachable."); |
| 317 } | 318 } |
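
Generate() lays the saved state out bottom-up: the 32 GPR slots sit at sp (with gaps for registers that are never stored), the allocatable double registers sit above them, and the bailout id pushed by the table entry sits just past kSavedRegistersAreaSize, which is exactly where the LoadP into r5 finds it. A sketch of the offset arithmetic, assuming PPC64 sizes and a hypothetical allocatable-double count:

    #include <cstdio>

    constexpr int kPointerSize = 8;  // PPC64
    constexpr int kDoubleSize = 8;
    constexpr int kNumRegisters = 32;
    constexpr int kNumAllocatableDoubles = 12;  // assumption; the real value
                                                // comes from DoubleRegister

    int main() {
      const int gpr_area = kNumRegisters * kPointerSize;
      const int saved_area =  // kSavedRegistersAreaSize
          gpr_area + kNumAllocatableDoubles * kDoubleSize;
      std::printf("GPR r5 slot:     sp + %d\n", 5 * kPointerSize);
      std::printf("double slot 3:   sp + %d\n", gpr_area + 3 * kDoubleSize);
      std::printf("bailout id slot: sp + %d\n", saved_area);
    }
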
| 318 | 319 |
| 319 | 320 |
| 320 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { | 321 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { |
| 322 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); |
| 323 |
| 321 // Create a sequence of deoptimization entries. | 324 // Create a sequence of deoptimization entries. |
| 322 // Note that registers are still live when jumping to an entry. | 325 // Note that registers are still live when jumping to an entry. |
| 323 Label done; | 326 Label done; |
| 324 for (int i = 0; i < count(); i++) { | 327 for (int i = 0; i < count(); i++) { |
| 325 int start = masm()->pc_offset(); | 328 int start = masm()->pc_offset(); |
| 326 USE(start); | 329 USE(start); |
| 327 __ mov(ip, Operand(i)); | 330 __ li(ip, Operand(i)); |
| 328 __ b(&done); | 331 __ b(&done); |
| 329 DCHECK(masm()->pc_offset() - start == table_entry_size_); | 332 DCHECK(masm()->pc_offset() - start == table_entry_size_); |
| 330 } | 333 } |
| 331 __ bind(&done); | 334 __ bind(&done); |
| 332 __ push(ip); | 335 __ push(ip); |
| 333 } | 336 } |
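
Each table entry emitted here is exactly two 4-byte instructions (li ip, <id> followed by b done), which is where table_entry_size_ = 8 at the top of the file comes from; it makes entry ids and entry addresses interconvertible. A small sketch of that indexing (illustrative names, not the V8 API):

    #include <cstdint>

    constexpr int kTableEntrySize = 8;  // li (4 bytes) + b (4 bytes)

    inline uintptr_t EntryAddress(uintptr_t table_base, int id) {
      return table_base + static_cast<uintptr_t>(id) * kTableEntrySize;
    }

    inline int EntryId(uintptr_t table_base, uintptr_t entry_address) {
      return static_cast<int>((entry_address - table_base) / kTableEntrySize);
    }
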
| 334 | 337 |
| 335 | 338 |
| 336 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { | 339 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { |
| 337 SetFrameSlot(offset, value); | 340 SetFrameSlot(offset, value); |
| 338 } | 341 } |
| 339 | 342 |
| 340 | 343 |
| 341 void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { | 344 void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { |
| 342 SetFrameSlot(offset, value); | 345 SetFrameSlot(offset, value); |
| 343 } | 346 } |
| 344 | 347 |
| 345 | 348 |
| 346 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { | 349 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { |
| 350 #if V8_OOL_CONSTANT_POOL |
| 347 DCHECK(FLAG_enable_ool_constant_pool); | 351 DCHECK(FLAG_enable_ool_constant_pool); |
| 348 SetFrameSlot(offset, value); | 352 SetFrameSlot(offset, value); |
| 353 #else |
| 354 // No out-of-line constant pool support. |
| 355 UNREACHABLE(); |
| 356 #endif |
| 349 } | 357 } |
| 350 | 358 |
| 351 | 359 |
| 352 #undef __ | 360 #undef __ |
| 353 | 361 } |
| 354 } } // namespace v8::internal | 362 } // namespace v8::internal |