| OLD | NEW |
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/deoptimizer.h" |
| 5 #include "src/codegen.h" | 6 #include "src/codegen.h" |
| 6 #include "src/deoptimizer.h" | |
| 7 #include "src/full-codegen/full-codegen.h" | 7 #include "src/full-codegen/full-codegen.h" |
| 8 #include "src/register-configuration.h" | 8 #include "src/register-configuration.h" |
| 9 #include "src/safepoint-table.h" | 9 #include "src/safepoint-table.h" |
| 10 | 10 |
| 11 namespace v8 { | 11 namespace v8 { |
| 12 namespace internal { | 12 namespace internal { |
| 13 | 13 |
| 14 const int Deoptimizer::table_entry_size_ = 8; | 14 // Each table entry is LAY (6 bytes) + LGHI/LHI (4) + BRCL (6) = 16 bytes. |
| 15 | 15 const int Deoptimizer::table_entry_size_ = 16; |
| 16 | 16 |
| 17 int Deoptimizer::patch_size() { | 17 int Deoptimizer::patch_size() { |
| 18 #if V8_TARGET_ARCH_PPC64 | 18 #if V8_TARGET_ARCH_S390X |
| 19 const int kCallInstructionSizeInWords = 7; | 19 const int kCallInstructionSize = 16; |
| 20 #else | 20 #else |
| 21 const int kCallInstructionSizeInWords = 4; | 21 const int kCallInstructionSize = 10; |
| 22 #endif | 22 #endif |
| 23 return kCallInstructionSizeInWords * Assembler::kInstrSize; | 23 return kCallInstructionSize; |
| 24 } | 24 } |
| 25 | 25 |
| 26 | |
| 27 void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { | 26 void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) { |
| 28 // Empty because there is no need for relocation information for the code | 27 // Empty because there is no need for relocation information for the code |
| 29 // patching in Deoptimizer::PatchCodeForDeoptimization below. | 28 // patching in Deoptimizer::PatchCodeForDeoptimization below. |
| 30 } | 29 } |
| 31 | 30 |
| 32 | |
| 33 void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { | 31 void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) { |
| 34 Address code_start_address = code->instruction_start(); | 32 Address code_start_address = code->instruction_start(); |
| 35 | 33 |
| 36 // Invalidate the relocation information, as it will become invalid by the | 34 // Invalidate the relocation information, as it will become invalid by the |
| 37 // code patching below, and is not needed any more. | 35 // code patching below, and is not needed any more. |
| 38 code->InvalidateRelocation(); | 36 code->InvalidateRelocation(); |
| 39 | 37 |
| 40 if (FLAG_zap_code_space) { | 38 if (FLAG_zap_code_space) { |
| 41 // Fail hard and early if we enter this code object again. | 39 // Fail hard and early if we enter this code object again. |
| 42 byte* pointer = code->FindCodeAgeSequence(); | 40 byte* pointer = code->FindCodeAgeSequence(); |
| 43 if (pointer != NULL) { | 41 if (pointer != NULL) { |
| 44 pointer += kNoCodeAgeSequenceLength; | 42 pointer += kNoCodeAgeSequenceLength; |
| 45 } else { | 43 } else { |
| 46 pointer = code->instruction_start(); | 44 pointer = code->instruction_start(); |
| 47 } | 45 } |
| 48 CodePatcher patcher(isolate, pointer, 1); | 46 CodePatcher patcher(isolate, pointer, 2); |
| 49 patcher.masm()->bkpt(0); | 47 patcher.masm()->bkpt(0); |
| 50 | 48 |
| 51 DeoptimizationInputData* data = | 49 DeoptimizationInputData* data = |
| 52 DeoptimizationInputData::cast(code->deoptimization_data()); | 50 DeoptimizationInputData::cast(code->deoptimization_data()); |
| 53 int osr_offset = data->OsrPcOffset()->value(); | 51 int osr_offset = data->OsrPcOffset()->value(); |
| 54 if (osr_offset > 0) { | 52 if (osr_offset > 0) { |
| 55 CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset, | 53 CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset, |
| 56 1); | 54 2); |
| 57 osr_patcher.masm()->bkpt(0); | 55 osr_patcher.masm()->bkpt(0); |
| 58 } | 56 } |
| 59 } | 57 } |
| 60 | 58 |
| 61 DeoptimizationInputData* deopt_data = | 59 DeoptimizationInputData* deopt_data = |
| 62 DeoptimizationInputData::cast(code->deoptimization_data()); | 60 DeoptimizationInputData::cast(code->deoptimization_data()); |
| 63 #ifdef DEBUG | 61 #ifdef DEBUG |
| 64 Address prev_call_address = NULL; | 62 Address prev_call_address = NULL; |
| 65 #endif | 63 #endif |
| 66 // For each LLazyBailout instruction insert a call to the corresponding | 64 // For each LLazyBailout instruction insert a call to the corresponding |
| 67 // deoptimization entry. | 65 // deoptimization entry. |
| 68 for (int i = 0; i < deopt_data->DeoptCount(); i++) { | 66 for (int i = 0; i < deopt_data->DeoptCount(); i++) { |
| 69 if (deopt_data->Pc(i)->value() == -1) continue; | 67 if (deopt_data->Pc(i)->value() == -1) continue; |
| 70 Address call_address = code_start_address + deopt_data->Pc(i)->value(); | 68 Address call_address = code_start_address + deopt_data->Pc(i)->value(); |
| 71 Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); | 69 Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY); |
| 72 // We need calls to have a predictable size in the unoptimized code, but | 70 // We need calls to have a predictable size in the unoptimized code, but |
| 73 // this is optimized code, so we don't have to have a predictable size. | 71 // this is optimized code, so we don't have to have a predictable size. |
| 74 int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize( | 72 int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize( |
| 75 deopt_entry, kRelocInfo_NONEPTR); | 73 deopt_entry, kRelocInfo_NONEPTR); |
| 76 int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize; | |
| 77 DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0); | |
| 78 DCHECK(call_size_in_bytes <= patch_size()); | 74 DCHECK(call_size_in_bytes <= patch_size()); |
| 79 CodePatcher patcher(isolate, call_address, call_size_in_words); | 75 CodePatcher patcher(isolate, call_address, call_size_in_bytes); |
| 80 patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR); | 76 patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR); |
| 81 DCHECK(prev_call_address == NULL || | 77 DCHECK(prev_call_address == NULL || |
| 82 call_address >= prev_call_address + patch_size()); | 78 call_address >= prev_call_address + patch_size()); |
| 83 DCHECK(call_address + patch_size() <= code->instruction_end()); | 79 DCHECK(call_address + patch_size() <= code->instruction_end()); |
| 84 #ifdef DEBUG | 80 #ifdef DEBUG |
| 85 prev_call_address = call_address; | 81 prev_call_address = call_address; |
| 86 #endif | 82 #endif |
| 87 } | 83 } |
| 88 } | 84 } |
| 89 | 85 |
| 90 | |
| 91 void Deoptimizer::SetPlatformCompiledStubRegisters( | 86 void Deoptimizer::SetPlatformCompiledStubRegisters( |
| 92 FrameDescription* output_frame, CodeStubDescriptor* descriptor) { | 87 FrameDescription* output_frame, CodeStubDescriptor* descriptor) { |
| 93 ApiFunction function(descriptor->deoptimization_handler()); | 88 ApiFunction function(descriptor->deoptimization_handler()); |
| 94 ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); | 89 ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_); |
| 95 intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); | 90 intptr_t handler = reinterpret_cast<intptr_t>(xref.address()); |
| 96 int params = descriptor->GetHandlerParameterCount(); | 91 int params = descriptor->GetHandlerParameterCount(); |
| 97 output_frame->SetRegister(r3.code(), params); | 92 output_frame->SetRegister(r2.code(), params); |
| 98 output_frame->SetRegister(r4.code(), handler); | 93 output_frame->SetRegister(r3.code(), handler); |
| 99 } | 94 } |
| 100 | 95 |
| 101 | |
| 102 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { | 96 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) { |
| 103 for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) { | 97 for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) { |
| 104 double double_value = input_->GetDoubleRegister(i); | 98 double double_value = input_->GetDoubleRegister(i); |
| 105 output_frame->SetDoubleRegister(i, double_value); | 99 output_frame->SetDoubleRegister(i, double_value); |
| 106 } | 100 } |
| 107 } | 101 } |
| 108 | 102 |
| 109 bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) { | 103 bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) { |
| 110 // There is no dynamic alignment padding on PPC in the input frame. | 104 // There is no dynamic alignment padding on S390 in the input frame. |
| 111 return false; | 105 return false; |
| 112 } | 106 } |
| 113 | 107 |
| 114 | |
| 115 #define __ masm()-> | 108 #define __ masm()-> |
| 116 | 109 |
| 117 // This code tries to be close to ia32 code so that any changes can be | 110 // This code tries to be close to ia32 code so that any changes can be |
| 118 // easily ported. | 111 // easily ported. |
| 119 void Deoptimizer::TableEntryGenerator::Generate() { | 112 void Deoptimizer::TableEntryGenerator::Generate() { |
| 120 GeneratePrologue(); | 113 GeneratePrologue(); |
| 121 | 114 |
| 122 // Unlike on ARM we don't save all the registers, just the useful ones. | 115 // Save all the registers onto the stack. |
| 123 // For the rest, there are gaps on the stack, so the offsets remain the same. | |
| 124 const int kNumberOfRegisters = Register::kNumRegisters; | 116 const int kNumberOfRegisters = Register::kNumRegisters; |
| 125 | 117 |
| 126 RegList restored_regs = kJSCallerSaved | kCalleeSaved; | 118 RegList restored_regs = kJSCallerSaved | kCalleeSaved; |
| 127 RegList saved_regs = restored_regs | sp.bit(); | |
| 128 | 119 |
| 129 const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters; | 120 const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters; |
| 130 | 121 |
| 131 // Save all double registers before messing with them. | 122 // Save all double registers before messing with them. |
| 132 __ subi(sp, sp, Operand(kDoubleRegsSize)); | 123 __ lay(sp, MemOperand(sp, -kDoubleRegsSize)); |
| 133 const RegisterConfiguration* config = | 124 const RegisterConfiguration* config = |
| 134 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT); | 125 RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT); |
| 135 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { | 126 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { |
| 136 int code = config->GetAllocatableDoubleCode(i); | 127 int code = config->GetAllocatableDoubleCode(i); |
| 137 const DoubleRegister dreg = DoubleRegister::from_code(code); | 128 const DoubleRegister dreg = DoubleRegister::from_code(code); |
| 138 int offset = code * kDoubleSize; | 129 int offset = code * kDoubleSize; |
| 139 __ stfd(dreg, MemOperand(sp, offset)); | 130 __ StoreDouble(dreg, MemOperand(sp, offset)); |
| 140 } | 131 } |
| 141 | 132 |
| 142 // Push saved_regs (needed to populate FrameDescription::registers_). | 133 // Push all GPRs onto the stack. |
| 143 // Leave gaps for other registers. | 134 __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize)); |
| 144 __ subi(sp, sp, Operand(kNumberOfRegisters * kPointerSize)); | 135 __ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers (r0-r15). |
| 145 for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) { | |
| 146 if ((saved_regs & (1 << i)) != 0) { | |
| 147 __ StoreP(ToRegister(i), MemOperand(sp, kPointerSize * i)); | |
| 148 } | |
| 149 } | |
| 150 | 136 |
| 151 __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 137 __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
| 152 __ StoreP(fp, MemOperand(ip)); | 138 __ StoreP(fp, MemOperand(ip)); |
| 153 | 139 |
| 154 const int kSavedRegistersAreaSize = | 140 const int kSavedRegistersAreaSize = |
| 155 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; | 141 (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize; |
| 156 | 142 |
| 157 // Get the bailout id from the stack. | 143 // Get the bailout id from the stack. |
| 158 __ LoadP(r5, MemOperand(sp, kSavedRegistersAreaSize)); | 144 __ LoadP(r4, MemOperand(sp, kSavedRegistersAreaSize)); |
| 159 | 145 |
| 160 // Get the address of the location in the code object (r6) (return | 146 // Cleanse the return address for 31-bit mode. |
| 147 __ CleanseP(r14); |
| 148 |
| 149 // Get the address of the location in the code object (r5)(return |
| 161 // address for lazy deoptimization) and compute the fp-to-sp delta in | 150 // address for lazy deoptimization) and compute the fp-to-sp delta in |
| 162 // register r7. | 151 // register r6. |
| 163 __ mflr(r6); | 152 __ LoadRR(r5, r14); |
| 164 // Correct one word for bailout id. | 153 __ la(r6, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize))); |
| 165 __ addi(r7, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | 154 __ SubP(r6, fp, r6); |
| 166 __ sub(r7, fp, r7); | |
| 167 | 155 |
| 168 // Allocate a new deoptimizer object. | 156 // Allocate a new deoptimizer object. |
| 169 // Pass six arguments in r3 to r8. | 157 // Pass six arguments in r2 to r7. |
| 170 __ PrepareCallCFunction(6, r8); | 158 __ PrepareCallCFunction(6, r7); |
| 171 __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 159 __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
| 172 __ li(r4, Operand(type())); // bailout type, | 160 __ LoadImmP(r3, Operand(type())); // bailout type, |
| 173 // r5: bailout id already loaded. | 161 // r4: bailout id already loaded. |
| 174 // r6: code address or 0 already loaded. | 162 // r5: code address or 0 already loaded. |
| 175 // r7: Fp-to-sp delta. | 163 // r6: Fp-to-sp delta. |
| 176 __ mov(r8, Operand(ExternalReference::isolate_address(isolate()))); | 164 // The sixth parameter (the isolate) is passed on the stack. |
| 165 __ mov(r7, Operand(ExternalReference::isolate_address(isolate()))); |
| 166 __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); |
| 167 |
| 177 // Call Deoptimizer::New(). | 168 // Call Deoptimizer::New(). |
| 178 { | 169 { |
| 179 AllowExternalCallThatCantCauseGC scope(masm()); | 170 AllowExternalCallThatCantCauseGC scope(masm()); |
| 180 __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); | 171 __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6); |
| 181 } | 172 } |
| 182 | 173 |
| 183 // Preserve "deoptimizer" object in register r3 and get the input | 174 // Preserve "deoptimizer" object in register r2 and get the input |
| 184 // frame descriptor pointer to r4 (deoptimizer->input_); | 175 // frame descriptor pointer to r3 (deoptimizer->input_); |
| 185 __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset())); | 176 __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset())); |
| 186 | 177 |
| 187 // Copy core registers into FrameDescription::registers_[kNumRegisters]. | 178 // Copy core registers into FrameDescription::registers_[kNumRegisters]. |
| 179 // TODO(john.yan): optimize the following loop by copying the whole |
| 180 // register block with a single mvc instruction, e.g. |
| 181 // __ mvc(MemOperand(r3, FrameDescription::registers_offset()), |
| 182 //        MemOperand(sp), kNumberOfRegisters * kPointerSize); |
| 183 |
| 188 DCHECK(Register::kNumRegisters == kNumberOfRegisters); | 184 DCHECK(Register::kNumRegisters == kNumberOfRegisters); |
| 189 for (int i = 0; i < kNumberOfRegisters; i++) { | 185 for (int i = 0; i < kNumberOfRegisters; i++) { |
| 190 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); | 186 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); |
| 191 __ LoadP(r5, MemOperand(sp, i * kPointerSize)); | 187 __ LoadP(r4, MemOperand(sp, i * kPointerSize)); |
| 192 __ StoreP(r5, MemOperand(r4, offset)); | 188 __ StoreP(r4, MemOperand(r3, offset)); |
| 193 } | 189 } |
| 194 | 190 |
| 195 int double_regs_offset = FrameDescription::double_registers_offset(); | 191 int double_regs_offset = FrameDescription::double_registers_offset(); |
| 196 // Copy double registers to | 192 // Copy double registers to |
| 197 // double_registers_[DoubleRegister::kNumRegisters] | 193 // double_registers_[DoubleRegister::kNumRegisters] |
| 198 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { | 194 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { |
| 199 int code = config->GetAllocatableDoubleCode(i); | 195 int code = config->GetAllocatableDoubleCode(i); |
| 200 int dst_offset = code * kDoubleSize + double_regs_offset; | 196 int dst_offset = code * kDoubleSize + double_regs_offset; |
| 201 int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize; | 197 int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize; |
| 202 __ lfd(d0, MemOperand(sp, src_offset)); | 198 // TODO(joransiu): MVC opportunity |
| 203 __ stfd(d0, MemOperand(r4, dst_offset)); | 199 __ LoadDouble(d0, MemOperand(sp, src_offset)); |
| 200 __ StoreDouble(d0, MemOperand(r3, dst_offset)); |
| 204 } | 201 } |
| 205 | 202 |
| 206 // Remove the bailout id and the saved registers from the stack. | 203 // Remove the bailout id and the saved registers from the stack. |
| 207 __ addi(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize))); | 204 __ la(sp, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize))); |
| 208 | 205 |
| 209 // Compute a pointer to the unwinding limit in register r5; that is | 206 // Compute a pointer to the unwinding limit in register r4; that is |
| 210 // the first stack slot not part of the input frame. | 207 // the first stack slot not part of the input frame. |
| 211 __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset())); | 208 __ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset())); |
| 212 __ add(r5, r5, sp); | 209 __ AddP(r4, sp); |
| 213 | 210 |
| 214 // Unwind the stack down to - but not including - the unwinding | 211 // Unwind the stack down to - but not including - the unwinding |
| 215 // limit and copy the contents of the activation frame to the input | 212 // limit and copy the contents of the activation frame to the input |
| 216 // frame description. | 213 // frame description. |
| 217 __ addi(r6, r4, Operand(FrameDescription::frame_content_offset())); | 214 __ la(r5, MemOperand(r3, FrameDescription::frame_content_offset())); |
| 218 Label pop_loop; | 215 Label pop_loop; |
| 219 Label pop_loop_header; | 216 Label pop_loop_header; |
| 220 __ b(&pop_loop_header); | 217 __ b(&pop_loop_header, Label::kNear); |
| 221 __ bind(&pop_loop); | 218 __ bind(&pop_loop); |
| 222 __ pop(r7); | 219 __ pop(r6); |
| 223 __ StoreP(r7, MemOperand(r6, 0)); | 220 __ StoreP(r6, MemOperand(r5, 0)); |
| 224 __ addi(r6, r6, Operand(kPointerSize)); | 221 __ la(r5, MemOperand(r5, kPointerSize)); |
| 225 __ bind(&pop_loop_header); | 222 __ bind(&pop_loop_header); |
| 226 __ cmp(r5, sp); | 223 __ CmpP(r4, sp); |
| 227 __ bne(&pop_loop); | 224 __ bne(&pop_loop); |
| 228 | 225 |
| 229 // Compute the output frame in the deoptimizer. | 226 // Compute the output frame in the deoptimizer. |
| 230 __ push(r3); // Preserve deoptimizer object across call. | 227 __ push(r2); // Preserve deoptimizer object across call. |
| 231 // r3: deoptimizer object; r4: scratch. | 228 // r2: deoptimizer object; r3: scratch. |
| 232 __ PrepareCallCFunction(1, r4); | 229 __ PrepareCallCFunction(1, r3); |
| 233 // Call Deoptimizer::ComputeOutputFrames(). | 230 // Call Deoptimizer::ComputeOutputFrames(). |
| 234 { | 231 { |
| 235 AllowExternalCallThatCantCauseGC scope(masm()); | 232 AllowExternalCallThatCantCauseGC scope(masm()); |
| 236 __ CallCFunction( | 233 __ CallCFunction( |
| 237 ExternalReference::compute_output_frames_function(isolate()), 1); | 234 ExternalReference::compute_output_frames_function(isolate()), 1); |
| 238 } | 235 } |
| 239 __ pop(r3); // Restore deoptimizer object (class Deoptimizer). | 236 __ pop(r2); // Restore deoptimizer object (class Deoptimizer). |
| 240 | 237 |
| 241 // Replace the current (input) frame with the output frames. | 238 // Replace the current (input) frame with the output frames. |
| 242 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; | 239 Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header; |
| 243 // Outer loop state: r7 = current "FrameDescription** output_", | 240 // Outer loop state: r6 = current "FrameDescription** output_", |
| 244 // r4 = one past the last FrameDescription**. | 241 // r3 = one past the last FrameDescription**. |
| 245 __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset())); | 242 __ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset())); |
| 246 __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_. | 243 __ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_. |
| 247 __ ShiftLeftImm(r4, r4, Operand(kPointerSizeLog2)); | 244 __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2)); |
| 248 __ add(r4, r7, r4); | 245 __ AddP(r3, r6, r3); |
| 249 __ b(&outer_loop_header); | 246 __ b(&outer_loop_header, Label::kNear); |
| 250 | 247 |
| 251 __ bind(&outer_push_loop); | 248 __ bind(&outer_push_loop); |
| 252 // Inner loop state: r5 = current FrameDescription*, r6 = loop index. | 249 // Inner loop state: r4 = current FrameDescription*, r5 = loop index. |
| 253 __ LoadP(r5, MemOperand(r7, 0)); // output_[ix] | 250 __ LoadP(r4, MemOperand(r6, 0)); // output_[ix] |
| 254 __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset())); | 251 __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset())); |
| 255 __ b(&inner_loop_header); | 252 __ b(&inner_loop_header, Label::kNear); |
| 256 | 253 |
| 257 __ bind(&inner_push_loop); | 254 __ bind(&inner_push_loop); |
| 258 __ addi(r6, r6, Operand(-sizeof(intptr_t))); | 255 __ SubP(r5, Operand(sizeof(intptr_t))); |
| 259 __ add(r9, r5, r6); | 256 __ AddP(r8, r4, r5); |
| 260 __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset())); | 257 __ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset())); |
| 261 __ push(r9); | 258 __ push(r8); |
| 262 | 259 |
| 263 __ bind(&inner_loop_header); | 260 __ bind(&inner_loop_header); |
| 264 __ cmpi(r6, Operand::Zero()); | 261 __ CmpP(r5, Operand::Zero()); |
| 265 __ bne(&inner_push_loop); // test for gt? | 262 __ bne(&inner_push_loop); // test for gt? |
| 266 | 263 |
| 267 __ addi(r7, r7, Operand(kPointerSize)); | 264 __ AddP(r6, r6, Operand(kPointerSize)); |
| 268 __ bind(&outer_loop_header); | 265 __ bind(&outer_loop_header); |
| 269 __ cmp(r7, r4); | 266 __ CmpP(r6, r3); |
| 270 __ blt(&outer_push_loop); | 267 __ blt(&outer_push_loop); |
| 271 | 268 |
| 272 __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset())); | 269 __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset())); |
| 273 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { | 270 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) { |
| 274 int code = config->GetAllocatableDoubleCode(i); | 271 int code = config->GetAllocatableDoubleCode(i); |
| 275 const DoubleRegister dreg = DoubleRegister::from_code(code); | 272 const DoubleRegister dreg = DoubleRegister::from_code(code); |
| 276 int src_offset = code * kDoubleSize + double_regs_offset; | 273 int src_offset = code * kDoubleSize + double_regs_offset; |
| 277 __ lfd(dreg, MemOperand(r4, src_offset)); | 274 __ ld(dreg, MemOperand(r3, src_offset)); |
| 278 } | 275 } |
| 279 | 276 |
| 280 // Push state, pc, and continuation from the last output frame. | 277 // Push state, pc, and continuation from the last output frame. |
| 281 __ LoadP(r9, MemOperand(r5, FrameDescription::state_offset())); | 278 __ LoadP(r8, MemOperand(r4, FrameDescription::state_offset())); |
| 282 __ push(r9); | 279 __ push(r8); |
| 283 __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset())); | 280 __ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset())); |
| 284 __ push(r9); | 281 __ push(r8); |
| 285 __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset())); | 282 __ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset())); |
| 286 __ push(r9); | 283 __ push(r8); |
| 287 | 284 |
| 288 // Restore the registers from the last output frame. | 285 // Restore the registers from the last output frame. |
| 289 DCHECK(!(ip.bit() & restored_regs)); | 286 __ LoadRR(r1, r4); |
| 290 __ mr(ip, r5); | 287 for (int i = kNumberOfRegisters - 1; i > 0; i--) { |
| 291 for (int i = kNumberOfRegisters - 1; i >= 0; i--) { | |
| 292 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); | 288 int offset = (i * kPointerSize) + FrameDescription::registers_offset(); |
| 293 if ((restored_regs & (1 << i)) != 0) { | 289 if ((restored_regs & (1 << i)) != 0) { |
| 294 __ LoadP(ToRegister(i), MemOperand(ip, offset)); | 290 __ LoadP(ToRegister(i), MemOperand(r1, offset)); |
| 295 } | 291 } |
| 296 } | 292 } |
| 297 | 293 |
| 298 __ InitializeRootRegister(); | 294 __ InitializeRootRegister(); |
| 299 | 295 |
| 300 __ pop(ip); // get continuation, leave pc on stack | 296 __ pop(ip); // get continuation, leave pc on stack |
| 301 __ pop(r0); | 297 __ pop(r14); |
| 302 __ mtlr(r0); | |
| 303 __ Jump(ip); | 298 __ Jump(ip); |
| 304 __ stop("Unreachable."); | 299 __ stop("Unreachable."); |
| 305 } | 300 } |
| 306 | 301 |
| 307 | |
| 308 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { | 302 void Deoptimizer::TableEntryGenerator::GeneratePrologue() { |
| 309 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm()); | 303 // Create a sequence of deoptimization entries. Note that any |
| 310 | 304 // registers may still be live. |
| 311 // Create a sequence of deoptimization entries. | |
| 312 // Note that registers are still live when jumping to an entry. | |
| 313 Label done; | 305 Label done; |
| 314 for (int i = 0; i < count(); i++) { | 306 for (int i = 0; i < count(); i++) { |
| 315 int start = masm()->pc_offset(); | 307 int start = masm()->pc_offset(); |
| 316 USE(start); | 308 USE(start); |
| 317 __ li(ip, Operand(i)); | 309 __ lay(sp, MemOperand(sp, -kPointerSize)); |
| 310 __ LoadImmP(ip, Operand(i)); |
| 318 __ b(&done); | 311 __ b(&done); |
| 312 int end = masm()->pc_offset(); |
| 313 USE(end); |
| 319 DCHECK(masm()->pc_offset() - start == table_entry_size_); | 314 DCHECK(masm()->pc_offset() - start == table_entry_size_); |
| 320 } | 315 } |
| 321 __ bind(&done); | 316 __ bind(&done); |
| 322 __ push(ip); | 317 __ StoreP(ip, MemOperand(sp)); |
| 323 } | 318 } |
| 324 | 319 |
| 325 | |
| 326 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { | 320 void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) { |
| 327 SetFrameSlot(offset, value); | 321 SetFrameSlot(offset, value); |
| 328 } | 322 } |
| 329 | 323 |
| 330 | |
| 331 void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { | 324 void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) { |
| 332 SetFrameSlot(offset, value); | 325 SetFrameSlot(offset, value); |
| 333 } | 326 } |
| 334 | 327 |
| 335 | |
| 336 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { | 328 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) { |
| 337 DCHECK(FLAG_enable_embedded_constant_pool); | 329 // No out-of-line constant pool support. |
| 338 SetFrameSlot(offset, value); | 330 UNREACHABLE(); |
| 339 } | 331 } |
| 340 | 332 |
| 333 #undef __ |
| 341 | 334 |
| 342 #undef __ | |
| 343 } // namespace internal | 335 } // namespace internal |
| 344 } // namespace v8 | 336 } // namespace v8 |
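
A note on the fixed entry size: GeneratePrologue() emits one LAY + LGHI/LHI + BRCL sequence per bailout id, and the DCHECK pins each sequence to table_entry_size_ = 16 bytes because the deoptimizer locates an entry purely by index. A minimal standalone sketch of that arithmetic (illustrative C++, not V8 code; EntryAddress is a hypothetical helper):

```cpp
#include <cassert>
#include <cstdint>

// Standalone sketch: why table_entry_size_ must be a compile-time constant.
// Each S390 table entry emitted by GeneratePrologue() is
//   LAY (6 bytes) + LGHI/LHI (4 bytes) + BRCL (6 bytes) = 16 bytes,
// so a bailout id can be turned into an entry address by pure indexing.
constexpr int kTableEntrySize = 16;  // mirrors Deoptimizer::table_entry_size_

// Hypothetical helper mirroring the arithmetic GetDeoptimizationEntry
// relies on: base of the entry table plus id * entry size.
uintptr_t EntryAddress(uintptr_t table_base, int bailout_id) {
  return table_base + static_cast<uintptr_t>(bailout_id) * kTableEntrySize;
}

int main() {
  const uintptr_t base = 0x10000;
  assert(EntryAddress(base, 0) == 0x10000);
  assert(EntryAddress(base, 3) == 0x10030);  // three 16-byte entries in
  return 0;
}
```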
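The offsets used throughout Generate() all fall out of one save-area layout: STMG writes the 16 GPRs at the final sp, the double registers sit directly above them, and the bailout id pushed by the table entry lands at sp + kSavedRegistersAreaSize. Since the zSeries ABI passes only five integer arguments in r2-r6, the sixth argument to Deoptimizer::New() (the isolate) goes to a stack slot. A small sketch of that layout, assuming 64-bit sizes (kPointerSize == kDoubleSize == 8; not V8 code):

```cpp
#include <cassert>

// Sketch of the deoptimizer save area on S390X, assuming 16 GPRs (r0..r15)
// and 16 FPRs (d0..d15), each slot 8 bytes wide.
constexpr int kPointerSize = 8;
constexpr int kDoubleSize = 8;
constexpr int kNumberOfRegisters = 16;   // GPRs
constexpr int kNumDoubleRegisters = 16;  // FPRs

constexpr int kDoubleRegsSize = kDoubleSize * kNumDoubleRegisters;
constexpr int kSavedRegistersAreaSize =
    kNumberOfRegisters * kPointerSize + kDoubleRegsSize;

// Offsets relative to sp after both stack adjustments in Generate().
constexpr int GprOffset(int i) { return i * kPointerSize; }
constexpr int FprOffset(int code) {
  return kNumberOfRegisters * kPointerSize + code * kDoubleSize;
}

int main() {
  assert(GprOffset(15) == 120);            // last slot written by STMG
  assert(FprOffset(0) == 128);             // FPRs sit above the GPR block
  assert(kSavedRegistersAreaSize == 256);  // bailout id is loaded from here
  return 0;
}
```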