| Index: src/s390/deoptimizer-s390.cc
|
| diff --git a/src/ppc/deoptimizer-ppc.cc b/src/s390/deoptimizer-s390.cc
|
| similarity index 62%
|
| copy from src/ppc/deoptimizer-ppc.cc
|
| copy to src/s390/deoptimizer-s390.cc
|
| index 9ec5cdd11a2765ae88f46e5e87e1d7dcfeac69f9..8b697f6e97f13e78133ea022c0842b7b9d2e6b14 100644
|
| --- a/src/ppc/deoptimizer-ppc.cc
|
| +++ b/src/s390/deoptimizer-s390.cc
|
| @@ -2,8 +2,8 @@
|
| // Use of this source code is governed by a BSD-style license that can be
|
| // found in the LICENSE file.
|
|
|
| -#include "src/codegen.h"
|
| #include "src/deoptimizer.h"
|
| +#include "src/codegen.h"
|
| #include "src/full-codegen/full-codegen.h"
|
| #include "src/register-configuration.h"
|
| #include "src/safepoint-table.h"
|
| @@ -11,25 +11,23 @@
|
| namespace v8 {
|
| namespace internal {
|
|
|
| -const int Deoptimizer::table_entry_size_ = 8;
|
| -
|
| +// LAY + LGHI/LHI + BRCL
|
| +const int Deoptimizer::table_entry_size_ = 16;
|
|
|
| int Deoptimizer::patch_size() {
|
| -#if V8_TARGET_ARCH_PPC64
|
| - const int kCallInstructionSizeInWords = 7;
|
| +#if V8_TARGET_ARCH_S390X
|
| + const int kCallInstructionSize = 16;
|
| #else
|
| - const int kCallInstructionSizeInWords = 4;
|
| + const int kCallInstructionSize = 10;
|
| #endif
|
| - return kCallInstructionSizeInWords * Assembler::kInstrSize;
|
| + return kCallInstructionSize;
|
| }
|
|
|
| -
|
| void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
|
| // Empty because there is no need for relocation information for the code
|
| // patching in Deoptimizer::PatchCodeForDeoptimization below.
|
| }
|
|
|
| -
|
| void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
|
| Address code_start_address = code->instruction_start();
|
|
|
| @@ -45,7 +43,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
|
| } else {
|
| pointer = code->instruction_start();
|
| }
|
| - CodePatcher patcher(isolate, pointer, 1);
|
| + CodePatcher patcher(isolate, pointer, 2);
|
| patcher.masm()->bkpt(0);
|
|
|
| DeoptimizationInputData* data =
|
| @@ -53,7 +51,7 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
|
| int osr_offset = data->OsrPcOffset()->value();
|
| if (osr_offset > 0) {
|
| CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
|
| - 1);
|
| + 2);
|
| osr_patcher.masm()->bkpt(0);
|
| }
|
| }
|
| @@ -73,10 +71,8 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
|
| // this is optimized code, so we don't have to have a predictable size.
|
| int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize(
|
| deopt_entry, kRelocInfo_NONEPTR);
|
| - int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
|
| - DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
|
| DCHECK(call_size_in_bytes <= patch_size());
|
| - CodePatcher patcher(isolate, call_address, call_size_in_words);
|
| + CodePatcher patcher(isolate, call_address, call_size_in_bytes);
|
| patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
|
| DCHECK(prev_call_address == NULL ||
|
| call_address >= prev_call_address + patch_size());
|
| @@ -87,18 +83,16 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
|
| }
|
| }
|
|
|
| -
|
| void Deoptimizer::SetPlatformCompiledStubRegisters(
|
| FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
|
| ApiFunction function(descriptor->deoptimization_handler());
|
| ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
|
| intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
|
| int params = descriptor->GetHandlerParameterCount();
|
| - output_frame->SetRegister(r3.code(), params);
|
| - output_frame->SetRegister(r4.code(), handler);
|
| + output_frame->SetRegister(r2.code(), params);
|
| + output_frame->SetRegister(r3.code(), handler);
|
| }
|
|
|
| -
|
| void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
|
| for (int i = 0; i < DoubleRegister::kNumRegisters; ++i) {
|
| double double_value = input_->GetDoubleRegister(i);
|
| @@ -107,11 +101,10 @@ void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
|
| }
|
|
|
| bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
|
| - // There is no dynamic alignment padding on PPC in the input frame.
|
| + // There is no dynamic alignment padding on S390 in the input frame.
|
| return false;
|
| }
|
|
|
| -
|
| #define __ masm()->
|
|
|
| // This code tries to be close to ia32 code so that any changes can be
|
| @@ -119,34 +112,27 @@ bool Deoptimizer::HasAlignmentPadding(SharedFunctionInfo* shared) {
|
| void Deoptimizer::TableEntryGenerator::Generate() {
|
| GeneratePrologue();
|
|
|
| - // Unlike on ARM we don't save all the registers, just the useful ones.
|
| - // For the rest, there are gaps on the stack, so the offsets remain the same.
|
| + // Save all the registers onto the stack
|
| const int kNumberOfRegisters = Register::kNumRegisters;
|
|
|
| RegList restored_regs = kJSCallerSaved | kCalleeSaved;
|
| - RegList saved_regs = restored_regs | sp.bit();
|
|
|
| const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
|
|
|
| // Save all double registers before messing with them.
|
| - __ subi(sp, sp, Operand(kDoubleRegsSize));
|
| + __ lay(sp, MemOperand(sp, -kDoubleRegsSize));
|
| const RegisterConfiguration* config =
|
| RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
|
| for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
|
| int code = config->GetAllocatableDoubleCode(i);
|
| const DoubleRegister dreg = DoubleRegister::from_code(code);
|
| int offset = code * kDoubleSize;
|
| - __ stfd(dreg, MemOperand(sp, offset));
|
| + __ StoreDouble(dreg, MemOperand(sp, offset));
|
| }
|
|
|
| - // Push saved_regs (needed to populate FrameDescription::registers_).
|
| - // Leave gaps for other registers.
|
| - __ subi(sp, sp, Operand(kNumberOfRegisters * kPointerSize));
|
| - for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
|
| - if ((saved_regs & (1 << i)) != 0) {
|
| - __ StoreP(ToRegister(i), MemOperand(sp, kPointerSize * i));
|
| - }
|
| - }
|
| + // Push all GPRs onto the stack
|
| + __ lay(sp, MemOperand(sp, -kNumberOfRegisters * kPointerSize));
|
| + __ StoreMultipleP(r0, sp, MemOperand(sp)); // Save all 16 registers
|
|
|
| __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
|
| __ StoreP(fp, MemOperand(ip));
|
| @@ -155,41 +141,51 @@ void Deoptimizer::TableEntryGenerator::Generate() {
|
| (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
|
|
|
| // Get the bailout id from the stack.
|
| - __ LoadP(r5, MemOperand(sp, kSavedRegistersAreaSize));
|
| + __ LoadP(r4, MemOperand(sp, kSavedRegistersAreaSize));
|
| +
|
| + // Cleanse the return address for 31-bit mode
|
| + __ CleanseP(r14);
|
|
|
| - // Get the address of the location in the code object (r6) (return
|
| + // Get the address of the location in the code object (r5) (return
|
| // address for lazy deoptimization) and compute the fp-to-sp delta in
|
| - // register r7.
|
| - __ mflr(r6);
|
| - // Correct one word for bailout id.
|
| - __ addi(r7, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
|
| - __ sub(r7, fp, r7);
|
| + // register r6.
|
| + __ LoadRR(r5, r14);
|
| + __ la(r6, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
|
| + __ SubP(r6, fp, r6);
|
|
|
| // Allocate a new deoptimizer object.
|
| - // Pass six arguments in r3 to r8.
|
| - __ PrepareCallCFunction(6, r8);
|
| - __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
| - __ li(r4, Operand(type())); // bailout type,
|
| - // r5: bailout id already loaded.
|
| - // r6: code address or 0 already loaded.
|
| - // r7: Fp-to-sp delta.
|
| - __ mov(r8, Operand(ExternalReference::isolate_address(isolate())));
|
| + // Pass six arguments in r2 to r7.
|
| + __ PrepareCallCFunction(6, r7);
|
| + __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
| + __ LoadImmP(r3, Operand(type())); // bailout type,
|
| + // r4: bailout id already loaded.
|
| + // r5: code address or 0 already loaded.
|
| + // r6: Fp-to-sp delta.
|
| + // Param 6: isolate is passed on the stack.
|
| + __ mov(r7, Operand(ExternalReference::isolate_address(isolate())));
|
| + __ StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
|
| +
|
| // Call Deoptimizer::New().
|
| {
|
| AllowExternalCallThatCantCauseGC scope(masm());
|
| __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
|
| }
|
|
|
| - // Preserve "deoptimizer" object in register r3 and get the input
|
| - // frame descriptor pointer to r4 (deoptimizer->input_);
|
| - __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
|
| + // Preserve "deoptimizer" object in register r2 and get the input
|
| + // frame descriptor pointer to r3 (deoptimizer->input_);
|
| + __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
|
|
|
| // Copy core registers into FrameDescription::registers_[kNumRegisters].
|
| + // DCHECK(Register::kNumRegisters == kNumberOfRegisters);
|
| + // __ mvc(MemOperand(r3, FrameDescription::registers_offset()),
|
| + // MemOperand(sp), kNumberOfRegisters * kPointerSize);
|
| + // Copy core registers into FrameDescription::registers_[kNumRegisters].
|
| + // TODO(john.yan): optimize the following code by using mvc instruction
|
| DCHECK(Register::kNumRegisters == kNumberOfRegisters);
|
| for (int i = 0; i < kNumberOfRegisters; i++) {
|
| int offset = (i * kPointerSize) + FrameDescription::registers_offset();
|
| - __ LoadP(r5, MemOperand(sp, i * kPointerSize));
|
| - __ StoreP(r5, MemOperand(r4, offset));
|
| + __ LoadP(r4, MemOperand(sp, i * kPointerSize));
|
| + __ StoreP(r4, MemOperand(r3, offset));
|
| }
|
|
|
| int double_regs_offset = FrameDescription::double_registers_offset();
|
| @@ -199,146 +195,142 @@ void Deoptimizer::TableEntryGenerator::Generate() {
|
| int code = config->GetAllocatableDoubleCode(i);
|
| int dst_offset = code * kDoubleSize + double_regs_offset;
|
| int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
|
| - __ lfd(d0, MemOperand(sp, src_offset));
|
| - __ stfd(d0, MemOperand(r4, dst_offset));
|
| + // TODO(joransiu): MVC opportunity
|
| + __ LoadDouble(d0, MemOperand(sp, src_offset));
|
| + __ StoreDouble(d0, MemOperand(r3, dst_offset));
|
| }
|
|
|
| // Remove the bailout id and the saved registers from the stack.
|
| - __ addi(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
|
| + __ la(sp, MemOperand(sp, kSavedRegistersAreaSize + (1 * kPointerSize)));
|
|
|
| - // Compute a pointer to the unwinding limit in register r5; that is
|
| + // Compute a pointer to the unwinding limit in register r4; that is
|
| // the first stack slot not part of the input frame.
|
| - __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
|
| - __ add(r5, r5, sp);
|
| + __ LoadP(r4, MemOperand(r3, FrameDescription::frame_size_offset()));
|
| + __ AddP(r4, sp);
|
|
|
| // Unwind the stack down to - but not including - the unwinding
|
| // limit and copy the contents of the activation frame to the input
|
| // frame description.
|
| - __ addi(r6, r4, Operand(FrameDescription::frame_content_offset()));
|
| + __ la(r5, MemOperand(r3, FrameDescription::frame_content_offset()));
|
| Label pop_loop;
|
| Label pop_loop_header;
|
| - __ b(&pop_loop_header);
|
| + __ b(&pop_loop_header, Label::kNear);
|
| __ bind(&pop_loop);
|
| - __ pop(r7);
|
| - __ StoreP(r7, MemOperand(r6, 0));
|
| - __ addi(r6, r6, Operand(kPointerSize));
|
| + __ pop(r6);
|
| + __ StoreP(r6, MemOperand(r5, 0));
|
| + __ la(r5, MemOperand(r5, kPointerSize));
|
| __ bind(&pop_loop_header);
|
| - __ cmp(r5, sp);
|
| + __ CmpP(r4, sp);
|
| __ bne(&pop_loop);
|
|
|
| // Compute the output frame in the deoptimizer.
|
| - __ push(r3); // Preserve deoptimizer object across call.
|
| - // r3: deoptimizer object; r4: scratch.
|
| - __ PrepareCallCFunction(1, r4);
|
| + __ push(r2); // Preserve deoptimizer object across call.
|
| + // r2: deoptimizer object; r3: scratch.
|
| + __ PrepareCallCFunction(1, r3);
|
| // Call Deoptimizer::ComputeOutputFrames().
|
| {
|
| AllowExternalCallThatCantCauseGC scope(masm());
|
| __ CallCFunction(
|
| ExternalReference::compute_output_frames_function(isolate()), 1);
|
| }
|
| - __ pop(r3); // Restore deoptimizer object (class Deoptimizer).
|
| + __ pop(r2); // Restore deoptimizer object (class Deoptimizer).
|
|
|
| // Replace the current (input) frame with the output frames.
|
| Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
|
| - // Outer loop state: r7 = current "FrameDescription** output_",
|
| - // r4 = one past the last FrameDescription**.
|
| - __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
|
| - __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset())); // r7 is output_.
|
| - __ ShiftLeftImm(r4, r4, Operand(kPointerSizeLog2));
|
| - __ add(r4, r7, r4);
|
| - __ b(&outer_loop_header);
|
| + // Outer loop state: r6 = current "FrameDescription** output_",
|
| + // r3 = one past the last FrameDescription**.
|
| + __ LoadlW(r3, MemOperand(r2, Deoptimizer::output_count_offset()));
|
| + __ LoadP(r6, MemOperand(r2, Deoptimizer::output_offset())); // r6 is output_.
|
| + __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2));
|
| + __ AddP(r3, r6, r3);
|
| + __ b(&outer_loop_header, Label::kNear);
|
|
|
| __ bind(&outer_push_loop);
|
| - // Inner loop state: r5 = current FrameDescription*, r6 = loop index.
|
| - __ LoadP(r5, MemOperand(r7, 0)); // output_[ix]
|
| - __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
|
| - __ b(&inner_loop_header);
|
| + // Inner loop state: r4 = current FrameDescription*, r5 = loop index.
|
| + __ LoadP(r4, MemOperand(r6, 0)); // output_[ix]
|
| + __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
|
| + __ b(&inner_loop_header, Label::kNear);
|
|
|
| __ bind(&inner_push_loop);
|
| - __ addi(r6, r6, Operand(-sizeof(intptr_t)));
|
| - __ add(r9, r5, r6);
|
| - __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
|
| - __ push(r9);
|
| + __ AddP(r5, Operand(-sizeof(intptr_t)));
|
| + __ AddP(r8, r4, r5);
|
| + __ LoadP(r8, MemOperand(r8, FrameDescription::frame_content_offset()));
|
| + __ push(r8);
|
|
|
| __ bind(&inner_loop_header);
|
| - __ cmpi(r6, Operand::Zero());
|
| + __ CmpP(r5, Operand::Zero());
|
| __ bne(&inner_push_loop); // test for gt?
|
|
|
| - __ addi(r7, r7, Operand(kPointerSize));
|
| + __ AddP(r6, r6, Operand(kPointerSize));
|
| __ bind(&outer_loop_header);
|
| - __ cmp(r7, r4);
|
| + __ CmpP(r6, r3);
|
| __ blt(&outer_push_loop);
|
|
|
| - __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
|
| + __ LoadP(r3, MemOperand(r2, Deoptimizer::input_offset()));
|
| for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
|
| int code = config->GetAllocatableDoubleCode(i);
|
| const DoubleRegister dreg = DoubleRegister::from_code(code);
|
| int src_offset = code * kDoubleSize + double_regs_offset;
|
| - __ lfd(dreg, MemOperand(r4, src_offset));
|
| + __ ld(dreg, MemOperand(r3, src_offset));
|
| }
|
|
|
| // Push state, pc, and continuation from the last output frame.
|
| - __ LoadP(r9, MemOperand(r5, FrameDescription::state_offset()));
|
| - __ push(r9);
|
| - __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
|
| - __ push(r9);
|
| - __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
|
| - __ push(r9);
|
| + __ LoadP(r8, MemOperand(r4, FrameDescription::state_offset()));
|
| + __ push(r8);
|
| + __ LoadP(r8, MemOperand(r4, FrameDescription::pc_offset()));
|
| + __ push(r8);
|
| + __ LoadP(r8, MemOperand(r4, FrameDescription::continuation_offset()));
|
| + __ push(r8);
|
|
|
| // Restore the registers from the last output frame.
|
| - DCHECK(!(ip.bit() & restored_regs));
|
| - __ mr(ip, r5);
|
| - for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
|
| + __ LoadRR(r1, r4);
|
| + for (int i = kNumberOfRegisters - 1; i > 0; i--) {
|
| int offset = (i * kPointerSize) + FrameDescription::registers_offset();
|
| if ((restored_regs & (1 << i)) != 0) {
|
| - __ LoadP(ToRegister(i), MemOperand(ip, offset));
|
| + __ LoadP(ToRegister(i), MemOperand(r1, offset));
|
| }
|
| }
|
|
|
| __ InitializeRootRegister();
|
|
|
| __ pop(ip); // get continuation, leave pc on stack
|
| - __ pop(r0);
|
| - __ mtlr(r0);
|
| + __ pop(r14);
|
| __ Jump(ip);
|
| __ stop("Unreachable.");
|
| }
|
|
|
| -
|
| void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
|
| - Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
|
| -
|
| - // Create a sequence of deoptimization entries.
|
| - // Note that registers are still live when jumping to an entry.
|
| + // Create a sequence of deoptimization entries. Note that any
|
| + // registers may still be live.
|
| Label done;
|
| for (int i = 0; i < count(); i++) {
|
| int start = masm()->pc_offset();
|
| USE(start);
|
| - __ li(ip, Operand(i));
|
| + __ lay(sp, MemOperand(sp, -kPointerSize));
|
| + __ LoadImmP(ip, Operand(i));
|
| __ b(&done);
|
| + int end = masm()->pc_offset();
|
| + USE(end);
|
| DCHECK(masm()->pc_offset() - start == table_entry_size_);
|
| }
|
| __ bind(&done);
|
| - __ push(ip);
|
| + __ StoreP(ip, MemOperand(sp));
|
| }
|
|
|
| -
|
| void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
|
| SetFrameSlot(offset, value);
|
| }
|
|
|
| -
|
| void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
|
| SetFrameSlot(offset, value);
|
| }
|
|
|
| -
|
| void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
|
| - DCHECK(FLAG_enable_embedded_constant_pool);
|
| - SetFrameSlot(offset, value);
|
| + // No out-of-line constant pool support.
|
| + UNREACHABLE();
|
| }
|
|
|
| -
|
| #undef __
|
| +
|
| } // namespace internal
|
| } // namespace v8
|
|
|