| Index: src/s390/macro-assembler-s390.h
|
| diff --git a/src/ppc/macro-assembler-ppc.h b/src/s390/macro-assembler-s390.h
|
| similarity index 75%
|
| copy from src/ppc/macro-assembler-ppc.h
|
| copy to src/s390/macro-assembler-s390.h
|
| index d9dbd568273f6abca6f7c76e48d5897af5f076eb..cd3b9e1931708fb523077a4eb053c9de285e21ec 100644
|
| --- a/src/ppc/macro-assembler-ppc.h
|
| +++ b/src/s390/macro-assembler-s390.h
|
| @@ -2,8 +2,8 @@
|
| // Use of this source code is governed by a BSD-style license that can be
|
| // found in the LICENSE file.
|
|
|
| -#ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
|
| -#define V8_PPC_MACRO_ASSEMBLER_PPC_H_
|
| +#ifndef V8_S390_MACRO_ASSEMBLER_S390_H_
|
| +#define V8_S390_MACRO_ASSEMBLER_S390_H_
|
|
|
| #include "src/assembler.h"
|
| #include "src/bailout-reason.h"
|
| @@ -14,20 +14,20 @@ namespace v8 {
|
| namespace internal {
|
|
|
| // Give alias names to registers for calling conventions.
|
| -const Register kReturnRegister0 = {Register::kCode_r3};
|
| -const Register kReturnRegister1 = {Register::kCode_r4};
|
| -const Register kReturnRegister2 = {Register::kCode_r5};
|
| -const Register kJSFunctionRegister = {Register::kCode_r4};
|
| -const Register kContextRegister = {Register::kCode_r30};
|
| -const Register kInterpreterAccumulatorRegister = {Register::kCode_r3};
|
| -const Register kInterpreterRegisterFileRegister = {Register::kCode_r14};
|
| -const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r15};
|
| -const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r16};
|
| -const Register kInterpreterDispatchTableRegister = {Register::kCode_r17};
|
| -const Register kJavaScriptCallArgCountRegister = {Register::kCode_r3};
|
| -const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r6};
|
| -const Register kRuntimeCallFunctionRegister = {Register::kCode_r4};
|
| -const Register kRuntimeCallArgCountRegister = {Register::kCode_r3};
|
| +const Register kReturnRegister0 = {Register::kCode_r2};
|
| +const Register kReturnRegister1 = {Register::kCode_r3};
|
| +const Register kReturnRegister2 = {Register::kCode_r4};
|
| +const Register kJSFunctionRegister = {Register::kCode_r3};
|
| +const Register kContextRegister = {Register::kCode_r13};
|
| +const Register kInterpreterAccumulatorRegister = {Register::kCode_r2};
|
| +const Register kInterpreterRegisterFileRegister = {Register::kCode_r4};
|
| +const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
|
| +const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
|
| +const Register kInterpreterDispatchTableRegister = {Register::kCode_r8};
|
| +const Register kJavaScriptCallArgCountRegister = {Register::kCode_r2};
|
| +const Register kJavaScriptCallNewTargetRegister = {Register::kCode_r5};
|
| +const Register kRuntimeCallFunctionRegister = {Register::kCode_r3};
|
| +const Register kRuntimeCallArgCountRegister = {Register::kCode_r2};
|
|
|
| // ----------------------------------------------------------------------------
|
| // Static helper functions
|
| @@ -37,6 +37,15 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
|
| return MemOperand(object, offset - kHeapObjectTag);
|
| }
|
|
|
| +// Generate a MemOperand for loading a field from an object.
|
| +inline MemOperand FieldMemOperand(Register object, Register index, int offset) {
|
| + return MemOperand(object, index, offset - kHeapObjectTag);
|
| +}
|
| +
|
| +// Generate a MemOperand for loading a root-list entry off the root register.
|
| +inline MemOperand RootMemOperand(Heap::RootListIndex index) {
|
| + return MemOperand(kRootRegister, index << kPointerSizeLog2);
|
| +}
|
|
|
| // Flags used for AllocateHeapNumber
|
| enum TaggingMode {
|
| @@ -46,7 +55,6 @@ enum TaggingMode {
|
| DONT_TAG_RESULT
|
| };
|
|
|
| -
|
| enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
|
| enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
|
| enum PointersToHereCheck {
|
| @@ -55,14 +63,12 @@ enum PointersToHereCheck {
|
| };
|
| enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
|
|
|
| -
|
| Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
|
| Register reg3 = no_reg,
|
| Register reg4 = no_reg,
|
| Register reg5 = no_reg,
|
| Register reg6 = no_reg);
|
|
|
| -
|
| #ifdef DEBUG
|
| bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
|
| Register reg4 = no_reg, Register reg5 = no_reg,
|
| @@ -72,42 +78,91 @@ bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
|
| #endif
|
|
|
| // These exist to provide portability between 32 and 64bit
|
| -#if V8_TARGET_ARCH_PPC64
|
| -#define LoadPU ldu
|
| -#define LoadPX ldx
|
| -#define LoadPUX ldux
|
| -#define StorePU stdu
|
| -#define StorePX stdx
|
| -#define StorePUX stdux
|
| -#define ShiftLeftImm sldi
|
| -#define ShiftRightImm srdi
|
| -#define ClearLeftImm clrldi
|
| -#define ClearRightImm clrrdi
|
| -#define ShiftRightArithImm sradi
|
| -#define ShiftLeft_ sld
|
| -#define ShiftRight_ srd
|
| -#define ShiftRightArith srad
|
| -#define Mul mulld
|
| +#if V8_TARGET_ARCH_S390X
|
| #define Div divd
|
| +
|
| +// The length of the arithmetic operation is the length
|
| +// of the register.
|
| +
|
| +// Length:
|
| +// H = halfword
|
| +// W = word
|
| +
|
| +// arithmetic and bitwise operations
|
| +#define AddMI agsi
|
| +#define AddRR agr
|
| +#define SubRR sgr
|
| +#define AndRR ngr
|
| +#define OrRR ogr
|
| +#define XorRR xgr
|
| +#define LoadComplementRR lcgr
|
| +#define LoadNegativeRR lngr
|
| +
|
| +// Distinct Operands
|
| +#define AddP_RRR agrk
|
| +#define AddPImm_RRI aghik
|
| +#define AddLogicalP_RRR algrk
|
| +#define SubP_RRR sgrk
|
| +#define SubLogicalP_RRR slgrk
|
| +#define AndP_RRR ngrk
|
| +#define OrP_RRR ogrk
|
| +#define XorP_RRR xgrk
|
| +
|
| +// Load / Store
|
| +#define LoadRR lgr
|
| +#define LoadAndTestRR ltgr
|
| +#define LoadImmP lghi
|
| +#define LoadLogicalHalfWordP llgh
|
| +
|
| +// Compare
|
| +#define CmpPH cghi
|
| +#define CmpLogicalPW clgfi
|
| +
|
| +// Shifts
|
| +#define ShiftLeftP sllg
|
| +#define ShiftRightP srlg
|
| +#define ShiftLeftArithP slag
|
| +#define ShiftRightArithP srag
|
| #else
|
| -#define LoadPU lwzu
|
| -#define LoadPX lwzx
|
| -#define LoadPUX lwzux
|
| -#define StorePU stwu
|
| -#define StorePX stwx
|
| -#define StorePUX stwux
|
| -#define ShiftLeftImm slwi
|
| -#define ShiftRightImm srwi
|
| -#define ClearLeftImm clrlwi
|
| -#define ClearRightImm clrrwi
|
| -#define ShiftRightArithImm srawi
|
| -#define ShiftLeft_ slw
|
| -#define ShiftRight_ srw
|
| -#define ShiftRightArith sraw
|
| -#define Mul mullw
|
| -#define Div divw
|
| -#endif
|
|
|
| +// arithmetic and bitwise operations
|
| +// Reg2Reg
|
| +#define AddMI asi
|
| +#define AddRR ar
|
| +#define SubRR sr
|
| +#define AndRR nr
|
| +#define OrRR or_z
|
| +#define XorRR xr
|
| +#define LoadComplementRR lcr
|
| +#define LoadNegativeRR lnr
|
| +
|
| +// Distinct Operands
|
| +#define AddP_RRR ark
|
| +#define AddPImm_RRI ahik
|
| +#define AddLogicalP_RRR alrk
|
| +#define SubP_RRR srk
|
| +#define SubLogicalP_RRR slrk
|
| +#define AndP_RRR nrk
|
| +#define OrP_RRR ork
|
| +#define XorP_RRR xrk
|
| +
|
| +// Load / Store
|
| +#define LoadRR lr
|
| +#define LoadAndTestRR ltr
|
| +#define LoadImmP lhi
|
| +#define LoadLogicalHalfWordP llh
|
| +
|
| +// Compare
|
| +#define CmpPH chi
|
| +#define CmpLogicalPW clfi
|
| +
|
| +// Shifts
|
| +#define ShiftLeftP ShiftLeft
|
| +#define ShiftRightP ShiftRight
|
| +#define ShiftLeftArithP ShiftLeftArith
|
| +#define ShiftRightArithP ShiftRightArith
|
| +
|
| +#endif
|
|
|
| // MacroAssembler implements a collection of frequently used macros.
|
| class MacroAssembler : public Assembler {
|
| @@ -115,10 +170,7 @@ class MacroAssembler : public Assembler {
|
| MacroAssembler(Isolate* isolate, void* buffer, int size,
|
| CodeObjectRequired create_code_object);
|
|
|
| -
|
| - // Returns the size of a call in instructions. Note, the value returned is
|
| - // only valid as long as no entries are added to the constant pool between
|
| - // checking the call size and emitting the actual call.
|
| + // Returns the size of a call in instructions.
|
| static int CallSize(Register target);
|
| int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
|
| static int CallSizeNotPredictableCodeSize(Address target,
|
| @@ -141,8 +193,8 @@ class MacroAssembler : public Assembler {
|
| void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
|
| TypeFeedbackId ast_id = TypeFeedbackId::None(),
|
| Condition cond = al);
|
| - void Ret() { blr(); }
|
| - void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }
|
| + void Ret() { b(r14); }
|
| + void Ret(Condition cond) { b(cond, r14); }
|
|
|
| // Emit code to discard a non-negative number of pointer-sized elements
|
| // from the stack, clobbering only the sp register.
|
| @@ -151,7 +203,7 @@ class MacroAssembler : public Assembler {
|
|
|
| void Ret(int drop) {
|
| Drop(drop);
|
| - blr();
|
| + Ret();
|
| }
|
|
|
| void Call(Label* target);
|
| @@ -168,6 +220,9 @@ class MacroAssembler : public Assembler {
|
| void Move(Register dst, Register src, Condition cond = al);
|
| void Move(DoubleRegister dst, DoubleRegister src);
|
|
|
| + void InsertDoubleLow(DoubleRegister dst, Register src);
|
| + void InsertDoubleHigh(DoubleRegister dst, Register src);
|
| +
|
| void MultiPush(RegList regs, Register location = sp);
|
| void MultiPop(RegList regs, Register location = sp);
|
|
|
| @@ -181,6 +236,178 @@ class MacroAssembler : public Assembler {
|
| void StoreRoot(Register source, Heap::RootListIndex index,
|
| Condition cond = al);
|
|
|
| + //--------------------------------------------------------------------------
|
| + // S390 Macro Assemblers for Instructions
|
| + //--------------------------------------------------------------------------
|
| +
|
| + // Arithmetic Operations
|
| +
|
| + // Add (Register - Immediate)
|
| + void Add32(Register dst, const Operand& imm);
|
| + void AddP(Register dst, const Operand& imm);
|
| + void Add32(Register dst, Register src, const Operand& imm);
|
| + void AddP(Register dst, Register src, const Operand& imm);
|
| +
|
| + // Add (Register - Register)
|
| + void Add32(Register dst, Register src);
|
| + void AddP(Register dst, Register src);
|
| + void AddP_ExtendSrc(Register dst, Register src);
|
| + void Add32(Register dst, Register src1, Register src2);
|
| + void AddP(Register dst, Register src1, Register src2);
|
| + void AddP_ExtendSrc(Register dst, Register src1, Register src2);
|
| +
|
| + // Add (Register - Mem)
|
| + void Add32(Register dst, const MemOperand& opnd);
|
| + void AddP(Register dst, const MemOperand& opnd);
|
| + void AddP_ExtendSrc(Register dst, const MemOperand& opnd);
|
| +
|
| + // Add (Mem - Immediate)
|
| + void Add32(const MemOperand& opnd, const Operand& imm);
|
| + void AddP(const MemOperand& opnd, const Operand& imm);
|
| +
|
| + // Add Logical (Register - Immediate)
|
| + void AddLogical(Register dst, const Operand& imm);
|
| + void AddLogicalP(Register dst, const Operand& imm);
|
| +
|
| + // Add Logical (Register - Mem)
|
| + void AddLogical(Register dst, const MemOperand& opnd);
|
| + void AddLogicalP(Register dst, const MemOperand& opnd);
|
| +
|
| + // Subtract (Register - Immediate)
|
| + void Sub32(Register dst, const Operand& imm);
|
| + void SubP(Register dst, const Operand& imm);
|
| + void Sub32(Register dst, Register src, const Operand& imm);
|
| + void SubP(Register dst, Register src, const Operand& imm);
|
| +
|
| + // Subtract (Register - Register)
|
| + void Sub32(Register dst, Register src);
|
| + void SubP(Register dst, Register src);
|
| + void SubP_ExtendSrc(Register dst, Register src);
|
| + void Sub32(Register dst, Register src1, Register src2);
|
| + void SubP(Register dst, Register src1, Register src2);
|
| + void SubP_ExtendSrc(Register dst, Register src1, Register src2);
|
| +
|
| + // Subtract (Register - Mem)
|
| + void Sub32(Register dst, const MemOperand& opnd);
|
| + void SubP(Register dst, const MemOperand& opnd);
|
| + void SubP_ExtendSrc(Register dst, const MemOperand& opnd);
|
| +
|
| + // Subtract Logical (Register - Mem)
|
| + void SubLogical(Register dst, const MemOperand& opnd);
|
| + void SubLogicalP(Register dst, const MemOperand& opnd);
|
| + void SubLogicalP_ExtendSrc(Register dst, const MemOperand& opnd);
|
| +
|
| + // Multiply
|
| + void MulP(Register dst, const Operand& opnd);
|
| + void MulP(Register dst, Register src);
|
| + void MulP(Register dst, const MemOperand& opnd);
|
| + void Mul(Register dst, Register src1, Register src2);
|
| +
|
| + // Divide
|
| + void DivP(Register dividend, Register divider);
|
| +
|
| + // Compare
|
| + void Cmp32(Register src1, Register src2);
|
| + void CmpP(Register src1, Register src2);
|
| + void Cmp32(Register dst, const Operand& opnd);
|
| + void CmpP(Register dst, const Operand& opnd);
|
| + void Cmp32(Register dst, const MemOperand& opnd);
|
| + void CmpP(Register dst, const MemOperand& opnd);
|
| +
|
| + // Compare Logical
|
| + void CmpLogical32(Register src1, Register src2);
|
| + void CmpLogicalP(Register src1, Register src2);
|
| + void CmpLogical32(Register src1, const Operand& opnd);
|
| + void CmpLogicalP(Register src1, const Operand& opnd);
|
| + void CmpLogical32(Register dst, const MemOperand& opnd);
|
| + void CmpLogicalP(Register dst, const MemOperand& opnd);
|
| +
|
| + // Compare Logical Byte (CLI/CLIY)
|
| + void CmpLogicalByte(const MemOperand& mem, const Operand& imm);
|
| +
|
| + // Load 32bit
|
| + void Load(Register dst, const MemOperand& opnd);
|
| + void Load(Register dst, const Operand& opnd);
|
| + void LoadW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
|
| + void LoadlW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
|
| + void LoadB(Register dst, const MemOperand& opnd);
|
| + void LoadlB(Register dst, const MemOperand& opnd);
|
| +
|
| + // Load And Test
|
| + void LoadAndTest32(Register dst, Register src);
|
| + void LoadAndTestP_ExtendSrc(Register dst, Register src);
|
| + void LoadAndTestP(Register dst, Register src);
|
| +
|
| + void LoadAndTest32(Register dst, const MemOperand& opnd);
|
| + void LoadAndTestP(Register dst, const MemOperand& opnd);
|
| +
|
| + // Load Floating Point
|
| + void LoadDouble(DoubleRegister dst, const MemOperand& opnd);
|
| + void LoadFloat32(DoubleRegister dst, const MemOperand& opnd);
|
| + void LoadFloat32ConvertToDouble(DoubleRegister dst, const MemOperand& mem);
|
| +
|
| + // Store Floating Point
|
| + void StoreDouble(DoubleRegister dst, const MemOperand& opnd);
|
| + void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
|
| + void StoreDoubleAsFloat32(DoubleRegister src, const MemOperand& mem,
|
| + DoubleRegister scratch);
|
| +
|
| + void Branch(Condition c, const Operand& opnd);
|
| + void BranchOnCount(Register r1, Label* l);
|
| +
|
| + // Shifts
|
| + void ShiftLeft(Register dst, Register src, Register val);
|
| + void ShiftLeft(Register dst, Register src, const Operand& val);
|
| + void ShiftRight(Register dst, Register src, Register val);
|
| + void ShiftRight(Register dst, Register src, const Operand& val);
|
| + void ShiftLeftArith(Register dst, Register src, Register shift);
|
| + void ShiftLeftArith(Register dst, Register src, const Operand& val);
|
| + void ShiftRightArith(Register dst, Register src, Register shift);
|
| + void ShiftRightArith(Register dst, Register src, const Operand& val);
|
| +
|
| + void ClearRightImm(Register dst, Register src, const Operand& val);
|
| +
|
| + // Bitwise operations
|
| + void And(Register dst, Register src);
|
| + void AndP(Register dst, Register src);
|
| + void And(Register dst, Register src1, Register src2);
|
| + void AndP(Register dst, Register src1, Register src2);
|
| + void And(Register dst, const MemOperand& opnd);
|
| + void AndP(Register dst, const MemOperand& opnd);
|
| + void And(Register dst, const Operand& opnd);
|
| + void AndP(Register dst, const Operand& opnd);
|
| + void And(Register dst, Register src, const Operand& opnd);
|
| + void AndP(Register dst, Register src, const Operand& opnd);
|
| + void Or(Register dst, Register src);
|
| + void OrP(Register dst, Register src);
|
| + void Or(Register dst, Register src1, Register src2);
|
| + void OrP(Register dst, Register src1, Register src2);
|
| + void Or(Register dst, const MemOperand& opnd);
|
| + void OrP(Register dst, const MemOperand& opnd);
|
| + void Or(Register dst, const Operand& opnd);
|
| + void OrP(Register dst, const Operand& opnd);
|
| + void Or(Register dst, Register src, const Operand& opnd);
|
| + void OrP(Register dst, Register src, const Operand& opnd);
|
| + void Xor(Register dst, Register src);
|
| + void XorP(Register dst, Register src);
|
| + void Xor(Register dst, Register src1, Register src2);
|
| + void XorP(Register dst, Register src1, Register src2);
|
| + void Xor(Register dst, const MemOperand& opnd);
|
| + void XorP(Register dst, const MemOperand& opnd);
|
| + void Xor(Register dst, const Operand& opnd);
|
| + void XorP(Register dst, const Operand& opnd);
|
| + void Xor(Register dst, Register src, const Operand& opnd);
|
| + void XorP(Register dst, Register src, const Operand& opnd);
|
| + void Popcnt32(Register dst, Register src);
|
| +
|
| +#ifdef V8_TARGET_ARCH_S390X
|
| + void Popcnt64(Register dst, Register src);
|
| +#endif
|
| +
|
| + void NotP(Register dst);
|
| +
|
| + void mov(Register dst, const Operand& src);
|
| +
|
| // ---------------------------------------------------------------------------
|
| // GC Support
|
|
|
| @@ -270,6 +497,18 @@ class MacroAssembler : public Assembler {
|
| PointersToHereCheck pointers_to_here_check_for_value =
|
| kPointersToHereMaybeInteresting);
|
|
|
| + void push(Register src) {
|
| + lay(sp, MemOperand(sp, -kPointerSize));
|
| + StoreP(src, MemOperand(sp));
|
| + }
|
| +
|
| + void pop(Register dst) {
|
| + LoadP(dst, MemOperand(sp));
|
| + la(sp, MemOperand(sp, kPointerSize));
|
| + }
|
| +
|
| + void pop() { la(sp, MemOperand(sp, kPointerSize)); }
|
| +
|
| void Push(Register src) { push(src); }
|
|
|
| // Push a handle.
|
| @@ -278,33 +517,48 @@ class MacroAssembler : public Assembler {
|
|
|
| // Push two registers. Pushes leftmost register first (to highest address).
|
| void Push(Register src1, Register src2) {
|
| - StorePU(src2, MemOperand(sp, -2 * kPointerSize));
|
| + lay(sp, MemOperand(sp, -kPointerSize * 2));
|
| StoreP(src1, MemOperand(sp, kPointerSize));
|
| + StoreP(src2, MemOperand(sp, 0));
|
| }
|
|
|
| // Push three registers. Pushes leftmost register first (to highest address).
|
| void Push(Register src1, Register src2, Register src3) {
|
| - StorePU(src3, MemOperand(sp, -3 * kPointerSize));
|
| + lay(sp, MemOperand(sp, -kPointerSize * 3));
|
| + StoreP(src1, MemOperand(sp, kPointerSize * 2));
|
| StoreP(src2, MemOperand(sp, kPointerSize));
|
| - StoreP(src1, MemOperand(sp, 2 * kPointerSize));
|
| + StoreP(src3, MemOperand(sp, 0));
|
| }
|
|
|
| // Push four registers. Pushes leftmost register first (to highest address).
|
| void Push(Register src1, Register src2, Register src3, Register src4) {
|
| - StorePU(src4, MemOperand(sp, -4 * kPointerSize));
|
| + lay(sp, MemOperand(sp, -kPointerSize * 4));
|
| + StoreP(src1, MemOperand(sp, kPointerSize * 3));
|
| + StoreP(src2, MemOperand(sp, kPointerSize * 2));
|
| StoreP(src3, MemOperand(sp, kPointerSize));
|
| - StoreP(src2, MemOperand(sp, 2 * kPointerSize));
|
| - StoreP(src1, MemOperand(sp, 3 * kPointerSize));
|
| + StoreP(src4, MemOperand(sp, 0));
|
| }
|
|
|
| // Push five registers. Pushes leftmost register first (to highest address).
|
| void Push(Register src1, Register src2, Register src3, Register src4,
|
| Register src5) {
|
| - StorePU(src5, MemOperand(sp, -5 * kPointerSize));
|
| + DCHECK(!src1.is(src2));
|
| + DCHECK(!src1.is(src3));
|
| + DCHECK(!src2.is(src3));
|
| + DCHECK(!src1.is(src4));
|
| + DCHECK(!src2.is(src4));
|
| + DCHECK(!src3.is(src4));
|
| + DCHECK(!src1.is(src5));
|
| + DCHECK(!src2.is(src5));
|
| + DCHECK(!src3.is(src5));
|
| + DCHECK(!src4.is(src5));
|
| +
|
| + lay(sp, MemOperand(sp, -kPointerSize * 5));
|
| + StoreP(src1, MemOperand(sp, kPointerSize * 4));
|
| + StoreP(src2, MemOperand(sp, kPointerSize * 3));
|
| + StoreP(src3, MemOperand(sp, kPointerSize * 2));
|
| StoreP(src4, MemOperand(sp, kPointerSize));
|
| - StoreP(src3, MemOperand(sp, 2 * kPointerSize));
|
| - StoreP(src2, MemOperand(sp, 3 * kPointerSize));
|
| - StoreP(src1, MemOperand(sp, 4 * kPointerSize));
|
| + StoreP(src5, MemOperand(sp, 0));
|
| }
|
|
|
| void Pop(Register dst) { pop(dst); }
|
| @@ -313,7 +567,7 @@ class MacroAssembler : public Assembler {
|
| void Pop(Register src1, Register src2) {
|
| LoadP(src2, MemOperand(sp, 0));
|
| LoadP(src1, MemOperand(sp, kPointerSize));
|
| - addi(sp, sp, Operand(2 * kPointerSize));
|
| + la(sp, MemOperand(sp, 2 * kPointerSize));
|
| }
|
|
|
| // Pop three registers. Pops rightmost register first (from lower address).
|
| @@ -321,7 +575,7 @@ class MacroAssembler : public Assembler {
|
| LoadP(src3, MemOperand(sp, 0));
|
| LoadP(src2, MemOperand(sp, kPointerSize));
|
| LoadP(src1, MemOperand(sp, 2 * kPointerSize));
|
| - addi(sp, sp, Operand(3 * kPointerSize));
|
| + la(sp, MemOperand(sp, 3 * kPointerSize));
|
| }
|
|
|
| // Pop four registers. Pops rightmost register first (from lower address).
|
| @@ -330,7 +584,7 @@ class MacroAssembler : public Assembler {
|
| LoadP(src3, MemOperand(sp, kPointerSize));
|
| LoadP(src2, MemOperand(sp, 2 * kPointerSize));
|
| LoadP(src1, MemOperand(sp, 3 * kPointerSize));
|
| - addi(sp, sp, Operand(4 * kPointerSize));
|
| + la(sp, MemOperand(sp, 4 * kPointerSize));
|
| }
|
|
|
| // Pop five registers. Pops rightmost register first (from lower address).
|
| @@ -341,7 +595,7 @@ class MacroAssembler : public Assembler {
|
| LoadP(src3, MemOperand(sp, 2 * kPointerSize));
|
| LoadP(src2, MemOperand(sp, 3 * kPointerSize));
|
| LoadP(src1, MemOperand(sp, 4 * kPointerSize));
|
| - addi(sp, sp, Operand(5 * kPointerSize));
|
| + la(sp, MemOperand(sp, 5 * kPointerSize));
|
| }
|
|
|
| // Push a fixed frame, consisting of lr, fp, context and
|
| @@ -391,29 +645,55 @@ class MacroAssembler : public Assembler {
|
| // a float, storing the result in |dst|
|
| void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);
|
|
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
|
| void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
|
| void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
|
| void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
|
| #endif
|
|
|
| + void MovIntToFloat(DoubleRegister dst, Register src);
|
| + void MovFloatToInt(Register dst, DoubleRegister src);
|
| + void MovDoubleToInt64(Register dst, DoubleRegister src);
|
| + void MovInt64ToDouble(DoubleRegister dst, Register src);
|
| + // Converts the double_input to an integer. Note that, upon return,
|
| + // the contents of double_dst will also hold the fixed point representation.
|
| + void ConvertFloat32ToInt64(const DoubleRegister double_input,
|
| +#if !V8_TARGET_ARCH_S390X
|
| + const Register dst_hi,
|
| +#endif
|
| + const Register dst,
|
| + const DoubleRegister double_dst,
|
| + FPRoundingMode rounding_mode = kRoundToZero);
|
| +
|
| // Converts the double_input to an integer. Note that, upon return,
|
| // the contents of double_dst will also hold the fixed point representation.
|
| void ConvertDoubleToInt64(const DoubleRegister double_input,
|
| -#if !V8_TARGET_ARCH_PPC64
|
| +#if !V8_TARGET_ARCH_S390X
|
| const Register dst_hi,
|
| #endif
|
| const Register dst, const DoubleRegister double_dst,
|
| FPRoundingMode rounding_mode = kRoundToZero);
|
|
|
| -#if V8_TARGET_ARCH_PPC64
|
| + void ConvertFloat32ToInt32(const DoubleRegister double_input,
|
| + const Register dst,
|
| + const DoubleRegister double_dst,
|
| + FPRoundingMode rounding_mode = kRoundToZero);
|
| + void ConvertFloat32ToUnsignedInt32(
|
| + const DoubleRegister double_input, const Register dst,
|
| + const DoubleRegister double_dst,
|
| + FPRoundingMode rounding_mode = kRoundToZero);
|
| +#if V8_TARGET_ARCH_S390X
|
| // Converts the double_input to an unsigned integer. Note that, upon return,
|
| // the contents of double_dst will also hold the fixed point representation.
|
| void ConvertDoubleToUnsignedInt64(
|
| const DoubleRegister double_input, const Register dst,
|
| const DoubleRegister double_dst,
|
| FPRoundingMode rounding_mode = kRoundToZero);
|
| + void ConvertFloat32ToUnsignedInt64(
|
| + const DoubleRegister double_input, const Register dst,
|
| + const DoubleRegister double_dst,
|
| + FPRoundingMode rounding_mode = kRoundToZero);
|
| #endif
|
|
|
| // Generates function and stub prologue code.
|
| @@ -471,8 +751,8 @@ class MacroAssembler : public Assembler {
|
| }
|
|
|
| // ----------------------------------------------------------------
|
| - // new PPC macro-assembler interfaces that are slightly higher level
|
| - // than assembler-ppc and may generate variable length sequences
|
| + // new S390 macro-assembler interfaces that are slightly higher level
|
| + // than assembler-s390 and may generate variable length sequences
|
|
|
| // load a literal signed int value <value> to GPR <dst>
|
| void LoadIntLiteral(Register dst, int value);
|
| @@ -482,74 +762,30 @@ class MacroAssembler : public Assembler {
|
|
|
| // load a literal double value <value> to FPR <result>
|
| void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
|
| + void LoadDoubleLiteral(DoubleRegister result, uint64_t value,
|
| + Register scratch);
|
|
|
| - void LoadWord(Register dst, const MemOperand& mem, Register scratch);
|
| - void LoadWordArith(Register dst, const MemOperand& mem,
|
| - Register scratch = no_reg);
|
| - void StoreWord(Register src, const MemOperand& mem, Register scratch);
|
| + void LoadFloat32Literal(DoubleRegister result, float value, Register scratch);
|
| +
|
| + void StoreW(Register src, const MemOperand& mem, Register scratch = no_reg);
|
|
|
| - void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
|
| - void LoadHalfWordArith(Register dst, const MemOperand& mem,
|
| - Register scratch = no_reg);
|
| - void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);
|
| + void LoadHalfWordP(Register dst, const MemOperand& mem,
|
| + Register scratch = no_reg);
|
|
|
| - void LoadByte(Register dst, const MemOperand& mem, Register scratch);
|
| - void StoreByte(Register src, const MemOperand& mem, Register scratch);
|
| + void StoreHalfWord(Register src, const MemOperand& mem,
|
| + Register scratch = r0);
|
| + void StoreByte(Register src, const MemOperand& mem, Register scratch = r0);
|
|
|
| void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
|
| Register scratch = no_reg);
|
| void StoreRepresentation(Register src, const MemOperand& mem,
|
| Representation r, Register scratch = no_reg);
|
|
|
| - void LoadDouble(DoubleRegister dst, const MemOperand& mem, Register scratch);
|
| - void StoreDouble(DoubleRegister src, const MemOperand& mem, Register scratch);
|
| -
|
| - // Move values between integer and floating point registers.
|
| - void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
|
| - void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
|
| - Register scratch);
|
| - void MovInt64ToDouble(DoubleRegister dst,
|
| -#if !V8_TARGET_ARCH_PPC64
|
| - Register src_hi,
|
| -#endif
|
| - Register src);
|
| -#if V8_TARGET_ARCH_PPC64
|
| - void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
|
| - Register src_lo, Register scratch);
|
| -#endif
|
| - void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
|
| - void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
|
| - void MovDoubleLowToInt(Register dst, DoubleRegister src);
|
| - void MovDoubleHighToInt(Register dst, DoubleRegister src);
|
| - void MovDoubleToInt64(
|
| -#if !V8_TARGET_ARCH_PPC64
|
| - Register dst_hi,
|
| -#endif
|
| - Register dst, DoubleRegister src);
|
| - void MovIntToFloat(DoubleRegister dst, Register src);
|
| - void MovFloatToInt(Register dst, DoubleRegister src);
|
| -
|
| - void Add(Register dst, Register src, intptr_t value, Register scratch);
|
| - void Cmpi(Register src1, const Operand& src2, Register scratch,
|
| - CRegister cr = cr7);
|
| - void Cmpli(Register src1, const Operand& src2, Register scratch,
|
| - CRegister cr = cr7);
|
| - void Cmpwi(Register src1, const Operand& src2, Register scratch,
|
| - CRegister cr = cr7);
|
| - void Cmplwi(Register src1, const Operand& src2, Register scratch,
|
| - CRegister cr = cr7);
|
| - void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
|
| - void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
|
| - void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
|
| -
|
| void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
|
| void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
|
| - void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
|
| - CRegister cr = cr7);
|
| - void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
|
| - CRegister cr = cr7);
|
| - void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
|
| - RCBit rc = LeaveRC);
|
| + void CmpSmiLiteral(Register src1, Smi* smi, Register scratch);
|
| + void CmpLogicalSmiLiteral(Register src1, Smi* smi, Register scratch);
|
| + void AndSmiLiteral(Register dst, Register src, Smi* smi);
|
|
|
| // Set new rounding mode RN to FPSCR
|
| void SetRoundingMode(FPRoundingMode RN);
|
| @@ -560,10 +796,29 @@ class MacroAssembler : public Assembler {
|
| // These exist to provide portability between 32 and 64bit
|
| void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
|
| void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
|
| + void StoreP(const MemOperand& mem, const Operand& opnd,
|
| + Register scratch = no_reg);
|
| + void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
|
| + void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
|
| + void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
|
| + void StoreMultipleW(Register dst1, Register dst2, const MemOperand& mem);
|
| +
|
| + // Cleanse pointer address on 31-bit by zeroing out the top bit.
|
| + // This is a NOP on 64-bit.
|
| + void CleanseP(Register src) {
|
| +#if (V8_HOST_ARCH_S390 && !(V8_TARGET_ARCH_S390X))
|
| + nilh(src, Operand(0x7FFF));
|
| +#endif
|
| + }
|
|
|
| // ---------------------------------------------------------------------------
|
| // JavaScript invokes
|
|
|
| + // NOTE(review): stale comment inherited from an x86 port -- "ecx" is
|
| + // not an s390 register, and SetCallKind is not implemented here.
|
| + // Confirm it is unused and drop this remnant.
|
| + // void SetCallKind(Register dst, CallKind kind);
|
| +
|
| // Invoke the JavaScript function code by either calling or jumping.
|
| void InvokeFunctionCode(Register function, Register new_target,
|
| const ParameterCount& expected,
|
| @@ -623,7 +878,6 @@ class MacroAssembler : public Assembler {
|
| Register result, Register t0, Register t1,
|
| Register t2);
|
|
|
| -
|
| inline void MarkCode(NopMarkerTypes type) { nop(type); }
|
|
|
| // Check if the given instruction is a 'type' marker.
|
| @@ -635,7 +889,6 @@ class MacroAssembler : public Assembler {
|
| return IsNop(instr, type);
|
| }
|
|
|
| -
|
| static inline int GetCodeMarker(Instr instr) {
|
| int dst_reg_offset = 12;
|
| int dst_mask = 0xf << dst_reg_offset;
|
| @@ -656,11 +909,10 @@ class MacroAssembler : public Assembler {
|
| return type;
|
| }
|
|
|
| -
|
| // ---------------------------------------------------------------------------
|
| // Allocation support
|
|
|
| - // Allocate an object in new space or old space. The object_size is
|
| + // Allocate an object in new space or old space. The object_size is
|
| // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
|
| // is passed. If the space is exhausted control continues at the gc_required
|
| // label. The allocated object is returned in result. If the flag
|
| @@ -760,7 +1012,6 @@ class MacroAssembler : public Assembler {
|
| // sets the flags and leaves the object type in the type_reg register.
|
| void CompareInstanceType(Register map, Register type_reg, InstanceType type);
|
|
|
| -
|
| // Check if a map for a JSObject indicates that the object has fast elements.
|
| // Jump to the specified label if it does not.
|
| void CheckFastElements(Register map, Register scratch, Label* fail);
|
| @@ -799,11 +1050,9 @@ class MacroAssembler : public Assembler {
|
| void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
|
| SmiCheckType smi_check_type);
|
|
|
| -
|
| void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
|
| Label* fail, SmiCheckType smi_check_type);
|
|
|
| -
|
| // Check if the map of an object is equal to a specified weak map and branch
|
| // to a specified target if equal. Skip the smi check if not required
|
| // (object is known to be a heap object)
|
| @@ -847,13 +1096,13 @@ class MacroAssembler : public Assembler {
|
| // Returns a condition that will be enabled if the object was a string.
|
| Condition IsObjectStringType(Register obj, Register type) {
|
| LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
|
| - lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
|
| - andi(r0, type, Operand(kIsNotStringMask));
|
| + LoadlB(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
|
| + mov(r0, Operand(kIsNotStringMask));
|
| + AndP(r0, type);
|
| DCHECK_EQ(0u, kStringTag);
|
| return eq;
|
| }
|
|
|
| -
|
| // Picks out an array index from the hash field.
|
| // Register use:
|
| // hash - holds the index's hash. Clobbered.
|
| @@ -864,7 +1113,9 @@ class MacroAssembler : public Assembler {
|
| void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
|
| void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits);
|
|
|
| - // Load the value of a smi object into a double register.
|
| + // Load the value of a smi object into a FP double register.
|
| + // NOTE(review): previous comment described a scratch1 parameter, but this
|
| + // declaration only takes (value, smi); verify against the implementation.
|
| void SmiToDouble(DoubleRegister value, Register smi);
|
|
|
| // Check if a double can be exactly represented as a signed 32-bit integer.
|
| @@ -895,6 +1146,22 @@ class MacroAssembler : public Assembler {
|
| Register input_high, Register scratch,
|
| DoubleRegister double_scratch, Label* done, Label* exact);
|
|
|
| + // Perform ceiling of float in double_input and store in double_output.
|
| + void FloatCeiling32(DoubleRegister double_output, DoubleRegister double_input,
|
| + Register scratch, DoubleRegister double_scratch);
|
| +
|
| + // Perform floor of float in double_input and store in double_output.
|
| + void FloatFloor32(DoubleRegister double_output, DoubleRegister double_input,
|
| + Register scratch);
|
| +
|
| + // Perform ceiling of double in double_input and store in double_output.
|
| + void FloatCeiling64(DoubleRegister double_output, DoubleRegister double_input,
|
| + Register scratch, DoubleRegister double_scratch);
|
| +
|
| + // Perform floor of double in double_input and store in double_output.
|
| + void FloatFloor64(DoubleRegister double_output, DoubleRegister double_input,
|
| + Register scratch);
|
| +
|
| // Performs a truncating conversion of a floating point number as used by
|
| // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
|
| // succeeds, otherwise falls through if result is saturated. On return
|
| @@ -940,13 +1207,25 @@ class MacroAssembler : public Assembler {
|
| void SubAndCheckForOverflow(Register dst, Register left, Register right,
|
| Register overflow_dst, Register scratch = r0);
|
|
|
| - void BranchOnOverflow(Label* label) { blt(label, cr0); }
|
| + void BranchOnOverflow(Label* label) { blt(label /*, cr0*/); }
|
|
|
| - void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
|
| + void BranchOnNoOverflow(Label* label) { bge(label /*, cr0*/); }
|
|
|
| - void RetOnOverflow(void) { Ret(lt, cr0); }
|
| + void RetOnOverflow(void) {
|
| + Label label;
|
|
|
| - void RetOnNoOverflow(void) { Ret(ge, cr0); }
|
| + blt(&label /*, cr0*/);
|
| + Ret();
|
| + bind(&label);
|
| + }
|
| +
|
| + void RetOnNoOverflow(void) {
|
| + Label label;
|
| +
|
| + bge(&label /*, cr0*/);
|
| + Ret();
|
| + bind(&label);
|
| + }
|
|
|
| // ---------------------------------------------------------------------------
|
| // Runtime calls
|
| @@ -1033,7 +1312,6 @@ class MacroAssembler : public Assembler {
|
| return code_object_;
|
| }
|
|
|
| -
|
| // Emit code for a truncating division by a constant. The dividend register is
|
| // unchanged and ip gets clobbered. Dividend and result must be different.
|
| void TruncatingDiv(Register result, Register dividend, int32_t divisor);
|
| @@ -1048,7 +1326,6 @@ class MacroAssembler : public Assembler {
|
| void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
|
| Register scratch2);
|
|
|
| -
|
| // ---------------------------------------------------------------------------
|
| // Debugging
|
|
|
| @@ -1098,25 +1375,36 @@ class MacroAssembler : public Assembler {
|
| // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
|
| // and place them into the least significant bits of dst.
|
| inline void ExtractBitRange(Register dst, Register src, int rangeStart,
|
| - int rangeEnd, RCBit rc = LeaveRC) {
|
| + int rangeEnd) {
|
| DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
|
| - int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
|
| - int width = rangeStart - rangeEnd + 1;
|
| - if (rc == SetRC && rangeEnd == 0 && width <= 16) {
|
| - andi(dst, src, Operand((1 << width) - 1));
|
| +
|
| + // Try to use RISBG if possible.
|
| + if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
|
| + int shiftAmount = (64 - rangeEnd) % 64; // Convert to shift left.
|
| + int endBit = 63; // End is always LSB after shifting.
|
| + int startBit = 63 - rangeStart + rangeEnd;
|
| + risbg(dst, src, Operand(startBit), Operand(endBit), Operand(shiftAmount),
|
| + true);
|
| } else {
|
| -#if V8_TARGET_ARCH_PPC64
|
| - rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
|
| + if (rangeEnd > 0) // Don't need to shift if rangeEnd is zero.
|
| + ShiftRightP(dst, src, Operand(rangeEnd));
|
| + else if (!dst.is(src)) // If we didn't shift, we might need to copy
|
| + LoadRR(dst, src);
|
| + int width = rangeStart - rangeEnd + 1;
|
| +#if V8_TARGET_ARCH_S390X
|
| + uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
|
| + nihf(dst, Operand(mask >> 32));
|
| + nilf(dst, Operand(mask & 0xFFFFFFFF));
|
| + ltgr(dst, dst);
|
| #else
|
| - rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
|
| - rc);
|
| + uint32_t mask = (1 << width) - 1;
|
| + AndP(dst, Operand(mask));
|
| #endif
|
| }
|
| }
|
|
|
| - inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
|
| - RCBit rc = LeaveRC) {
|
| - ExtractBitRange(dst, src, bitNumber, bitNumber, rc);
|
| + inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
|
| + ExtractBitRange(dst, src, bitNumber, bitNumber);
|
| }
|
|
|
| // Extract consecutive bits (defined by mask) from src and place them
|
| @@ -1142,19 +1430,19 @@ class MacroAssembler : public Assembler {
|
| // 1-bits in mask must be contiguous
|
| DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
|
|
|
| - ExtractBitRange(dst, src, start, end, rc);
|
| + ExtractBitRange(dst, src, start, end);
|
| }
|
|
|
| // Test single bit in value.
|
| inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
|
| - ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC);
|
| + ExtractBitRange(scratch, value, bitNumber, bitNumber);
|
| }
|
|
|
| // Test consecutive bit range in value. Range is defined by
|
| // rangeStart - rangeEnd.
|
| inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
|
| Register scratch = r0) {
|
| - ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC);
|
| + ExtractBitRange(scratch, value, rangeStart, rangeEnd);
|
| }
|
|
|
| // Test consecutive bit range in value. Range is defined by mask.
|
| @@ -1163,17 +1451,16 @@ class MacroAssembler : public Assembler {
|
| ExtractBitMask(scratch, value, mask, SetRC);
|
| }
|
|
|
| -
|
| // ---------------------------------------------------------------------------
|
| // Smi utilities
|
|
|
| // Shift left by kSmiShift
|
| - void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
|
| - void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
|
| - ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
|
| + void SmiTag(Register reg) { SmiTag(reg, reg); }
|
| + void SmiTag(Register dst, Register src) {
|
| + ShiftLeftP(dst, src, Operand(kSmiShift));
|
| }
|
|
|
| -#if !V8_TARGET_ARCH_PPC64
|
| +#if !V8_TARGET_ARCH_S390X
|
| // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
|
| void SmiTagCheckOverflow(Register reg, Register overflow);
|
| void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
|
| @@ -1182,8 +1469,8 @@ class MacroAssembler : public Assembler {
|
| Label* not_smi_label) {
|
| // High bits must be identical to fit into an Smi
|
| STATIC_ASSERT(kSmiShift == 1);
|
| - addis(scratch, value, Operand(0x40000000u >> 16));
|
| - cmpi(scratch, Operand::Zero());
|
| + AddP(scratch, value, Operand(0x40000000u));
|
| + CmpP(scratch, Operand::Zero());
|
| blt(not_smi_label);
|
| }
|
| #endif
|
| @@ -1197,68 +1484,68 @@ class MacroAssembler : public Assembler {
|
| inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
|
| Label* not_smi_label) {
|
| TestUnsignedSmiCandidate(value, scratch);
|
| - bne(not_smi_label, cr0);
|
| + bne(not_smi_label /*, cr0*/);
|
| }
|
|
|
| - void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
|
| + void SmiUntag(Register reg) { SmiUntag(reg, reg); }
|
|
|
| - void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
|
| - ShiftRightArithImm(dst, src, kSmiShift, rc);
|
| + void SmiUntag(Register dst, Register src) {
|
| + ShiftRightArithP(dst, src, Operand(kSmiShift));
|
| }
|
|
|
| void SmiToPtrArrayOffset(Register dst, Register src) {
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
|
| - ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
|
| + ShiftRightArithP(dst, src, Operand(kSmiShift - kPointerSizeLog2));
|
| #else
|
| STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
|
| - ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
|
| + ShiftLeftP(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
|
| #endif
|
| }
|
|
|
| void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }
|
|
|
| void SmiToShortArrayOffset(Register dst, Register src) {
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
|
| - ShiftRightArithImm(dst, src, kSmiShift - 1);
|
| + ShiftRightArithP(dst, src, Operand(kSmiShift - 1));
|
| #else
|
| STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
|
| if (!dst.is(src)) {
|
| - mr(dst, src);
|
| + LoadRR(dst, src);
|
| }
|
| #endif
|
| }
|
|
|
| void SmiToIntArrayOffset(Register dst, Register src) {
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
|
| - ShiftRightArithImm(dst, src, kSmiShift - 2);
|
| + ShiftRightArithP(dst, src, Operand(kSmiShift - 2));
|
| #else
|
| STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
|
| - ShiftLeftImm(dst, src, Operand(2 - kSmiShift));
|
| + ShiftLeftP(dst, src, Operand(2 - kSmiShift));
|
| #endif
|
| }
|
|
|
| #define SmiToFloatArrayOffset SmiToIntArrayOffset
|
|
|
| void SmiToDoubleArrayOffset(Register dst, Register src) {
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
|
| - ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2);
|
| + ShiftRightArithP(dst, src, Operand(kSmiShift - kDoubleSizeLog2));
|
| #else
|
| STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
|
| - ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
|
| + ShiftLeftP(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
|
| #endif
|
| }
|
|
|
| void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
|
| if (kSmiShift < elementSizeLog2) {
|
| - ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift));
|
| + ShiftLeftP(dst, src, Operand(elementSizeLog2 - kSmiShift));
|
| } else if (kSmiShift > elementSizeLog2) {
|
| - ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2);
|
| + ShiftRightArithP(dst, src, Operand(kSmiShift - elementSizeLog2));
|
| } else if (!dst.is(src)) {
|
| - mr(dst, src);
|
| + LoadRR(dst, src);
|
| }
|
| }
|
|
|
| @@ -1267,7 +1554,7 @@ class MacroAssembler : public Assembler {
|
| if (isSmi) {
|
| SmiToArrayOffset(dst, src, elementSizeLog2);
|
| } else {
|
| - ShiftLeftImm(dst, src, Operand(elementSizeLog2));
|
| + ShiftLeftP(dst, src, Operand(elementSizeLog2));
|
| }
|
| }
|
|
|
| @@ -1279,28 +1566,24 @@ class MacroAssembler : public Assembler {
|
| // Souce and destination can be the same register.
|
| void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
|
|
|
| - inline void TestIfSmi(Register value, Register scratch) {
|
| - TestBitRange(value, kSmiTagSize - 1, 0, scratch);
|
| - }
|
| + inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
|
|
|
| inline void TestIfPositiveSmi(Register value, Register scratch) {
|
| -#if V8_TARGET_ARCH_PPC64
|
| - rldicl(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize), SetRC);
|
| -#else
|
| - rlwinm(scratch, value, 1, kBitsPerPointer - (1 + kSmiTagSize),
|
| - kBitsPerPointer - 1, SetRC);
|
| -#endif
|
| + STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
|
| + (intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
|
| + mov(scratch, Operand(kIntptrSignBit | kSmiTagMask));
|
| + AndP(scratch, value);
|
| }
|
|
|
| // Jump the register contains a smi.
|
| inline void JumpIfSmi(Register value, Label* smi_label) {
|
| - TestIfSmi(value, r0);
|
| - beq(smi_label, cr0); // branch if SMI
|
| + TestIfSmi(value);
|
| + beq(smi_label /*, cr0*/); // branch if SMI
|
| }
|
| // Jump if either of the registers contain a non-smi.
|
| inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
|
| - TestIfSmi(value, r0);
|
| - bne(not_smi_label, cr0);
|
| + TestIfSmi(value);
|
| + bne(not_smi_label /*, cr0*/);
|
| }
|
| // Jump if either of the registers contain a non-smi.
|
| void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
|
| @@ -1311,30 +1594,28 @@ class MacroAssembler : public Assembler {
|
| void AssertNotSmi(Register object);
|
| void AssertSmi(Register object);
|
|
|
| -
|
| -#if V8_TARGET_ARCH_PPC64
|
| - inline void TestIfInt32(Register value, Register scratch,
|
| - CRegister cr = cr7) {
|
| +#if V8_TARGET_ARCH_S390X
|
| + inline void TestIfInt32(Register value, Register scratch) {
|
| // High bits must be identical to fit into an 32-bit integer
|
| - extsw(scratch, value);
|
| - cmp(scratch, value, cr);
|
| + lgfr(scratch, value);
|
| + CmpP(scratch, value);
|
| }
|
| #else
|
| - inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
|
| - CRegister cr = cr7) {
|
| + inline void TestIfInt32(Register hi_word, Register lo_word,
|
| + Register scratch) {
|
| // High bits must be identical to fit into an 32-bit integer
|
| - srawi(scratch, lo_word, 31);
|
| - cmp(scratch, hi_word, cr);
|
| + ShiftRightArith(scratch, lo_word, Operand(31));
|
| + CmpP(scratch, hi_word);
|
| }
|
| #endif
|
|
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| // Ensure it is permissable to read/write int value directly from
|
| // upper half of the smi.
|
| STATIC_ASSERT(kSmiTag == 0);
|
| STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
|
| #endif
|
| -#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
|
| +#if V8_TARGET_LITTLE_ENDIAN
|
| #define SmiWordOffset(offset) (offset + kPointerSize / 2)
|
| #else
|
| #define SmiWordOffset(offset) offset
|
| @@ -1406,10 +1687,6 @@ class MacroAssembler : public Assembler {
|
| // ---------------------------------------------------------------------------
|
| // Patching helpers.
|
|
|
| - // Decode offset from constant pool load instruction(s).
|
| - // Caller must place the instruction word at <location> in <result>.
|
| - void DecodeConstantPoolOffset(Register result, Register location);
|
| -
|
| void ClampUint8(Register output_reg, Register input_reg);
|
|
|
| // Saturate a value into 8-bit unsigned integer
|
| @@ -1419,7 +1696,6 @@ class MacroAssembler : public Assembler {
|
| void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
|
| DoubleRegister temp_double_reg);
|
|
|
| -
|
| void LoadInstanceDescriptors(Register map, Register descriptors);
|
| void EnumLength(Register dst, Register map);
|
| void NumberOfOwnDescriptors(Register dst, Register map);
|
| @@ -1427,32 +1703,20 @@ class MacroAssembler : public Assembler {
|
| AccessorComponent accessor);
|
|
|
| template <typename Field>
|
| - void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
|
| - ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
|
| - rc);
|
| + void DecodeField(Register dst, Register src) {
|
| + ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
|
| }
|
|
|
| template <typename Field>
|
| - void DecodeField(Register reg, RCBit rc = LeaveRC) {
|
| - DecodeField<Field>(reg, reg, rc);
|
| + void DecodeField(Register reg) {
|
| + DecodeField<Field>(reg, reg);
|
| }
|
|
|
| template <typename Field>
|
| void DecodeFieldToSmi(Register dst, Register src) {
|
| -#if V8_TARGET_ARCH_PPC64
|
| + // TODO(joransiu): Optimize into single instruction
|
| DecodeField<Field>(dst, src);
|
| SmiTag(dst);
|
| -#else
|
| - // 32-bit can do this in one instruction:
|
| - int start = Field::kSize + kSmiShift - 1;
|
| - int end = kSmiShift;
|
| - int rotate = kSmiShift - Field::kShift;
|
| - if (rotate < 0) {
|
| - rotate += kBitsPerPointer;
|
| - }
|
| - rlwinm(dst, src, rotate, kBitsPerPointer - start - 1,
|
| - kBitsPerPointer - end - 1);
|
| -#endif
|
| }
|
|
|
| template <typename Field>
|
| @@ -1469,8 +1733,8 @@ class MacroAssembler : public Assembler {
|
| // Returns the pc offset at which the frame ends.
|
| int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
|
|
|
| - // Expects object in r3 and returns map with validated enum cache
|
| - // in r3. Assumes that any other register can be used as a scratch.
|
| + // Expects object in r2 and returns map with validated enum cache
|
| + // in r2. Assumes that any other register can be used as a scratch.
|
| void CheckEnumCache(Label* call_runtime);
|
|
|
| // AllocationMemento support. Arrays may have an associated
|
| @@ -1497,19 +1761,6 @@ class MacroAssembler : public Assembler {
|
| void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
|
| Register scratch1, Label* found);
|
|
|
| - // Loads the constant pool pointer (kConstantPoolRegister).
|
| - void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
|
| - Register code_target_address);
|
| - void LoadConstantPoolPointerRegister();
|
| - void LoadConstantPoolPointerRegister(Register base, int code_entry_delta = 0);
|
| -
|
| - void AbortConstantPoolBuilding() {
|
| -#ifdef DEBUG
|
| - // Avoid DCHECK(!is_linked()) failure in ~Label()
|
| - bind(ConstantPoolPosition());
|
| -#endif
|
| - }
|
| -
|
| private:
|
| static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
|
|
|
| @@ -1558,7 +1809,6 @@ class MacroAssembler : public Assembler {
|
| friend class StandardFrame;
|
| };
|
|
|
| -
|
| // The code patcher is used to patch (typically) small parts of code e.g. for
|
| // debugging and other types of instrumentation. When using the code patcher
|
| // the exact number of bytes specified must be emitted. It is not legal to emit
|
| @@ -1575,13 +1825,6 @@ class CodePatcher {
|
| // Macro assembler to emit code.
|
| MacroAssembler* masm() { return &masm_; }
|
|
|
| - // Emit an instruction directly.
|
| - void Emit(Instr instr);
|
| -
|
| - // Emit the condition part of an instruction leaving the rest of the current
|
| - // instruction unchanged.
|
| - void EmitCondition(Condition cond);
|
| -
|
| private:
|
| byte* address_; // The address of the code being patched.
|
| int size_; // Number of bytes of the expected patch size.
|
| @@ -1589,7 +1832,6 @@ class CodePatcher {
|
| FlushICache flush_cache_; // Whether to flush the I cache after patching.
|
| };
|
|
|
| -
|
| // -----------------------------------------------------------------------------
|
| // Static helper functions.
|
|
|
| @@ -1597,12 +1839,10 @@ inline MemOperand ContextMemOperand(Register context, int index = 0) {
|
| return MemOperand(context, Context::SlotOffset(index));
|
| }
|
|
|
| -
|
| inline MemOperand NativeContextMemOperand() {
|
| return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
|
| }
|
|
|
| -
|
| #ifdef GENERATED_CODE_COVERAGE
|
| #define CODE_COVERAGE_STRINGIFY(x) #x
|
| #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
|
| @@ -1616,4 +1856,4 @@ inline MemOperand NativeContextMemOperand() {
|
| } // namespace internal
|
| } // namespace v8
|
|
|
| -#endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_
|
| +#endif // V8_S390_MACRO_ASSEMBLER_S390_H_
|
|
|