Index: src/ppc/macro-assembler-ppc.h |
diff --git a/src/arm/macro-assembler-arm.h b/src/ppc/macro-assembler-ppc.h |
similarity index 69% |
copy from src/arm/macro-assembler-arm.h |
copy to src/ppc/macro-assembler-ppc.h |
index d29ca79e935e85eab95b8579977577af691905d8..00b251a499f3952bb07919b413f5cb09306e611b 100644 |
--- a/src/arm/macro-assembler-arm.h |
+++ b/src/ppc/macro-assembler-ppc.h |
@@ -1,9 +1,12 @@ |
// Copyright 2012 the V8 project authors. All rights reserved. |
+// |
+// Copyright IBM Corp. 2012, 2013. All rights reserved. |
+// |
// Use of this source code is governed by a BSD-style license that can be |
// found in the LICENSE file. |
-#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_ |
-#define V8_ARM_MACRO_ASSEMBLER_ARM_H_ |
+#ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_ |
+#define V8_PPC_MACRO_ASSEMBLER_PPC_H_ |
#include "src/assembler.h" |
#include "src/frames.h" |
@@ -21,11 +24,6 @@ inline MemOperand FieldMemOperand(Register object, int offset) { |
} |
-// Give alias names to registers |
-const Register cp = { kRegister_r7_Code }; // JavaScript context pointer. |
-const Register pp = { kRegister_r8_Code }; // Constant pool pointer. |
-const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer. |
- |
// Flags used for AllocateHeapNumber |
enum TaggingMode { |
// Tag the result. |
@@ -63,11 +61,43 @@ bool AreAliased(Register reg1, |
Register reg8 = no_reg); |
#endif |
+// These exist to provide portability between 32 and 64-bit |
+#if V8_TARGET_ARCH_PPC64 |
+#define LoadPU ldu |
+#define LoadPX ldx |
+#define LoadPUX ldux |
+#define StorePU stdu |
+#define StorePX stdx |
+#define StorePUX stdux |
+#define ShiftLeftImm sldi |
+#define ShiftRightImm srdi |
+#define ClearLeftImm clrldi |
+#define ClearRightImm clrrdi |
+#define ShiftRightArithImm sradi |
+#define ShiftLeft sld |
+#define ShiftRight srd |
+#define ShiftRightArith srad |
+#define Mul mulld |
+#define Div divd |
+#else |
+#define LoadPU lwzu |
+#define LoadPX lwzx |
+#define LoadPUX lwzux |
+#define StorePU stwu |
+#define StorePX stwx |
+#define StorePUX stwux |
+#define ShiftLeftImm slwi |
+#define ShiftRightImm srwi |
+#define ClearLeftImm clrlwi |
+#define ClearRightImm clrrwi |
+#define ShiftRightArithImm srawi |
+#define ShiftLeft slw |
+#define ShiftRight srw |
+#define ShiftRightArith sraw |
+#define Mul mullw |
+#define Div divw |
+#endif |
-enum TargetAddressStorageMode { |
- CAN_INLINE_TARGET_ADDRESS, |
- NEVER_INLINE_TARGET_ADDRESS |
-}; |
// MacroAssembler implements a collection of frequently used macros. |
class MacroAssembler: public Assembler { |
@@ -84,22 +114,17 @@ class MacroAssembler: public Assembler { |
// checking the call size and emitting the actual call. |
static int CallSize(Register target, Condition cond = al); |
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); |
- int CallStubSize(CodeStub* stub, |
- TypeFeedbackId ast_id = TypeFeedbackId::None(), |
- Condition cond = al); |
- static int CallSizeNotPredictableCodeSize(Isolate* isolate, |
- Address target, |
+ static int CallSizeNotPredictableCodeSize(Address target, |
RelocInfo::Mode rmode, |
Condition cond = al); |
// Jump, Call, and Ret pseudo instructions implementing inter-working. |
void Jump(Register target, Condition cond = al); |
- void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); |
+ void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al, |
+ CRegister cr = cr7); |
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); |
void Call(Register target, Condition cond = al); |
- void Call(Address target, RelocInfo::Mode rmode, |
- Condition cond = al, |
- TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS); |
+ void Call(Address target, RelocInfo::Mode rmode, Condition cond = al); |
int CallSize(Handle<Code> code, |
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, |
TypeFeedbackId ast_id = TypeFeedbackId::None(), |
@@ -107,8 +132,7 @@ class MacroAssembler: public Assembler { |
void Call(Handle<Code> code, |
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, |
TypeFeedbackId ast_id = TypeFeedbackId::None(), |
- Condition cond = al, |
- TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS); |
+ Condition cond = al); |
void Ret(Condition cond = al); |
// Emit code to discard a non-negative number of pointer-sized elements |
@@ -117,45 +141,21 @@ class MacroAssembler: public Assembler { |
void Ret(int drop, Condition cond = al); |
- // Swap two registers. If the scratch register is omitted then a slightly |
- // less efficient form using xor instead of mov is emitted. |
- void Swap(Register reg1, |
- Register reg2, |
- Register scratch = no_reg, |
- Condition cond = al); |
- |
- void Mls(Register dst, Register src1, Register src2, Register srcA, |
- Condition cond = al); |
- void And(Register dst, Register src1, const Operand& src2, |
- Condition cond = al); |
- void Ubfx(Register dst, Register src, int lsb, int width, |
- Condition cond = al); |
- void Sbfx(Register dst, Register src, int lsb, int width, |
- Condition cond = al); |
- // The scratch register is not used for ARMv7. |
- // scratch can be the same register as src (in which case it is trashed), but |
- // not the same as dst. |
- void Bfi(Register dst, |
- Register src, |
- Register scratch, |
- int lsb, |
- int width, |
- Condition cond = al); |
- void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al); |
- void Usat(Register dst, int satpos, const Operand& src, |
- Condition cond = al); |
- |
void Call(Label* target); |
- void Push(Register src) { push(src); } |
- void Pop(Register dst) { pop(dst); } |
+ |
+ // Emit call to the code we are currently generating. |
+ void CallSelf() { |
+ Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location())); |
+ Call(self, RelocInfo::CODE_TARGET); |
+ } |
// Register move. May do nothing if the registers are identical. |
void Move(Register dst, Handle<Object> value); |
void Move(Register dst, Register src, Condition cond = al); |
- void Move(DwVfpRegister dst, DwVfpRegister src); |
+ void Move(DoubleRegister dst, DoubleRegister src); |
- void Load(Register dst, const MemOperand& src, Representation r); |
- void Store(Register src, const MemOperand& dst, Representation r); |
+ void MultiPush(RegList regs); |
+ void MultiPop(RegList regs); |
// Load an object from the root table. |
void LoadRoot(Register destination, |
@@ -305,137 +305,94 @@ class MacroAssembler: public Assembler { |
PointersToHereCheck pointers_to_here_check_for_value = |
kPointersToHereMaybeInteresting); |
+ void Push(Register src) { push(src); } |
+ |
// Push a handle. |
void Push(Handle<Object> handle); |
void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } |
// Push two registers. Pushes leftmost register first (to highest address). |
- void Push(Register src1, Register src2, Condition cond = al) { |
- ASSERT(!src1.is(src2)); |
- if (src1.code() > src2.code()) { |
- stm(db_w, sp, src1.bit() | src2.bit(), cond); |
- } else { |
- str(src1, MemOperand(sp, 4, NegPreIndex), cond); |
- str(src2, MemOperand(sp, 4, NegPreIndex), cond); |
- } |
+ void Push(Register src1, Register src2) { |
+ StorePU(src1, MemOperand(sp, -kPointerSize)); |
+ StorePU(src2, MemOperand(sp, -kPointerSize)); |
} |
// Push three registers. Pushes leftmost register first (to highest address). |
- void Push(Register src1, Register src2, Register src3, Condition cond = al) { |
- ASSERT(!src1.is(src2)); |
- ASSERT(!src2.is(src3)); |
- ASSERT(!src1.is(src3)); |
- if (src1.code() > src2.code()) { |
- if (src2.code() > src3.code()) { |
- stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond); |
- } else { |
- stm(db_w, sp, src1.bit() | src2.bit(), cond); |
- str(src3, MemOperand(sp, 4, NegPreIndex), cond); |
- } |
- } else { |
- str(src1, MemOperand(sp, 4, NegPreIndex), cond); |
- Push(src2, src3, cond); |
- } |
+ void Push(Register src1, Register src2, Register src3) { |
+ StorePU(src1, MemOperand(sp, -kPointerSize)); |
+ StorePU(src2, MemOperand(sp, -kPointerSize)); |
+ StorePU(src3, MemOperand(sp, -kPointerSize)); |
} |
// Push four registers. Pushes leftmost register first (to highest address). |
void Push(Register src1, |
Register src2, |
Register src3, |
+ Register src4) { |
+ StorePU(src1, MemOperand(sp, -kPointerSize)); |
+ StorePU(src2, MemOperand(sp, -kPointerSize)); |
+ StorePU(src3, MemOperand(sp, -kPointerSize)); |
+ StorePU(src4, MemOperand(sp, -kPointerSize)); |
+ } |
+ |
+ // Push five registers. Pushes leftmost register first (to highest address). |
+ void Push(Register src1, |
+ Register src2, |
+ Register src3, |
Register src4, |
- Condition cond = al) { |
- ASSERT(!src1.is(src2)); |
- ASSERT(!src2.is(src3)); |
- ASSERT(!src1.is(src3)); |
- ASSERT(!src1.is(src4)); |
- ASSERT(!src2.is(src4)); |
- ASSERT(!src3.is(src4)); |
- if (src1.code() > src2.code()) { |
- if (src2.code() > src3.code()) { |
- if (src3.code() > src4.code()) { |
- stm(db_w, |
- sp, |
- src1.bit() | src2.bit() | src3.bit() | src4.bit(), |
- cond); |
- } else { |
- stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond); |
- str(src4, MemOperand(sp, 4, NegPreIndex), cond); |
- } |
- } else { |
- stm(db_w, sp, src1.bit() | src2.bit(), cond); |
- Push(src3, src4, cond); |
- } |
- } else { |
- str(src1, MemOperand(sp, 4, NegPreIndex), cond); |
- Push(src2, src3, src4, cond); |
- } |
+ Register src5) { |
+ StorePU(src1, MemOperand(sp, -kPointerSize)); |
+ StorePU(src2, MemOperand(sp, -kPointerSize)); |
+ StorePU(src3, MemOperand(sp, -kPointerSize)); |
+ StorePU(src4, MemOperand(sp, -kPointerSize)); |
+ StorePU(src5, MemOperand(sp, -kPointerSize)); |
} |
+ void Pop(Register dst) { pop(dst); } |
+ |
// Pop two registers. Pops rightmost register first (from lower address). |
- void Pop(Register src1, Register src2, Condition cond = al) { |
- ASSERT(!src1.is(src2)); |
- if (src1.code() > src2.code()) { |
- ldm(ia_w, sp, src1.bit() | src2.bit(), cond); |
- } else { |
- ldr(src2, MemOperand(sp, 4, PostIndex), cond); |
- ldr(src1, MemOperand(sp, 4, PostIndex), cond); |
- } |
+ void Pop(Register src1, Register src2) { |
+ LoadP(src2, MemOperand(sp, 0)); |
+ LoadP(src1, MemOperand(sp, kPointerSize)); |
+ addi(sp, sp, Operand(2 * kPointerSize)); |
} |
// Pop three registers. Pops rightmost register first (from lower address). |
- void Pop(Register src1, Register src2, Register src3, Condition cond = al) { |
- ASSERT(!src1.is(src2)); |
- ASSERT(!src2.is(src3)); |
- ASSERT(!src1.is(src3)); |
- if (src1.code() > src2.code()) { |
- if (src2.code() > src3.code()) { |
- ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond); |
- } else { |
- ldr(src3, MemOperand(sp, 4, PostIndex), cond); |
- ldm(ia_w, sp, src1.bit() | src2.bit(), cond); |
- } |
- } else { |
- Pop(src2, src3, cond); |
- ldr(src1, MemOperand(sp, 4, PostIndex), cond); |
- } |
+ void Pop(Register src1, Register src2, Register src3) { |
+ LoadP(src3, MemOperand(sp, 0)); |
+ LoadP(src2, MemOperand(sp, kPointerSize)); |
+ LoadP(src1, MemOperand(sp, 2 * kPointerSize)); |
+ addi(sp, sp, Operand(3 * kPointerSize)); |
} |
// Pop four registers. Pops rightmost register first (from lower address). |
void Pop(Register src1, |
Register src2, |
Register src3, |
+ Register src4) { |
+ LoadP(src4, MemOperand(sp, 0)); |
+ LoadP(src3, MemOperand(sp, kPointerSize)); |
+ LoadP(src2, MemOperand(sp, 2 * kPointerSize)); |
+ LoadP(src1, MemOperand(sp, 3 * kPointerSize)); |
+ addi(sp, sp, Operand(4 * kPointerSize)); |
+ } |
+ |
+ // Pop five registers. Pops rightmost register first (from lower address). |
+ void Pop(Register src1, |
+ Register src2, |
+ Register src3, |
Register src4, |
- Condition cond = al) { |
- ASSERT(!src1.is(src2)); |
- ASSERT(!src2.is(src3)); |
- ASSERT(!src1.is(src3)); |
- ASSERT(!src1.is(src4)); |
- ASSERT(!src2.is(src4)); |
- ASSERT(!src3.is(src4)); |
- if (src1.code() > src2.code()) { |
- if (src2.code() > src3.code()) { |
- if (src3.code() > src4.code()) { |
- ldm(ia_w, |
- sp, |
- src1.bit() | src2.bit() | src3.bit() | src4.bit(), |
- cond); |
- } else { |
- ldr(src4, MemOperand(sp, 4, PostIndex), cond); |
- ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond); |
- } |
- } else { |
- Pop(src3, src4, cond); |
- ldm(ia_w, sp, src1.bit() | src2.bit(), cond); |
- } |
- } else { |
- Pop(src2, src3, src4, cond); |
- ldr(src1, MemOperand(sp, 4, PostIndex), cond); |
- } |
+ Register src5) { |
+ LoadP(src5, MemOperand(sp, 0)); |
+ LoadP(src4, MemOperand(sp, kPointerSize)); |
+ LoadP(src3, MemOperand(sp, 2 * kPointerSize)); |
+ LoadP(src2, MemOperand(sp, 3 * kPointerSize)); |
+ LoadP(src1, MemOperand(sp, 4 * kPointerSize)); |
+ addi(sp, sp, Operand(5 * kPointerSize)); |
} |
- // Push a fixed frame, consisting of lr, fp, constant pool (if |
- // FLAG_enable_ool_constant_pool), context and JS function / marker id if |
- // marker_reg is a valid register. |
+ // Push a fixed frame, consisting of lr, fp, context and |
+ // JS function / marker id if marker_reg is a valid register. |
void PushFixedFrame(Register marker_reg = no_reg); |
void PopFixedFrame(Register marker_reg = no_reg); |
@@ -450,96 +407,45 @@ class MacroAssembler: public Assembler { |
// into register dst. |
void LoadFromSafepointRegisterSlot(Register dst, Register src); |
- // Load two consecutive registers with two consecutive memory locations. |
- void Ldrd(Register dst1, |
- Register dst2, |
- const MemOperand& src, |
- Condition cond = al); |
- |
- // Store two consecutive registers to two consecutive memory locations. |
- void Strd(Register src1, |
- Register src2, |
- const MemOperand& dst, |
- Condition cond = al); |
- |
- // Ensure that FPSCR contains values needed by JavaScript. |
- // We need the NaNModeControlBit to be sure that operations like |
- // vadd and vsub generate the Canonical NaN (if a NaN must be generated). |
- // In VFP3 it will be always the Canonical NaN. |
- // In VFP2 it will be either the Canonical NaN or the negative version |
- // of the Canonical NaN. It doesn't matter if we have two values. The aim |
- // is to be sure to never generate the hole NaN. |
- void VFPEnsureFPSCRState(Register scratch); |
+ // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache |
+ // from C. |
+ // Does not handle errors. |
+ void FlushICache(Register address, size_t size, |
+ Register scratch); |
// If the value is a NaN, canonicalize the value else, do nothing. |
- void VFPCanonicalizeNaN(const DwVfpRegister dst, |
- const DwVfpRegister src, |
- const Condition cond = al); |
- void VFPCanonicalizeNaN(const DwVfpRegister value, |
- const Condition cond = al) { |
- VFPCanonicalizeNaN(value, value, cond); |
+ void CanonicalizeNaN(const DoubleRegister dst, |
+ const DoubleRegister src); |
+ void CanonicalizeNaN(const DoubleRegister value) { |
+ CanonicalizeNaN(value, value); |
} |
- // Compare double values and move the result to the normal condition flags. |
- void VFPCompareAndSetFlags(const DwVfpRegister src1, |
- const DwVfpRegister src2, |
- const Condition cond = al); |
- void VFPCompareAndSetFlags(const DwVfpRegister src1, |
- const double src2, |
- const Condition cond = al); |
- |
- // Compare double values and then load the fpscr flags to a register. |
- void VFPCompareAndLoadFlags(const DwVfpRegister src1, |
- const DwVfpRegister src2, |
- const Register fpscr_flags, |
- const Condition cond = al); |
- void VFPCompareAndLoadFlags(const DwVfpRegister src1, |
- const double src2, |
- const Register fpscr_flags, |
- const Condition cond = al); |
- |
- void Vmov(const DwVfpRegister dst, |
- const double imm, |
- const Register scratch = no_reg); |
- |
- void VmovHigh(Register dst, DwVfpRegister src); |
- void VmovHigh(DwVfpRegister dst, Register src); |
- void VmovLow(Register dst, DwVfpRegister src); |
- void VmovLow(DwVfpRegister dst, Register src); |
- |
- // Loads the number from object into dst register. |
- // If |object| is neither smi nor heap number, |not_number| is jumped to |
- // with |object| still intact. |
- void LoadNumber(Register object, |
- LowDwVfpRegister dst, |
- Register heap_number_map, |
- Register scratch, |
- Label* not_number); |
- |
- // Loads the number from object into double_dst in the double format. |
- // Control will jump to not_int32 if the value cannot be exactly represented |
- // by a 32-bit integer. |
- // Floating point value in the 32-bit integer range that are not exact integer |
- // won't be loaded. |
- void LoadNumberAsInt32Double(Register object, |
- DwVfpRegister double_dst, |
- Register heap_number_map, |
- Register scratch, |
- LowDwVfpRegister double_scratch, |
- Label* not_int32); |
- |
- // Loads the number from object into dst as a 32-bit integer. |
- // Control will jump to not_int32 if the object cannot be exactly represented |
- // by a 32-bit integer. |
- // Floating point value in the 32-bit integer range that are not exact integer |
- // won't be converted. |
- void LoadNumberAsInt32(Register object, |
- Register dst, |
- Register heap_number_map, |
- Register scratch, |
- DwVfpRegister double_scratch0, |
- LowDwVfpRegister double_scratch1, |
- Label* not_int32); |
+ // Converts the integer (untagged smi) in |src| to a double, storing |
+ // the result to |double_dst| |
+ void ConvertIntToDouble(Register src, |
+ DoubleRegister double_dst); |
+ |
+ // Converts the unsigned integer (untagged smi) in |src| to |
+ // a double, storing the result to |double_dst| |
+ void ConvertUnsignedIntToDouble(Register src, |
+ DoubleRegister double_dst); |
+ |
+ // Converts the integer (untagged smi) in |src| to |
+ // a float, storing the result in |dst| |
+  // Warning: The value in |int_scratch| will be changed in the process! |
+ void ConvertIntToFloat(const DoubleRegister dst, |
+ const Register src, |
+ const Register int_scratch); |
+ |
+ // Converts the double_input to an integer. Note that, upon return, |
+ // the contents of double_dst will also hold the fixed point representation. |
+ void ConvertDoubleToInt64(const DoubleRegister double_input, |
+ const Register dst, |
+#if !V8_TARGET_ARCH_PPC64 |
+ const Register dst_hi, |
+#endif |
+ const DoubleRegister double_dst, |
+ FPRoundingMode rounding_mode = kRoundToZero); |
// Generates function and stub prologue code. |
void StubPrologue(); |
@@ -586,6 +492,99 @@ class MacroAssembler: public Assembler { |
mov(kRootRegister, Operand(roots_array_start)); |
} |
+ // ---------------------------------------------------------------- |
+ // new PPC macro-assembler interfaces that are slightly higher level |
+ // than assembler-ppc and may generate variable length sequences |
+ |
+ // load a literal signed int value <value> to GPR <dst> |
+ void LoadIntLiteral(Register dst, int value); |
+ |
+ // load an SMI value <value> to GPR <dst> |
+ void LoadSmiLiteral(Register dst, Smi *smi); |
+ |
+ // load a literal double value <value> to FPR <result> |
+ void LoadDoubleLiteral(DoubleRegister result, |
+ double value, |
+ Register scratch); |
+ |
+ void LoadWord(Register dst, |
+ const MemOperand& mem, |
+ Register scratch, |
+ bool updateForm = false); |
+ |
+ void LoadWordArith(Register dst, |
+ const MemOperand& mem, |
+ Register scratch = no_reg); |
+ |
+ void StoreWord(Register src, |
+ const MemOperand& mem, |
+ Register scratch, |
+ bool updateForm = false); |
+ |
+ void LoadHalfWord(Register dst, |
+ const MemOperand& mem, |
+ Register scratch, |
+ bool updateForm = false); |
+ |
+ void StoreHalfWord(Register src, |
+ const MemOperand& mem, |
+ Register scratch, |
+ bool updateForm = false); |
+ |
+ void LoadByte(Register dst, |
+ const MemOperand& mem, |
+ Register scratch, |
+ bool updateForm = false); |
+ |
+ void StoreByte(Register src, |
+ const MemOperand& mem, |
+ Register scratch, |
+ bool updateForm = false); |
+ |
+ void LoadRepresentation(Register dst, |
+ const MemOperand& mem, |
+ Representation r, |
+ Register scratch = no_reg); |
+ |
+ void StoreRepresentation(Register src, |
+ const MemOperand& mem, |
+ Representation r, |
+ Register scratch = no_reg); |
+ |
+ |
+ |
+ void Add(Register dst, Register src, intptr_t value, Register scratch); |
+ void Cmpi(Register src1, const Operand& src2, Register scratch, |
+ CRegister cr = cr7); |
+ void Cmpli(Register src1, const Operand& src2, Register scratch, |
+ CRegister cr = cr7); |
+ void Cmpwi(Register src1, const Operand& src2, Register scratch, |
+ CRegister cr = cr7); |
+ void Cmplwi(Register src1, const Operand& src2, Register scratch, |
+ CRegister cr = cr7); |
+ void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC); |
+ void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC); |
+ void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC); |
+ |
+ void AddSmiLiteral(Register dst, Register src, Smi *smi, Register scratch); |
+ void SubSmiLiteral(Register dst, Register src, Smi *smi, Register scratch); |
+ void CmpSmiLiteral(Register src1, Smi *smi, Register scratch, |
+ CRegister cr = cr7); |
+ void CmplSmiLiteral(Register src1, Smi *smi, Register scratch, |
+ CRegister cr = cr7); |
+ void AndSmiLiteral(Register dst, Register src, Smi *smi, Register scratch, |
+ RCBit rc = LeaveRC); |
+ |
+ // Set new rounding mode RN to FPSCR |
+ void SetRoundingMode(FPRoundingMode RN); |
+ |
+ // reset rounding mode to default (kRoundToNearest) |
+ void ResetRoundingMode(); |
+ |
+  // These exist to provide portability between 32 and 64-bit |
+ void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg); |
+ void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg); |
+ |
// --------------------------------------------------------------------------- |
// JavaScript invokes |
@@ -785,17 +784,14 @@ class MacroAssembler: public Assembler { |
TaggingMode tagging_mode = TAG_RESULT, |
MutableMode mode = IMMUTABLE); |
void AllocateHeapNumberWithValue(Register result, |
- DwVfpRegister value, |
+ DoubleRegister value, |
Register scratch1, |
Register scratch2, |
Register heap_number_map, |
Label* gc_required); |
// Copies a fixed number of fields of heap objects from src to dst. |
- void CopyFields(Register dst, |
- Register src, |
- LowDwVfpRegister double_scratch, |
- int field_count); |
+ void CopyFields(Register dst, Register src, RegList temps, int field_count); |
// Copies a number of bytes from src to dst. All registers are clobbered. On |
// exit src and dst will point to the place just after where the last byte was |
@@ -805,6 +801,14 @@ class MacroAssembler: public Assembler { |
Register length, |
Register scratch); |
+ // Initialize fields with filler values. |count| fields starting at |
+ // |start_offset| are overwritten with the value in |filler|. At the end the |
+ // loop, |start_offset| points at the next uninitialized field. |count| is |
+ // assumed to be non-zero. |
+ void InitializeNFieldsWithFiller(Register start_offset, |
+ Register count, |
+ Register filler); |
+ |
// Initialize fields with filler values. Fields starting at |start_offset| |
// not including end_offset are overwritten with the value in |filler|. At |
// the end the loop, |start_offset| takes the value of |end_offset|. |
@@ -881,7 +885,7 @@ class MacroAssembler: public Assembler { |
Register key_reg, |
Register elements_reg, |
Register scratch1, |
- LowDwVfpRegister double_scratch, |
+ DoubleRegister double_scratch, |
Label* fail, |
int elements_offset = 0); |
@@ -935,15 +939,12 @@ class MacroAssembler: public Assembler { |
// Load and check the instance type of an object for being a string. |
// Loads the type into the second argument register. |
- // Returns a condition that will be enabled if the object was a string |
- // and the passed-in condition passed. If the passed-in condition failed |
- // then flags remain unchanged. |
+ // Returns a condition that will be enabled if the object was a string. |
Condition IsObjectStringType(Register obj, |
- Register type, |
- Condition cond = al) { |
- ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond); |
- ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond); |
- tst(type, Operand(kIsNotStringMask), cond); |
+ Register type) { |
+ LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset)); |
+ lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset)); |
+ andi(r0, type, Operand(kIsNotStringMask)); |
ASSERT_EQ(0, kStringTag); |
return eq; |
} |
@@ -960,28 +961,31 @@ class MacroAssembler: public Assembler { |
void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits); |
// Load the value of a smi object into a double register. |
- // The register value must be between d0 and d15. |
- void SmiToDouble(LowDwVfpRegister value, Register smi); |
+ void SmiToDouble(DoubleRegister value, Register smi); |
// Check if a double can be exactly represented as a signed 32-bit integer. |
- // Z flag set to one if true. |
- void TestDoubleIsInt32(DwVfpRegister double_input, |
- LowDwVfpRegister double_scratch); |
+ // CR_EQ in cr7 is set if true. |
+ void TestDoubleIsInt32(DoubleRegister double_input, |
+ Register scratch1, |
+ Register scratch2, |
+ DoubleRegister double_scratch); |
// Try to convert a double to a signed 32-bit integer. |
- // Z flag set to one and result assigned if the conversion is exact. |
+ // CR_EQ in cr7 is set and result assigned if the conversion is exact. |
void TryDoubleToInt32Exact(Register result, |
- DwVfpRegister double_input, |
- LowDwVfpRegister double_scratch); |
+ DoubleRegister double_input, |
+ Register scratch, |
+ DoubleRegister double_scratch); |
// Floor a double and writes the value to the result register. |
// Go to exact if the conversion is exact (to be able to test -0), |
// fall through calling code if an overflow occurred, else go to done. |
// In return, input_high is loaded with high bits of input. |
void TryInt32Floor(Register result, |
- DwVfpRegister double_input, |
+ DoubleRegister double_input, |
Register input_high, |
- LowDwVfpRegister double_scratch, |
+ Register scratch, |
+ DoubleRegister double_scratch, |
Label* done, |
Label* exact); |
@@ -992,13 +996,13 @@ class MacroAssembler: public Assembler { |
// |
// Only public for the test code in test-code-stubs-arm.cc. |
void TryInlineTruncateDoubleToI(Register result, |
- DwVfpRegister input, |
+ DoubleRegister input, |
Label* done); |
// Performs a truncating conversion of a floating point number as used by |
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. |
// Exits with 'result' holding the answer. |
- void TruncateDoubleToI(Register result, DwVfpRegister double_input); |
+ void TruncateDoubleToI(Register result, DoubleRegister double_input); |
// Performs a truncating conversion of a heap number as used by |
// the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input' |
@@ -1015,17 +1019,57 @@ class MacroAssembler: public Assembler { |
Register scratch1, |
Label* not_int32); |
- // Check whether d16-d31 are available on the CPU. The result is given by the |
- // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise. |
- void CheckFor32DRegs(Register scratch); |
+ // Overflow handling functions. |
+ // Usage: call the appropriate arithmetic function and then call one of the |
+ // flow control functions with the corresponding label. |
+ |
+ // Compute dst = left + right, setting condition codes. dst may be same as |
+ // either left or right (or a unique register). left and right must not be |
+ // the same register. |
+ void AddAndCheckForOverflow(Register dst, |
+ Register left, |
+ Register right, |
+ Register overflow_dst, |
+ Register scratch = r0); |
+ |
+ // Compute dst = left - right, setting condition codes. dst may be same as |
+ // either left or right (or a unique register). left and right must not be |
+ // the same register. |
+ void SubAndCheckForOverflow(Register dst, |
+ Register left, |
+ Register right, |
+ Register overflow_dst, |
+ Register scratch = r0); |
+ |
+ void BranchOnOverflow(Label* label) { |
+ blt(label, cr0); |
+ } |
- // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double |
- // values to location, saving [d0..(d15|d31)]. |
- void SaveFPRegs(Register location, Register scratch); |
+ void BranchOnNoOverflow(Label* label) { |
+ bge(label, cr0); |
+ } |
+ |
+ void RetOnOverflow(void) { |
+ Label label; |
- // Does a runtime check for 16/32 FP registers. Either way, pops 32 double |
- // values to location, restoring [d0..(d15|d31)]. |
- void RestoreFPRegs(Register location, Register scratch); |
+ blt(&label, cr0); |
+ Ret(); |
+ bind(&label); |
+ } |
+ |
+ void RetOnNoOverflow(void) { |
+ Label label; |
+ |
+ bge(&label, cr0); |
+ Ret(); |
+ bind(&label); |
+ } |
+ |
+ // Pushes <count> double values to <location>, starting from d<first>. |
+ void SaveFPRegs(Register location, int first, int count); |
+ |
+ // Pops <count> double values from <location>, starting from d<first>. |
+ void RestoreFPRegs(Register location, int first, int count); |
// --------------------------------------------------------------------------- |
// Runtime calls |
@@ -1093,9 +1137,9 @@ class MacroAssembler: public Assembler { |
// whether soft or hard floating point ABI is used. These functions |
// abstract parameter passing for the three different ways we call |
// C functions from generated code. |
- void MovToFloatParameter(DwVfpRegister src); |
- void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2); |
- void MovToFloatResult(DwVfpRegister src); |
+ void MovToFloatParameter(DoubleRegister src); |
+ void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2); |
+ void MovToFloatResult(DoubleRegister src); |
// Calls a C function and cleans up the space for arguments allocated |
// by PrepareCallCFunction. The called function is not allowed to trigger a |
@@ -1111,8 +1155,8 @@ class MacroAssembler: public Assembler { |
int num_reg_arguments, |
int num_double_arguments); |
- void MovFromFloatParameter(DwVfpRegister dst); |
- void MovFromFloatResult(DwVfpRegister dst); |
+ void MovFromFloatParameter(DoubleRegister dst); |
+ void MovFromFloatResult(DoubleRegister dst); |
// Calls an API function. Allocates HandleScope, extracts returned value |
// from handle and propagates exceptions. Restores context. stack_space |
@@ -1166,14 +1210,14 @@ class MacroAssembler: public Assembler { |
// Calls Abort(msg) if the condition cond is not satisfied. |
// Use --debug_code to enable. |
- void Assert(Condition cond, BailoutReason reason); |
+ void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7); |
void AssertFastElements(Register elements); |
// Like Assert(), but always enabled. |
- void Check(Condition cond, BailoutReason reason); |
+ void Check(Condition cond, BailoutReason reason, CRegister cr = cr7); |
// Print a message to stdout and abort execution. |
- void Abort(BailoutReason msg); |
+ void Abort(BailoutReason reason); |
// Verify restrictions about code generated in stubs. |
void set_generating_stub(bool value) { generating_stub_ = value; } |
@@ -1182,17 +1226,6 @@ class MacroAssembler: public Assembler { |
bool has_frame() { return has_frame_; } |
inline bool AllowThisStubCall(CodeStub* stub); |
- // EABI variant for double arguments in use. |
- bool use_eabi_hardfloat() { |
-#ifdef __arm__ |
- return base::OS::ArmUsingHardFloat(); |
-#elif USE_EABI_HARDFLOAT |
- return true; |
-#else |
- return false; |
-#endif |
- } |
- |
// --------------------------------------------------------------------------- |
// Number utilities |
@@ -1215,33 +1248,190 @@ class MacroAssembler: public Assembler { |
Label* not_power_of_two); |
// --------------------------------------------------------------------------- |
+ // Bit testing/extraction |
+ // |
+ // Bit numbering is such that the least significant bit is bit 0 |
+ // (for consistency between 32/64-bit). |
+ |
+ // Extract consecutive bits (defined by rangeStart - rangeEnd) from src |
+ // and place them into the least significant bits of dst. |
+  inline void ExtractBitRange(Register dst, Register src, |
+                              int rangeStart, int rangeEnd, |
+                              RCBit rc = LeaveRC) { |
+    // rangeStart is the most significant bit of the field (LSB = bit 0), |
+    // so the range must be ordered high-to-low and fit in a pointer word. |
+    ASSERT(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer); |
+    // Rotate left so that bit rangeEnd lands in bit 0 (a rotate of 0 |
+    // when the field is already right-aligned). |
+    int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd; |
+    int width = rangeStart - rangeEnd + 1; |
+#if V8_TARGET_ARCH_PPC64 |
+    // rldicl: rotate left doubleword, then clear all but the low 'width' bits. |
+    rldicl(dst, src, rotate, kBitsPerPointer - width, rc); |
+#else |
+    // rlwinm: rotate left word, mask keeps only the low 'width' bits. |
+    rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1, rc); |
+#endif |
+  } |
+ |
+  // Extract a single bit (bitNumber, LSB = 0) from src into bit 0 of dst. |
+  inline void ExtractBit(Register dst, Register src, uint32_t bitNumber, |
+                         RCBit rc = LeaveRC) { |
+    // Degenerate one-bit range. |
+    ExtractBitRange(dst, src, bitNumber, bitNumber, rc); |
+  } |
+ |
+  // Extract consecutive bits (defined by mask) from src and place them |
+  // into the least significant bits of dst. |
+  inline void ExtractBitMask(Register dst, Register src, uintptr_t mask, |
+                             RCBit rc = LeaveRC) { |
+    // Locate the run of contiguous 1-bits in mask as [start:end] |
+    // (LSB = bit 0).  mask is expected to be non-zero. |
+    int start = kBitsPerPointer - 1; |
+    int end; |
+    // Use an unsigned 1: shifting a signed long into the sign bit |
+    // (1L << 63 on 64-bit, 1L << 31 on 32-bit) is undefined behavior. |
+    uintptr_t bit = (static_cast<uintptr_t>(1) << start); |
+ |
+    // Scan down from the MSB to the highest 1-bit: that is 'start'. |
+    while (bit && (mask & bit) == 0) { |
+      start--; |
+      bit >>= 1; |
+    } |
+    end = start; |
+    bit >>= 1; |
+ |
+    // Walk down through the run of 1-bits: 'end' is its lowest bit. |
+    while (bit && (mask & bit)) { |
+      end--; |
+      bit >>= 1; |
+    } |
+ |
+    // 1-bits in mask must be contiguous |
+    ASSERT(bit == 0 || (mask & ((bit << 1) - 1)) == 0); |
+ |
+    ExtractBitRange(dst, src, start, end, rc); |
+  } |
+ |
+  // Test single bit in value. |
+  inline void TestBit(Register value, int bitNumber, |
+                      Register scratch = r0) { |
+    // SetRC records the result in cr0; callers branch with beq/bne(..., cr0). |
+    ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC); |
+  } |
+ |
+  // Test consecutive bit range in value. Range is defined by |
+  // rangeStart - rangeEnd. |
+  inline void TestBitRange(Register value, |
+                           int rangeStart, int rangeEnd, |
+                           Register scratch = r0) { |
+    // SetRC records the result in cr0; callers branch with beq/bne(..., cr0). |
+    ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC); |
+  } |
+ |
+  // Test consecutive bit range in value. Range is defined by mask |
+  // (whose 1-bits must be contiguous; see ExtractBitMask). |
+  inline void TestBitMask(Register value, uintptr_t mask, |
+                          Register scratch = r0) { |
+    // SetRC records the result in cr0; callers branch with beq/bne(..., cr0). |
+    ExtractBitMask(scratch, value, mask, SetRC); |
+  } |
+ |
+ |
+ // --------------------------------------------------------------------------- |
// Smi utilities |
- void SmiTag(Register reg, SBit s = LeaveCC) { |
- add(reg, reg, Operand(reg), s); |
+ // Shift left by 1 |
+ void SmiTag(Register reg, RCBit rc = LeaveRC) { |
+ SmiTag(reg, reg, rc); |
} |
- void SmiTag(Register dst, Register src, SBit s = LeaveCC) { |
- add(dst, src, Operand(src), s); |
+ void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) { |
+ ShiftLeftImm(dst, src, Operand(kSmiShift), rc); |
} |
- // Try to convert int32 to smi. If the value is to large, preserve |
- // the original value and jump to not_a_smi. Destroys scratch and |
- // sets flags. |
- void TrySmiTag(Register reg, Label* not_a_smi) { |
- TrySmiTag(reg, reg, not_a_smi); |
+#if !V8_TARGET_ARCH_PPC64 |
+ // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow(). |
+ void SmiTagCheckOverflow(Register reg, Register overflow); |
+ void SmiTagCheckOverflow(Register dst, Register src, Register overflow); |
+ |
+  inline void JumpIfNotSmiCandidate(Register value, Register scratch, |
+                                    Label* not_smi_label) { |
+    // High bits must be identical to fit into a Smi. |
+    // Adding 0x40000000 maps the representable range |
+    // [-0x40000000, 0x3fffffff] onto non-negative values; anything |
+    // outside it wraps negative, so a signed less-than-zero check |
+    // detects non-candidates. |
+    addis(scratch, value, Operand(0x40000000u >> 16)); |
+    cmpi(scratch, Operand::Zero()); |
+    blt(not_smi_label); |
} |
- void TrySmiTag(Register reg, Register src, Label* not_a_smi) { |
- SmiTag(ip, src, SetCC); |
- b(vs, not_a_smi); |
- mov(reg, ip); |
+#endif |
+  inline void TestUnsignedSmiCandidate(Register value, Register scratch) { |
+    // The test is different for unsigned int values. Since we need |
+    // the value to be in the range of a positive smi, we can't |
+    // handle any of the high bits being set in the value. |
+    // Tests the top (kSmiShift + 1) bits; result lands in cr0 via SetRC. |
+    TestBitRange(value, |
+                 kBitsPerPointer - 1, |
+                 kBitsPerPointer - 1 - kSmiShift, |
+                 scratch); |
+  } |
+  inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch, |
+                                            Label* not_smi_label) { |
+    TestUnsignedSmiCandidate(value, scratch); |
+    // Branch if any disqualifying high bit was set (cr0 from SetRC above). |
+    bne(not_smi_label, cr0); |
} |
+  // Untag in place: arithmetic shift right by kSmiShift. |
+  void SmiUntag(Register reg, RCBit rc = LeaveRC) { |
+    SmiUntag(reg, reg, rc); |
+  } |
- void SmiUntag(Register reg, SBit s = LeaveCC) { |
- mov(reg, Operand::SmiUntag(reg), s); |
+  // Untag src into dst: arithmetic shift right by kSmiShift |
+  // (sign-preserving, the inverse of SmiTag's left shift). |
+  void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) { |
+    ShiftRightArithImm(dst, src, kSmiShift, rc); |
} |
- void SmiUntag(Register dst, Register src, SBit s = LeaveCC) { |
- mov(dst, Operand::SmiUntag(src), s); |
+ |
+  // Convert a Smi index to a pointer-sized element offset |
+  // (untagged index * kPointerSize), in a single shift. |
+  void SmiToPtrArrayOffset(Register dst, Register src) { |
+#if V8_TARGET_ARCH_PPC64 |
+    // Smi shift exceeds log2(pointer size): net shift is to the right. |
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2); |
+    ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2); |
+#else |
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2); |
+    ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift)); |
+#endif |
+  } |
+ |
+  // Convert a Smi index to a byte element offset: the untagged |
+  // index itself is the offset. |
+  void SmiToByteArrayOffset(Register dst, Register src) { |
+    SmiUntag(dst, src); |
+  } |
+ |
+  // Convert a Smi index to a 2-byte element offset (untagged index * 2). |
+  void SmiToShortArrayOffset(Register dst, Register src) { |
+#if V8_TARGET_ARCH_PPC64 |
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1); |
+    ShiftRightArithImm(dst, src, kSmiShift - 1); |
+#else |
+    // With kSmiShift == 1 the tagged value already equals the offset. |
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1); |
+    if (!dst.is(src)) { |
+      mr(dst, src); |
+    } |
+#endif |
+  } |
+ |
+  // Convert a Smi index to a 4-byte element offset (untagged index * 4). |
+  void SmiToIntArrayOffset(Register dst, Register src) { |
+#if V8_TARGET_ARCH_PPC64 |
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2); |
+    ShiftRightArithImm(dst, src, kSmiShift - 2); |
+#else |
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2); |
+    ShiftLeftImm(dst, src, Operand(2 - kSmiShift)); |
+#endif |
+  } |
+ |
+#define SmiToFloatArrayOffset SmiToIntArrayOffset |
+ |
+  // Convert a Smi index to an 8-byte (double) element offset |
+  // (untagged index * kDoubleSize). |
+  void SmiToDoubleArrayOffset(Register dst, Register src) { |
+#if V8_TARGET_ARCH_PPC64 |
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2); |
+    ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2); |
+#else |
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2); |
+    ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift)); |
+#endif |
+  } |
+ |
+  // General form: convert a Smi index to an offset for elements of |
+  // size 2^elementSizeLog2, emitting at most one shift (or a move |
+  // when the tag shift and element size coincide). |
+  void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) { |
+    if (kSmiShift < elementSizeLog2) { |
+      ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift)); |
+    } else if (kSmiShift > elementSizeLog2) { |
+      ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2); |
+    } else if (!dst.is(src)) { |
+      mr(dst, src); |
+    } |
+  } |
+ |
+  // Compute an element offset from an index that is either a tagged |
+  // Smi (isSmi) or a raw integer. |
+  void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2, |
+                          bool isSmi) { |
+    if (isSmi) { |
+      SmiToArrayOffset(dst, src, elementSizeLog2); |
+    } else { |
+      ShiftLeftImm(dst, src, Operand(elementSizeLog2)); |
+    } |
} |
// Untag the source value into destination and jump if source is a smi. |
@@ -1252,22 +1442,29 @@ class MacroAssembler: public Assembler { |
// Source and destination can be the same register. |
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case); |
- // Test if the register contains a smi (Z == 0 (eq) if true). |
- inline void SmiTst(Register value) { |
- tst(value, Operand(kSmiTagMask)); |
+  // Test the Smi tag bit (bit 0); sets cr0 so callers can beq/bne on it. |
+  inline void TestIfSmi(Register value, Register scratch) { |
+    TestBit(value, 0, scratch); |
} |
- inline void NonNegativeSmiTst(Register value) { |
- tst(value, Operand(kSmiTagMask | kSmiSignMask)); |
+ |
+  // Test whether value is a non-negative Smi: both the tag bit (bit 0) |
+  // and the sign bit must be clear.  Sets cr0 (EQ on success). |
+  inline void TestIfPositiveSmi(Register value, Register scratch) { |
+    STATIC_ASSERT((kSmiTagMask | kSmiSignMask) == |
+                  (intptr_t)(1UL << (kBitsPerPointer - 1) | 1)); |
+    // Rotate left by 1 so the sign bit and tag bit become the low two |
+    // bits, then mask and record the result (SetRC -> cr0). |
+#if V8_TARGET_ARCH_PPC64 |
+    rldicl(scratch, value, 1, kBitsPerPointer - 2, SetRC); |
+#else |
+    rlwinm(scratch, value, 1, kBitsPerPointer - 2, kBitsPerPointer - 1, SetRC); |
+#endif |
+  } |
- // Jump if the register contains a smi. |
+ |
+  // Jump if the register contains a smi. |
inline void JumpIfSmi(Register value, Label* smi_label) { |
- tst(value, Operand(kSmiTagMask)); |
- b(eq, smi_label); |
+ TestIfSmi(value, r0); |
+ beq(smi_label, cr0); // branch if SMI |
} |
// Jump if the register contains a non-smi. |
inline void JumpIfNotSmi(Register value, Label* not_smi_label) { |
- tst(value, Operand(kSmiTagMask)); |
- b(ne, not_smi_label); |
+ TestIfSmi(value, r0); |
+ bne(not_smi_label, cr0); |
} |
// Jump if either of the registers contain a non-smi. |
void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi); |
@@ -1278,6 +1475,25 @@ class MacroAssembler: public Assembler { |
void AssertNotSmi(Register object); |
void AssertSmi(Register object); |
+ |
+#if V8_TARGET_ARCH_PPC64 |
+  // Sets cr (EQ) if the 64-bit value fits in a signed 32-bit integer. |
+  inline void TestIfInt32(Register value, |
+                          Register scratch1, Register scratch2, |
+                          CRegister cr = cr7) { |
+    // High bits must be identical to fit into a 32-bit integer: |
+    // srawi replicates bit 31 into scratch1, sradi extracts the actual |
+    // upper word into scratch2; equal iff value is sign-extended from |
+    // 32 bits. |
+    srawi(scratch1, value, 31); |
+    sradi(scratch2, value, 32); |
+    cmp(scratch1, scratch2, cr); |
+  } |
+#else |
+  // 32-bit variant: sets cr (EQ) if the pair (hi_word:lo_word) fits in |
+  // a signed 32-bit integer. |
+  inline void TestIfInt32(Register hi_word, Register lo_word, |
+                          Register scratch, CRegister cr = cr7) { |
+    // High bits must be identical to fit into a 32-bit integer: the |
+    // high word must equal the sign-extension of lo_word's bit 31. |
+    srawi(scratch, lo_word, 31); |
+    cmp(scratch, hi_word, cr); |
+  } |
+#endif |
+ |
// Abort execution if argument is not a string, enabled via --debug-code. |
void AssertString(Register object); |
@@ -1356,17 +1572,23 @@ class MacroAssembler: public Assembler { |
// --------------------------------------------------------------------------- |
// Patching helpers. |
- // Get the location of a relocated constant (its address in the constant pool) |
- // from its load site. |
- void GetRelocatedValueLocation(Register ldr_location, Register result, |
- Register scratch); |
- |
+ // Retrieve/patch the relocated value (lis/ori pair or constant pool load). |
+ void GetRelocatedValue(Register location, |
+ Register result, |
+ Register scratch); |
+ void SetRelocatedValue(Register location, |
+ Register scratch, |
+ Register new_value); |
void ClampUint8(Register output_reg, Register input_reg); |
+ // Saturate a value into 8-bit unsigned integer |
+ // if input_value < 0, output_value is 0 |
+ // if input_value > 255, output_value is 255 |
+ // otherwise output_value is the (int)input_value (round to nearest) |
void ClampDoubleToUint8(Register result_reg, |
- DwVfpRegister input_reg, |
- LowDwVfpRegister double_scratch); |
+ DoubleRegister input_reg, |
+ DoubleRegister temp_double_reg); |
void LoadInstanceDescriptors(Register map, Register descriptors); |
@@ -1375,7 +1597,7 @@ class MacroAssembler: public Assembler { |
template<typename Field> |
void DecodeField(Register dst, Register src) { |
- Ubfx(dst, src, Field::kShift, Field::kSize); |
+ ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift); |
} |
template<typename Field> |
@@ -1385,24 +1607,26 @@ class MacroAssembler: public Assembler { |
template<typename Field> |
void DecodeFieldToSmi(Register dst, Register src) { |
- static const int shift = Field::kShift; |
- static const int mask = Field::kMask >> shift << kSmiTagSize; |
- STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0); |
- STATIC_ASSERT(kSmiTag == 0); |
- if (shift < kSmiTagSize) { |
- mov(dst, Operand(src, LSL, kSmiTagSize - shift)); |
- and_(dst, dst, Operand(mask)); |
- } else if (shift > kSmiTagSize) { |
- mov(dst, Operand(src, LSR, shift - kSmiTagSize)); |
- and_(dst, dst, Operand(mask)); |
- } else { |
- and_(dst, src, Operand(mask)); |
+#if V8_TARGET_ARCH_PPC64 |
+ DecodeField<Field>(dst, src); |
+ SmiTag(dst); |
+#else |
+ // 32-bit can do this in one instruction: |
+ int start = Field::kSize + kSmiShift - 1; |
+ int end = kSmiShift; |
+ int rotate = kSmiShift - Field::kShift; |
+ if (rotate < 0) { |
+ rotate += kBitsPerPointer; |
} |
+ rlwinm(dst, src, rotate, |
+ kBitsPerPointer - start - 1, |
+ kBitsPerPointer - end - 1); |
+#endif |
} |
template<typename Field> |
void DecodeFieldToSmi(Register reg) { |
- DecodeField<Field>(reg, reg); |
+ DecodeFieldToSmi<Field>(reg, reg); |
} |
// Activation support. |
@@ -1430,7 +1654,7 @@ class MacroAssembler: public Assembler { |
Label no_memento_found; |
TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, |
&no_memento_found); |
- b(eq, memento_found); |
+ beq(memento_found); |
bind(&no_memento_found); |
} |
@@ -1439,11 +1663,14 @@ class MacroAssembler: public Assembler { |
Register scratch1, Label* found); |
private: |
+ static const int kSmiShift = kSmiTagSize + kSmiShiftSize; |
+ |
void CallCFunctionHelper(Register function, |
int num_reg_arguments, |
int num_double_arguments); |
- void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); |
+ void Jump(intptr_t target, RelocInfo::Mode rmode, |
+ Condition cond = al, CRegister cr = cr7); |
// Helper functions for generating invokes. |
void InvokePrologue(const ParameterCount& expected, |
@@ -1483,8 +1710,10 @@ class MacroAssembler: public Assembler { |
MemOperand SafepointRegisterSlot(Register reg); |
MemOperand SafepointRegistersAndDoublesSlot(Register reg); |
- // Loads the constant pool pointer (pp) register. |
+#if V8_OOL_CONSTANT_POOL |
+ // Loads the constant pool pointer (kConstantPoolRegister). |
void LoadConstantPoolPointerRegister(); |
+#endif |
bool generating_stub_; |
bool has_frame_; |
@@ -1520,9 +1749,6 @@ class CodePatcher { |
// Emit an instruction directly. |
void Emit(Instr instr); |
- // Emit an address directly. |
- void Emit(Address addr); |
- |
// Emit the condition part of an instruction leaving the rest of the current |
// instruction unchanged. |
void EmitCondition(Condition cond); |
@@ -1535,6 +1761,7 @@ class CodePatcher { |
}; |
+#if V8_OOL_CONSTANT_POOL |
class FrameAndConstantPoolScope { |
public: |
FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type) |
@@ -1574,8 +1801,12 @@ class FrameAndConstantPoolScope { |
DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope); |
}; |
+#else |
+#define FrameAndConstantPoolScope FrameScope |
+#endif |
+#if V8_OOL_CONSTANT_POOL |
// Class for scoping the the unavailability of constant pool access. |
class ConstantPoolUnavailableScope { |
public: |
@@ -1598,6 +1829,7 @@ class ConstantPoolUnavailableScope { |
DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope); |
}; |
+#endif |
// ----------------------------------------------------------------------------- |
@@ -1625,4 +1857,4 @@ inline MemOperand GlobalObjectOperand() { |
} } // namespace v8::internal |
-#endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_ |
+#endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_ |