| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // |
| 3 // Copyright IBM Corp. 2012, 2013. All rights reserved. |
| 4 // |
| 2 // Use of this source code is governed by a BSD-style license that can be | 5 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 6 // found in the LICENSE file. |
| 4 | 7 |
| 5 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_ | 8 #ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_ |
| 6 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_ | 9 #define V8_PPC_MACRO_ASSEMBLER_PPC_H_ |
| 7 | 10 |
| 8 #include "src/assembler.h" | 11 #include "src/assembler.h" |
| 9 #include "src/frames.h" | 12 #include "src/frames.h" |
| 10 #include "src/globals.h" | 13 #include "src/globals.h" |
| 11 | 14 |
| 12 namespace v8 { | 15 namespace v8 { |
| 13 namespace internal { | 16 namespace internal { |
| 14 | 17 |
| 15 // ---------------------------------------------------------------------------- | 18 // ---------------------------------------------------------------------------- |
| 16 // Static helper functions | 19 // Static helper functions |
| 17 | 20 |
| 18 // Generate a MemOperand for loading a field from an object. | 21 // Generate a MemOperand for loading a field from an object. |
| 19 inline MemOperand FieldMemOperand(Register object, int offset) { | 22 inline MemOperand FieldMemOperand(Register object, int offset) { |
| 20 return MemOperand(object, offset - kHeapObjectTag); | 23 return MemOperand(object, offset - kHeapObjectTag); |
| 21 } | 24 } |
| 22 | 25 |
| 23 | 26 |
| 24 // Give alias names to registers | |
| 25 const Register cp = { kRegister_r7_Code }; // JavaScript context pointer. | |
| 26 const Register pp = { kRegister_r8_Code }; // Constant pool pointer. | |
| 27 const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer. | |
| 28 | |
| 29 // Flags used for AllocateHeapNumber | 27 // Flags used for AllocateHeapNumber |
| 30 enum TaggingMode { | 28 enum TaggingMode { |
| 31 // Tag the result. | 29 // Tag the result. |
| 32 TAG_RESULT, | 30 TAG_RESULT, |
| 33 // Don't tag | 31 // Don't tag |
| 34 DONT_TAG_RESULT | 32 DONT_TAG_RESULT |
| 35 }; | 33 }; |
| 36 | 34 |
| 37 | 35 |
| 38 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; | 36 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; |
| (...skipping 17 matching lines...) |
| 56 bool AreAliased(Register reg1, | 54 bool AreAliased(Register reg1, |
| 57 Register reg2, | 55 Register reg2, |
| 58 Register reg3 = no_reg, | 56 Register reg3 = no_reg, |
| 59 Register reg4 = no_reg, | 57 Register reg4 = no_reg, |
| 60 Register reg5 = no_reg, | 58 Register reg5 = no_reg, |
| 61 Register reg6 = no_reg, | 59 Register reg6 = no_reg, |
| 62 Register reg7 = no_reg, | 60 Register reg7 = no_reg, |
| 63 Register reg8 = no_reg); | 61 Register reg8 = no_reg); |
| 64 #endif | 62 #endif |
| 65 | 63 |
| 64 // These exist to provide portability between 32 and 64bit |
| 65 #if V8_TARGET_ARCH_PPC64 |
| 66 #define LoadPU ldu |
| 67 #define LoadPX ldx |
| 68 #define LoadPUX ldux |
| 69 #define StorePU stdu |
| 70 #define StorePX stdx |
| 71 #define StorePUX stdux |
| 72 #define ShiftLeftImm sldi |
| 73 #define ShiftRightImm srdi |
| 74 #define ClearLeftImm clrldi |
| 75 #define ClearRightImm clrrdi |
| 76 #define ShiftRightArithImm sradi |
| 77 #define ShiftLeft sld |
| 78 #define ShiftRight srd |
| 79 #define ShiftRightArith srad |
| 80 #define Mul mulld |
| 81 #define Div divd |
| 82 #else |
| 83 #define LoadPU lwzu |
| 84 #define LoadPX lwzx |
| 85 #define LoadPUX lwzux |
| 86 #define StorePU stwu |
| 87 #define StorePX stwx |
| 88 #define StorePUX stwux |
| 89 #define ShiftLeftImm slwi |
| 90 #define ShiftRightImm srwi |
| 91 #define ClearLeftImm clrlwi |
| 92 #define ClearRightImm clrrwi |
| 93 #define ShiftRightArithImm srawi |
| 94 #define ShiftLeft slw |
| 95 #define ShiftRight srw |
| 96 #define ShiftRightArith sraw |
| 97 #define Mul mullw |
| 98 #define Div divw |
| 99 #endif |
| 66 | 100 |
| 67 enum TargetAddressStorageMode { | |
| 68 CAN_INLINE_TARGET_ADDRESS, | |
| 69 NEVER_INLINE_TARGET_ADDRESS | |
| 70 }; | |
| 71 | 101 |
| 72 // MacroAssembler implements a collection of frequently used macros. | 102 // MacroAssembler implements a collection of frequently used macros. |
| 73 class MacroAssembler: public Assembler { | 103 class MacroAssembler: public Assembler { |
| 74 public: | 104 public: |
| 75 // The isolate parameter can be NULL if the macro assembler should | 105 // The isolate parameter can be NULL if the macro assembler should |
| 76 // not use isolate-dependent functionality. In this case, it's the | 106 // not use isolate-dependent functionality. In this case, it's the |
| 77 // responsibility of the caller to never invoke such function on the | 107 // responsibility of the caller to never invoke such function on the |
| 78 // macro assembler. | 108 // macro assembler. |
| 79 MacroAssembler(Isolate* isolate, void* buffer, int size); | 109 MacroAssembler(Isolate* isolate, void* buffer, int size); |
| 80 | 110 |
| 81 | 111 |
| 82 // Returns the size of a call in instructions. Note, the value returned is | 112 // Returns the size of a call in instructions. Note, the value returned is |
| 83 // only valid as long as no entries are added to the constant pool between | 113 // only valid as long as no entries are added to the constant pool between |
| 84 // checking the call size and emitting the actual call. | 114 // checking the call size and emitting the actual call. |
| 85 static int CallSize(Register target, Condition cond = al); | 115 static int CallSize(Register target, Condition cond = al); |
| 86 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); | 116 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); |
| 87 int CallStubSize(CodeStub* stub, | 117 static int CallSizeNotPredictableCodeSize(Address target, |
| 88 TypeFeedbackId ast_id = TypeFeedbackId::None(), | |
| 89 Condition cond = al); | |
| 90 static int CallSizeNotPredictableCodeSize(Isolate* isolate, | |
| 91 Address target, | |
| 92 RelocInfo::Mode rmode, | 118 RelocInfo::Mode rmode, |
| 93 Condition cond = al); | 119 Condition cond = al); |
| 94 | 120 |
| 95 // Jump, Call, and Ret pseudo instructions implementing inter-working. | 121 // Jump, Call, and Ret pseudo instructions implementing inter-working. |
| 96 void Jump(Register target, Condition cond = al); | 122 void Jump(Register target, Condition cond = al); |
| 97 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); | 123 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al, |
| 124 CRegister cr = cr7); |
| 98 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); | 125 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); |
| 99 void Call(Register target, Condition cond = al); | 126 void Call(Register target, Condition cond = al); |
| 100 void Call(Address target, RelocInfo::Mode rmode, | 127 void Call(Address target, RelocInfo::Mode rmode, Condition cond = al); |
| 101 Condition cond = al, | |
| 102 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS); | |
| 103 int CallSize(Handle<Code> code, | 128 int CallSize(Handle<Code> code, |
| 104 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, | 129 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, |
| 105 TypeFeedbackId ast_id = TypeFeedbackId::None(), | 130 TypeFeedbackId ast_id = TypeFeedbackId::None(), |
| 106 Condition cond = al); | 131 Condition cond = al); |
| 107 void Call(Handle<Code> code, | 132 void Call(Handle<Code> code, |
| 108 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, | 133 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, |
| 109 TypeFeedbackId ast_id = TypeFeedbackId::None(), | 134 TypeFeedbackId ast_id = TypeFeedbackId::None(), |
| 110 Condition cond = al, | 135 Condition cond = al); |
| 111 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS); | |
| 112 void Ret(Condition cond = al); | 136 void Ret(Condition cond = al); |
| 113 | 137 |
| 114 // Emit code to discard a non-negative number of pointer-sized elements | 138 // Emit code to discard a non-negative number of pointer-sized elements |
| 115 // from the stack, clobbering only the sp register. | 139 // from the stack, clobbering only the sp register. |
| 116 void Drop(int count, Condition cond = al); | 140 void Drop(int count, Condition cond = al); |
| 117 | 141 |
| 118 void Ret(int drop, Condition cond = al); | 142 void Ret(int drop, Condition cond = al); |
| 119 | 143 |
| 120 // Swap two registers. If the scratch register is omitted then a slightly | 144 void Call(Label* target); |
| 121 // less efficient form using xor instead of mov is emitted. | |
| 122 void Swap(Register reg1, | |
| 123 Register reg2, | |
| 124 Register scratch = no_reg, | |
| 125 Condition cond = al); | |
| 126 | 145 |
| 127 void Mls(Register dst, Register src1, Register src2, Register srcA, | 146 // Emit call to the code we are currently generating. |
| 128 Condition cond = al); | 147 void CallSelf() { |
| 129 void And(Register dst, Register src1, const Operand& src2, | 148 Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location())); |
| 130 Condition cond = al); | 149 Call(self, RelocInfo::CODE_TARGET); |
| 131 void Ubfx(Register dst, Register src, int lsb, int width, | 150 } |
| 132 Condition cond = al); | |
| 133 void Sbfx(Register dst, Register src, int lsb, int width, | |
| 134 Condition cond = al); | |
| 135 // The scratch register is not used for ARMv7. | |
| 136 // scratch can be the same register as src (in which case it is trashed), but | |
| 137 // not the same as dst. | |
| 138 void Bfi(Register dst, | |
| 139 Register src, | |
| 140 Register scratch, | |
| 141 int lsb, | |
| 142 int width, | |
| 143 Condition cond = al); | |
| 144 void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al); | |
| 145 void Usat(Register dst, int satpos, const Operand& src, | |
| 146 Condition cond = al); | |
| 147 | |
| 148 void Call(Label* target); | |
| 149 void Push(Register src) { push(src); } | |
| 150 void Pop(Register dst) { pop(dst); } | |
| 151 | 151 |
| 152 // Register move. May do nothing if the registers are identical. | 152 // Register move. May do nothing if the registers are identical. |
| 153 void Move(Register dst, Handle<Object> value); | 153 void Move(Register dst, Handle<Object> value); |
| 154 void Move(Register dst, Register src, Condition cond = al); | 154 void Move(Register dst, Register src, Condition cond = al); |
| 155 void Move(DwVfpRegister dst, DwVfpRegister src); | 155 void Move(DoubleRegister dst, DoubleRegister src); |
| 156 | 156 |
| 157 void Load(Register dst, const MemOperand& src, Representation r); | 157 void MultiPush(RegList regs); |
| 158 void Store(Register src, const MemOperand& dst, Representation r); | 158 void MultiPop(RegList regs); |
| 159 | 159 |
| 160 // Load an object from the root table. | 160 // Load an object from the root table. |
| 161 void LoadRoot(Register destination, | 161 void LoadRoot(Register destination, |
| 162 Heap::RootListIndex index, | 162 Heap::RootListIndex index, |
| 163 Condition cond = al); | 163 Condition cond = al); |
| 164 // Store an object to the root table. | 164 // Store an object to the root table. |
| 165 void StoreRoot(Register source, | 165 void StoreRoot(Register source, |
| 166 Heap::RootListIndex index, | 166 Heap::RootListIndex index, |
| 167 Condition cond = al); | 167 Condition cond = al); |
| 168 | 168 |
| (...skipping 129 matching lines...) |
| 298 Register object, | 298 Register object, |
| 299 Register address, | 299 Register address, |
| 300 Register value, | 300 Register value, |
| 301 LinkRegisterStatus lr_status, | 301 LinkRegisterStatus lr_status, |
| 302 SaveFPRegsMode save_fp, | 302 SaveFPRegsMode save_fp, |
| 303 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, | 303 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, |
| 304 SmiCheck smi_check = INLINE_SMI_CHECK, | 304 SmiCheck smi_check = INLINE_SMI_CHECK, |
| 305 PointersToHereCheck pointers_to_here_check_for_value = | 305 PointersToHereCheck pointers_to_here_check_for_value = |
| 306 kPointersToHereMaybeInteresting); | 306 kPointersToHereMaybeInteresting); |
| 307 | 307 |
| 308 void Push(Register src) { push(src); } |
| 309 |
| 308 // Push a handle. | 310 // Push a handle. |
| 309 void Push(Handle<Object> handle); | 311 void Push(Handle<Object> handle); |
| 310 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } | 312 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } |
| 311 | 313 |
| 312 // Push two registers. Pushes leftmost register first (to highest address). | 314 // Push two registers. Pushes leftmost register first (to highest address). |
| 313 void Push(Register src1, Register src2, Condition cond = al) { | 315 void Push(Register src1, Register src2) { |
| 314 ASSERT(!src1.is(src2)); | 316 StorePU(src1, MemOperand(sp, -kPointerSize)); |
| 315 if (src1.code() > src2.code()) { | 317 StorePU(src2, MemOperand(sp, -kPointerSize)); |
| 316 stm(db_w, sp, src1.bit() | src2.bit(), cond); | |
| 317 } else { | |
| 318 str(src1, MemOperand(sp, 4, NegPreIndex), cond); | |
| 319 str(src2, MemOperand(sp, 4, NegPreIndex), cond); | |
| 320 } | |
| 321 } | 318 } |
| 322 | 319 |
| 323 // Push three registers. Pushes leftmost register first (to highest address). | 320 // Push three registers. Pushes leftmost register first (to highest address). |
| 324 void Push(Register src1, Register src2, Register src3, Condition cond = al) { | 321 void Push(Register src1, Register src2, Register src3) { |
| 325 ASSERT(!src1.is(src2)); | 322 StorePU(src1, MemOperand(sp, -kPointerSize)); |
| 326 ASSERT(!src2.is(src3)); | 323 StorePU(src2, MemOperand(sp, -kPointerSize)); |
| 327 ASSERT(!src1.is(src3)); | 324 StorePU(src3, MemOperand(sp, -kPointerSize)); |
| 328 if (src1.code() > src2.code()) { | |
| 329 if (src2.code() > src3.code()) { | |
| 330 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond); | |
| 331 } else { | |
| 332 stm(db_w, sp, src1.bit() | src2.bit(), cond); | |
| 333 str(src3, MemOperand(sp, 4, NegPreIndex), cond); | |
| 334 } | |
| 335 } else { | |
| 336 str(src1, MemOperand(sp, 4, NegPreIndex), cond); | |
| 337 Push(src2, src3, cond); | |
| 338 } | |
| 339 } | 325 } |
| 340 | 326 |
| 341 // Push four registers. Pushes leftmost register first (to highest address). | 327 // Push four registers. Pushes leftmost register first (to highest address). |
| 342 void Push(Register src1, | 328 void Push(Register src1, |
| 343 Register src2, | 329 Register src2, |
| 344 Register src3, | 330 Register src3, |
| 345 Register src4, | 331 Register src4) { |
| 346 Condition cond = al) { | 332 StorePU(src1, MemOperand(sp, -kPointerSize)); |
| 347 ASSERT(!src1.is(src2)); | 333 StorePU(src2, MemOperand(sp, -kPointerSize)); |
| 348 ASSERT(!src2.is(src3)); | 334 StorePU(src3, MemOperand(sp, -kPointerSize)); |
| 349 ASSERT(!src1.is(src3)); | 335 StorePU(src4, MemOperand(sp, -kPointerSize)); |
| 350 ASSERT(!src1.is(src4)); | |
| 351 ASSERT(!src2.is(src4)); | |
| 352 ASSERT(!src3.is(src4)); | |
| 353 if (src1.code() > src2.code()) { | |
| 354 if (src2.code() > src3.code()) { | |
| 355 if (src3.code() > src4.code()) { | |
| 356 stm(db_w, | |
| 357 sp, | |
| 358 src1.bit() | src2.bit() | src3.bit() | src4.bit(), | |
| 359 cond); | |
| 360 } else { | |
| 361 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond); | |
| 362 str(src4, MemOperand(sp, 4, NegPreIndex), cond); | |
| 363 } | |
| 364 } else { | |
| 365 stm(db_w, sp, src1.bit() | src2.bit(), cond); | |
| 366 Push(src3, src4, cond); | |
| 367 } | |
| 368 } else { | |
| 369 str(src1, MemOperand(sp, 4, NegPreIndex), cond); | |
| 370 Push(src2, src3, src4, cond); | |
| 371 } | |
| 372 } | 336 } |
| 373 | 337 |
| 338 // Push five registers. Pushes leftmost register first (to highest address). |
| 339 void Push(Register src1, |
| 340 Register src2, |
| 341 Register src3, |
| 342 Register src4, |
| 343 Register src5) { |
| 344 StorePU(src1, MemOperand(sp, -kPointerSize)); |
| 345 StorePU(src2, MemOperand(sp, -kPointerSize)); |
| 346 StorePU(src3, MemOperand(sp, -kPointerSize)); |
| 347 StorePU(src4, MemOperand(sp, -kPointerSize)); |
| 348 StorePU(src5, MemOperand(sp, -kPointerSize)); |
| 349 } |
| 350 |
| 351 void Pop(Register dst) { pop(dst); } |
| 352 |
| 374 // Pop two registers. Pops rightmost register first (from lower address). | 353 // Pop two registers. Pops rightmost register first (from lower address). |
| 375 void Pop(Register src1, Register src2, Condition cond = al) { | 354 void Pop(Register src1, Register src2) { |
| 376 ASSERT(!src1.is(src2)); | 355 LoadP(src2, MemOperand(sp, 0)); |
| 377 if (src1.code() > src2.code()) { | 356 LoadP(src1, MemOperand(sp, kPointerSize)); |
| 378 ldm(ia_w, sp, src1.bit() | src2.bit(), cond); | 357 addi(sp, sp, Operand(2 * kPointerSize)); |
| 379 } else { | |
| 380 ldr(src2, MemOperand(sp, 4, PostIndex), cond); | |
| 381 ldr(src1, MemOperand(sp, 4, PostIndex), cond); | |
| 382 } | |
| 383 } | 358 } |
| 384 | 359 |
| 385 // Pop three registers. Pops rightmost register first (from lower address). | 360 // Pop three registers. Pops rightmost register first (from lower address). |
| 386 void Pop(Register src1, Register src2, Register src3, Condition cond = al) { | 361 void Pop(Register src1, Register src2, Register src3) { |
| 387 ASSERT(!src1.is(src2)); | 362 LoadP(src3, MemOperand(sp, 0)); |
| 388 ASSERT(!src2.is(src3)); | 363 LoadP(src2, MemOperand(sp, kPointerSize)); |
| 389 ASSERT(!src1.is(src3)); | 364 LoadP(src1, MemOperand(sp, 2 * kPointerSize)); |
| 390 if (src1.code() > src2.code()) { | 365 addi(sp, sp, Operand(3 * kPointerSize)); |
| 391 if (src2.code() > src3.code()) { | |
| 392 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond); | |
| 393 } else { | |
| 394 ldr(src3, MemOperand(sp, 4, PostIndex), cond); | |
| 395 ldm(ia_w, sp, src1.bit() | src2.bit(), cond); | |
| 396 } | |
| 397 } else { | |
| 398 Pop(src2, src3, cond); | |
| 399 ldr(src1, MemOperand(sp, 4, PostIndex), cond); | |
| 400 } | |
| 401 } | 366 } |
| 402 | 367 |
| 403 // Pop four registers. Pops rightmost register first (from lower address). | 368 // Pop four registers. Pops rightmost register first (from lower address). |
| 404 void Pop(Register src1, | 369 void Pop(Register src1, |
| 405 Register src2, | 370 Register src2, |
| 406 Register src3, | 371 Register src3, |
| 407 Register src4, | 372 Register src4) { |
| 408 Condition cond = al) { | 373 LoadP(src4, MemOperand(sp, 0)); |
| 409 ASSERT(!src1.is(src2)); | 374 LoadP(src3, MemOperand(sp, kPointerSize)); |
| 410 ASSERT(!src2.is(src3)); | 375 LoadP(src2, MemOperand(sp, 2 * kPointerSize)); |
| 411 ASSERT(!src1.is(src3)); | 376 LoadP(src1, MemOperand(sp, 3 * kPointerSize)); |
| 412 ASSERT(!src1.is(src4)); | 377 addi(sp, sp, Operand(4 * kPointerSize)); |
| 413 ASSERT(!src2.is(src4)); | |
| 414 ASSERT(!src3.is(src4)); | |
| 415 if (src1.code() > src2.code()) { | |
| 416 if (src2.code() > src3.code()) { | |
| 417 if (src3.code() > src4.code()) { | |
| 418 ldm(ia_w, | |
| 419 sp, | |
| 420 src1.bit() | src2.bit() | src3.bit() | src4.bit(), | |
| 421 cond); | |
| 422 } else { | |
| 423 ldr(src4, MemOperand(sp, 4, PostIndex), cond); | |
| 424 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond); | |
| 425 } | |
| 426 } else { | |
| 427 Pop(src3, src4, cond); | |
| 428 ldm(ia_w, sp, src1.bit() | src2.bit(), cond); | |
| 429 } | |
| 430 } else { | |
| 431 Pop(src2, src3, src4, cond); | |
| 432 ldr(src1, MemOperand(sp, 4, PostIndex), cond); | |
| 433 } | |
| 434 } | 378 } |
| 435 | 379 |
| 436 // Push a fixed frame, consisting of lr, fp, constant pool (if | 380 // Pop five registers. Pops rightmost register first (from lower address). |
| 437 // FLAG_enable_ool_constant_pool), context and JS function / marker id if | 381 void Pop(Register src1, |
| 438 // marker_reg is a valid register. | 382 Register src2, |
| 383 Register src3, |
| 384 Register src4, |
| 385 Register src5) { |
| 386 LoadP(src5, MemOperand(sp, 0)); |
| 387 LoadP(src4, MemOperand(sp, kPointerSize)); |
| 388 LoadP(src3, MemOperand(sp, 2 * kPointerSize)); |
| 389 LoadP(src2, MemOperand(sp, 3 * kPointerSize)); |
| 390 LoadP(src1, MemOperand(sp, 4 * kPointerSize)); |
| 391 addi(sp, sp, Operand(5 * kPointerSize)); |
| 392 } |
| 393 |
| 394 // Push a fixed frame, consisting of lr, fp, context and |
| 395 // JS function / marker id if marker_reg is a valid register. |
| 439 void PushFixedFrame(Register marker_reg = no_reg); | 396 void PushFixedFrame(Register marker_reg = no_reg); |
| 440 void PopFixedFrame(Register marker_reg = no_reg); | 397 void PopFixedFrame(Register marker_reg = no_reg); |
| 441 | 398 |
| 442 // Push and pop the registers that can hold pointers, as defined by the | 399 // Push and pop the registers that can hold pointers, as defined by the |
| 443 // RegList constant kSafepointSavedRegisters. | 400 // RegList constant kSafepointSavedRegisters. |
| 444 void PushSafepointRegisters(); | 401 void PushSafepointRegisters(); |
| 445 void PopSafepointRegisters(); | 402 void PopSafepointRegisters(); |
| 446 // Store value in register src in the safepoint stack slot for | 403 // Store value in register src in the safepoint stack slot for |
| 447 // register dst. | 404 // register dst. |
| 448 void StoreToSafepointRegisterSlot(Register src, Register dst); | 405 void StoreToSafepointRegisterSlot(Register src, Register dst); |
| 449 // Load the value of the src register from its safepoint stack slot | 406 // Load the value of the src register from its safepoint stack slot |
| 450 // into register dst. | 407 // into register dst. |
| 451 void LoadFromSafepointRegisterSlot(Register dst, Register src); | 408 void LoadFromSafepointRegisterSlot(Register dst, Register src); |
| 452 | 409 |
| 453 // Load two consecutive registers with two consecutive memory locations. | 410 // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache |
| 454 void Ldrd(Register dst1, | 411 // from C. |
| 455 Register dst2, | 412 // Does not handle errors. |
| 456 const MemOperand& src, | 413 void FlushICache(Register address, size_t size, |
| 457 Condition cond = al); | 414 Register scratch); |
| 458 | |
| 459 // Store two consecutive registers to two consecutive memory locations. | |
| 460 void Strd(Register src1, | |
| 461 Register src2, | |
| 462 const MemOperand& dst, | |
| 463 Condition cond = al); | |
| 464 | |
| 465 // Ensure that FPSCR contains values needed by JavaScript. | |
| 466 // We need the NaNModeControlBit to be sure that operations like | |
| 467 // vadd and vsub generate the Canonical NaN (if a NaN must be generated). | |
| 468 // In VFP3 it will be always the Canonical NaN. | |
| 469 // In VFP2 it will be either the Canonical NaN or the negative version | |
| 470 // of the Canonical NaN. It doesn't matter if we have two values. The aim | |
| 471 // is to be sure to never generate the hole NaN. | |
| 472 void VFPEnsureFPSCRState(Register scratch); | |
| 473 | 415 |
| 474 // If the value is a NaN, canonicalize the value else, do nothing. | 416 // If the value is a NaN, canonicalize the value else, do nothing. |
| 475 void VFPCanonicalizeNaN(const DwVfpRegister dst, | 417 void CanonicalizeNaN(const DoubleRegister dst, |
| 476 const DwVfpRegister src, | 418 const DoubleRegister src); |
| 477 const Condition cond = al); | 419 void CanonicalizeNaN(const DoubleRegister value) { |
| 478 void VFPCanonicalizeNaN(const DwVfpRegister value, | 420 CanonicalizeNaN(value, value); |
| 479 const Condition cond = al) { | |
| 480 VFPCanonicalizeNaN(value, value, cond); | |
| 481 } | 421 } |
| 482 | 422 |
| 483 // Compare double values and move the result to the normal condition flags. | 423 // Converts the integer (untagged smi) in |src| to a double, storing |
| 484 void VFPCompareAndSetFlags(const DwVfpRegister src1, | 424 // the result to |double_dst| |
| 485 const DwVfpRegister src2, | 425 void ConvertIntToDouble(Register src, |
| 486 const Condition cond = al); | 426 DoubleRegister double_dst); |
| 487 void VFPCompareAndSetFlags(const DwVfpRegister src1, | |
| 488 const double src2, | |
| 489 const Condition cond = al); | |
| 490 | 427 |
| 491 // Compare double values and then load the fpscr flags to a register. | 428 // Converts the unsigned integer (untagged smi) in |src| to |
| 492 void VFPCompareAndLoadFlags(const DwVfpRegister src1, | 429 // a double, storing the result to |double_dst| |
| 493 const DwVfpRegister src2, | 430 void ConvertUnsignedIntToDouble(Register src, |
| 494 const Register fpscr_flags, | 431 DoubleRegister double_dst); |
| 495 const Condition cond = al); | |
| 496 void VFPCompareAndLoadFlags(const DwVfpRegister src1, | |
| 497 const double src2, | |
| 498 const Register fpscr_flags, | |
| 499 const Condition cond = al); | |
| 500 | 432 |
| 501 void Vmov(const DwVfpRegister dst, | 433 // Converts the integer (untagged smi) in |src| to |
| 502 const double imm, | 434 // a float, storing the result in |dst| |
| 503 const Register scratch = no_reg); | 435 // Warning: The value in |int_scratch| will be changed in the process! |
| 436 void ConvertIntToFloat(const DoubleRegister dst, |
| 437 const Register src, |
| 438 const Register int_scratch); |
| 504 | 439 |
| 505 void VmovHigh(Register dst, DwVfpRegister src); | 440 // Converts the double_input to an integer. Note that, upon return, |
| 506 void VmovHigh(DwVfpRegister dst, Register src); | 441 // the contents of double_dst will also hold the fixed point representation. |
| 507 void VmovLow(Register dst, DwVfpRegister src); | 442 void ConvertDoubleToInt64(const DoubleRegister double_input, |
| 508 void VmovLow(DwVfpRegister dst, Register src); | 443 const Register dst, |
| 509 | 444 #if !V8_TARGET_ARCH_PPC64 |
| 510 // Loads the number from object into dst register. | 445 const Register dst_hi, |
| 511 // If |object| is neither smi nor heap number, |not_number| is jumped to | 446 #endif |
| 512 // with |object| still intact. | 447 const DoubleRegister double_dst, |
| 513 void LoadNumber(Register object, | 448 FPRoundingMode rounding_mode = kRoundToZero); |
| 514 LowDwVfpRegister dst, | |
| 515 Register heap_number_map, | |
| 516 Register scratch, | |
| 517 Label* not_number); | |
| 518 | |
| 519 // Loads the number from object into double_dst in the double format. | |
| 520 // Control will jump to not_int32 if the value cannot be exactly represented | |
| 521 // by a 32-bit integer. | |
| 522 // Floating point value in the 32-bit integer range that are not exact integer | |
| 523 // won't be loaded. | |
| 524 void LoadNumberAsInt32Double(Register object, | |
| 525 DwVfpRegister double_dst, | |
| 526 Register heap_number_map, | |
| 527 Register scratch, | |
| 528 LowDwVfpRegister double_scratch, | |
| 529 Label* not_int32); | |
| 530 | |
| 531 // Loads the number from object into dst as a 32-bit integer. | |
| 532 // Control will jump to not_int32 if the object cannot be exactly represented | |
| 533 // by a 32-bit integer. | |
| 534 // Floating point value in the 32-bit integer range that are not exact integer | |
| 535 // won't be converted. | |
| 536 void LoadNumberAsInt32(Register object, | |
| 537 Register dst, | |
| 538 Register heap_number_map, | |
| 539 Register scratch, | |
| 540 DwVfpRegister double_scratch0, | |
| 541 LowDwVfpRegister double_scratch1, | |
| 542 Label* not_int32); | |
| 543 | 449 |
| 544 // Generates function and stub prologue code. | 450 // Generates function and stub prologue code. |
| 545 void StubPrologue(); | 451 void StubPrologue(); |
| 546 void Prologue(bool code_pre_aging); | 452 void Prologue(bool code_pre_aging); |
| 547 | 453 |
| 548 // Enter exit frame. | 454 // Enter exit frame. |
| 549 // stack_space - extra stack space, used for alignment before call to C. | 455 // stack_space - extra stack space, used for alignment before call to C. |
| 550 void EnterExitFrame(bool save_doubles, int stack_space = 0); | 456 void EnterExitFrame(bool save_doubles, int stack_space = 0); |
| 551 | 457 |
| 552 // Leave the current exit frame. Expects the return value in r0. | 458 // Leave the current exit frame. Expects the return value in r0. |
| (...skipping 26 matching lines...) |
| 579 void LoadGlobalFunctionInitialMap(Register function, | 485 void LoadGlobalFunctionInitialMap(Register function, |
| 580 Register map, | 486 Register map, |
| 581 Register scratch); | 487 Register scratch); |
| 582 | 488 |
| 583 void InitializeRootRegister() { | 489 void InitializeRootRegister() { |
| 584 ExternalReference roots_array_start = | 490 ExternalReference roots_array_start = |
| 585 ExternalReference::roots_array_start(isolate()); | 491 ExternalReference::roots_array_start(isolate()); |
| 586 mov(kRootRegister, Operand(roots_array_start)); | 492 mov(kRootRegister, Operand(roots_array_start)); |
| 587 } | 493 } |
| 588 | 494 |
| 495 // ---------------------------------------------------------------- |
| 496 // new PPC macro-assembler interfaces that are slightly higher level |
| 497 // than assembler-ppc and may generate variable length sequences |
| 498 |
| 499 // load a literal signed int value <value> to GPR <dst> |
| 500 void LoadIntLiteral(Register dst, int value); |
| 501 |
| 502 // load an SMI value <value> to GPR <dst> |
| 503 void LoadSmiLiteral(Register dst, Smi *smi); |
| 504 |
| 505 // load a literal double value <value> to FPR <result> |
| 506 void LoadDoubleLiteral(DoubleRegister result, |
| 507 double value, |
| 508 Register scratch); |
| 509 |
| 510 void LoadWord(Register dst, |
| 511 const MemOperand& mem, |
| 512 Register scratch, |
| 513 bool updateForm = false); |
| 514 |
| 515 void LoadWordArith(Register dst, |
| 516 const MemOperand& mem, |
| 517 Register scratch = no_reg); |
| 518 |
| 519 void StoreWord(Register src, |
| 520 const MemOperand& mem, |
| 521 Register scratch, |
| 522 bool updateForm = false); |
| 523 |
| 524 void LoadHalfWord(Register dst, |
| 525 const MemOperand& mem, |
| 526 Register scratch, |
| 527 bool updateForm = false); |
| 528 |
| 529 void StoreHalfWord(Register src, |
| 530 const MemOperand& mem, |
| 531 Register scratch, |
| 532 bool updateForm = false); |
| 533 |
| 534 void LoadByte(Register dst, |
| 535 const MemOperand& mem, |
| 536 Register scratch, |
| 537 bool updateForm = false); |
| 538 |
| 539 void StoreByte(Register src, |
| 540 const MemOperand& mem, |
| 541 Register scratch, |
| 542 bool updateForm = false); |
| 543 |
| 544 void LoadRepresentation(Register dst, |
| 545 const MemOperand& mem, |
| 546 Representation r, |
| 547 Register scratch = no_reg); |
| 548 |
| 549 void StoreRepresentation(Register src, |
| 550 const MemOperand& mem, |
| 551 Representation r, |
| 552 Register scratch = no_reg); |
| 553 |
| 554 |
| 555 |
| 556 void Add(Register dst, Register src, intptr_t value, Register scratch); |
| 557 void Cmpi(Register src1, const Operand& src2, Register scratch, |
| 558 CRegister cr = cr7); |
| 559 void Cmpli(Register src1, const Operand& src2, Register scratch, |
| 560 CRegister cr = cr7); |
| 561 void Cmpwi(Register src1, const Operand& src2, Register scratch, |
| 562 CRegister cr = cr7); |
| 563 void Cmplwi(Register src1, const Operand& src2, Register scratch, |
| 564 CRegister cr = cr7); |
| 565 void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC); |
| 566 void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC); |
| 567 void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC); |
| 568 |
| 569 void AddSmiLiteral(Register dst, Register src, Smi *smi, Register scratch); |
| 570 void SubSmiLiteral(Register dst, Register src, Smi *smi, Register scratch); |
| 571 void CmpSmiLiteral(Register src1, Smi *smi, Register scratch, |
| 572 CRegister cr = cr7); |
| 573 void CmplSmiLiteral(Register src1, Smi *smi, Register scratch, |
| 574 CRegister cr = cr7); |
| 575 void AndSmiLiteral(Register dst, Register src, Smi *smi, Register scratch, |
| 576 RCBit rc = LeaveRC); |
| 577 |
| 578 // Set new rounding mode RN to FPSCR |
| 579 void SetRoundingMode(FPRoundingMode RN); |
| 580 |
| 581 // reset rounding mode to default (kRoundToNearest) |
| 582 void ResetRoundingMode(); |
| 583 |
| 584 // These exist to provide portability between 32 and 64bit |
| 585 void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg); |
| 586 void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg); |
| 587 |
| 589 // --------------------------------------------------------------------------- | 588 // --------------------------------------------------------------------------- |
| 590 // JavaScript invokes | 589 // JavaScript invokes |
| 591 | 590 |
| 592 // Invoke the JavaScript function code by either calling or jumping. | 591 // Invoke the JavaScript function code by either calling or jumping. |
| 593 void InvokeCode(Register code, | 592 void InvokeCode(Register code, |
| 594 const ParameterCount& expected, | 593 const ParameterCount& expected, |
| 595 const ParameterCount& actual, | 594 const ParameterCount& actual, |
| 596 InvokeFlag flag, | 595 InvokeFlag flag, |
| 597 const CallWrapper& call_wrapper); | 596 const CallWrapper& call_wrapper); |
| 598 | 597 |
| (...skipping 179 matching lines...) |
| 778 // space is full and a scavenge is needed. All registers are clobbered also | 777 // space is full and a scavenge is needed. All registers are clobbered also |
| 779 // when control continues at the gc_required label. | 778 // when control continues at the gc_required label. |
| 780 void AllocateHeapNumber(Register result, | 779 void AllocateHeapNumber(Register result, |
| 781 Register scratch1, | 780 Register scratch1, |
| 782 Register scratch2, | 781 Register scratch2, |
| 783 Register heap_number_map, | 782 Register heap_number_map, |
| 784 Label* gc_required, | 783 Label* gc_required, |
| 785 TaggingMode tagging_mode = TAG_RESULT, | 784 TaggingMode tagging_mode = TAG_RESULT, |
| 786 MutableMode mode = IMMUTABLE); | 785 MutableMode mode = IMMUTABLE); |
| 787 void AllocateHeapNumberWithValue(Register result, | 786 void AllocateHeapNumberWithValue(Register result, |
| 788 DwVfpRegister value, | 787 DoubleRegister value, |
| 789 Register scratch1, | 788 Register scratch1, |
| 790 Register scratch2, | 789 Register scratch2, |
| 791 Register heap_number_map, | 790 Register heap_number_map, |
| 792 Label* gc_required); | 791 Label* gc_required); |
| 793 | 792 |
| 794 // Copies a fixed number of fields of heap objects from src to dst. | 793 // Copies a fixed number of fields of heap objects from src to dst. |
| 795 void CopyFields(Register dst, | 794 void CopyFields(Register dst, Register src, RegList temps, int field_count); |
| 796 Register src, | |
| 797 LowDwVfpRegister double_scratch, | |
| 798 int field_count); | |
| 799 | 795 |
| 800 // Copies a number of bytes from src to dst. All registers are clobbered. On | 796 // Copies a number of bytes from src to dst. All registers are clobbered. On |
| 801 // exit src and dst will point to the place just after where the last byte was | 797 // exit src and dst will point to the place just after where the last byte was |
| 802 // read or written and length will be zero. | 798 // read or written and length will be zero. |
| 803 void CopyBytes(Register src, | 799 void CopyBytes(Register src, |
| 804 Register dst, | 800 Register dst, |
| 805 Register length, | 801 Register length, |
| 806 Register scratch); | 802 Register scratch); |
| 807 | 803 |
| 804 // Initialize fields with filler values. |count| fields starting at |
| 805 // |start_offset| are overwritten with the value in |filler|. At the end of the |
| 806 // loop, |start_offset| points at the next uninitialized field. |count| is |
| 807 // assumed to be non-zero. |
| 808 void InitializeNFieldsWithFiller(Register start_offset, |
| 809 Register count, |
| 810 Register filler); |
| 811 |
| 808 // Initialize fields with filler values. Fields starting at |start_offset| | 812 // Initialize fields with filler values. Fields starting at |start_offset| |
| 809 // not including end_offset are overwritten with the value in |filler|. At | 813 // not including end_offset are overwritten with the value in |filler|. At |
| 810 // the end of the loop, |start_offset| takes the value of |end_offset|. | 814 // the end of the loop, |start_offset| takes the value of |end_offset|. |
| 811 void InitializeFieldsWithFiller(Register start_offset, | 815 void InitializeFieldsWithFiller(Register start_offset, |
| 812 Register end_offset, | 816 Register end_offset, |
| 813 Register filler); | 817 Register filler); |
| 814 | 818 |
| 815 // --------------------------------------------------------------------------- | 819 // --------------------------------------------------------------------------- |
| 816 // Support functions. | 820 // Support functions. |
| 817 | 821 |
| (...skipping 56 matching lines...) |
| 874 Register scratch, | 878 Register scratch, |
| 875 Label* fail); | 879 Label* fail); |
| 876 | 880 |
| 877 // Check to see if maybe_number can be stored as a double in | 881 // Check to see if maybe_number can be stored as a double in |
| 878 // FastDoubleElements. If it can, store it at the index specified by key in | 882 // FastDoubleElements. If it can, store it at the index specified by key in |
| 879 // the FastDoubleElements array elements. Otherwise jump to fail. | 883 // the FastDoubleElements array elements. Otherwise jump to fail. |
| 880 void StoreNumberToDoubleElements(Register value_reg, | 884 void StoreNumberToDoubleElements(Register value_reg, |
| 881 Register key_reg, | 885 Register key_reg, |
| 882 Register elements_reg, | 886 Register elements_reg, |
| 883 Register scratch1, | 887 Register scratch1, |
| 884 LowDwVfpRegister double_scratch, | 888 DoubleRegister double_scratch, |
| 885 Label* fail, | 889 Label* fail, |
| 886 int elements_offset = 0); | 890 int elements_offset = 0); |
| 887 | 891 |
| 888 // Compare an object's map with the specified map and its transitioned | 892 // Compare an object's map with the specified map and its transitioned |
| 889 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are | 893 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are |
| 890 // set with result of map compare. If multiple map compares are required, the | 894 // set with result of map compare. If multiple map compares are required, the |
| 891 // compare sequences branches to early_success. | 895 // compare sequences branches to early_success. |
| 892 void CompareMap(Register obj, | 896 void CompareMap(Register obj, |
| 893 Register scratch, | 897 Register scratch, |
| 894 Handle<Map> map, | 898 Handle<Map> map, |
| (...skipping 33 matching lines...) |
| 928 SmiCheckType smi_check_type); | 932 SmiCheckType smi_check_type); |
| 929 | 933 |
| 930 | 934 |
| 931 // Compare the object in a register to a value from the root list. | 935 // Compare the object in a register to a value from the root list. |
| 932 // Uses the ip register as scratch. | 936 // Uses the ip register as scratch. |
| 933 void CompareRoot(Register obj, Heap::RootListIndex index); | 937 void CompareRoot(Register obj, Heap::RootListIndex index); |
| 934 | 938 |
| 935 | 939 |
| 936 // Load and check the instance type of an object for being a string. | 940 // Load and check the instance type of an object for being a string. |
| 937 // Loads the type into the second argument register. | 941 // Loads the type into the second argument register. |
| 938 // Returns a condition that will be enabled if the object was a string | 942 // Returns a condition that will be enabled if the object was a string. |
| 939 // and the passed-in condition passed. If the passed-in condition failed | |
| 940 // then flags remain unchanged. | |
| 941 Condition IsObjectStringType(Register obj, | 943 Condition IsObjectStringType(Register obj, |
| 942 Register type, | 944 Register type) { |
| 943 Condition cond = al) { | 945 LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset)); |
| 944 ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond); | 946 lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset)); |
| 945 ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond); | 947 andi(r0, type, Operand(kIsNotStringMask)); |
| 946 tst(type, Operand(kIsNotStringMask), cond); | |
| 947 ASSERT_EQ(0, kStringTag); | 948 ASSERT_EQ(0, kStringTag); |
| 948 return eq; | 949 return eq; |
| 949 } | 950 } |
| 950 | 951 |
| 951 | 952 |
| 952 // Picks out an array index from the hash field. | 953 // Picks out an array index from the hash field. |
| 953 // Register use: | 954 // Register use: |
| 954 // hash - holds the index's hash. Clobbered. | 955 // hash - holds the index's hash. Clobbered. |
| 955 // index - holds the overwritten index on exit. | 956 // index - holds the overwritten index on exit. |
| 956 void IndexFromHash(Register hash, Register index); | 957 void IndexFromHash(Register hash, Register index); |
| 957 | 958 |
| 958 // Get the number of least significant bits from a register | 959 // Get the number of least significant bits from a register |
| 959 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); | 960 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); |
| 960 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits); | 961 void GetLeastBitsFromInt32(Register dst, Register src, int mun_least_bits); |
| 961 | 962 |
| 962 // Load the value of a smi object into a double register. | 963 // Load the value of a smi object into a double register. |
| 963 // The register value must be between d0 and d15. | 964 void SmiToDouble(DoubleRegister value, Register smi); |
| 964 void SmiToDouble(LowDwVfpRegister value, Register smi); | |
| 965 | 965 |
| 966 // Check if a double can be exactly represented as a signed 32-bit integer. | 966 // Check if a double can be exactly represented as a signed 32-bit integer. |
| 967 // Z flag set to one if true. | 967 // CR_EQ in cr7 is set if true. |
| 968 void TestDoubleIsInt32(DwVfpRegister double_input, | 968 void TestDoubleIsInt32(DoubleRegister double_input, |
| 969 LowDwVfpRegister double_scratch); | 969 Register scratch1, |
| 970 Register scratch2, |
| 971 DoubleRegister double_scratch); |
| 970 | 972 |
| 971 // Try to convert a double to a signed 32-bit integer. | 973 // Try to convert a double to a signed 32-bit integer. |
| 972 // Z flag set to one and result assigned if the conversion is exact. | 974 // CR_EQ in cr7 is set and result assigned if the conversion is exact. |
| 973 void TryDoubleToInt32Exact(Register result, | 975 void TryDoubleToInt32Exact(Register result, |
| 974 DwVfpRegister double_input, | 976 DoubleRegister double_input, |
| 975 LowDwVfpRegister double_scratch); | 977 Register scratch, |
| 978 DoubleRegister double_scratch); |
| 976 | 979 |
| 977 // Floor a double and writes the value to the result register. | 980 // Floor a double and writes the value to the result register. |
| 978 // Go to exact if the conversion is exact (to be able to test -0), | 981 // Go to exact if the conversion is exact (to be able to test -0), |
| 979 // fall through calling code if an overflow occurred, else go to done. | 982 // fall through calling code if an overflow occurred, else go to done. |
| 980 // In return, input_high is loaded with high bits of input. | 983 // In return, input_high is loaded with high bits of input. |
| 981 void TryInt32Floor(Register result, | 984 void TryInt32Floor(Register result, |
| 982 DwVfpRegister double_input, | 985 DoubleRegister double_input, |
| 983 Register input_high, | 986 Register input_high, |
| 984 LowDwVfpRegister double_scratch, | 987 Register scratch, |
| 988 DoubleRegister double_scratch, |
| 985 Label* done, | 989 Label* done, |
| 986 Label* exact); | 990 Label* exact); |
| 987 | 991 |
| 988 // Performs a truncating conversion of a floating point number as used by | 992 // Performs a truncating conversion of a floating point number as used by |
| 989 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it | 993 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it |
| 990 // succeeds, otherwise falls through if result is saturated. On return | 994 // succeeds, otherwise falls through if result is saturated. On return |
| 991 // 'result' either holds answer, or is clobbered on fall through. | 995 // 'result' either holds answer, or is clobbered on fall through. |
| 992 // | 996 // |
| 993 // Only public for the test code in test-code-stubs-arm.cc. | 997 // Only public for the test code in test-code-stubs-arm.cc. |
| 994 void TryInlineTruncateDoubleToI(Register result, | 998 void TryInlineTruncateDoubleToI(Register result, |
| 995 DwVfpRegister input, | 999 DoubleRegister input, |
| 996 Label* done); | 1000 Label* done); |
| 997 | 1001 |
| 998 // Performs a truncating conversion of a floating point number as used by | 1002 // Performs a truncating conversion of a floating point number as used by |
| 999 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. | 1003 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. |
| 1000 // Exits with 'result' holding the answer. | 1004 // Exits with 'result' holding the answer. |
| 1001 void TruncateDoubleToI(Register result, DwVfpRegister double_input); | 1005 void TruncateDoubleToI(Register result, DoubleRegister double_input); |
| 1002 | 1006 |
| 1003 // Performs a truncating conversion of a heap number as used by | 1007 // Performs a truncating conversion of a heap number as used by |
| 1004 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input' | 1008 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input' |
| 1005 // must be different registers. Exits with 'result' holding the answer. | 1009 // must be different registers. Exits with 'result' holding the answer. |
| 1006 void TruncateHeapNumberToI(Register result, Register object); | 1010 void TruncateHeapNumberToI(Register result, Register object); |
| 1007 | 1011 |
| 1008 // Converts the smi or heap number in object to an int32 using the rules | 1012 // Converts the smi or heap number in object to an int32 using the rules |
| 1009 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated | 1013 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated |
| 1010 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be | 1014 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be |
| 1011 // different registers. | 1015 // different registers. |
| 1012 void TruncateNumberToI(Register object, | 1016 void TruncateNumberToI(Register object, |
| 1013 Register result, | 1017 Register result, |
| 1014 Register heap_number_map, | 1018 Register heap_number_map, |
| 1015 Register scratch1, | 1019 Register scratch1, |
| 1016 Label* not_int32); | 1020 Label* not_int32); |
| 1017 | 1021 |
| 1018 // Check whether d16-d31 are available on the CPU. The result is given by the | 1022 // Overflow handling functions. |
| 1019 // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise. | 1023 // Usage: call the appropriate arithmetic function and then call one of the |
| 1020 void CheckFor32DRegs(Register scratch); | 1024 // flow control functions with the corresponding label. |
| 1021 | 1025 |
| 1022 // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double | 1026 // Compute dst = left + right, setting condition codes. dst may be same as |
| 1023 // values to location, saving [d0..(d15|d31)]. | 1027 // either left or right (or a unique register). left and right must not be |
| 1024 void SaveFPRegs(Register location, Register scratch); | 1028 // the same register. |
| 1029 void AddAndCheckForOverflow(Register dst, |
| 1030 Register left, |
| 1031 Register right, |
| 1032 Register overflow_dst, |
| 1033 Register scratch = r0); |
| 1025 | 1034 |
| 1026 // Does a runtime check for 16/32 FP registers. Either way, pops 32 double | 1035 // Compute dst = left - right, setting condition codes. dst may be same as |
| 1027 // values to location, restoring [d0..(d15|d31)]. | 1036 // either left or right (or a unique register). left and right must not be |
| 1028 void RestoreFPRegs(Register location, Register scratch); | 1037 // the same register. |
| 1038 void SubAndCheckForOverflow(Register dst, |
| 1039 Register left, |
| 1040 Register right, |
| 1041 Register overflow_dst, |
| 1042 Register scratch = r0); |
| 1043 |
| 1044 void BranchOnOverflow(Label* label) { |
| 1045 blt(label, cr0); |
| 1046 } |
| 1047 |
| 1048 void BranchOnNoOverflow(Label* label) { |
| 1049 bge(label, cr0); |
| 1050 } |
| 1051 |
| 1052 void RetOnOverflow(void) { |
| 1053 Label label; |
| 1054 |
| 1055 blt(&label, cr0); |
| 1056 Ret(); |
| 1057 bind(&label); |
| 1058 } |
| 1059 |
| 1060 void RetOnNoOverflow(void) { |
| 1061 Label label; |
| 1062 |
| 1063 bge(&label, cr0); |
| 1064 Ret(); |
| 1065 bind(&label); |
| 1066 } |
| 1067 |
| 1068 // Pushes <count> double values to <location>, starting from d<first>. |
| 1069 void SaveFPRegs(Register location, int first, int count); |
| 1070 |
| 1071 // Pops <count> double values from <location>, starting from d<first>. |
| 1072 void RestoreFPRegs(Register location, int first, int count); |
| 1029 | 1073 |
| 1030 // --------------------------------------------------------------------------- | 1074 // --------------------------------------------------------------------------- |
| 1031 // Runtime calls | 1075 // Runtime calls |
| 1032 | 1076 |
| 1033 // Call a code stub. | 1077 // Call a code stub. |
| 1034 void CallStub(CodeStub* stub, | 1078 void CallStub(CodeStub* stub, |
| 1035 TypeFeedbackId ast_id = TypeFeedbackId::None(), | 1079 TypeFeedbackId ast_id = TypeFeedbackId::None(), |
| 1036 Condition cond = al); | 1080 Condition cond = al); |
| 1037 | 1081 |
| 1038 // Call a code stub. | 1082 // Call a code stub. |
| (...skipping 47 matching lines...) |
| 1086 void PrepareCallCFunction(int num_reg_arguments, | 1130 void PrepareCallCFunction(int num_reg_arguments, |
| 1087 int num_double_registers, | 1131 int num_double_registers, |
| 1088 Register scratch); | 1132 Register scratch); |
| 1089 void PrepareCallCFunction(int num_reg_arguments, | 1133 void PrepareCallCFunction(int num_reg_arguments, |
| 1090 Register scratch); | 1134 Register scratch); |
| 1091 | 1135 |
| 1092 // There are two ways of passing double arguments on ARM, depending on | 1136 // There are two ways of passing double arguments on ARM, depending on |
| 1093 // whether soft or hard floating point ABI is used. These functions | 1137 // whether soft or hard floating point ABI is used. These functions |
| 1094 // abstract parameter passing for the three different ways we call | 1138 // abstract parameter passing for the three different ways we call |
| 1095 // C functions from generated code. | 1139 // C functions from generated code. |
| 1096 void MovToFloatParameter(DwVfpRegister src); | 1140 void MovToFloatParameter(DoubleRegister src); |
| 1097 void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2); | 1141 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2); |
| 1098 void MovToFloatResult(DwVfpRegister src); | 1142 void MovToFloatResult(DoubleRegister src); |
| 1099 | 1143 |
| 1100 // Calls a C function and cleans up the space for arguments allocated | 1144 // Calls a C function and cleans up the space for arguments allocated |
| 1101 // by PrepareCallCFunction. The called function is not allowed to trigger a | 1145 // by PrepareCallCFunction. The called function is not allowed to trigger a |
| 1102 // garbage collection, since that might move the code and invalidate the | 1146 // garbage collection, since that might move the code and invalidate the |
| 1103 // return address (unless this is somehow accounted for by the called | 1147 // return address (unless this is somehow accounted for by the called |
| 1104 // function). | 1148 // function). |
| 1105 void CallCFunction(ExternalReference function, int num_arguments); | 1149 void CallCFunction(ExternalReference function, int num_arguments); |
| 1106 void CallCFunction(Register function, int num_arguments); | 1150 void CallCFunction(Register function, int num_arguments); |
| 1107 void CallCFunction(ExternalReference function, | 1151 void CallCFunction(ExternalReference function, |
| 1108 int num_reg_arguments, | 1152 int num_reg_arguments, |
| 1109 int num_double_arguments); | 1153 int num_double_arguments); |
| 1110 void CallCFunction(Register function, | 1154 void CallCFunction(Register function, |
| 1111 int num_reg_arguments, | 1155 int num_reg_arguments, |
| 1112 int num_double_arguments); | 1156 int num_double_arguments); |
| 1113 | 1157 |
| 1114 void MovFromFloatParameter(DwVfpRegister dst); | 1158 void MovFromFloatParameter(DoubleRegister dst); |
| 1115 void MovFromFloatResult(DwVfpRegister dst); | 1159 void MovFromFloatResult(DoubleRegister dst); |
| 1116 | 1160 |
| 1117 // Calls an API function. Allocates HandleScope, extracts returned value | 1161 // Calls an API function. Allocates HandleScope, extracts returned value |
| 1118 // from handle and propagates exceptions. Restores context. stack_space | 1162 // from handle and propagates exceptions. Restores context. stack_space |
| 1119 // - space to be unwound on exit (includes the call JS arguments space and | 1163 // - space to be unwound on exit (includes the call JS arguments space and |
| 1120 // the additional space allocated for the fast call). | 1164 // the additional space allocated for the fast call). |
| 1121 void CallApiFunctionAndReturn(Register function_address, | 1165 void CallApiFunctionAndReturn(Register function_address, |
| 1122 ExternalReference thunk_ref, | 1166 ExternalReference thunk_ref, |
| 1123 int stack_space, | 1167 int stack_space, |
| 1124 MemOperand return_value_operand, | 1168 MemOperand return_value_operand, |
| 1125 MemOperand* context_restore_operand); | 1169 MemOperand* context_restore_operand); |
| (...skipping 33 matching lines...) |
| 1159 Register scratch1, Register scratch2); | 1203 Register scratch1, Register scratch2); |
| 1160 void DecrementCounter(StatsCounter* counter, int value, | 1204 void DecrementCounter(StatsCounter* counter, int value, |
| 1161 Register scratch1, Register scratch2); | 1205 Register scratch1, Register scratch2); |
| 1162 | 1206 |
| 1163 | 1207 |
| 1164 // --------------------------------------------------------------------------- | 1208 // --------------------------------------------------------------------------- |
| 1165 // Debugging | 1209 // Debugging |
| 1166 | 1210 |
| 1167 // Calls Abort(msg) if the condition cond is not satisfied. | 1211 // Calls Abort(msg) if the condition cond is not satisfied. |
| 1168 // Use --debug_code to enable. | 1212 // Use --debug_code to enable. |
| 1169 void Assert(Condition cond, BailoutReason reason); | 1213 void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7); |
| 1170 void AssertFastElements(Register elements); | 1214 void AssertFastElements(Register elements); |
| 1171 | 1215 |
| 1172 // Like Assert(), but always enabled. | 1216 // Like Assert(), but always enabled. |
| 1173 void Check(Condition cond, BailoutReason reason); | 1217 void Check(Condition cond, BailoutReason reason, CRegister cr = cr7); |
| 1174 | 1218 |
| 1175 // Print a message to stdout and abort execution. | 1219 // Print a message to stdout and abort execution. |
| 1176 void Abort(BailoutReason msg); | 1220 void Abort(BailoutReason reason); |
| 1177 | 1221 |
| 1178 // Verify restrictions about code generated in stubs. | 1222 // Verify restrictions about code generated in stubs. |
| 1179 void set_generating_stub(bool value) { generating_stub_ = value; } | 1223 void set_generating_stub(bool value) { generating_stub_ = value; } |
| 1180 bool generating_stub() { return generating_stub_; } | 1224 bool generating_stub() { return generating_stub_; } |
| 1181 void set_has_frame(bool value) { has_frame_ = value; } | 1225 void set_has_frame(bool value) { has_frame_ = value; } |
| 1182 bool has_frame() { return has_frame_; } | 1226 bool has_frame() { return has_frame_; } |
| 1183 inline bool AllowThisStubCall(CodeStub* stub); | 1227 inline bool AllowThisStubCall(CodeStub* stub); |
| 1184 | 1228 |
| 1185 // EABI variant for double arguments in use. | |
| 1186 bool use_eabi_hardfloat() { | |
| 1187 #ifdef __arm__ | |
| 1188 return base::OS::ArmUsingHardFloat(); | |
| 1189 #elif USE_EABI_HARDFLOAT | |
| 1190 return true; | |
| 1191 #else | |
| 1192 return false; | |
| 1193 #endif | |
| 1194 } | |
| 1195 | |
| 1196 // --------------------------------------------------------------------------- | 1229 // --------------------------------------------------------------------------- |
| 1197 // Number utilities | 1230 // Number utilities |
| 1198 | 1231 |
| 1199 // Check whether the value of reg is a power of two and not zero. If not, | 1232 // Check whether the value of reg is a power of two and not zero. If not, |
| 1200 // control continues at the label not_power_of_two_or_zero. If reg is a power of two | 1233 // control continues at the label not_power_of_two_or_zero. If reg is a power of two |
| 1201 // the register scratch contains the value of (reg - 1) when control falls | 1234 // the register scratch contains the value of (reg - 1) when control falls |
| 1202 // through. | 1235 // through. |
| 1203 void JumpIfNotPowerOfTwoOrZero(Register reg, | 1236 void JumpIfNotPowerOfTwoOrZero(Register reg, |
| 1204 Register scratch, | 1237 Register scratch, |
| 1205 Label* not_power_of_two_or_zero); | 1238 Label* not_power_of_two_or_zero); |
| 1206 // Check whether the value of reg is a power of two and not zero. | 1239 // Check whether the value of reg is a power of two and not zero. |
| 1207 // Control falls through if it is, with scratch containing the mask | 1240 // Control falls through if it is, with scratch containing the mask |
| 1208 // value (reg - 1). | 1241 // value (reg - 1). |
| 1209 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is | 1242 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is |
| 1210 // zero or negative, or jumps to the 'not_power_of_two' label if the value is | 1243 // zero or negative, or jumps to the 'not_power_of_two' label if the value is |
| 1211 // strictly positive but not a power of two. | 1244 // strictly positive but not a power of two. |
| 1212 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, | 1245 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, |
| 1213 Register scratch, | 1246 Register scratch, |
| 1214 Label* zero_and_neg, | 1247 Label* zero_and_neg, |
| 1215 Label* not_power_of_two); | 1248 Label* not_power_of_two); |
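The checks above rely on the standard identity that a non-zero x is a power of two exactly when x & (x - 1) == 0, with (x - 1) doubling as the mask left in scratch. A minimal standalone sketch of that arithmetic in plain C++ (illustrative only, not the generated PPC code):

#include <cassert>
#include <cstdint>

// Mirrors the check behind JumpIfNotPowerOfTwoOrZero: returns true and yields
// the mask (x - 1) when x is a non-zero power of two.
static bool IsPowerOfTwoNotZero(uintptr_t x, uintptr_t* mask) {
  if (x == 0 || (x & (x - 1)) != 0) return false;
  *mask = x - 1;  // e.g. 0x100 -> 0xFF
  return true;
}

int main() {
  uintptr_t mask = 0;
  assert(IsPowerOfTwoNotZero(0x100, &mask) && mask == 0xFF);
  assert(!IsPowerOfTwoNotZero(0, &mask));
  assert(!IsPowerOfTwoNotZero(0x104, &mask));
  return 0;
}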
| 1216 | 1249 |
| 1217 // --------------------------------------------------------------------------- | 1250 // --------------------------------------------------------------------------- |
| 1251 // Bit testing/extraction |
| 1252 // |
| 1253 // Bit numbering is such that the least significant bit is bit 0 |
| 1254 // (for consistency between 32/64-bit). |
| 1255 |
| 1256 // Extract consecutive bits (defined by rangeStart - rangeEnd) from src |
| 1257 // and place them into the least significant bits of dst. |
| 1258 inline void ExtractBitRange(Register dst, Register src, |
| 1259 int rangeStart, int rangeEnd, |
| 1260 RCBit rc = LeaveRC) { |
| 1261 ASSERT(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer); |
| 1262 int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd; |
| 1263 int width = rangeStart - rangeEnd + 1; |
| 1264 #if V8_TARGET_ARCH_PPC64 |
| 1265 rldicl(dst, src, rotate, kBitsPerPointer - width, rc); |
| 1266 #else |
| 1267 rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1, rc); |
| 1268 #endif |
| 1269 } |
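The rotate amount and mask bounds above follow the usual rlwinm/rldicl pattern: rotate the source left so that bit rangeEnd lands at bit 0, then keep only the width of the field. A minimal 32-bit sketch of that arithmetic in plain C++ (bit 0 is the least significant bit, as in the comment above; illustrative only):

#include <cassert>
#include <cstdint>

// Emulates the 32-bit ExtractBitRange: rotate left so that bit rangeEnd
// becomes bit 0, then keep (rangeStart - rangeEnd + 1) bits.
static uint32_t ExtractBitRange32(uint32_t src, int rangeStart, int rangeEnd) {
  int rotate = (rangeEnd == 0) ? 0 : 32 - rangeEnd;
  int width = rangeStart - rangeEnd + 1;
  uint32_t rotated =
      (rotate == 0) ? src : (src << rotate) | (src >> (32 - rotate));
  uint32_t mask = (width == 32) ? ~0u : ((1u << width) - 1);
  return rotated & mask;
}

int main() {
  assert(ExtractBitRange32(0xABCD, 7, 4) == 0xC);  // bits 7..4 of 0xABCD
  assert(ExtractBitRange32(0x1, 0, 0) == 1);       // a single bit
  return 0;
}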
| 1270 |
| 1271 inline void ExtractBit(Register dst, Register src, uint32_t bitNumber, |
| 1272 RCBit rc = LeaveRC) { |
| 1273 ExtractBitRange(dst, src, bitNumber, bitNumber, rc); |
| 1274 } |
| 1275 |
| 1276 // Extract consecutive bits (defined by mask) from src and place them |
| 1277 // into the least significant bits of dst. |
| 1278 inline void ExtractBitMask(Register dst, Register src, uintptr_t mask, |
| 1279 RCBit rc = LeaveRC) { |
| 1280 int start = kBitsPerPointer - 1; |
| 1281 int end; |
| 1282 uintptr_t bit = (1L << start); |
| 1283 |
| 1284 while (bit && (mask & bit) == 0) { |
| 1285 start--; |
| 1286 bit >>= 1; |
| 1287 } |
| 1288 end = start; |
| 1289 bit >>= 1; |
| 1290 |
| 1291 while (bit && (mask & bit)) { |
| 1292 end--; |
| 1293 bit >>= 1; |
| 1294 } |
| 1295 |
| 1296 // 1-bits in mask must be contiguous |
| 1297 ASSERT(bit == 0 || (mask & ((bit << 1) - 1)) == 0); |
| 1298 |
| 1299 ExtractBitRange(dst, src, start, end, rc); |
| 1300 } |
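The scan above simply locates the highest and lowest set bits of a contiguous mask before handing that range to ExtractBitRange. A standalone sketch of the same scan (plain C++, illustrative only):

#include <cassert>
#include <cstdint>

// Mirrors the scan in ExtractBitMask: for a contiguous mask, find the bit
// positions of its highest (start) and lowest (end) set bits.
static void MaskToBitRange(uintptr_t mask, int* start, int* end) {
  int s = sizeof(uintptr_t) * 8 - 1;
  uintptr_t bit = (uintptr_t)1 << s;
  while (bit && (mask & bit) == 0) { s--; bit >>= 1; }
  int e = s;
  bit >>= 1;
  while (bit && (mask & bit)) { e--; bit >>= 1; }
  *start = s;
  *end = e;
}

int main() {
  int start, end;
  MaskToBitRange(0x0000FF00u, &start, &end);
  assert(start == 15 && end == 8);
  return 0;
}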
| 1301 |
| 1302 // Test single bit in value. |
| 1303 inline void TestBit(Register value, int bitNumber, |
| 1304 Register scratch = r0) { |
| 1305 ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC); |
| 1306 } |
| 1307 |
| 1308 // Test consecutive bit range in value. Range is defined by |
| 1309 // rangeStart - rangeEnd. |
| 1310 inline void TestBitRange(Register value, |
| 1311 int rangeStart, int rangeEnd, |
| 1312 Register scratch = r0) { |
| 1313 ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC); |
| 1314 } |
| 1315 |
| 1316 // Test consecutive bit range in value. Range is defined by mask. |
| 1317 inline void TestBitMask(Register value, uintptr_t mask, |
| 1318 Register scratch = r0) { |
| 1319 ExtractBitMask(scratch, value, mask, SetRC); |
| 1320 } |
| 1321 |
| 1322 |
| 1323 // --------------------------------------------------------------------------- |
| 1218 // Smi utilities | 1324 // Smi utilities |
| 1219 | 1325 |
| 1220 void SmiTag(Register reg, SBit s = LeaveCC) { | 1326 // Shift left by 1 |
| 1221 add(reg, reg, Operand(reg), s); | 1327 void SmiTag(Register reg, RCBit rc = LeaveRC) { |
| 1328 SmiTag(reg, reg, rc); |
| 1222 } | 1329 } |
| 1223 void SmiTag(Register dst, Register src, SBit s = LeaveCC) { | 1330 void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) { |
| 1224 add(dst, src, Operand(src), s); | 1331 ShiftLeftImm(dst, src, Operand(kSmiShift), rc); |
| 1225 } | 1332 } |
| 1226 | 1333 |
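Smi tagging here is just a left shift by kSmiShift; the values assumed below are 1 for the 32-bit layout (31-bit payload) and 32 for the 64-bit layout (payload in the upper word), which is the distinction the #if blocks in this header make. A standalone sketch of the tag/untag arithmetic (plain C++, not generated code):

#include <cassert>
#include <cstdint>

// Illustrative smi tag/untag arithmetic for the two layouts.
static int64_t SmiTagValue(int64_t value, int smi_shift) {
  return (int64_t)((uint64_t)value << smi_shift);
}
static int64_t SmiUntagValue(int64_t smi, int smi_shift) {
  return smi >> smi_shift;  // arithmetic shift, as ShiftRightArithImm does
}

int main() {
  assert(SmiUntagValue(SmiTagValue(-42, 1), 1) == -42);      // 32-bit layout
  assert(SmiUntagValue(SmiTagValue(1234, 32), 32) == 1234);  // 64-bit layout
  return 0;
}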
| 1227 // Try to convert int32 to smi. If the value is too large, preserve | 1334 #if !V8_TARGET_ARCH_PPC64 |
| 1228 // the original value and jump to not_a_smi. Destroys scratch and | 1335 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow(). |
| 1229 // sets flags. | 1336 void SmiTagCheckOverflow(Register reg, Register overflow); |
| 1230 void TrySmiTag(Register reg, Label* not_a_smi) { | 1337 void SmiTagCheckOverflow(Register dst, Register src, Register overflow); |
| 1231 TrySmiTag(reg, reg, not_a_smi); | 1338 |
| 1339 inline void JumpIfNotSmiCandidate(Register value, Register scratch, |
| 1340 Label* not_smi_label) { |
| 1341 // High bits must be identical to fit into an Smi |
| 1342 addis(scratch, value, Operand(0x40000000u >> 16)); |
| 1343 cmpi(scratch, Operand::Zero()); |
| 1344 blt(not_smi_label); |
| 1232 } | 1345 } |
| 1233 void TrySmiTag(Register reg, Register src, Label* not_a_smi) { | 1346 #endif |
| 1234 SmiTag(ip, src, SetCC); | 1347 inline void TestUnsignedSmiCandidate(Register value, Register scratch) { |
| 1235 b(vs, not_a_smi); | 1348 // The test is different for unsigned int values. Since we need |
| 1236 mov(reg, ip); | 1349 // the value to be in the range of a positive smi, we can't |
| 1350 // handle any of the high bits being set in the value. |
| 1351 TestBitRange(value, |
| 1352 kBitsPerPointer - 1, |
| 1353 kBitsPerPointer - 1 - kSmiShift, |
| 1354 scratch); |
| 1355 } |
| 1356 inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch, |
| 1357 Label* not_smi_label) { |
| 1358 TestUnsignedSmiCandidate(value, scratch); |
| 1359 bne(not_smi_label, cr0); |
| 1237 } | 1360 } |
| 1238 | 1361 |
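The 32-bit candidate test above uses the addis/blt idiom: adding 0x40000000 makes the signed 32-bit result negative exactly when the value lies outside [-2^30, 2^30), i.e. when its top two bits differ and it cannot carry a 31-bit smi payload. A standalone check of that reasoning (plain C++, illustrative only):

#include <cassert>
#include <cstdint>

// Mirrors the addis/cmpi/blt sequence: the value fits a 31-bit smi payload
// exactly when (value + 0x40000000) is non-negative as a 32-bit integer.
static bool IsSmiCandidate32(int32_t value) {
  return (int32_t)((uint32_t)value + 0x40000000u) >= 0;
}

int main() {
  assert(IsSmiCandidate32(0));
  assert(IsSmiCandidate32((1 << 30) - 1));    // largest candidate
  assert(IsSmiCandidate32(-(1 << 30)));       // smallest candidate
  assert(!IsSmiCandidate32(1 << 30));         // one past the top
  assert(!IsSmiCandidate32(-(1 << 30) - 1));  // one past the bottom
  return 0;
}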
| 1362 void SmiUntag(Register reg, RCBit rc = LeaveRC) { |
| 1363 SmiUntag(reg, reg, rc); |
| 1364 } |
| 1239 | 1365 |
| 1240 void SmiUntag(Register reg, SBit s = LeaveCC) { | 1366 void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) { |
| 1241 mov(reg, Operand::SmiUntag(reg), s); | 1367 ShiftRightArithImm(dst, src, kSmiShift, rc); |
| 1242 } | 1368 } |
| 1243 void SmiUntag(Register dst, Register src, SBit s = LeaveCC) { | 1369 |
| 1244 mov(dst, Operand::SmiUntag(src), s); | 1370 void SmiToPtrArrayOffset(Register dst, Register src) { |
| 1371 #if V8_TARGET_ARCH_PPC64 |
| 1372 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2); |
| 1373 ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2); |
| 1374 #else |
| 1375 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2); |
| 1376 ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift)); |
| 1377 #endif |
| 1378 } |
| 1379 |
| 1380 void SmiToByteArrayOffset(Register dst, Register src) { |
| 1381 SmiUntag(dst, src); |
| 1382 } |
| 1383 |
| 1384 void SmiToShortArrayOffset(Register dst, Register src) { |
| 1385 #if V8_TARGET_ARCH_PPC64 |
| 1386 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1); |
| 1387 ShiftRightArithImm(dst, src, kSmiShift - 1); |
| 1388 #else |
| 1389 STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1); |
| 1390 if (!dst.is(src)) { |
| 1391 mr(dst, src); |
| 1392 } |
| 1393 #endif |
| 1394 } |
| 1395 |
| 1396 void SmiToIntArrayOffset(Register dst, Register src) { |
| 1397 #if V8_TARGET_ARCH_PPC64 |
| 1398 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2); |
| 1399 ShiftRightArithImm(dst, src, kSmiShift - 2); |
| 1400 #else |
| 1401 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2); |
| 1402 ShiftLeftImm(dst, src, Operand(2 - kSmiShift)); |
| 1403 #endif |
| 1404 } |
| 1405 |
| 1406 #define SmiToFloatArrayOffset SmiToIntArrayOffset |
| 1407 |
| 1408 void SmiToDoubleArrayOffset(Register dst, Register src) { |
| 1409 #if V8_TARGET_ARCH_PPC64 |
| 1410 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2); |
| 1411 ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2); |
| 1412 #else |
| 1413 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2); |
| 1414 ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift)); |
| 1415 #endif |
| 1416 } |
| 1417 |
| 1418 void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) { |
| 1419 if (kSmiShift < elementSizeLog2) { |
| 1420 ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift)); |
| 1421 } else if (kSmiShift > elementSizeLog2) { |
| 1422 ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2); |
| 1423 } else if (!dst.is(src)) { |
| 1424 mr(dst, src); |
| 1425 } |
| 1426 } |
| 1427 |
| 1428 void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2, |
| 1429 bool isSmi) { |
| 1430 if (isSmi) { |
| 1431 SmiToArrayOffset(dst, src, elementSizeLog2); |
| 1432 } else { |
| 1433 ShiftLeftImm(dst, src, Operand(elementSizeLog2)); |
| 1434 } |
| 1245 } | 1435 } |
| 1246 | 1436 |
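The offset helpers above fold the untag and the element-size scaling into one shift: the byte offset is (smi >> kSmiShift) << elementSizeLog2, so only the difference between the two shift amounts is applied, in whichever direction is needed. A standalone sketch of that arithmetic, again assuming kSmiShift of 1 (32-bit) or 32 (64-bit):

#include <cassert>
#include <cstdint>

// Single-shift form of SmiToArrayOffset: equivalent to untagging the smi and
// then scaling by the element size.
static int64_t SmiToOffset(int64_t smi, int smi_shift, int element_size_log2) {
  if (smi_shift > element_size_log2) {
    return smi >> (smi_shift - element_size_log2);
  }
  return (int64_t)((uint64_t)smi << (element_size_log2 - smi_shift));
}

int main() {
  int64_t smi64 = (int64_t)7 << 32;            // 64-bit layout, tagged 7
  assert(SmiToOffset(smi64, 32, 3) == 7 * 8);  // pointer-sized elements
  int64_t smi32 = 7 << 1;                      // 32-bit layout, tagged 7
  assert(SmiToOffset(smi32, 1, 3) == 7 * 8);   // double-sized elements
  return 0;
}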
| 1247 // Untag the source value into destination and jump if source is a smi. | 1437 // Untag the source value into destination and jump if source is a smi. |
| 1248 // Source and destination can be the same register. | 1438 // Source and destination can be the same register. |
| 1249 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case); | 1439 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case); |
| 1250 | 1440 |
| 1251 // Untag the source value into destination and jump if source is not a smi. | 1441 // Untag the source value into destination and jump if source is not a smi. |
| 1252 // Source and destination can be the same register. | 1442 // Source and destination can be the same register. |
| 1253 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case); | 1443 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case); |
| 1254 | 1444 |
| 1255 // Test if the register contains a smi (Z == 0 (eq) if true). | 1445 inline void TestIfSmi(Register value, Register scratch) { |
| 1256 inline void SmiTst(Register value) { | 1446 TestBit(value, 0, scratch); // tst(value, Operand(kSmiTagMask)); |
| 1257 tst(value, Operand(kSmiTagMask)); | |
| 1258 } | 1447 } |
| 1259 inline void NonNegativeSmiTst(Register value) { | 1448 |
| 1260 tst(value, Operand(kSmiTagMask | kSmiSignMask)); | 1449 inline void TestIfPositiveSmi(Register value, Register scratch) { |
| 1450 STATIC_ASSERT((kSmiTagMask | kSmiSignMask) == |
| 1451 (intptr_t)(1UL << (kBitsPerPointer - 1) | 1)); |
| 1452 #if V8_TARGET_ARCH_PPC64 |
| 1453 rldicl(scratch, value, 1, kBitsPerPointer - 2, SetRC); |
| 1454 #else |
| 1455 rlwinm(scratch, value, 1, kBitsPerPointer - 2, kBitsPerPointer - 1, SetRC); |
| 1456 #endif |
| 1261 } | 1457 } |
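TestIfPositiveSmi packs both conditions into one rotate-and-mask: rotating the value left by one moves the sign bit to bit 0 and the smi tag bit to bit 1, and keeping only those two bits gives zero exactly for a non-negative smi. A standalone sketch of the 32-bit variant (plain C++, illustrative only):

#include <cassert>
#include <cstdint>

// Rotate left by one and keep the low two bits: zero iff the value is a
// non-negative smi (tag bit clear and sign bit clear).
static bool IsPositiveSmi32(uint32_t value) {
  uint32_t rotated = (value << 1) | (value >> 31);
  return (rotated & 0x3) == 0;
}

int main() {
  assert(IsPositiveSmi32(42u << 1));               // non-negative smi
  assert(!IsPositiveSmi32((42u << 1) | 1));        // tag bit set
  assert(!IsPositiveSmi32(((uint32_t)-42) << 1));  // negative smi
  return 0;
}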
| 1262 // Jump if the register contains a smi. | 1458 |
| 1459 // Jump if the register contains a smi. |
| 1263 inline void JumpIfSmi(Register value, Label* smi_label) { | 1460 inline void JumpIfSmi(Register value, Label* smi_label) { |
| 1264 tst(value, Operand(kSmiTagMask)); | 1461 TestIfSmi(value, r0); |
| 1265 b(eq, smi_label); | 1462 beq(smi_label, cr0); // branch if SMI |
| 1266 } | 1463 } |
| 1267 // Jump if the register contains a non-smi. | 1464 // Jump if the register contains a non-smi. |
| 1268 inline void JumpIfNotSmi(Register value, Label* not_smi_label) { | 1465 inline void JumpIfNotSmi(Register value, Label* not_smi_label) { |
| 1269 tst(value, Operand(kSmiTagMask)); | 1466 TestIfSmi(value, r0); |
| 1270 b(ne, not_smi_label); | 1467 bne(not_smi_label, cr0); |
| 1271 } | 1468 } |
| 1272 // Jump if either of the registers contains a non-smi. | 1469 // Jump if either of the registers contains a non-smi. |
| 1273 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi); | 1470 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi); |
| 1274 // Jump if either of the registers contains a smi. | 1471 // Jump if either of the registers contains a smi. |
| 1275 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi); | 1472 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi); |
| 1276 | 1473 |
| 1277 // Abort execution if argument is a smi, enabled via --debug-code. | 1474 // Abort execution if argument is a smi, enabled via --debug-code. |
| 1278 void AssertNotSmi(Register object); | 1475 void AssertNotSmi(Register object); |
| 1279 void AssertSmi(Register object); | 1476 void AssertSmi(Register object); |
| 1280 | 1477 |
| 1478 |
| 1479 #if V8_TARGET_ARCH_PPC64 |
| 1480 inline void TestIfInt32(Register value, |
| 1481 Register scratch1, Register scratch2, |
| 1482 CRegister cr = cr7) { |
| 1483 // High bits must be identical to fit into a 32-bit integer |
| 1484 srawi(scratch1, value, 31); |
| 1485 sradi(scratch2, value, 32); |
| 1486 cmp(scratch1, scratch2, cr); |
| 1487 } |
| 1488 #else |
| 1489 inline void TestIfInt32(Register hi_word, Register lo_word, |
| 1490 Register scratch, CRegister cr = cr7) { |
| 1491 // High bits must be identical to fit into a 32-bit integer |
| 1492 srawi(scratch, lo_word, 31); |
| 1493 cmp(scratch, hi_word, cr); |
| 1494 } |
| 1495 #endif |
| 1496 |
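On 64-bit, TestIfInt32 compares the value shifted arithmetically by 31 (a word shift, which replicates bit 31) against the value shifted by 32 (the high word): they agree exactly when the upper 33 bits are all copies of bit 31, i.e. when the value survives a round trip through int32. A standalone check of that equivalence (plain C++, illustrative only):

#include <cassert>
#include <cstdint>

// Mirrors the srawi/sradi comparison: a value fits in 32 bits exactly when
// the sign of its low word, replicated, equals its high word.
static bool FitsInInt32(int64_t value) {
  int64_t low_word_sign = (int64_t)((int32_t)value >> 31);  // 0 or -1
  int64_t high_word = value >> 32;
  return low_word_sign == high_word;
}

int main() {
  assert(FitsInInt32(0));
  assert(FitsInInt32(-1));
  assert(FitsInInt32(INT32_MIN));
  assert(FitsInInt32(INT32_MAX));
  assert(!FitsInInt32((int64_t)INT32_MAX + 1));
  assert(!FitsInInt32((int64_t)INT32_MIN - 1));
  return 0;
}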
| 1281 // Abort execution if argument is not a string, enabled via --debug-code. | 1497 // Abort execution if argument is not a string, enabled via --debug-code. |
| 1282 void AssertString(Register object); | 1498 void AssertString(Register object); |
| 1283 | 1499 |
| 1284 // Abort execution if argument is not a name, enabled via --debug-code. | 1500 // Abort execution if argument is not a name, enabled via --debug-code. |
| 1285 void AssertName(Register object); | 1501 void AssertName(Register object); |
| 1286 | 1502 |
| 1287 // Abort execution if argument is not undefined or an AllocationSite, enabled | 1503 // Abort execution if argument is not undefined or an AllocationSite, enabled |
| 1288 // via --debug-code. | 1504 // via --debug-code. |
| 1289 void AssertUndefinedOrAllocationSite(Register object, Register scratch); | 1505 void AssertUndefinedOrAllocationSite(Register object, Register scratch); |
| 1290 | 1506 |
| (...skipping 58 matching lines...) |
| 1349 void JumpIfNotUniqueName(Register reg, Label* not_unique_name); | 1565 void JumpIfNotUniqueName(Register reg, Label* not_unique_name); |
| 1350 | 1566 |
| 1351 void EmitSeqStringSetCharCheck(Register string, | 1567 void EmitSeqStringSetCharCheck(Register string, |
| 1352 Register index, | 1568 Register index, |
| 1353 Register value, | 1569 Register value, |
| 1354 uint32_t encoding_mask); | 1570 uint32_t encoding_mask); |
| 1355 | 1571 |
| 1356 // --------------------------------------------------------------------------- | 1572 // --------------------------------------------------------------------------- |
| 1357 // Patching helpers. | 1573 // Patching helpers. |
| 1358 | 1574 |
| 1359 // Get the location of a relocated constant (its address in the constant pool) | 1575 // Retrieve/patch the relocated value (lis/ori pair or constant pool load). |
| 1360 // from its load site. | 1576 void GetRelocatedValue(Register location, |
| 1361 void GetRelocatedValueLocation(Register ldr_location, Register result, | 1577 Register result, |
| 1362 Register scratch); | 1578 Register scratch); |
| 1363 | 1579 void SetRelocatedValue(Register location, |
| 1580 Register scratch, |
| 1581 Register new_value); |
| 1364 | 1582 |
| 1365 void ClampUint8(Register output_reg, Register input_reg); | 1583 void ClampUint8(Register output_reg, Register input_reg); |
| 1366 | 1584 |
| 1585 // Saturate a value into an 8-bit unsigned integer |
| 1586 // if input_value < 0, output_value is 0 |
| 1587 // if input_value > 255, output_value is 255 |
| 1588 // otherwise output_value is the (int)input_value (round to nearest) |
| 1367 void ClampDoubleToUint8(Register result_reg, | 1589 void ClampDoubleToUint8(Register result_reg, |
| 1368 DwVfpRegister input_reg, | 1590 DoubleRegister input_reg, |
| 1369 LowDwVfpRegister double_scratch); | 1591 DoubleRegister temp_double_reg); |
| 1370 | 1592 |
| 1371 | 1593 |
| 1372 void LoadInstanceDescriptors(Register map, Register descriptors); | 1594 void LoadInstanceDescriptors(Register map, Register descriptors); |
| 1373 void EnumLength(Register dst, Register map); | 1595 void EnumLength(Register dst, Register map); |
| 1374 void NumberOfOwnDescriptors(Register dst, Register map); | 1596 void NumberOfOwnDescriptors(Register dst, Register map); |
| 1375 | 1597 |
| 1376 template<typename Field> | 1598 template<typename Field> |
| 1377 void DecodeField(Register dst, Register src) { | 1599 void DecodeField(Register dst, Register src) { |
| 1378 Ubfx(dst, src, Field::kShift, Field::kSize); | 1600 ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift); |
| 1379 } | 1601 } |
| 1380 | 1602 |
| 1381 template<typename Field> | 1603 template<typename Field> |
| 1382 void DecodeField(Register reg) { | 1604 void DecodeField(Register reg) { |
| 1383 DecodeField<Field>(reg, reg); | 1605 DecodeField<Field>(reg, reg); |
| 1384 } | 1606 } |
| 1385 | 1607 |
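DecodeField maps a BitField-style descriptor (kShift, kSize) onto ExtractBitRange, pulling bits [kShift + kSize - 1 .. kShift] down to the least significant positions. A standalone sketch of the equivalent shift-and-mask (plain C++; the field layout in main() is hypothetical, chosen only for illustration):

#include <cassert>
#include <cstdint>

// Equivalent of DecodeField<Field>: shift the field down, mask to its width.
static uint32_t DecodeBitField(uint32_t encoded, int shift, int size) {
  return (encoded >> shift) & ((1u << size) - 1);
}

int main() {
  const int kShift = 4, kSize = 3;          // hypothetical 3-bit field
  uint32_t encoded = (5u << kShift) | 0xF;  // field value 5 plus other bits
  assert(DecodeBitField(encoded, kShift, kSize) == 5);
  return 0;
}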
| 1386 template<typename Field> | 1608 template<typename Field> |
| 1387 void DecodeFieldToSmi(Register dst, Register src) { | 1609 void DecodeFieldToSmi(Register dst, Register src) { |
| 1388 static const int shift = Field::kShift; | 1610 #if V8_TARGET_ARCH_PPC64 |
| 1389 static const int mask = Field::kMask >> shift << kSmiTagSize; | 1611 DecodeField<Field>(dst, src); |
| 1390 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0); | 1612 SmiTag(dst); |
| 1391 STATIC_ASSERT(kSmiTag == 0); | 1613 #else |
| 1392 if (shift < kSmiTagSize) { | 1614 // 32-bit can do this in one instruction: |
| 1393 mov(dst, Operand(src, LSL, kSmiTagSize - shift)); | 1615 int start = Field::kSize + kSmiShift - 1; |
| 1394 and_(dst, dst, Operand(mask)); | 1616 int end = kSmiShift; |
| 1395 } else if (shift > kSmiTagSize) { | 1617 int rotate = kSmiShift - Field::kShift; |
| 1396 mov(dst, Operand(src, LSR, shift - kSmiTagSize)); | 1618 if (rotate < 0) { |
| 1397 and_(dst, dst, Operand(mask)); | 1619 rotate += kBitsPerPointer; |
| 1398 } else { | |
| 1399 and_(dst, src, Operand(mask)); | |
| 1400 } | 1620 } |
| 1621 rlwinm(dst, src, rotate, |
| 1622 kBitsPerPointer - start - 1, |
| 1623 kBitsPerPointer - end - 1); |
| 1624 #endif |
| 1401 } | 1625 } |
| 1402 | 1626 |
| 1403 template<typename Field> | 1627 template<typename Field> |
| 1404 void DecodeFieldToSmi(Register reg) { | 1628 void DecodeFieldToSmi(Register reg) { |
| 1405 DecodeField<Field>(reg, reg); | 1629 DecodeFieldToSmi<Field>(reg, reg); |
| 1406 } | 1630 } |
| 1407 | 1631 |
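The 32-bit DecodeFieldToSmi folds the field decode and the smi tag into a single rlwinm: rotating by (kSmiShift - kShift) lines the field up at bit kSmiShift, and the mask bounds keep exactly kSize bits there, so the result equals ((src >> kShift) & field_mask) << kSmiShift. A standalone check of that equivalence (plain C++; the field layout is hypothetical):

#include <cassert>
#include <cstdint>

static uint32_t RotateLeft32(uint32_t v, int r) {
  return (r == 0) ? v : (v << r) | (v >> (32 - r));
}

// Rotate-and-mask form of the 32-bit DecodeFieldToSmi, checked against the
// straightforward shift/mask/shift formulation.
static uint32_t DecodeFieldToSmi32(uint32_t src, int shift, int size,
                                   int smi_shift) {
  int rotate = smi_shift - shift;
  if (rotate < 0) rotate += 32;
  uint32_t kept_bits = ((1u << size) - 1) << smi_shift;  // bits kept by rlwinm
  return RotateLeft32(src, rotate) & kept_bits;
}

int main() {
  const int kShift = 4, kSize = 3, kSmiShift = 1;  // hypothetical layout
  uint32_t src = (5u << kShift) | 0x80000001u;     // field value 5 plus noise
  uint32_t expected = ((src >> kShift) & ((1u << kSize) - 1)) << kSmiShift;
  assert(DecodeFieldToSmi32(src, kShift, kSize, kSmiShift) == expected);
  assert(expected == (5u << kSmiShift));
  return 0;
}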
| 1408 // Activation support. | 1632 // Activation support. |
| 1409 void EnterFrame(StackFrame::Type type, bool load_constant_pool = false); | 1633 void EnterFrame(StackFrame::Type type, bool load_constant_pool = false); |
| 1410 // Returns the pc offset at which the frame ends. | 1634 // Returns the pc offset at which the frame ends. |
| 1411 int LeaveFrame(StackFrame::Type type); | 1635 int LeaveFrame(StackFrame::Type type); |
| 1412 | 1636 |
| 1413 // Expects object in r0 and returns map with validated enum cache | 1637 // Expects object in r0 and returns map with validated enum cache |
| 1414 // in r0. Assumes that any other register can be used as a scratch. | 1638 // in r0. Assumes that any other register can be used as a scratch. |
| 1415 void CheckEnumCache(Register null_value, Label* call_runtime); | 1639 void CheckEnumCache(Register null_value, Label* call_runtime); |
| 1416 | 1640 |
| 1417 // AllocationMemento support. Arrays may have an associated | 1641 // AllocationMemento support. Arrays may have an associated |
| 1418 // AllocationMemento object that can be checked for in order to pretransition | 1642 // AllocationMemento object that can be checked for in order to pretransition |
| 1419 // to another type. | 1643 // to another type. |
| 1420 // On entry, receiver_reg should point to the array object. | 1644 // On entry, receiver_reg should point to the array object. |
| 1421 // scratch_reg gets clobbered. | 1645 // scratch_reg gets clobbered. |
| 1422 // If allocation info is present, condition flags are set to eq. | 1646 // If allocation info is present, condition flags are set to eq. |
| 1423 void TestJSArrayForAllocationMemento(Register receiver_reg, | 1647 void TestJSArrayForAllocationMemento(Register receiver_reg, |
| 1424 Register scratch_reg, | 1648 Register scratch_reg, |
| 1425 Label* no_memento_found); | 1649 Label* no_memento_found); |
| 1426 | 1650 |
| 1427 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, | 1651 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, |
| 1428 Register scratch_reg, | 1652 Register scratch_reg, |
| 1429 Label* memento_found) { | 1653 Label* memento_found) { |
| 1430 Label no_memento_found; | 1654 Label no_memento_found; |
| 1431 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, | 1655 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, |
| 1432 &no_memento_found); | 1656 &no_memento_found); |
| 1433 b(eq, memento_found); | 1657 beq(memento_found); |
| 1434 bind(&no_memento_found); | 1658 bind(&no_memento_found); |
| 1435 } | 1659 } |
| 1436 | 1660 |
| 1437 // Jumps to found label if a prototype map has dictionary elements. | 1661 // Jumps to found label if a prototype map has dictionary elements. |
| 1438 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, | 1662 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, |
| 1439 Register scratch1, Label* found); | 1663 Register scratch1, Label* found); |
| 1440 | 1664 |
| 1441 private: | 1665 private: |
| 1666 static const int kSmiShift = kSmiTagSize + kSmiShiftSize; |
| 1667 |
| 1442 void CallCFunctionHelper(Register function, | 1668 void CallCFunctionHelper(Register function, |
| 1443 int num_reg_arguments, | 1669 int num_reg_arguments, |
| 1444 int num_double_arguments); | 1670 int num_double_arguments); |
| 1445 | 1671 |
| 1446 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); | 1672 void Jump(intptr_t target, RelocInfo::Mode rmode, |
| 1673 Condition cond = al, CRegister cr = cr7); |
| 1447 | 1674 |
| 1448 // Helper functions for generating invokes. | 1675 // Helper functions for generating invokes. |
| 1449 void InvokePrologue(const ParameterCount& expected, | 1676 void InvokePrologue(const ParameterCount& expected, |
| 1450 const ParameterCount& actual, | 1677 const ParameterCount& actual, |
| 1451 Handle<Code> code_constant, | 1678 Handle<Code> code_constant, |
| 1452 Register code_reg, | 1679 Register code_reg, |
| 1453 Label* done, | 1680 Label* done, |
| 1454 bool* definitely_mismatches, | 1681 bool* definitely_mismatches, |
| 1455 InvokeFlag flag, | 1682 InvokeFlag flag, |
| 1456 const CallWrapper& call_wrapper); | 1683 const CallWrapper& call_wrapper); |
| (...skipping 19 matching lines...) |
| 1476 | 1703 |
| 1477 // Helper for throwing exceptions. Compute a handler address and jump to | 1704 // Helper for throwing exceptions. Compute a handler address and jump to |
| 1478 // it. See the implementation for register usage. | 1705 // it. See the implementation for register usage. |
| 1479 void JumpToHandlerEntry(); | 1706 void JumpToHandlerEntry(); |
| 1480 | 1707 |
| 1481 // Compute memory operands for safepoint stack slots. | 1708 // Compute memory operands for safepoint stack slots. |
| 1482 static int SafepointRegisterStackIndex(int reg_code); | 1709 static int SafepointRegisterStackIndex(int reg_code); |
| 1483 MemOperand SafepointRegisterSlot(Register reg); | 1710 MemOperand SafepointRegisterSlot(Register reg); |
| 1484 MemOperand SafepointRegistersAndDoublesSlot(Register reg); | 1711 MemOperand SafepointRegistersAndDoublesSlot(Register reg); |
| 1485 | 1712 |
| 1486 // Loads the constant pool pointer (pp) register. | 1713 #if V8_OOL_CONSTANT_POOL |
| 1714 // Loads the constant pool pointer (kConstantPoolRegister). |
| 1487 void LoadConstantPoolPointerRegister(); | 1715 void LoadConstantPoolPointerRegister(); |
| 1716 #endif |
| 1488 | 1717 |
| 1489 bool generating_stub_; | 1718 bool generating_stub_; |
| 1490 bool has_frame_; | 1719 bool has_frame_; |
| 1491 // This handle will be patched with the code object on installation. | 1720 // This handle will be patched with the code object on installation. |
| 1492 Handle<Object> code_object_; | 1721 Handle<Object> code_object_; |
| 1493 | 1722 |
| 1494 // Needs access to SafepointRegisterStackIndex for compiled frame | 1723 // Needs access to SafepointRegisterStackIndex for compiled frame |
| 1495 // traversal. | 1724 // traversal. |
| 1496 friend class StandardFrame; | 1725 friend class StandardFrame; |
| 1497 }; | 1726 }; |
| (...skipping 15 matching lines...) |
| 1513 int instructions, | 1742 int instructions, |
| 1514 FlushICache flush_cache = FLUSH); | 1743 FlushICache flush_cache = FLUSH); |
| 1515 virtual ~CodePatcher(); | 1744 virtual ~CodePatcher(); |
| 1516 | 1745 |
| 1517 // Macro assembler to emit code. | 1746 // Macro assembler to emit code. |
| 1518 MacroAssembler* masm() { return &masm_; } | 1747 MacroAssembler* masm() { return &masm_; } |
| 1519 | 1748 |
| 1520 // Emit an instruction directly. | 1749 // Emit an instruction directly. |
| 1521 void Emit(Instr instr); | 1750 void Emit(Instr instr); |
| 1522 | 1751 |
| 1523 // Emit an address directly. | |
| 1524 void Emit(Address addr); | |
| 1525 | |
| 1526 // Emit the condition part of an instruction leaving the rest of the current | 1752 // Emit the condition part of an instruction leaving the rest of the current |
| 1527 // instruction unchanged. | 1753 // instruction unchanged. |
| 1528 void EmitCondition(Condition cond); | 1754 void EmitCondition(Condition cond); |
| 1529 | 1755 |
| 1530 private: | 1756 private: |
| 1531 byte* address_; // The address of the code being patched. | 1757 byte* address_; // The address of the code being patched. |
| 1532 int size_; // Number of bytes of the expected patch size. | 1758 int size_; // Number of bytes of the expected patch size. |
| 1533 MacroAssembler masm_; // Macro assembler used to generate the code. | 1759 MacroAssembler masm_; // Macro assembler used to generate the code. |
| 1534 FlushICache flush_cache_; // Whether to flush the I cache after patching. | 1760 FlushICache flush_cache_; // Whether to flush the I cache after patching. |
| 1535 }; | 1761 }; |
| 1536 | 1762 |
| 1537 | 1763 |
| 1764 #if V8_OOL_CONSTANT_POOL |
| 1538 class FrameAndConstantPoolScope { | 1765 class FrameAndConstantPoolScope { |
| 1539 public: | 1766 public: |
| 1540 FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type) | 1767 FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type) |
| 1541 : masm_(masm), | 1768 : masm_(masm), |
| 1542 type_(type), | 1769 type_(type), |
| 1543 old_has_frame_(masm->has_frame()), | 1770 old_has_frame_(masm->has_frame()), |
| 1544 old_constant_pool_available_(masm->is_constant_pool_available()) { | 1771 old_constant_pool_available_(masm->is_constant_pool_available()) { |
| 1545 // We only want to enable constant pool access for non-manual frame scopes | 1772 // We only want to enable constant pool access for non-manual frame scopes |
| 1546 // to ensure the constant pool pointer is valid throughout the scope. | 1773 // to ensure the constant pool pointer is valid throughout the scope. |
| 1547 ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE); | 1774 ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE); |
| (...skipping 19 matching lines...) |
| 1567 } | 1794 } |
| 1568 | 1795 |
| 1569 private: | 1796 private: |
| 1570 MacroAssembler* masm_; | 1797 MacroAssembler* masm_; |
| 1571 StackFrame::Type type_; | 1798 StackFrame::Type type_; |
| 1572 bool old_has_frame_; | 1799 bool old_has_frame_; |
| 1573 bool old_constant_pool_available_; | 1800 bool old_constant_pool_available_; |
| 1574 | 1801 |
| 1575 DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope); | 1802 DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope); |
| 1576 }; | 1803 }; |
| 1804 #else |
| 1805 #define FrameAndConstantPoolScope FrameScope |
| 1806 #endif |
| 1577 | 1807 |
| 1578 | 1808 |
| 1809 #if V8_OOL_CONSTANT_POOL |
| 1579 // Class for scoping the unavailability of constant pool access. | 1810 // Class for scoping the unavailability of constant pool access. |
| 1580 class ConstantPoolUnavailableScope { | 1811 class ConstantPoolUnavailableScope { |
| 1581 public: | 1812 public: |
| 1582 explicit ConstantPoolUnavailableScope(MacroAssembler* masm) | 1813 explicit ConstantPoolUnavailableScope(MacroAssembler* masm) |
| 1583 : masm_(masm), | 1814 : masm_(masm), |
| 1584 old_constant_pool_available_(masm->is_constant_pool_available()) { | 1815 old_constant_pool_available_(masm->is_constant_pool_available()) { |
| 1585 if (FLAG_enable_ool_constant_pool) { | 1816 if (FLAG_enable_ool_constant_pool) { |
| 1586 masm_->set_constant_pool_available(false); | 1817 masm_->set_constant_pool_available(false); |
| 1587 } | 1818 } |
| 1588 } | 1819 } |
| 1589 ~ConstantPoolUnavailableScope() { | 1820 ~ConstantPoolUnavailableScope() { |
| 1590 if (FLAG_enable_ool_constant_pool) { | 1821 if (FLAG_enable_ool_constant_pool) { |
| 1591 masm_->set_constant_pool_available(old_constant_pool_available_); | 1822 masm_->set_constant_pool_available(old_constant_pool_available_); |
| 1592 } | 1823 } |
| 1593 } | 1824 } |
| 1594 | 1825 |
| 1595 private: | 1826 private: |
| 1596 MacroAssembler* masm_; | 1827 MacroAssembler* masm_; |
| 1597 int old_constant_pool_available_; | 1828 int old_constant_pool_available_; |
| 1598 | 1829 |
| 1599 DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope); | 1830 DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope); |
| 1600 }; | 1831 }; |
| 1832 #endif |
| 1601 | 1833 |
| 1602 | 1834 |
| 1603 // ----------------------------------------------------------------------------- | 1835 // ----------------------------------------------------------------------------- |
| 1604 // Static helper functions. | 1836 // Static helper functions. |
| 1605 | 1837 |
| 1606 inline MemOperand ContextOperand(Register context, int index) { | 1838 inline MemOperand ContextOperand(Register context, int index) { |
| 1607 return MemOperand(context, Context::SlotOffset(index)); | 1839 return MemOperand(context, Context::SlotOffset(index)); |
| 1608 } | 1840 } |
| 1609 | 1841 |
| 1610 | 1842 |
| 1611 inline MemOperand GlobalObjectOperand() { | 1843 inline MemOperand GlobalObjectOperand() { |
| 1612 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX); | 1844 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX); |
| 1613 } | 1845 } |
| 1614 | 1846 |
| 1615 | 1847 |
| 1616 #ifdef GENERATED_CODE_COVERAGE | 1848 #ifdef GENERATED_CODE_COVERAGE |
| 1617 #define CODE_COVERAGE_STRINGIFY(x) #x | 1849 #define CODE_COVERAGE_STRINGIFY(x) #x |
| 1618 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) | 1850 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) |
| 1619 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) | 1851 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) |
| 1620 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> | 1852 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> |
| 1621 #else | 1853 #else |
| 1622 #define ACCESS_MASM(masm) masm-> | 1854 #define ACCESS_MASM(masm) masm-> |
| 1623 #endif | 1855 #endif |
| 1624 | 1856 |
| 1625 | 1857 |
| 1626 } } // namespace v8::internal | 1858 } } // namespace v8::internal |
| 1627 | 1859 |
| 1628 #endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_ | 1860 #endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_ |