Chromium Code Reviews

Side by Side Diff: src/ppc/macro-assembler-ppc.h

Issue 422063005: Contribution of PowerPC port. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Caught up to bleeding edge (8/15) Created 6 years, 4 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 //
3 // Copyright IBM Corp. 2012, 2013. All rights reserved.
4 //
2 // Use of this source code is governed by a BSD-style license that can be 5 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 6 // found in the LICENSE file.
4 7
5 #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_ 8 #ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
6 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_ 9 #define V8_PPC_MACRO_ASSEMBLER_PPC_H_
7 10
8 #include "src/assembler.h" 11 #include "src/assembler.h"
9 #include "src/frames.h" 12 #include "src/frames.h"
10 #include "src/globals.h" 13 #include "src/globals.h"
11 14
12 namespace v8 { 15 namespace v8 {
13 namespace internal { 16 namespace internal {
14 17
15 // ---------------------------------------------------------------------------- 18 // ----------------------------------------------------------------------------
16 // Static helper functions 19 // Static helper functions
17 20
18 // Generate a MemOperand for loading a field from an object. 21 // Generate a MemOperand for loading a field from an object.
19 inline MemOperand FieldMemOperand(Register object, int offset) { 22 inline MemOperand FieldMemOperand(Register object, int offset) {
20 return MemOperand(object, offset - kHeapObjectTag); 23 return MemOperand(object, offset - kHeapObjectTag);
21 } 24 }
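For illustration only (not in the patch): a typical use of FieldMemOperand, assuming the usual `#define __ masm->` convention and arbitrary registers r3/r4.

    // Load the map of the tagged object in r3; FieldMemOperand subtracts
    // kHeapObjectTag so the access hits the untagged address.
    __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));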
22 25
23 26
24 // Give alias names to registers
25 const Register cp = { kRegister_r7_Code }; // JavaScript context pointer.
26 const Register pp = { kRegister_r8_Code }; // Constant pool pointer.
27 const Register kRootRegister = { kRegister_r10_Code }; // Roots array pointer.
28
29 // Flags used for AllocateHeapNumber 27 // Flags used for AllocateHeapNumber
30 enum TaggingMode { 28 enum TaggingMode {
31 // Tag the result. 29 // Tag the result.
32 TAG_RESULT, 30 TAG_RESULT,
33 // Don't tag 31 // Don't tag
34 DONT_TAG_RESULT 32 DONT_TAG_RESULT
35 }; 33 };
36 34
37 35
38 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET }; 36 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
(...skipping 17 matching lines...)
56 bool AreAliased(Register reg1, 54 bool AreAliased(Register reg1,
57 Register reg2, 55 Register reg2,
58 Register reg3 = no_reg, 56 Register reg3 = no_reg,
59 Register reg4 = no_reg, 57 Register reg4 = no_reg,
60 Register reg5 = no_reg, 58 Register reg5 = no_reg,
61 Register reg6 = no_reg, 59 Register reg6 = no_reg,
62 Register reg7 = no_reg, 60 Register reg7 = no_reg,
63 Register reg8 = no_reg); 61 Register reg8 = no_reg);
64 #endif 62 #endif
65 63
 64 // These exist to provide portability between 32-bit and 64-bit
65 #if V8_TARGET_ARCH_PPC64
66 #define LoadPU ldu
67 #define LoadPX ldx
68 #define LoadPUX ldux
69 #define StorePU stdu
70 #define StorePX stdx
71 #define StorePUX stdux
72 #define ShiftLeftImm sldi
73 #define ShiftRightImm srdi
74 #define ClearLeftImm clrldi
75 #define ClearRightImm clrrdi
76 #define ShiftRightArithImm sradi
77 #define ShiftLeft sld
78 #define ShiftRight srd
79 #define ShiftRightArith srad
80 #define Mul mulld
81 #define Div divd
82 #else
83 #define LoadPU lwzu
84 #define LoadPX lwzx
85 #define LoadPUX lwzux
86 #define StorePU stwu
87 #define StorePX stwx
88 #define StorePUX stwux
89 #define ShiftLeftImm slwi
90 #define ShiftRightImm srwi
91 #define ClearLeftImm clrlwi
92 #define ClearRightImm clrrwi
93 #define ShiftRightArithImm srawi
94 #define ShiftLeft slw
95 #define ShiftRight srw
96 #define ShiftRightArith sraw
97 #define Mul mullw
98 #define Div divw
99 #endif
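A sketch of what these macros buy (illustrative, not in the patch; `__ masm->` convention assumed): one source sequence emits doubleword forms on PPC64 and word forms on 32-bit PPC.

    // Push r3: expands to stdu on PPC64, stwu on 32-bit PPC.
    __ StorePU(r3, MemOperand(sp, -kPointerSize));
    // Scale an index by the pointer size: sldi on PPC64, slwi on 32-bit PPC.
    __ ShiftLeftImm(r4, r5, Operand(kPointerSizeLog2));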
66 100
67 enum TargetAddressStorageMode {
68 CAN_INLINE_TARGET_ADDRESS,
69 NEVER_INLINE_TARGET_ADDRESS
70 };
71 101
72 // MacroAssembler implements a collection of frequently used macros. 102 // MacroAssembler implements a collection of frequently used macros.
73 class MacroAssembler: public Assembler { 103 class MacroAssembler: public Assembler {
74 public: 104 public:
75 // The isolate parameter can be NULL if the macro assembler should 105 // The isolate parameter can be NULL if the macro assembler should
76 // not use isolate-dependent functionality. In this case, it's the 106 // not use isolate-dependent functionality. In this case, it's the
77 // responsibility of the caller to never invoke such function on the 107 // responsibility of the caller to never invoke such function on the
78 // macro assembler. 108 // macro assembler.
79 MacroAssembler(Isolate* isolate, void* buffer, int size); 109 MacroAssembler(Isolate* isolate, void* buffer, int size);
80 110
81 111
82 // Returns the size of a call in instructions. Note, the value returned is 112 // Returns the size of a call in instructions. Note, the value returned is
83 // only valid as long as no entries are added to the constant pool between 113 // only valid as long as no entries are added to the constant pool between
84 // checking the call size and emitting the actual call. 114 // checking the call size and emitting the actual call.
85 static int CallSize(Register target, Condition cond = al); 115 static int CallSize(Register target, Condition cond = al);
86 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); 116 int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
87 int CallStubSize(CodeStub* stub, 117 static int CallSizeNotPredictableCodeSize(Address target,
88 TypeFeedbackId ast_id = TypeFeedbackId::None(),
89 Condition cond = al);
90 static int CallSizeNotPredictableCodeSize(Isolate* isolate,
91 Address target,
92 RelocInfo::Mode rmode, 118 RelocInfo::Mode rmode,
93 Condition cond = al); 119 Condition cond = al);
94 120
95 // Jump, Call, and Ret pseudo instructions implementing inter-working. 121 // Jump, Call, and Ret pseudo instructions implementing inter-working.
96 void Jump(Register target, Condition cond = al); 122 void Jump(Register target, Condition cond = al);
97 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); 123 void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
124 CRegister cr = cr7);
98 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); 125 void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
99 void Call(Register target, Condition cond = al); 126 void Call(Register target, Condition cond = al);
100 void Call(Address target, RelocInfo::Mode rmode, 127 void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
101 Condition cond = al,
102 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
103 int CallSize(Handle<Code> code, 128 int CallSize(Handle<Code> code,
104 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, 129 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
105 TypeFeedbackId ast_id = TypeFeedbackId::None(), 130 TypeFeedbackId ast_id = TypeFeedbackId::None(),
106 Condition cond = al); 131 Condition cond = al);
107 void Call(Handle<Code> code, 132 void Call(Handle<Code> code,
108 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET, 133 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
109 TypeFeedbackId ast_id = TypeFeedbackId::None(), 134 TypeFeedbackId ast_id = TypeFeedbackId::None(),
110 Condition cond = al, 135 Condition cond = al);
111 TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
112 void Ret(Condition cond = al); 136 void Ret(Condition cond = al);
113 137
114 // Emit code to discard a non-negative number of pointer-sized elements 138 // Emit code to discard a non-negative number of pointer-sized elements
115 // from the stack, clobbering only the sp register. 139 // from the stack, clobbering only the sp register.
116 void Drop(int count, Condition cond = al); 140 void Drop(int count, Condition cond = al);
117 141
118 void Ret(int drop, Condition cond = al); 142 void Ret(int drop, Condition cond = al);
119 143
120 // Swap two registers. If the scratch register is omitted then a slightly 144 void Call(Label* target);
121 // less efficient form using xor instead of mov is emitted.
122 void Swap(Register reg1,
123 Register reg2,
124 Register scratch = no_reg,
125 Condition cond = al);
126 145
127 void Mls(Register dst, Register src1, Register src2, Register srcA, 146 // Emit call to the code we are currently generating.
128 Condition cond = al); 147 void CallSelf() {
129 void And(Register dst, Register src1, const Operand& src2, 148 Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
130 Condition cond = al); 149 Call(self, RelocInfo::CODE_TARGET);
131 void Ubfx(Register dst, Register src, int lsb, int width, 150 }
132 Condition cond = al);
133 void Sbfx(Register dst, Register src, int lsb, int width,
134 Condition cond = al);
135 // The scratch register is not used for ARMv7.
136 // scratch can be the same register as src (in which case it is trashed), but
137 // not the same as dst.
138 void Bfi(Register dst,
139 Register src,
140 Register scratch,
141 int lsb,
142 int width,
143 Condition cond = al);
144 void Bfc(Register dst, Register src, int lsb, int width, Condition cond = al);
145 void Usat(Register dst, int satpos, const Operand& src,
146 Condition cond = al);
147
148 void Call(Label* target);
149 void Push(Register src) { push(src); }
150 void Pop(Register dst) { pop(dst); }
151 151
152 // Register move. May do nothing if the registers are identical. 152 // Register move. May do nothing if the registers are identical.
153 void Move(Register dst, Handle<Object> value); 153 void Move(Register dst, Handle<Object> value);
154 void Move(Register dst, Register src, Condition cond = al); 154 void Move(Register dst, Register src, Condition cond = al);
155 void Move(Register dst, const Operand& src, Condition cond = al) { 155 void Move(DoubleRegister dst, DoubleRegister src);
156 if (!src.is_reg() || !src.rm().is(dst)) mov(dst, src, LeaveCC, cond);
157 }
158 void Move(DwVfpRegister dst, DwVfpRegister src);
159 156
160 void Load(Register dst, const MemOperand& src, Representation r); 157 void MultiPush(RegList regs);
161 void Store(Register src, const MemOperand& dst, Representation r); 158 void MultiPop(RegList regs);
162 159
163 // Load an object from the root table. 160 // Load an object from the root table.
164 void LoadRoot(Register destination, 161 void LoadRoot(Register destination,
165 Heap::RootListIndex index, 162 Heap::RootListIndex index,
166 Condition cond = al); 163 Condition cond = al);
167 // Store an object to the root table. 164 // Store an object to the root table.
168 void StoreRoot(Register source, 165 void StoreRoot(Register source,
169 Heap::RootListIndex index, 166 Heap::RootListIndex index,
170 Condition cond = al); 167 Condition cond = al);
171 168
(...skipping 129 matching lines...)
301 Register object, 298 Register object,
302 Register address, 299 Register address,
303 Register value, 300 Register value,
304 LinkRegisterStatus lr_status, 301 LinkRegisterStatus lr_status,
305 SaveFPRegsMode save_fp, 302 SaveFPRegsMode save_fp,
306 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET, 303 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
307 SmiCheck smi_check = INLINE_SMI_CHECK, 304 SmiCheck smi_check = INLINE_SMI_CHECK,
308 PointersToHereCheck pointers_to_here_check_for_value = 305 PointersToHereCheck pointers_to_here_check_for_value =
309 kPointersToHereMaybeInteresting); 306 kPointersToHereMaybeInteresting);
310 307
308 void Push(Register src) { push(src); }
309
311 // Push a handle. 310 // Push a handle.
312 void Push(Handle<Object> handle); 311 void Push(Handle<Object> handle);
313 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); } 312 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
314 313
315 // Push two registers. Pushes leftmost register first (to highest address). 314 // Push two registers. Pushes leftmost register first (to highest address).
316 void Push(Register src1, Register src2, Condition cond = al) { 315 void Push(Register src1, Register src2) {
317 DCHECK(!src1.is(src2)); 316 StorePU(src1, MemOperand(sp, -kPointerSize));
318 if (src1.code() > src2.code()) { 317 StorePU(src2, MemOperand(sp, -kPointerSize));
319 stm(db_w, sp, src1.bit() | src2.bit(), cond);
320 } else {
321 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
322 str(src2, MemOperand(sp, 4, NegPreIndex), cond);
323 }
324 } 318 }
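Illustration of the push ordering (not in the patch; registers arbitrary): the leftmost argument ends up at the higher address.

    __ Push(r3, r4);   // StorePU(r3, ...) then StorePU(r4, ...):
                       // r3 at sp + kPointerSize, r4 at sp (top of stack).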
325 319
326 // Push three registers. Pushes leftmost register first (to highest address). 320 // Push three registers. Pushes leftmost register first (to highest address).
327 void Push(Register src1, Register src2, Register src3, Condition cond = al) { 321 void Push(Register src1, Register src2, Register src3) {
328 DCHECK(!src1.is(src2)); 322 StorePU(src1, MemOperand(sp, -kPointerSize));
329 DCHECK(!src2.is(src3)); 323 StorePU(src2, MemOperand(sp, -kPointerSize));
330 DCHECK(!src1.is(src3)); 324 StorePU(src3, MemOperand(sp, -kPointerSize));
331 if (src1.code() > src2.code()) {
332 if (src2.code() > src3.code()) {
333 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
334 } else {
335 stm(db_w, sp, src1.bit() | src2.bit(), cond);
336 str(src3, MemOperand(sp, 4, NegPreIndex), cond);
337 }
338 } else {
339 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
340 Push(src2, src3, cond);
341 }
342 } 325 }
343 326
344 // Push four registers. Pushes leftmost register first (to highest address). 327 // Push four registers. Pushes leftmost register first (to highest address).
345 void Push(Register src1, 328 void Push(Register src1,
346 Register src2, 329 Register src2,
347 Register src3, 330 Register src3,
348 Register src4, 331 Register src4) {
349 Condition cond = al) { 332 StorePU(src1, MemOperand(sp, -kPointerSize));
350 DCHECK(!src1.is(src2)); 333 StorePU(src2, MemOperand(sp, -kPointerSize));
351 DCHECK(!src2.is(src3)); 334 StorePU(src3, MemOperand(sp, -kPointerSize));
352 DCHECK(!src1.is(src3)); 335 StorePU(src4, MemOperand(sp, -kPointerSize));
353 DCHECK(!src1.is(src4));
354 DCHECK(!src2.is(src4));
355 DCHECK(!src3.is(src4));
356 if (src1.code() > src2.code()) {
357 if (src2.code() > src3.code()) {
358 if (src3.code() > src4.code()) {
359 stm(db_w,
360 sp,
361 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
362 cond);
363 } else {
364 stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
365 str(src4, MemOperand(sp, 4, NegPreIndex), cond);
366 }
367 } else {
368 stm(db_w, sp, src1.bit() | src2.bit(), cond);
369 Push(src3, src4, cond);
370 }
371 } else {
372 str(src1, MemOperand(sp, 4, NegPreIndex), cond);
373 Push(src2, src3, src4, cond);
374 }
375 } 336 }
376 337
338 // Push five registers. Pushes leftmost register first (to highest address).
339 void Push(Register src1,
340 Register src2,
341 Register src3,
342 Register src4,
343 Register src5) {
344 StorePU(src1, MemOperand(sp, -kPointerSize));
345 StorePU(src2, MemOperand(sp, -kPointerSize));
346 StorePU(src3, MemOperand(sp, -kPointerSize));
347 StorePU(src4, MemOperand(sp, -kPointerSize));
348 StorePU(src5, MemOperand(sp, -kPointerSize));
349 }
350
351 void Pop(Register dst) { pop(dst); }
352
377 // Pop two registers. Pops rightmost register first (from lower address). 353 // Pop two registers. Pops rightmost register first (from lower address).
378 void Pop(Register src1, Register src2, Condition cond = al) { 354 void Pop(Register src1, Register src2) {
379 DCHECK(!src1.is(src2)); 355 LoadP(src2, MemOperand(sp, 0));
380 if (src1.code() > src2.code()) { 356 LoadP(src1, MemOperand(sp, kPointerSize));
381 ldm(ia_w, sp, src1.bit() | src2.bit(), cond); 357 addi(sp, sp, Operand(2 * kPointerSize));
382 } else {
383 ldr(src2, MemOperand(sp, 4, PostIndex), cond);
384 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
385 }
386 } 358 }
387 359
388 // Pop three registers. Pops rightmost register first (from lower address). 360 // Pop three registers. Pops rightmost register first (from lower address).
389 void Pop(Register src1, Register src2, Register src3, Condition cond = al) { 361 void Pop(Register src1, Register src2, Register src3) {
390 DCHECK(!src1.is(src2)); 362 LoadP(src3, MemOperand(sp, 0));
391 DCHECK(!src2.is(src3)); 363 LoadP(src2, MemOperand(sp, kPointerSize));
392 DCHECK(!src1.is(src3)); 364 LoadP(src1, MemOperand(sp, 2 * kPointerSize));
393 if (src1.code() > src2.code()) { 365 addi(sp, sp, Operand(3 * kPointerSize));
394 if (src2.code() > src3.code()) {
395 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
396 } else {
397 ldr(src3, MemOperand(sp, 4, PostIndex), cond);
398 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
399 }
400 } else {
401 Pop(src2, src3, cond);
402 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
403 }
404 } 366 }
405 367
406 // Pop four registers. Pops rightmost register first (from lower address). 368 // Pop four registers. Pops rightmost register first (from lower address).
407 void Pop(Register src1, 369 void Pop(Register src1,
408 Register src2, 370 Register src2,
409 Register src3, 371 Register src3,
410 Register src4, 372 Register src4) {
411 Condition cond = al) { 373 LoadP(src4, MemOperand(sp, 0));
412 DCHECK(!src1.is(src2)); 374 LoadP(src3, MemOperand(sp, kPointerSize));
413 DCHECK(!src2.is(src3)); 375 LoadP(src2, MemOperand(sp, 2 * kPointerSize));
414 DCHECK(!src1.is(src3)); 376 LoadP(src1, MemOperand(sp, 3 * kPointerSize));
415 DCHECK(!src1.is(src4)); 377 addi(sp, sp, Operand(4 * kPointerSize));
416 DCHECK(!src2.is(src4));
417 DCHECK(!src3.is(src4));
418 if (src1.code() > src2.code()) {
419 if (src2.code() > src3.code()) {
420 if (src3.code() > src4.code()) {
421 ldm(ia_w,
422 sp,
423 src1.bit() | src2.bit() | src3.bit() | src4.bit(),
424 cond);
425 } else {
426 ldr(src4, MemOperand(sp, 4, PostIndex), cond);
427 ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
428 }
429 } else {
430 Pop(src3, src4, cond);
431 ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
432 }
433 } else {
434 Pop(src2, src3, src4, cond);
435 ldr(src1, MemOperand(sp, 4, PostIndex), cond);
436 }
437 } 378 }
438 379
439 // Push a fixed frame, consisting of lr, fp, constant pool (if 380 // Pop five registers. Pops rightmost register first (from lower address).
440 // FLAG_enable_ool_constant_pool), context and JS function / marker id if 381 void Pop(Register src1,
441 // marker_reg is a valid register. 382 Register src2,
383 Register src3,
384 Register src4,
385 Register src5) {
386 LoadP(src5, MemOperand(sp, 0));
387 LoadP(src4, MemOperand(sp, kPointerSize));
388 LoadP(src3, MemOperand(sp, 2 * kPointerSize));
389 LoadP(src2, MemOperand(sp, 3 * kPointerSize));
390 LoadP(src1, MemOperand(sp, 4 * kPointerSize));
391 addi(sp, sp, Operand(5 * kPointerSize));
392 }
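Illustrative counterpart to the Push example above (not in the patch): each Pop overload reloads the registers and releases the stack space with a single addi.

    __ Push(r3, r4, r5);
    // ... code that may clobber r3-r5 ...
    __ Pop(r3, r4, r5);   // r5 read from sp, r3 from sp + 2 * kPointerSize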
393
394 // Push a fixed frame, consisting of lr, fp, context and
395 // JS function / marker id if marker_reg is a valid register.
442 void PushFixedFrame(Register marker_reg = no_reg); 396 void PushFixedFrame(Register marker_reg = no_reg);
443 void PopFixedFrame(Register marker_reg = no_reg); 397 void PopFixedFrame(Register marker_reg = no_reg);
444 398
445 // Push and pop the registers that can hold pointers, as defined by the 399 // Push and pop the registers that can hold pointers, as defined by the
446 // RegList constant kSafepointSavedRegisters. 400 // RegList constant kSafepointSavedRegisters.
447 void PushSafepointRegisters(); 401 void PushSafepointRegisters();
448 void PopSafepointRegisters(); 402 void PopSafepointRegisters();
449 // Store value in register src in the safepoint stack slot for 403 // Store value in register src in the safepoint stack slot for
450 // register dst. 404 // register dst.
451 void StoreToSafepointRegisterSlot(Register src, Register dst); 405 void StoreToSafepointRegisterSlot(Register src, Register dst);
452 // Load the value of the src register from its safepoint stack slot 406 // Load the value of the src register from its safepoint stack slot
453 // into register dst. 407 // into register dst.
454 void LoadFromSafepointRegisterSlot(Register dst, Register src); 408 void LoadFromSafepointRegisterSlot(Register dst, Register src);
455 409
456 // Load two consecutive registers with two consecutive memory locations. 410 // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
457 void Ldrd(Register dst1, 411 // from C.
458 Register dst2, 412 // Does not handle errors.
459 const MemOperand& src, 413 void FlushICache(Register address, size_t size,
460 Condition cond = al); 414 Register scratch);
461
462 // Store two consecutive registers to two consecutive memory locations.
463 void Strd(Register src1,
464 Register src2,
465 const MemOperand& dst,
466 Condition cond = al);
467
468 // Ensure that FPSCR contains values needed by JavaScript.
469 // We need the NaNModeControlBit to be sure that operations like
470 // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
471 // In VFP3 it will be always the Canonical NaN.
472 // In VFP2 it will be either the Canonical NaN or the negative version
473 // of the Canonical NaN. It doesn't matter if we have two values. The aim
474 // is to be sure to never generate the hole NaN.
475 void VFPEnsureFPSCRState(Register scratch);
476 415
 477 // If the value is a NaN, canonicalize the value; otherwise, do nothing. 416 // If the value is a NaN, canonicalize the value; otherwise, do nothing.
478 void VFPCanonicalizeNaN(const DwVfpRegister dst, 417 void CanonicalizeNaN(const DoubleRegister dst,
479 const DwVfpRegister src, 418 const DoubleRegister src);
480 const Condition cond = al); 419 void CanonicalizeNaN(const DoubleRegister value) {
481 void VFPCanonicalizeNaN(const DwVfpRegister value, 420 CanonicalizeNaN(value, value);
482 const Condition cond = al) {
483 VFPCanonicalizeNaN(value, value, cond);
484 } 421 }
485 422
486 // Compare double values and move the result to the normal condition flags. 423 // Converts the integer (untagged smi) in |src| to a double, storing
487 void VFPCompareAndSetFlags(const DwVfpRegister src1, 424 // the result to |double_dst|
488 const DwVfpRegister src2, 425 void ConvertIntToDouble(Register src,
489 const Condition cond = al); 426 DoubleRegister double_dst);
490 void VFPCompareAndSetFlags(const DwVfpRegister src1,
491 const double src2,
492 const Condition cond = al);
493 427
494 // Compare double values and then load the fpscr flags to a register. 428 // Converts the unsigned integer (untagged smi) in |src| to
495 void VFPCompareAndLoadFlags(const DwVfpRegister src1, 429 // a double, storing the result to |double_dst|
496 const DwVfpRegister src2, 430 void ConvertUnsignedIntToDouble(Register src,
497 const Register fpscr_flags, 431 DoubleRegister double_dst);
498 const Condition cond = al);
499 void VFPCompareAndLoadFlags(const DwVfpRegister src1,
500 const double src2,
501 const Register fpscr_flags,
502 const Condition cond = al);
503 432
504 void Vmov(const DwVfpRegister dst, 433 // Converts the integer (untagged smi) in |src| to
505 const double imm, 434 // a float, storing the result in |dst|
 506 const Register scratch = no_reg); 435 // Warning: The value in |int_scratch| will be changed in the process!
436 void ConvertIntToFloat(const DoubleRegister dst,
437 const Register src,
438 const Register int_scratch);
507 439
508 void VmovHigh(Register dst, DwVfpRegister src); 440 // Converts the double_input to an integer. Note that, upon return,
509 void VmovHigh(DwVfpRegister dst, Register src); 441 // the contents of double_dst will also hold the fixed point representation.
510 void VmovLow(Register dst, DwVfpRegister src); 442 void ConvertDoubleToInt64(const DoubleRegister double_input,
511 void VmovLow(DwVfpRegister dst, Register src); 443 #if !V8_TARGET_ARCH_PPC64
512 444 const Register dst_hi,
513 // Loads the number from object into dst register. 445 #endif
514 // If |object| is neither smi nor heap number, |not_number| is jumped to 446 const Register dst,
515 // with |object| still intact. 447 const DoubleRegister double_dst,
516 void LoadNumber(Register object, 448 FPRoundingMode rounding_mode = kRoundToZero);
517 LowDwVfpRegister dst,
518 Register heap_number_map,
519 Register scratch,
520 Label* not_number);
521
522 // Loads the number from object into double_dst in the double format.
523 // Control will jump to not_int32 if the value cannot be exactly represented
524 // by a 32-bit integer.
 525 // Floating point values in the 32-bit integer range that are not exact integers
526 // won't be loaded.
527 void LoadNumberAsInt32Double(Register object,
528 DwVfpRegister double_dst,
529 Register heap_number_map,
530 Register scratch,
531 LowDwVfpRegister double_scratch,
532 Label* not_int32);
533
534 // Loads the number from object into dst as a 32-bit integer.
535 // Control will jump to not_int32 if the object cannot be exactly represented
536 // by a 32-bit integer.
 537 // Floating point values in the 32-bit integer range that are not exact integers
538 // won't be converted.
539 void LoadNumberAsInt32(Register object,
540 Register dst,
541 Register heap_number_map,
542 Register scratch,
543 DwVfpRegister double_scratch0,
544 LowDwVfpRegister double_scratch1,
545 Label* not_int32);
546 449
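A hedged sketch of calling ConvertDoubleToInt64 (declared above) on both word sizes; register choices are illustrative and the `__ masm->` convention is assumed.

    #if V8_TARGET_ARCH_PPC64
      __ ConvertDoubleToInt64(d1, r3, d2);      // result in r3, fixed-point copy left in d2
    #else
      __ ConvertDoubleToInt64(d1, r4, r3, d2);  // high word in r4, low word in r3
    #endif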
547 // Generates function and stub prologue code. 450 // Generates function and stub prologue code.
548 void StubPrologue(); 451 void StubPrologue();
549 void Prologue(bool code_pre_aging); 452 void Prologue(bool code_pre_aging);
550 453
551 // Enter exit frame. 454 // Enter exit frame.
552 // stack_space - extra stack space, used for alignment before call to C. 455 // stack_space - extra stack space, used for alignment before call to C.
553 void EnterExitFrame(bool save_doubles, int stack_space = 0); 456 void EnterExitFrame(bool save_doubles, int stack_space = 0);
554 457
555 // Leave the current exit frame. Expects the return value in r0. 458 // Leave the current exit frame. Expects the return value in r0.
(...skipping 26 matching lines...)
582 void LoadGlobalFunctionInitialMap(Register function, 485 void LoadGlobalFunctionInitialMap(Register function,
583 Register map, 486 Register map,
584 Register scratch); 487 Register scratch);
585 488
586 void InitializeRootRegister() { 489 void InitializeRootRegister() {
587 ExternalReference roots_array_start = 490 ExternalReference roots_array_start =
588 ExternalReference::roots_array_start(isolate()); 491 ExternalReference::roots_array_start(isolate());
589 mov(kRootRegister, Operand(roots_array_start)); 492 mov(kRootRegister, Operand(roots_array_start));
590 } 493 }
591 494
495 // ----------------------------------------------------------------
496 // new PPC macro-assembler interfaces that are slightly higher level
497 // than assembler-ppc and may generate variable length sequences
498
499 // load a literal signed int value <value> to GPR <dst>
500 void LoadIntLiteral(Register dst, int value);
501
502 // load an SMI value <value> to GPR <dst>
503 void LoadSmiLiteral(Register dst, Smi *smi);
504
505 // load a literal double value <value> to FPR <result>
506 void LoadDoubleLiteral(DoubleRegister result,
507 double value,
508 Register scratch);
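For illustration (not in the patch), with arbitrary target registers and r0 as scratch:

    __ LoadIntLiteral(r3, 1000);              // may expand to li or lis/ori
    __ LoadSmiLiteral(r4, Smi::FromInt(7));
    __ LoadDoubleLiteral(d1, 1.5, r0);        // scratch materializes the bit pattern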
509
510 void LoadWord(Register dst,
511 const MemOperand& mem,
512 Register scratch,
513 bool updateForm = false);
514
515 void LoadWordArith(Register dst,
516 const MemOperand& mem,
517 Register scratch = no_reg);
518
519 void StoreWord(Register src,
520 const MemOperand& mem,
521 Register scratch,
522 bool updateForm = false);
523
524 void LoadHalfWord(Register dst,
525 const MemOperand& mem,
526 Register scratch,
527 bool updateForm = false);
528
529 void StoreHalfWord(Register src,
530 const MemOperand& mem,
531 Register scratch,
532 bool updateForm = false);
533
534 void LoadByte(Register dst,
535 const MemOperand& mem,
536 Register scratch,
537 bool updateForm = false);
538
539 void StoreByte(Register src,
540 const MemOperand& mem,
541 Register scratch,
542 bool updateForm = false);
543
544 void LoadRepresentation(Register dst,
545 const MemOperand& mem,
546 Representation r,
547 Register scratch = no_reg);
548
549 void StoreRepresentation(Register src,
550 const MemOperand& mem,
551 Representation r,
552 Register scratch = no_reg);
553
554 // Move values between integer and floating point registers.
555 void MovIntToDouble(DoubleRegister dst,
556 Register src,
557 Register scratch);
558 void MovUnsignedIntToDouble(DoubleRegister dst,
559 Register src,
560 Register scratch);
561 void MovInt64ToDouble(DoubleRegister dst,
562 #if !V8_TARGET_ARCH_PPC64
563 Register src_hi,
564 #endif
565 Register src);
566 #if V8_TARGET_ARCH_PPC64
567 void MovInt64ComponentsToDouble(DoubleRegister dst,
568 Register src_hi,
569 Register src_lo,
570 Register scratch);
571 #endif
572 void MovDoubleLowToInt(Register dst,
573 DoubleRegister src);
574 void MovDoubleHighToInt(Register dst,
575 DoubleRegister src);
576 void MovDoubleToInt64(
577 #if !V8_TARGET_ARCH_PPC64
578 Register dst_hi,
579 #endif
580 Register dst,
581 DoubleRegister src);
582
583 void Add(Register dst, Register src, intptr_t value, Register scratch);
584 void Cmpi(Register src1, const Operand& src2, Register scratch,
585 CRegister cr = cr7);
586 void Cmpli(Register src1, const Operand& src2, Register scratch,
587 CRegister cr = cr7);
588 void Cmpwi(Register src1, const Operand& src2, Register scratch,
589 CRegister cr = cr7);
590 void Cmplwi(Register src1, const Operand& src2, Register scratch,
591 CRegister cr = cr7);
592 void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
593 void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
594 void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
595
596 void AddSmiLiteral(Register dst, Register src, Smi *smi, Register scratch);
597 void SubSmiLiteral(Register dst, Register src, Smi *smi, Register scratch);
598 void CmpSmiLiteral(Register src1, Smi *smi, Register scratch,
599 CRegister cr = cr7);
600 void CmplSmiLiteral(Register src1, Smi *smi, Register scratch,
601 CRegister cr = cr7);
602 void AndSmiLiteral(Register dst, Register src, Smi *smi, Register scratch,
603 RCBit rc = LeaveRC);
604
605 // Set new rounding mode RN to FPSCR
606 void SetRoundingMode(FPRoundingMode RN);
607
608 // reset rounding mode to default (kRoundToNearest)
609 void ResetRoundingMode();
610
 611 // These exist to provide portability between 32-bit and 64-bit
612 void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
613 void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
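Sketch (not in the patch): LoadP/StoreP give pointer-width-agnostic memory access, mirroring the instruction macros near the top of the file.

    __ LoadP(r4, MemOperand(r3, 0));               // ld on PPC64, lwz on 32-bit PPC
    __ StoreP(r4, MemOperand(sp, kPointerSize));   // std / stw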
614
592 // --------------------------------------------------------------------------- 615 // ---------------------------------------------------------------------------
593 // JavaScript invokes 616 // JavaScript invokes
594 617
595 // Invoke the JavaScript function code by either calling or jumping. 618 // Invoke the JavaScript function code by either calling or jumping.
596 void InvokeCode(Register code, 619 void InvokeCode(Register code,
597 const ParameterCount& expected, 620 const ParameterCount& expected,
598 const ParameterCount& actual, 621 const ParameterCount& actual,
599 InvokeFlag flag, 622 InvokeFlag flag,
600 const CallWrapper& call_wrapper); 623 const CallWrapper& call_wrapper);
601 624
(...skipping 179 matching lines...)
781 // space is full and a scavenge is needed. All registers are clobbered also 804 // space is full and a scavenge is needed. All registers are clobbered also
782 // when control continues at the gc_required label. 805 // when control continues at the gc_required label.
783 void AllocateHeapNumber(Register result, 806 void AllocateHeapNumber(Register result,
784 Register scratch1, 807 Register scratch1,
785 Register scratch2, 808 Register scratch2,
786 Register heap_number_map, 809 Register heap_number_map,
787 Label* gc_required, 810 Label* gc_required,
788 TaggingMode tagging_mode = TAG_RESULT, 811 TaggingMode tagging_mode = TAG_RESULT,
789 MutableMode mode = IMMUTABLE); 812 MutableMode mode = IMMUTABLE);
790 void AllocateHeapNumberWithValue(Register result, 813 void AllocateHeapNumberWithValue(Register result,
791 DwVfpRegister value, 814 DoubleRegister value,
792 Register scratch1, 815 Register scratch1,
793 Register scratch2, 816 Register scratch2,
794 Register heap_number_map, 817 Register heap_number_map,
795 Label* gc_required); 818 Label* gc_required);
796 819
797 // Copies a fixed number of fields of heap objects from src to dst. 820 // Copies a fixed number of fields of heap objects from src to dst.
798 void CopyFields(Register dst, 821 void CopyFields(Register dst, Register src, RegList temps, int field_count);
799 Register src,
800 LowDwVfpRegister double_scratch,
801 int field_count);
802 822
803 // Copies a number of bytes from src to dst. All registers are clobbered. On 823 // Copies a number of bytes from src to dst. All registers are clobbered. On
804 // exit src and dst will point to the place just after where the last byte was 824 // exit src and dst will point to the place just after where the last byte was
805 // read or written and length will be zero. 825 // read or written and length will be zero.
806 void CopyBytes(Register src, 826 void CopyBytes(Register src,
807 Register dst, 827 Register dst,
808 Register length, 828 Register length,
809 Register scratch); 829 Register scratch);
810 830
831 // Initialize fields with filler values. |count| fields starting at
832 // |start_offset| are overwritten with the value in |filler|. At the end the
833 // loop, |start_offset| points at the next uninitialized field. |count| is
834 // assumed to be non-zero.
835 void InitializeNFieldsWithFiller(Register start_offset,
836 Register count,
837 Register filler);
838
811 // Initialize fields with filler values. Fields starting at |start_offset| 839 // Initialize fields with filler values. Fields starting at |start_offset|
812 // not including end_offset are overwritten with the value in |filler|. At 840 // not including end_offset are overwritten with the value in |filler|. At
 813 // the end of the loop, |start_offset| takes the value of |end_offset|. 841 // the end of the loop, |start_offset| takes the value of |end_offset|.
814 void InitializeFieldsWithFiller(Register start_offset, 842 void InitializeFieldsWithFiller(Register start_offset,
815 Register end_offset, 843 Register end_offset,
816 Register filler); 844 Register filler);
817 845
818 // --------------------------------------------------------------------------- 846 // ---------------------------------------------------------------------------
819 // Support functions. 847 // Support functions.
820 848
(...skipping 56 matching lines...)
877 Register scratch, 905 Register scratch,
878 Label* fail); 906 Label* fail);
879 907
880 // Check to see if maybe_number can be stored as a double in 908 // Check to see if maybe_number can be stored as a double in
881 // FastDoubleElements. If it can, store it at the index specified by key in 909 // FastDoubleElements. If it can, store it at the index specified by key in
882 // the FastDoubleElements array elements. Otherwise jump to fail. 910 // the FastDoubleElements array elements. Otherwise jump to fail.
883 void StoreNumberToDoubleElements(Register value_reg, 911 void StoreNumberToDoubleElements(Register value_reg,
884 Register key_reg, 912 Register key_reg,
885 Register elements_reg, 913 Register elements_reg,
886 Register scratch1, 914 Register scratch1,
887 LowDwVfpRegister double_scratch, 915 DoubleRegister double_scratch,
888 Label* fail, 916 Label* fail,
889 int elements_offset = 0); 917 int elements_offset = 0);
890 918
891 // Compare an object's map with the specified map and its transitioned 919 // Compare an object's map with the specified map and its transitioned
892 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are 920 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
893 // set with result of map compare. If multiple map compares are required, the 921 // set with result of map compare. If multiple map compares are required, the
894 // compare sequences branches to early_success. 922 // compare sequences branches to early_success.
895 void CompareMap(Register obj, 923 void CompareMap(Register obj,
896 Register scratch, 924 Register scratch,
897 Handle<Map> map, 925 Handle<Map> map,
(...skipping 33 matching lines...)
931 SmiCheckType smi_check_type); 959 SmiCheckType smi_check_type);
932 960
933 961
934 // Compare the object in a register to a value from the root list. 962 // Compare the object in a register to a value from the root list.
935 // Uses the ip register as scratch. 963 // Uses the ip register as scratch.
936 void CompareRoot(Register obj, Heap::RootListIndex index); 964 void CompareRoot(Register obj, Heap::RootListIndex index);
937 965
938 966
939 // Load and check the instance type of an object for being a string. 967 // Load and check the instance type of an object for being a string.
940 // Loads the type into the second argument register. 968 // Loads the type into the second argument register.
941 // Returns a condition that will be enabled if the object was a string 969 // Returns a condition that will be enabled if the object was a string.
942 // and the passed-in condition passed. If the passed-in condition failed
943 // then flags remain unchanged.
944 Condition IsObjectStringType(Register obj, 970 Condition IsObjectStringType(Register obj,
945 Register type, 971 Register type) {
946 Condition cond = al) { 972 LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
947 ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond); 973 lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
948 ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond); 974 andi(r0, type, Operand(kIsNotStringMask));
949 tst(type, Operand(kIsNotStringMask), cond);
950 DCHECK_EQ(0, kStringTag); 975 DCHECK_EQ(0, kStringTag);
951 return eq; 976 return eq;
952 } 977 }
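Usage sketch (not in the patch; registers arbitrary): the andi. above records the result in cr0, so the caller branches on cr0 with the returned condition.

    Label not_string;
    __ IsObjectStringType(r3, r4);   // r4 receives the instance type
    __ bne(&not_string, cr0);        // returned condition (eq) holds when r3 is a string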
953 978
954 979
955 // Picks out an array index from the hash field. 980 // Picks out an array index from the hash field.
956 // Register use: 981 // Register use:
957 // hash - holds the index's hash. Clobbered. 982 // hash - holds the index's hash. Clobbered.
958 // index - holds the overwritten index on exit. 983 // index - holds the overwritten index on exit.
959 void IndexFromHash(Register hash, Register index); 984 void IndexFromHash(Register hash, Register index);
960 985
961 // Get the number of least significant bits from a register 986 // Get the number of least significant bits from a register
962 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits); 987 void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
 963 void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits); 988 void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
964 989
965 // Load the value of a smi object into a double register. 990 // Load the value of a smi object into a double register.
966 // The register value must be between d0 and d15. 991 void SmiToDouble(DoubleRegister value, Register smi);
967 void SmiToDouble(LowDwVfpRegister value, Register smi);
968 992
969 // Check if a double can be exactly represented as a signed 32-bit integer. 993 // Check if a double can be exactly represented as a signed 32-bit integer.
970 // Z flag set to one if true. 994 // CR_EQ in cr7 is set if true.
971 void TestDoubleIsInt32(DwVfpRegister double_input, 995 void TestDoubleIsInt32(DoubleRegister double_input,
972 LowDwVfpRegister double_scratch); 996 Register scratch1,
997 Register scratch2,
998 DoubleRegister double_scratch);
973 999
974 // Try to convert a double to a signed 32-bit integer. 1000 // Try to convert a double to a signed 32-bit integer.
975 // Z flag set to one and result assigned if the conversion is exact. 1001 // CR_EQ in cr7 is set and result assigned if the conversion is exact.
976 void TryDoubleToInt32Exact(Register result, 1002 void TryDoubleToInt32Exact(Register result,
977 DwVfpRegister double_input, 1003 DoubleRegister double_input,
978 LowDwVfpRegister double_scratch); 1004 Register scratch,
1005 DoubleRegister double_scratch);
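Sketch of the CR-based convention (not in the patch); scratch registers are illustrative, and the branch is assumed to default to cr7 as elsewhere in the port.

    Label not_int32;
    __ TestDoubleIsInt32(d1, r5, r6, d2);
    __ bne(&not_int32);   // CR_EQ in cr7 was set only for an exact signed 32-bit value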
979 1006
980 // Floor a double and writes the value to the result register. 1007 // Floor a double and writes the value to the result register.
981 // Go to exact if the conversion is exact (to be able to test -0), 1008 // Go to exact if the conversion is exact (to be able to test -0),
982 // fall through calling code if an overflow occurred, else go to done. 1009 // fall through calling code if an overflow occurred, else go to done.
983 // In return, input_high is loaded with high bits of input. 1010 // In return, input_high is loaded with high bits of input.
984 void TryInt32Floor(Register result, 1011 void TryInt32Floor(Register result,
985 DwVfpRegister double_input, 1012 DoubleRegister double_input,
986 Register input_high, 1013 Register input_high,
987 LowDwVfpRegister double_scratch, 1014 Register scratch,
1015 DoubleRegister double_scratch,
988 Label* done, 1016 Label* done,
989 Label* exact); 1017 Label* exact);
990 1018
991 // Performs a truncating conversion of a floating point number as used by 1019 // Performs a truncating conversion of a floating point number as used by
992 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it 1020 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
993 // succeeds, otherwise falls through if result is saturated. On return 1021 // succeeds, otherwise falls through if result is saturated. On return
994 // 'result' either holds answer, or is clobbered on fall through. 1022 // 'result' either holds answer, or is clobbered on fall through.
995 // 1023 //
996 // Only public for the test code in test-code-stubs-arm.cc. 1024 // Only public for the test code in test-code-stubs-arm.cc.
997 void TryInlineTruncateDoubleToI(Register result, 1025 void TryInlineTruncateDoubleToI(Register result,
998 DwVfpRegister input, 1026 DoubleRegister input,
999 Label* done); 1027 Label* done);
1000 1028
1001 // Performs a truncating conversion of a floating point number as used by 1029 // Performs a truncating conversion of a floating point number as used by
1002 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 1030 // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
1003 // Exits with 'result' holding the answer. 1031 // Exits with 'result' holding the answer.
1004 void TruncateDoubleToI(Register result, DwVfpRegister double_input); 1032 void TruncateDoubleToI(Register result, DoubleRegister double_input);
1005 1033
1006 // Performs a truncating conversion of a heap number as used by 1034 // Performs a truncating conversion of a heap number as used by
1007 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input' 1035 // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
1008 // must be different registers. Exits with 'result' holding the answer. 1036 // must be different registers. Exits with 'result' holding the answer.
1009 void TruncateHeapNumberToI(Register result, Register object); 1037 void TruncateHeapNumberToI(Register result, Register object);
1010 1038
1011 // Converts the smi or heap number in object to an int32 using the rules 1039 // Converts the smi or heap number in object to an int32 using the rules
1012 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated 1040 // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
1013 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be 1041 // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
1014 // different registers. 1042 // different registers.
1015 void TruncateNumberToI(Register object, 1043 void TruncateNumberToI(Register object,
1016 Register result, 1044 Register result,
1017 Register heap_number_map, 1045 Register heap_number_map,
1018 Register scratch1, 1046 Register scratch1,
1019 Label* not_int32); 1047 Label* not_int32);
1020 1048
1021 // Check whether d16-d31 are available on the CPU. The result is given by the 1049 // Overflow handling functions.
1022 // Z condition flag: Z==0 if d16-d31 available, Z==1 otherwise. 1050 // Usage: call the appropriate arithmetic function and then call one of the
1023 void CheckFor32DRegs(Register scratch); 1051 // flow control functions with the corresponding label.
1024 1052
1025 // Does a runtime check for 16/32 FP registers. Either way, pushes 32 double 1053 // Compute dst = left + right, setting condition codes. dst may be same as
1026 // values to location, saving [d0..(d15|d31)]. 1054 // either left or right (or a unique register). left and right must not be
1027 void SaveFPRegs(Register location, Register scratch); 1055 // the same register.
1056 void AddAndCheckForOverflow(Register dst,
1057 Register left,
1058 Register right,
1059 Register overflow_dst,
1060 Register scratch = r0);
1028 1061
1029 // Does a runtime check for 16/32 FP registers. Either way, pops 32 double 1062 // Compute dst = left - right, setting condition codes. dst may be same as
1030 // values to location, restoring [d0..(d15|d31)]. 1063 // either left or right (or a unique register). left and right must not be
1031 void RestoreFPRegs(Register location, Register scratch); 1064 // the same register.
1065 void SubAndCheckForOverflow(Register dst,
1066 Register left,
1067 Register right,
1068 Register overflow_dst,
1069 Register scratch = r0);
1070
1071 void BranchOnOverflow(Label* label) {
1072 blt(label, cr0);
1073 }
1074
1075 void BranchOnNoOverflow(Label* label) {
1076 bge(label, cr0);
1077 }
1078
1079 void RetOnOverflow(void) {
1080 Label label;
1081
1082 blt(&label, cr0);
1083 Ret();
1084 bind(&label);
1085 }
1086
1087 void RetOnNoOverflow(void) {
1088 Label label;
1089
1090 bge(&label, cr0);
1091 Ret();
1092 bind(&label);
1093 }
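Usage sketch for the overflow helpers (not in the patch); registers are illustrative and, per the comment above, left and right must be different registers.

    Label overflow;
    __ AddAndCheckForOverflow(r3, r4, r5, r6, r0);  // r3 = r4 + r5; indicator in r6, cr0 set
    __ BranchOnOverflow(&overflow);                 // blt on cr0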
1094
1095 // Pushes <count> double values to <location>, starting from d<first>.
1096 void SaveFPRegs(Register location, int first, int count);
1097
1098 // Pops <count> double values from <location>, starting from d<first>.
1099 void RestoreFPRegs(Register location, int first, int count);
1032 1100
1033 // --------------------------------------------------------------------------- 1101 // ---------------------------------------------------------------------------
1034 // Runtime calls 1102 // Runtime calls
1035 1103
1036 // Call a code stub. 1104 // Call a code stub.
1037 void CallStub(CodeStub* stub, 1105 void CallStub(CodeStub* stub,
1038 TypeFeedbackId ast_id = TypeFeedbackId::None(), 1106 TypeFeedbackId ast_id = TypeFeedbackId::None(),
1039 Condition cond = al); 1107 Condition cond = al);
1040 1108
1041 // Call a code stub. 1109 // Call a code stub.
(...skipping 47 matching lines...)
1089 void PrepareCallCFunction(int num_reg_arguments, 1157 void PrepareCallCFunction(int num_reg_arguments,
1090 int num_double_registers, 1158 int num_double_registers,
1091 Register scratch); 1159 Register scratch);
1092 void PrepareCallCFunction(int num_reg_arguments, 1160 void PrepareCallCFunction(int num_reg_arguments,
1093 Register scratch); 1161 Register scratch);
1094 1162
1095 // There are two ways of passing double arguments on ARM, depending on 1163 // There are two ways of passing double arguments on ARM, depending on
1096 // whether soft or hard floating point ABI is used. These functions 1164 // whether soft or hard floating point ABI is used. These functions
1097 // abstract parameter passing for the three different ways we call 1165 // abstract parameter passing for the three different ways we call
1098 // C functions from generated code. 1166 // C functions from generated code.
1099 void MovToFloatParameter(DwVfpRegister src); 1167 void MovToFloatParameter(DoubleRegister src);
1100 void MovToFloatParameters(DwVfpRegister src1, DwVfpRegister src2); 1168 void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
1101 void MovToFloatResult(DwVfpRegister src); 1169 void MovToFloatResult(DoubleRegister src);
1102 1170
1103 // Calls a C function and cleans up the space for arguments allocated 1171 // Calls a C function and cleans up the space for arguments allocated
1104 // by PrepareCallCFunction. The called function is not allowed to trigger a 1172 // by PrepareCallCFunction. The called function is not allowed to trigger a
1105 // garbage collection, since that might move the code and invalidate the 1173 // garbage collection, since that might move the code and invalidate the
1106 // return address (unless this is somehow accounted for by the called 1174 // return address (unless this is somehow accounted for by the called
1107 // function). 1175 // function).
1108 void CallCFunction(ExternalReference function, int num_arguments); 1176 void CallCFunction(ExternalReference function, int num_arguments);
1109 void CallCFunction(Register function, int num_arguments); 1177 void CallCFunction(Register function, int num_arguments);
1110 void CallCFunction(ExternalReference function, 1178 void CallCFunction(ExternalReference function,
1111 int num_reg_arguments, 1179 int num_reg_arguments,
1112 int num_double_arguments); 1180 int num_double_arguments);
1113 void CallCFunction(Register function, 1181 void CallCFunction(Register function,
1114 int num_reg_arguments, 1182 int num_reg_arguments,
1115 int num_double_arguments); 1183 int num_double_arguments);
1116 1184
1117 void MovFromFloatParameter(DwVfpRegister dst); 1185 void MovFromFloatParameter(DoubleRegister dst);
1118 void MovFromFloatResult(DwVfpRegister dst); 1186 void MovFromFloatResult(DoubleRegister dst);
1119 1187
1120 // Calls an API function. Allocates HandleScope, extracts returned value 1188 // Calls an API function. Allocates HandleScope, extracts returned value
1121 // from handle and propagates exceptions. Restores context. stack_space 1189 // from handle and propagates exceptions. Restores context. stack_space
1122 // - space to be unwound on exit (includes the call JS arguments space and 1190 // - space to be unwound on exit (includes the call JS arguments space and
1123 // the additional space allocated for the fast call). 1191 // the additional space allocated for the fast call).
1124 void CallApiFunctionAndReturn(Register function_address, 1192 void CallApiFunctionAndReturn(Register function_address,
1125 ExternalReference thunk_ref, 1193 ExternalReference thunk_ref,
1126 int stack_space, 1194 int stack_space,
1127 MemOperand return_value_operand, 1195 MemOperand return_value_operand,
1128 MemOperand* context_restore_operand); 1196 MemOperand* context_restore_operand);
(...skipping 33 matching lines...)
1162 Register scratch1, Register scratch2); 1230 Register scratch1, Register scratch2);
1163 void DecrementCounter(StatsCounter* counter, int value, 1231 void DecrementCounter(StatsCounter* counter, int value,
1164 Register scratch1, Register scratch2); 1232 Register scratch1, Register scratch2);
1165 1233
1166 1234
1167 // --------------------------------------------------------------------------- 1235 // ---------------------------------------------------------------------------
1168 // Debugging 1236 // Debugging
1169 1237
1170 // Calls Abort(msg) if the condition cond is not satisfied. 1238 // Calls Abort(msg) if the condition cond is not satisfied.
1171 // Use --debug_code to enable. 1239 // Use --debug_code to enable.
1172 void Assert(Condition cond, BailoutReason reason); 1240 void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
1173 void AssertFastElements(Register elements); 1241 void AssertFastElements(Register elements);
1174 1242
1175 // Like Assert(), but always enabled. 1243 // Like Assert(), but always enabled.
1176 void Check(Condition cond, BailoutReason reason); 1244 void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
1177 1245
1178 // Print a message to stdout and abort execution. 1246 // Print a message to stdout and abort execution.
1179 void Abort(BailoutReason msg); 1247 void Abort(BailoutReason reason);
1180 1248
1181 // Verify restrictions about code generated in stubs. 1249 // Verify restrictions about code generated in stubs.
1182 void set_generating_stub(bool value) { generating_stub_ = value; } 1250 void set_generating_stub(bool value) { generating_stub_ = value; }
1183 bool generating_stub() { return generating_stub_; } 1251 bool generating_stub() { return generating_stub_; }
1184 void set_has_frame(bool value) { has_frame_ = value; } 1252 void set_has_frame(bool value) { has_frame_ = value; }
1185 bool has_frame() { return has_frame_; } 1253 bool has_frame() { return has_frame_; }
1186 inline bool AllowThisStubCall(CodeStub* stub); 1254 inline bool AllowThisStubCall(CodeStub* stub);
1187 1255
1188 // EABI variant for double arguments in use.
1189 bool use_eabi_hardfloat() {
1190 #ifdef __arm__
1191 return base::OS::ArmUsingHardFloat();
1192 #elif USE_EABI_HARDFLOAT
1193 return true;
1194 #else
1195 return false;
1196 #endif
1197 }
1198
1199 // --------------------------------------------------------------------------- 1256 // ---------------------------------------------------------------------------
1200 // Number utilities 1257 // Number utilities
1201 1258
1202 // Check whether the value of reg is a power of two and not zero. If not 1259 // Check whether the value of reg is a power of two and not zero. If not
1203 // control continues at the label not_power_of_two. If reg is a power of two 1260 // control continues at the label not_power_of_two. If reg is a power of two
1204 // the register scratch contains the value of (reg - 1) when control falls 1261 // the register scratch contains the value of (reg - 1) when control falls
1205 // through. 1262 // through.
1206 void JumpIfNotPowerOfTwoOrZero(Register reg, 1263 void JumpIfNotPowerOfTwoOrZero(Register reg,
1207 Register scratch, 1264 Register scratch,
1208 Label* not_power_of_two_or_zero); 1265 Label* not_power_of_two_or_zero);
1209 // Check whether the value of reg is a power of two and not zero. 1266 // Check whether the value of reg is a power of two and not zero.
1210 // Control falls through if it is, with scratch containing the mask 1267 // Control falls through if it is, with scratch containing the mask
1211 // value (reg - 1). 1268 // value (reg - 1).
1212 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is 1269 // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
1213 // zero or negative, or jumps to the 'not_power_of_two' label if the value is 1270 // zero or negative, or jumps to the 'not_power_of_two' label if the value is
1214 // strictly positive but not a power of two. 1271 // strictly positive but not a power of two.
1215 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, 1272 void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
1216 Register scratch, 1273 Register scratch,
1217 Label* zero_and_neg, 1274 Label* zero_and_neg,
1218 Label* not_power_of_two); 1275 Label* not_power_of_two);
1219 1276
1220 // --------------------------------------------------------------------------- 1277 // ---------------------------------------------------------------------------
1278 // Bit testing/extraction
1279 //
1280 // Bit numbering is such that the least significant bit is bit 0
1281 // (for consistency between 32/64-bit).
1282
1283 // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
1284 // and place them into the least significant bits of dst.
1285 inline void ExtractBitRange(Register dst, Register src,
1286 int rangeStart, int rangeEnd,
1287 RCBit rc = LeaveRC) {
1288 DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
1289 int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
1290 int width = rangeStart - rangeEnd + 1;
1291 #if V8_TARGET_ARCH_PPC64
1292 rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
1293 #else
1294 rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1, rc);
1295 #endif
1296 }
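Illustration of the LSB-0 bit numbering (not in the patch):

    // Copy bits 7..4 of r4 (bit 0 = least significant) into the low bits of r3.
    __ ExtractBitRange(r3, r4, 7, 4);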
1297
1298 inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
1299 RCBit rc = LeaveRC) {
1300 ExtractBitRange(dst, src, bitNumber, bitNumber, rc);
1301 }
1302
1303 // Extract consecutive bits (defined by mask) from src and place them
1304 // into the least significant bits of dst.
1305 inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
1306 RCBit rc = LeaveRC) {
1307 int start = kBitsPerPointer - 1;
1308 int end;
1309 uintptr_t bit = (1L << start);
1310
1311 while (bit && (mask & bit) == 0) {
1312 start--;
1313 bit >>= 1;
1314 }
1315 end = start;
1316 bit >>= 1;
1317
1318 while (bit && (mask & bit)) {
1319 end--;
1320 bit >>= 1;
1321 }
1322
1323 // 1-bits in mask must be contiguous
1324 DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
1325
1326 ExtractBitRange(dst, src, start, end, rc);
1327 }
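For example, a contiguous mask of 0xF0 resolves to start = 7 and end = 4, so ExtractBitMask(dst, src, 0xF0) reduces to ExtractBitRange(dst, src, 7, 4); a non-contiguous mask such as 0x90 trips the DCHECK because bits remain set below the detected range.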
1328
1329 // Test single bit in value.
1330 inline void TestBit(Register value, int bitNumber,
1331 Register scratch = r0) {
1332 ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC);
1333 }
1334
1335 // Test consecutive bit range in value. Range is defined by
1336 // rangeStart - rangeEnd.
1337 inline void TestBitRange(Register value,
1338 int rangeStart, int rangeEnd,
1339 Register scratch = r0) {
1340 ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC);
1341 }
1342
1343 // Test consecutive bit range in value. Range is defined by mask.
1344 inline void TestBitMask(Register value, uintptr_t mask,
1345 Register scratch = r0) {
1346 ExtractBitMask(scratch, value, mask, SetRC);
1347 }
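A minimal usage sketch (label and register names illustrative, assuming the usual #define __ ACCESS_MASM(masm) context): since these helpers pass SetRC, the result is reflected in cr0, so a typical pattern is

  Label bit_set;
  __ TestBit(r4, 3);       // cr0 is EQ when bit 3 of r4 is clear
  __ bne(&bit_set, cr0);   // taken when bit 3 is set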
1348
1349
1350 // ---------------------------------------------------------------------------
1221 // Smi utilities 1351 // Smi utilities
1222 1352
1223 void SmiTag(Register reg, SBit s = LeaveCC) { 1353 // Tag by shifting left by kSmiShift
1224 add(reg, reg, Operand(reg), s); 1354 void SmiTag(Register reg, RCBit rc = LeaveRC) {
1355 SmiTag(reg, reg, rc);
1225 } 1356 }
1226 void SmiTag(Register dst, Register src, SBit s = LeaveCC) { 1357 void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
1227 add(dst, src, Operand(src), s); 1358 ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
1228 } 1359 }
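As a concrete illustration (assuming the usual V8 smi layout, i.e. kSmiShift = 1 on 32-bit and 32 on PPC64): tagging the integer 5 produces 0xA on 32-bit and 0x0000000500000000 on PPC64, and SmiUntag below reverses it with an arithmetic right shift, so negative values untag correctly.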
1229 1360
1230 // Try to convert int32 to smi. If the value is too large, preserve 1361 #if !V8_TARGET_ARCH_PPC64
1231 // the original value and jump to not_a_smi. Destroys scratch and 1362 // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
1232 // sets flags. 1363 void SmiTagCheckOverflow(Register reg, Register overflow);
1233 void TrySmiTag(Register reg, Label* not_a_smi) { 1364 void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
1234 TrySmiTag(reg, reg, not_a_smi); 1365
1366 inline void JumpIfNotSmiCandidate(Register value, Register scratch,
1367 Label* not_smi_label) {
1368 // High bits must be identical to fit into a Smi
1369 addis(scratch, value, Operand(0x40000000u >> 16));
1370 cmpi(scratch, Operand::Zero());
1371 blt(not_smi_label);
1235 } 1372 }
1236 void TrySmiTag(Register reg, Register src, Label* not_a_smi) { 1373 #endif
1237 SmiTag(ip, src, SetCC); 1374 inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
1238 b(vs, not_a_smi); 1375 // The test is different for unsigned int values. Since we need
1239 mov(reg, ip); 1376 // the value to be in the range of a positive smi, we can't
1377 // handle any of the high bits being set in the value.
1378 TestBitRange(value,
1379 kBitsPerPointer - 1,
1380 kBitsPerPointer - 1 - kSmiShift,
1381 scratch);
1382 }
1383 inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
1384 Label* not_smi_label) {
1385 TestUnsignedSmiCandidate(value, scratch);
1386 bne(not_smi_label, cr0);
1240 } 1387 }
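Worked arithmetic for the 32-bit candidate checks above: addis adds 0x4000 to the upper halfword, i.e. 0x40000000 to the value, which overflows into the sign bit exactly when the value lies outside [-2^30, 2^30). For example 0x3FFFFFFF + 0x40000000 = 0x7FFFFFFF (>= 0, a candidate) while 0x40000000 + 0x40000000 = 0x80000000 (< 0, rejected); likewise -0x40000000 maps to 0 (candidate) and -0x40000001 to -1 (rejected). The unsigned variant instead requires the top kSmiShift + 1 bits to all be clear, since an unsigned source must land in the positive smi range.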
1241 1388
1389 void SmiUntag(Register reg, RCBit rc = LeaveRC) {
1390 SmiUntag(reg, reg, rc);
1391 }
1242 1392
1243 void SmiUntag(Register reg, SBit s = LeaveCC) { 1393 void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
1244 mov(reg, Operand::SmiUntag(reg), s); 1394 ShiftRightArithImm(dst, src, kSmiShift, rc);
1245 } 1395 }
1246 void SmiUntag(Register dst, Register src, SBit s = LeaveCC) { 1396
1247 mov(dst, Operand::SmiUntag(src), s); 1397 void SmiToPtrArrayOffset(Register dst, Register src) {
1398 #if V8_TARGET_ARCH_PPC64
1399 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
1400 ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
1401 #else
1402 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
1403 ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
1404 #endif
1405 }
1406
1407 void SmiToByteArrayOffset(Register dst, Register src) {
1408 SmiUntag(dst, src);
1409 }
1410
1411 void SmiToShortArrayOffset(Register dst, Register src) {
1412 #if V8_TARGET_ARCH_PPC64
1413 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
1414 ShiftRightArithImm(dst, src, kSmiShift - 1);
1415 #else
1416 STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
1417 if (!dst.is(src)) {
1418 mr(dst, src);
1419 }
1420 #endif
1421 }
1422
1423 void SmiToIntArrayOffset(Register dst, Register src) {
1424 #if V8_TARGET_ARCH_PPC64
1425 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
1426 ShiftRightArithImm(dst, src, kSmiShift - 2);
1427 #else
1428 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
1429 ShiftLeftImm(dst, src, Operand(2 - kSmiShift));
1430 #endif
1431 }
1432
1433 #define SmiToFloatArrayOffset SmiToIntArrayOffset
1434
1435 void SmiToDoubleArrayOffset(Register dst, Register src) {
1436 #if V8_TARGET_ARCH_PPC64
1437 STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
1438 ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2);
1439 #else
1440 STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
1441 ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
1442 #endif
1443 }
1444
1445 void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
1446 if (kSmiShift < elementSizeLog2) {
1447 ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift));
1448 } else if (kSmiShift > elementSizeLog2) {
1449 ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2);
1450 } else if (!dst.is(src)) {
1451 mr(dst, src);
1452 }
1453 }
1454
1455 void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
1456 bool isSmi) {
1457 if (isSmi) {
1458 SmiToArrayOffset(dst, src, elementSizeLog2);
1459 } else {
1460 ShiftLeftImm(dst, src, Operand(elementSizeLog2));
1461 }
1248 } 1462 }
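As a worked example of the shift bookkeeping above (assuming kSmiShift = 32 on PPC64 and 1 on 32-bit): for pointer-sized elements SmiToPtrArrayOffset is an arithmetic right shift by 32 - 3 = 29 on PPC64, but a left shift by 2 - 1 = 1 on 32-bit; IndexToArrayOffset falls back to a plain left shift by elementSizeLog2 when the index is already untagged.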
1249 1463
1250 // Untag the source value into destination and jump if source is a smi. 1464 // Untag the source value into destination and jump if source is a smi.
1251 // Source and destination can be the same register. 1465 // Source and destination can be the same register.
1252 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case); 1466 void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
1253 1467
1254 // Untag the source value into destination and jump if source is not a smi. 1468 // Untag the source value into destination and jump if source is not a smi.
1255 // Souce and destination can be the same register. 1469 // Souce and destination can be the same register.
1256 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case); 1470 void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
1257 1471
1258 // Test if the register contains a smi (Z == 0 (eq) if true). 1472 inline void TestIfSmi(Register value, Register scratch) {
1259 inline void SmiTst(Register value) { 1473 TestBit(value, 0, scratch); // tst(value, Operand(kSmiTagMask));
1260 tst(value, Operand(kSmiTagMask));
1261 } 1474 }
1262 inline void NonNegativeSmiTst(Register value) { 1475
1263 tst(value, Operand(kSmiTagMask | kSmiSignMask)); 1476 inline void TestIfPositiveSmi(Register value, Register scratch) {
1477 STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
1478 (intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
1479 #if V8_TARGET_ARCH_PPC64
1480 rldicl(scratch, value, 1, kBitsPerPointer - 2, SetRC);
1481 #else
1482 rlwinm(scratch, value, 1, kBitsPerPointer - 2, kBitsPerPointer - 1, SetRC);
1483 #endif
1264 } 1484 }
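The rotate in TestIfPositiveSmi works because rotating left by one places the sign bit in bit 0 and the smi tag bit in bit 1; masking to those two bits with the record bit set makes cr0 read EQ exactly when the value is both non-negative and smi-tagged.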
1265 // Jump if the register contains a smi. 1485
1486 // Jump if the register contains a smi.
1266 inline void JumpIfSmi(Register value, Label* smi_label) { 1487 inline void JumpIfSmi(Register value, Label* smi_label) {
1267 tst(value, Operand(kSmiTagMask)); 1488 TestIfSmi(value, r0);
1268 b(eq, smi_label); 1489 beq(smi_label, cr0); // branch if SMI
1269 } 1490 }
1270 // Jump if the register contains a non-smi. 1491 // Jump if the register contains a non-smi.
1271 inline void JumpIfNotSmi(Register value, Label* not_smi_label) { 1492 inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
1272 tst(value, Operand(kSmiTagMask)); 1493 TestIfSmi(value, r0);
1273 b(ne, not_smi_label); 1494 bne(not_smi_label, cr0);
1274 } 1495 }
1275 // Jump if either of the registers contain a non-smi. 1496 // Jump if either of the registers contain a non-smi.
1276 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi); 1497 void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
1277 // Jump if either of the registers contain a smi. 1498 // Jump if either of the registers contain a smi.
1278 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi); 1499 void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
1279 1500
1280 // Abort execution if argument is a smi, enabled via --debug-code. 1501 // Abort execution if argument is a smi, enabled via --debug-code.
1281 void AssertNotSmi(Register object); 1502 void AssertNotSmi(Register object);
1282 void AssertSmi(Register object); 1503 void AssertSmi(Register object);
1283 1504
1505
1506 #if V8_TARGET_ARCH_PPC64
1507 inline void TestIfInt32(Register value,
1508 Register scratch1, Register scratch2,
1509 CRegister cr = cr7) {
1510 // High bits must be identical to fit into a 32-bit integer
1511 srawi(scratch1, value, 31);
1512 sradi(scratch2, value, 32);
1513 cmp(scratch1, scratch2, cr);
1514 }
1515 #else
1516 inline void TestIfInt32(Register hi_word, Register lo_word,
1517 Register scratch, CRegister cr = cr7) {
1518 // High bits must be identical to fit into a 32-bit integer
1519 srawi(scratch, lo_word, 31);
1520 cmp(scratch, hi_word, cr);
1521 }
1522 #endif
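Worked example for the PPC64 variant (values purely illustrative): for value = 0xFFFFFFFF80000000 (-2^31), srawi by 31 and sradi by 32 both yield -1, so the compare succeeds and the value is a valid int32; for value = 0x0000000080000000 (+2^31), the low-word sign replication gives -1 while the high word is 0, so the compare fails. The 32-bit variant applies the same check to an explicit hi/lo register pair.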
1523
1284 // Abort execution if argument is not a string, enabled via --debug-code. 1524 // Abort execution if argument is not a string, enabled via --debug-code.
1285 void AssertString(Register object); 1525 void AssertString(Register object);
1286 1526
1287 // Abort execution if argument is not a name, enabled via --debug-code. 1527 // Abort execution if argument is not a name, enabled via --debug-code.
1288 void AssertName(Register object); 1528 void AssertName(Register object);
1289 1529
1290 // Abort execution if argument is not undefined or an AllocationSite, enabled 1530 // Abort execution if argument is not undefined or an AllocationSite, enabled
1291 // via --debug-code. 1531 // via --debug-code.
1292 void AssertUndefinedOrAllocationSite(Register object, Register scratch); 1532 void AssertUndefinedOrAllocationSite(Register object, Register scratch);
1293 1533
(...skipping 58 matching lines...)
1352 void JumpIfNotUniqueName(Register reg, Label* not_unique_name); 1592 void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
1353 1593
1354 void EmitSeqStringSetCharCheck(Register string, 1594 void EmitSeqStringSetCharCheck(Register string,
1355 Register index, 1595 Register index,
1356 Register value, 1596 Register value,
1357 uint32_t encoding_mask); 1597 uint32_t encoding_mask);
1358 1598
1359 // --------------------------------------------------------------------------- 1599 // ---------------------------------------------------------------------------
1360 // Patching helpers. 1600 // Patching helpers.
1361 1601
1362 // Get the location of a relocated constant (its address in the constant pool) 1602 // Retrieve/patch the relocated value (lis/ori pair or constant pool load).
1363 // from its load site. 1603 void GetRelocatedValue(Register location,
1364 void GetRelocatedValueLocation(Register ldr_location, Register result, 1604 Register result,
1365 Register scratch); 1605 Register scratch);
1366 1606 void SetRelocatedValue(Register location,
1607 Register scratch,
1608 Register new_value);
1367 1609
1368 void ClampUint8(Register output_reg, Register input_reg); 1610 void ClampUint8(Register output_reg, Register input_reg);
1369 1611
1612 // Saturate a value into an 8-bit unsigned integer:
1613 //   if input_value < 0, output_value is 0;
1614 //   if input_value > 255, output_value is 255;
1615 //   otherwise output_value is input_value rounded to the nearest integer.
1370 void ClampDoubleToUint8(Register result_reg, 1616 void ClampDoubleToUint8(Register result_reg,
1371 DwVfpRegister input_reg, 1617 DoubleRegister input_reg,
1372 LowDwVfpRegister double_scratch); 1618 DoubleRegister temp_double_reg);
1373 1619
1374 1620
1375 void LoadInstanceDescriptors(Register map, Register descriptors); 1621 void LoadInstanceDescriptors(Register map, Register descriptors);
1376 void EnumLength(Register dst, Register map); 1622 void EnumLength(Register dst, Register map);
1377 void NumberOfOwnDescriptors(Register dst, Register map); 1623 void NumberOfOwnDescriptors(Register dst, Register map);
1378 1624
1379 template<typename Field> 1625 template<typename Field>
1380 void DecodeField(Register dst, Register src) { 1626 void DecodeField(Register dst, Register src) {
1381 Ubfx(dst, src, Field::kShift, Field::kSize); 1627 ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
1382 } 1628 }
1383 1629
1384 template<typename Field> 1630 template<typename Field>
1385 void DecodeField(Register reg) { 1631 void DecodeField(Register reg) {
1386 DecodeField<Field>(reg, reg); 1632 DecodeField<Field>(reg, reg);
1387 } 1633 }
1388 1634
1389 template<typename Field> 1635 template<typename Field>
1390 void DecodeFieldToSmi(Register dst, Register src) { 1636 void DecodeFieldToSmi(Register dst, Register src) {
1391 static const int shift = Field::kShift; 1637 #if V8_TARGET_ARCH_PPC64
1392 static const int mask = Field::kMask >> shift << kSmiTagSize; 1638 DecodeField<Field>(dst, src);
1393 STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0); 1639 SmiTag(dst);
1394 STATIC_ASSERT(kSmiTag == 0); 1640 #else
1395 if (shift < kSmiTagSize) { 1641 // 32-bit can do this in one instruction:
1396 mov(dst, Operand(src, LSL, kSmiTagSize - shift)); 1642 int start = Field::kSize + kSmiShift - 1;
1397 and_(dst, dst, Operand(mask)); 1643 int end = kSmiShift;
1398 } else if (shift > kSmiTagSize) { 1644 int rotate = kSmiShift - Field::kShift;
1399 mov(dst, Operand(src, LSR, shift - kSmiTagSize)); 1645 if (rotate < 0) {
1400 and_(dst, dst, Operand(mask)); 1646 rotate += kBitsPerPointer;
1401 } else {
1402 and_(dst, src, Operand(mask));
1403 } 1647 }
1648 rlwinm(dst, src, rotate,
1649 kBitsPerPointer - start - 1,
1650 kBitsPerPointer - end - 1);
1651 #endif
1404 } 1652 }
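A worked instance of the single-instruction 32-bit path (field parameters chosen for illustration): with Field::kShift = 2, Field::kSize = 3 and kSmiShift = 1, rotate = 1 - 2 = -1, adjusted to 31, start = 3 and end = 1, giving rlwinm(dst, src, 31, 28, 30); the three field bits land in bits 3..1 of dst, which is the decoded value already tagged as a smi. On PPC64 the same field goes through ExtractBitRange followed by SmiTag.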
1405 1653
1406 template<typename Field> 1654 template<typename Field>
1407 void DecodeFieldToSmi(Register reg) { 1655 void DecodeFieldToSmi(Register reg) {
1408 DecodeField<Field>(reg, reg); 1656 DecodeFieldToSmi<Field>(reg, reg);
1409 } 1657 }
1410 1658
1411 // Activation support. 1659 // Activation support.
1412 void EnterFrame(StackFrame::Type type, bool load_constant_pool = false); 1660 void EnterFrame(StackFrame::Type type, bool load_constant_pool = false);
1413 // Returns the pc offset at which the frame ends. 1661 // Returns the pc offset at which the frame ends.
1414 int LeaveFrame(StackFrame::Type type); 1662 int LeaveFrame(StackFrame::Type type);
1415 1663
1416 // Expects object in r0 and returns map with validated enum cache 1664 // Expects object in r0 and returns map with validated enum cache
1417 // in r0. Assumes that any other register can be used as a scratch. 1665 // in r0. Assumes that any other register can be used as a scratch.
1418 void CheckEnumCache(Register null_value, Label* call_runtime); 1666 void CheckEnumCache(Register null_value, Label* call_runtime);
1419 1667
1420 // AllocationMemento support. Arrays may have an associated 1668 // AllocationMemento support. Arrays may have an associated
1421 // AllocationMemento object that can be checked for in order to pretransition 1669 // AllocationMemento object that can be checked for in order to pretransition
1422 // to another type. 1670 // to another type.
1423 // On entry, receiver_reg should point to the array object. 1671 // On entry, receiver_reg should point to the array object.
1424 // scratch_reg gets clobbered. 1672 // scratch_reg gets clobbered.
1425 // If allocation info is present, condition flags are set to eq. 1673 // If allocation info is present, condition flags are set to eq.
1426 void TestJSArrayForAllocationMemento(Register receiver_reg, 1674 void TestJSArrayForAllocationMemento(Register receiver_reg,
1427 Register scratch_reg, 1675 Register scratch_reg,
1428 Label* no_memento_found); 1676 Label* no_memento_found);
1429 1677
1430 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg, 1678 void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
1431 Register scratch_reg, 1679 Register scratch_reg,
1432 Label* memento_found) { 1680 Label* memento_found) {
1433 Label no_memento_found; 1681 Label no_memento_found;
1434 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg, 1682 TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
1435 &no_memento_found); 1683 &no_memento_found);
1436 b(eq, memento_found); 1684 beq(memento_found);
1437 bind(&no_memento_found); 1685 bind(&no_memento_found);
1438 } 1686 }
1439 1687
1440 // Jumps to found label if a prototype map has dictionary elements. 1688 // Jumps to found label if a prototype map has dictionary elements.
1441 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0, 1689 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1442 Register scratch1, Label* found); 1690 Register scratch1, Label* found);
1443 1691
1444 private: 1692 private:
1693 static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
1694
1445 void CallCFunctionHelper(Register function, 1695 void CallCFunctionHelper(Register function,
1446 int num_reg_arguments, 1696 int num_reg_arguments,
1447 int num_double_arguments); 1697 int num_double_arguments);
1448 1698
1449 void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al); 1699 void Jump(intptr_t target, RelocInfo::Mode rmode,
1700 Condition cond = al, CRegister cr = cr7);
1450 1701
1451 // Helper functions for generating invokes. 1702 // Helper functions for generating invokes.
1452 void InvokePrologue(const ParameterCount& expected, 1703 void InvokePrologue(const ParameterCount& expected,
1453 const ParameterCount& actual, 1704 const ParameterCount& actual,
1454 Handle<Code> code_constant, 1705 Handle<Code> code_constant,
1455 Register code_reg, 1706 Register code_reg,
1456 Label* done, 1707 Label* done,
1457 bool* definitely_mismatches, 1708 bool* definitely_mismatches,
1458 InvokeFlag flag, 1709 InvokeFlag flag,
1459 const CallWrapper& call_wrapper); 1710 const CallWrapper& call_wrapper);
(...skipping 19 matching lines...)
1479 1730
1480 // Helper for throwing exceptions. Compute a handler address and jump to 1731 // Helper for throwing exceptions. Compute a handler address and jump to
1481 // it. See the implementation for register usage. 1732 // it. See the implementation for register usage.
1482 void JumpToHandlerEntry(); 1733 void JumpToHandlerEntry();
1483 1734
1484 // Compute memory operands for safepoint stack slots. 1735 // Compute memory operands for safepoint stack slots.
1485 static int SafepointRegisterStackIndex(int reg_code); 1736 static int SafepointRegisterStackIndex(int reg_code);
1486 MemOperand SafepointRegisterSlot(Register reg); 1737 MemOperand SafepointRegisterSlot(Register reg);
1487 MemOperand SafepointRegistersAndDoublesSlot(Register reg); 1738 MemOperand SafepointRegistersAndDoublesSlot(Register reg);
1488 1739
1489 // Loads the constant pool pointer (pp) register. 1740 #if V8_OOL_CONSTANT_POOL
1741 // Loads the constant pool pointer (kConstantPoolRegister).
1490 void LoadConstantPoolPointerRegister(); 1742 void LoadConstantPoolPointerRegister();
1743 #endif
1491 1744
1492 bool generating_stub_; 1745 bool generating_stub_;
1493 bool has_frame_; 1746 bool has_frame_;
1494 // This handle will be patched with the code object on installation. 1747 // This handle will be patched with the code object on installation.
1495 Handle<Object> code_object_; 1748 Handle<Object> code_object_;
1496 1749
1497 // Needs access to SafepointRegisterStackIndex for compiled frame 1750 // Needs access to SafepointRegisterStackIndex for compiled frame
1498 // traversal. 1751 // traversal.
1499 friend class StandardFrame; 1752 friend class StandardFrame;
1500 }; 1753 };
(...skipping 15 matching lines...)
1516 int instructions, 1769 int instructions,
1517 FlushICache flush_cache = FLUSH); 1770 FlushICache flush_cache = FLUSH);
1518 virtual ~CodePatcher(); 1771 virtual ~CodePatcher();
1519 1772
1520 // Macro assembler to emit code. 1773 // Macro assembler to emit code.
1521 MacroAssembler* masm() { return &masm_; } 1774 MacroAssembler* masm() { return &masm_; }
1522 1775
1523 // Emit an instruction directly. 1776 // Emit an instruction directly.
1524 void Emit(Instr instr); 1777 void Emit(Instr instr);
1525 1778
1526 // Emit an address directly.
1527 void Emit(Address addr);
1528
1529 // Emit the condition part of an instruction leaving the rest of the current 1779 // Emit the condition part of an instruction leaving the rest of the current
1530 // instruction unchanged. 1780 // instruction unchanged.
1531 void EmitCondition(Condition cond); 1781 void EmitCondition(Condition cond);
1532 1782
1533 private: 1783 private:
1534 byte* address_; // The address of the code being patched. 1784 byte* address_; // The address of the code being patched.
1535 int size_; // Number of bytes of the expected patch size. 1785 int size_; // Number of bytes of the expected patch size.
1536 MacroAssembler masm_; // Macro assembler used to generate the code. 1786 MacroAssembler masm_; // Macro assembler used to generate the code.
1537 FlushICache flush_cache_; // Whether to flush the I cache after patching. 1787 FlushICache flush_cache_; // Whether to flush the I cache after patching.
1538 }; 1788 };
1539 1789
1540 1790
1791 #if V8_OOL_CONSTANT_POOL
1541 class FrameAndConstantPoolScope { 1792 class FrameAndConstantPoolScope {
1542 public: 1793 public:
1543 FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type) 1794 FrameAndConstantPoolScope(MacroAssembler* masm, StackFrame::Type type)
1544 : masm_(masm), 1795 : masm_(masm),
1545 type_(type), 1796 type_(type),
1546 old_has_frame_(masm->has_frame()), 1797 old_has_frame_(masm->has_frame()),
1547 old_constant_pool_available_(masm->is_constant_pool_available()) { 1798 old_constant_pool_available_(masm->is_constant_pool_available()) {
1548 // We only want to enable constant pool access for non-manual frame scopes 1799 // We only want to enable constant pool access for non-manual frame scopes
1549 // to ensure the constant pool pointer is valid throughout the scope. 1800 // to ensure the constant pool pointer is valid throughout the scope.
1550 DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE); 1801 DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
(...skipping 19 matching lines...)
1570 } 1821 }
1571 1822
1572 private: 1823 private:
1573 MacroAssembler* masm_; 1824 MacroAssembler* masm_;
1574 StackFrame::Type type_; 1825 StackFrame::Type type_;
1575 bool old_has_frame_; 1826 bool old_has_frame_;
1576 bool old_constant_pool_available_; 1827 bool old_constant_pool_available_;
1577 1828
1578 DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope); 1829 DISALLOW_IMPLICIT_CONSTRUCTORS(FrameAndConstantPoolScope);
1579 }; 1830 };
1831 #else
1832 #define FrameAndConstantPoolScope FrameScope
1833 #endif
1580 1834
1581 1835
1836 #if V8_OOL_CONSTANT_POOL
1582 // Class for scoping the unavailability of constant pool access. 1837 // Class for scoping the unavailability of constant pool access.
1583 class ConstantPoolUnavailableScope { 1838 class ConstantPoolUnavailableScope {
1584 public: 1839 public:
1585 explicit ConstantPoolUnavailableScope(MacroAssembler* masm) 1840 explicit ConstantPoolUnavailableScope(MacroAssembler* masm)
1586 : masm_(masm), 1841 : masm_(masm),
1587 old_constant_pool_available_(masm->is_constant_pool_available()) { 1842 old_constant_pool_available_(masm->is_constant_pool_available()) {
1588 if (FLAG_enable_ool_constant_pool) { 1843 if (FLAG_enable_ool_constant_pool) {
1589 masm_->set_constant_pool_available(false); 1844 masm_->set_constant_pool_available(false);
1590 } 1845 }
1591 } 1846 }
1592 ~ConstantPoolUnavailableScope() { 1847 ~ConstantPoolUnavailableScope() {
1593 if (FLAG_enable_ool_constant_pool) { 1848 if (FLAG_enable_ool_constant_pool) {
1594 masm_->set_constant_pool_available(old_constant_pool_available_); 1849 masm_->set_constant_pool_available(old_constant_pool_available_);
1595 } 1850 }
1596 } 1851 }
1597 1852
1598 private: 1853 private:
1599 MacroAssembler* masm_; 1854 MacroAssembler* masm_;
1600 int old_constant_pool_available_; 1855 int old_constant_pool_available_;
1601 1856
1602 DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope); 1857 DISALLOW_IMPLICIT_CONSTRUCTORS(ConstantPoolUnavailableScope);
1603 }; 1858 };
1859 #endif
1604 1860
1605 1861
1606 // ----------------------------------------------------------------------------- 1862 // -----------------------------------------------------------------------------
1607 // Static helper functions. 1863 // Static helper functions.
1608 1864
1609 inline MemOperand ContextOperand(Register context, int index) { 1865 inline MemOperand ContextOperand(Register context, int index) {
1610 return MemOperand(context, Context::SlotOffset(index)); 1866 return MemOperand(context, Context::SlotOffset(index));
1611 } 1867 }
1612 1868
1613 1869
1614 inline MemOperand GlobalObjectOperand() { 1870 inline MemOperand GlobalObjectOperand() {
1615 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX); 1871 return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
1616 } 1872 }
1617 1873
1618 1874
1619 #ifdef GENERATED_CODE_COVERAGE 1875 #ifdef GENERATED_CODE_COVERAGE
1620 #define CODE_COVERAGE_STRINGIFY(x) #x 1876 #define CODE_COVERAGE_STRINGIFY(x) #x
1621 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x) 1877 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
1622 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__) 1878 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
1623 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm-> 1879 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
1624 #else 1880 #else
1625 #define ACCESS_MASM(masm) masm-> 1881 #define ACCESS_MASM(masm) masm->
1626 #endif 1882 #endif
1627 1883
1628 1884
1629 } } // namespace v8::internal 1885 } } // namespace v8::internal
1630 1886
1631 #endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_ 1887 #endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_