Chromium Code Reviews

Side by Side Diff: src/a64/macro-assembler-a64.h

Issue 148293020: Merge experimental/a64 to bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove ARM from OWNERS Created 6 years, 10 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #ifndef V8_A64_MACRO_ASSEMBLER_A64_H_
29 #define V8_A64_MACRO_ASSEMBLER_A64_H_
30
31 #include "v8globals.h"
32 #include "globals.h"
33
34 #include "a64/assembler-a64-inl.h"
35
36 namespace v8 {
37 namespace internal {
38
39 #define LS_MACRO_LIST(V) \
40 V(Ldrb, Register&, rt, LDRB_w) \
41 V(Strb, Register&, rt, STRB_w) \
42 V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
43 V(Ldrh, Register&, rt, LDRH_w) \
44 V(Strh, Register&, rt, STRH_w) \
45 V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
46 V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
47 V(Str, CPURegister&, rt, StoreOpFor(rt)) \
48 V(Ldrsw, Register&, rt, LDRSW_x)
49
50
51 // ----------------------------------------------------------------------------
52 // Static helper functions
53
54 // Generate a MemOperand for loading a field from an object.
55 inline MemOperand FieldMemOperand(Register object, int offset);
56 inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);
57
58 // Generate a MemOperand for loading a SMI from memory.
59 inline MemOperand UntagSmiMemOperand(Register object, int offset);
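
The definitions live in the -inl.h file; a minimal sketch of the idea behind FieldMemOperand (assuming the standard kHeapObjectTag constant):

    inline MemOperand FieldMemOperand(Register object, int offset) {
      // Object pointers are tagged, so fold the tag into the field offset.
      return MemOperand(object, offset - kHeapObjectTag);
    }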
60
61
62 // ----------------------------------------------------------------------------
63 // MacroAssembler
64
65 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
66 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
67 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
68 enum TargetAddressStorageMode {
69 CAN_INLINE_TARGET_ADDRESS,
70 NEVER_INLINE_TARGET_ADDRESS
71 };
72 enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
73 enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
74 enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
75 enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
76 enum SeqStringSetCharCheckIndexType { kIndexIsSmi, kIndexIsInteger32 };
77
78 class MacroAssembler : public Assembler {
79 public:
80 MacroAssembler(Isolate* isolate, byte* buffer, unsigned buffer_size);
81
82 inline Handle<Object> CodeObject();
83
84 // Instruction set functions ------------------------------------------------
85 // Logical macros.
86 inline void And(const Register& rd,
87 const Register& rn,
88 const Operand& operand);
89 inline void Ands(const Register& rd,
90 const Register& rn,
91 const Operand& operand);
92 inline void Bic(const Register& rd,
93 const Register& rn,
94 const Operand& operand);
95 inline void Bics(const Register& rd,
96 const Register& rn,
97 const Operand& operand);
98 inline void Orr(const Register& rd,
99 const Register& rn,
100 const Operand& operand);
101 inline void Orn(const Register& rd,
102 const Register& rn,
103 const Operand& operand);
104 inline void Eor(const Register& rd,
105 const Register& rn,
106 const Operand& operand);
107 inline void Eon(const Register& rd,
108 const Register& rn,
109 const Operand& operand);
110 inline void Tst(const Register& rn, const Operand& operand);
111 void LogicalMacro(const Register& rd,
112 const Register& rn,
113 const Operand& operand,
114 LogicalOp op);
115
116 // Add and sub macros.
117 inline void Add(const Register& rd,
118 const Register& rn,
119 const Operand& operand);
120 inline void Adds(const Register& rd,
121 const Register& rn,
122 const Operand& operand);
123 inline void Sub(const Register& rd,
124 const Register& rn,
125 const Operand& operand);
126 inline void Subs(const Register& rd,
127 const Register& rn,
128 const Operand& operand);
129 inline void Cmn(const Register& rn, const Operand& operand);
130 inline void Cmp(const Register& rn, const Operand& operand);
131 inline void Neg(const Register& rd,
132 const Operand& operand);
133 inline void Negs(const Register& rd,
134 const Operand& operand);
135
136 void AddSubMacro(const Register& rd,
137 const Register& rn,
138 const Operand& operand,
139 FlagsUpdate S,
140 AddSubOp op);
141
142 // Add/sub with carry macros.
143 inline void Adc(const Register& rd,
144 const Register& rn,
145 const Operand& operand);
146 inline void Adcs(const Register& rd,
147 const Register& rn,
148 const Operand& operand);
149 inline void Sbc(const Register& rd,
150 const Register& rn,
151 const Operand& operand);
152 inline void Sbcs(const Register& rd,
153 const Register& rn,
154 const Operand& operand);
155 inline void Ngc(const Register& rd,
156 const Operand& operand);
157 inline void Ngcs(const Register& rd,
158 const Operand& operand);
159 void AddSubWithCarryMacro(const Register& rd,
160 const Register& rn,
161 const Operand& operand,
162 FlagsUpdate S,
163 AddSubWithCarryOp op);
164
165 // Move macros.
166 void Mov(const Register& rd,
167 const Operand& operand,
168 DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
169 void Mov(const Register& rd, uint64_t imm);
170 inline void Mvn(const Register& rd, uint64_t imm);
171 void Mvn(const Register& rd, const Operand& operand);
172 static bool IsImmMovn(uint64_t imm, unsigned reg_size);
173 static bool IsImmMovz(uint64_t imm, unsigned reg_size);
174 static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
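
A usage sketch (illustrative; `__` is the usual `masm->` shorthand) of the kinds of immediates these helpers classify:

    // IsImmMovz: only one non-zero half-word, so a single MOVZ suffices.
    __ Mov(x0, 0x0000000000005000UL);
    // Two non-zero half-words: Mov emits MOVZ plus one MOVK, using
    // CountClearHalfWords to pick the cheaper synthesis.
    __ Mov(x1, 0x1234000000005678UL);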
175
176 // Conditional macros.
177 inline void Ccmp(const Register& rn,
178 const Operand& operand,
179 StatusFlags nzcv,
180 Condition cond);
181 inline void Ccmn(const Register& rn,
182 const Operand& operand,
183 StatusFlags nzcv,
184 Condition cond);
185 void ConditionalCompareMacro(const Register& rn,
186 const Operand& operand,
187 StatusFlags nzcv,
188 Condition cond,
189 ConditionalCompareOp op);
190 void Csel(const Register& rd,
191 const Register& rn,
192 const Operand& operand,
193 Condition cond);
194
195 // Load/store macros.
196 #define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
197 inline void FN(const REGTYPE REG, const MemOperand& addr);
198 LS_MACRO_LIST(DECLARE_FUNCTION)
199 #undef DECLARE_FUNCTION
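
For reference, each LS_MACRO_LIST entry expands through DECLARE_FUNCTION into one such declaration; e.g. the Ldrb entry becomes:

    inline void Ldrb(const Register& rt, const MemOperand& addr);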
200
201 void LoadStoreMacro(const CPURegister& rt,
202 const MemOperand& addr,
203 LoadStoreOp op);
204
205 // V8-specific load/store helpers.
206 void Load(const Register& rt, const MemOperand& addr, Representation r);
207 void Store(const Register& rt, const MemOperand& addr, Representation r);
208
209 // Remaining instructions are simple pass-through calls to the assembler.
210 inline void Adr(const Register& rd, Label* label);
211 inline void Asr(const Register& rd, const Register& rn, unsigned shift);
212 inline void Asr(const Register& rd, const Register& rn, const Register& rm);
213 inline void B(Label* label);
214 inline void B(Condition cond, Label* label);
215 inline void B(Label* label, Condition cond);
216 inline void Bfi(const Register& rd,
217 const Register& rn,
218 unsigned lsb,
219 unsigned width);
220 inline void Bfxil(const Register& rd,
221 const Register& rn,
222 unsigned lsb,
223 unsigned width);
224 inline void Bind(Label* label);
225 inline void Bl(Label* label);
226 inline void Blr(const Register& xn);
227 inline void Br(const Register& xn);
228 inline void Brk(int code);
229 inline void Cbnz(const Register& rt, Label* label);
230 inline void Cbz(const Register& rt, Label* label);
231 inline void Cinc(const Register& rd, const Register& rn, Condition cond);
232 inline void Cinv(const Register& rd, const Register& rn, Condition cond);
233 inline void Cls(const Register& rd, const Register& rn);
234 inline void Clz(const Register& rd, const Register& rn);
235 inline void Cneg(const Register& rd, const Register& rn, Condition cond);
236 inline void CzeroX(const Register& rd, Condition cond);
237 inline void CmovX(const Register& rd, const Register& rn, Condition cond);
238 inline void Cset(const Register& rd, Condition cond);
239 inline void Csetm(const Register& rd, Condition cond);
240 inline void Csinc(const Register& rd,
241 const Register& rn,
242 const Register& rm,
243 Condition cond);
244 inline void Csinv(const Register& rd,
245 const Register& rn,
246 const Register& rm,
247 Condition cond);
248 inline void Csneg(const Register& rd,
249 const Register& rn,
250 const Register& rm,
251 Condition cond);
252 inline void Dmb(BarrierDomain domain, BarrierType type);
253 inline void Dsb(BarrierDomain domain, BarrierType type);
254 inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
255 inline void Extr(const Register& rd,
256 const Register& rn,
257 const Register& rm,
258 unsigned lsb);
259 inline void Fabs(const FPRegister& fd, const FPRegister& fn);
260 inline void Fadd(const FPRegister& fd,
261 const FPRegister& fn,
262 const FPRegister& fm);
263 inline void Fccmp(const FPRegister& fn,
264 const FPRegister& fm,
265 StatusFlags nzcv,
266 Condition cond);
267 inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
268 inline void Fcmp(const FPRegister& fn, double value);
269 inline void Fcsel(const FPRegister& fd,
270 const FPRegister& fn,
271 const FPRegister& fm,
272 Condition cond);
273 inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
274 inline void Fcvtas(const Register& rd, const FPRegister& fn);
275 inline void Fcvtau(const Register& rd, const FPRegister& fn);
276 inline void Fcvtms(const Register& rd, const FPRegister& fn);
277 inline void Fcvtmu(const Register& rd, const FPRegister& fn);
278 inline void Fcvtns(const Register& rd, const FPRegister& fn);
279 inline void Fcvtnu(const Register& rd, const FPRegister& fn);
280 inline void Fcvtzs(const Register& rd, const FPRegister& fn);
281 inline void Fcvtzu(const Register& rd, const FPRegister& fn);
282 inline void Fdiv(const FPRegister& fd,
283 const FPRegister& fn,
284 const FPRegister& fm);
285 inline void Fmadd(const FPRegister& fd,
286 const FPRegister& fn,
287 const FPRegister& fm,
288 const FPRegister& fa);
289 inline void Fmax(const FPRegister& fd,
290 const FPRegister& fn,
291 const FPRegister& fm);
292 inline void Fmaxnm(const FPRegister& fd,
293 const FPRegister& fn,
294 const FPRegister& fm);
295 inline void Fmin(const FPRegister& fd,
296 const FPRegister& fn,
297 const FPRegister& fm);
298 inline void Fminnm(const FPRegister& fd,
299 const FPRegister& fn,
300 const FPRegister& fm);
301 inline void Fmov(FPRegister fd, FPRegister fn);
302 inline void Fmov(FPRegister fd, Register rn);
303 inline void Fmov(FPRegister fd, double imm);
304 inline void Fmov(Register rd, FPRegister fn);
305 inline void Fmsub(const FPRegister& fd,
306 const FPRegister& fn,
307 const FPRegister& fm,
308 const FPRegister& fa);
309 inline void Fmul(const FPRegister& fd,
310 const FPRegister& fn,
311 const FPRegister& fm);
312 inline void Fneg(const FPRegister& fd, const FPRegister& fn);
313 inline void Fnmadd(const FPRegister& fd,
314 const FPRegister& fn,
315 const FPRegister& fm,
316 const FPRegister& fa);
317 inline void Fnmsub(const FPRegister& fd,
318 const FPRegister& fn,
319 const FPRegister& fm,
320 const FPRegister& fa);
321 inline void Frinta(const FPRegister& fd, const FPRegister& fn);
322 inline void Frintn(const FPRegister& fd, const FPRegister& fn);
323 inline void Frintz(const FPRegister& fd, const FPRegister& fn);
324 inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
325 inline void Fsub(const FPRegister& fd,
326 const FPRegister& fn,
327 const FPRegister& fm);
328 inline void Hint(SystemHint code);
329 inline void Hlt(int code);
330 inline void Isb();
331 inline void Ldnp(const CPURegister& rt,
332 const CPURegister& rt2,
333 const MemOperand& src);
334 inline void Ldp(const CPURegister& rt,
335 const CPURegister& rt2,
336 const MemOperand& src);
337 inline void Ldpsw(const Register& rt,
338 const Register& rt2,
339 const MemOperand& src);
340 inline void Ldr(const FPRegister& ft, double imm);
341 inline void Ldr(const Register& rt, uint64_t imm);
342 inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
343 inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
344 inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
345 inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
346 inline void Madd(const Register& rd,
347 const Register& rn,
348 const Register& rm,
349 const Register& ra);
350 inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
351 inline void Mov(const Register& rd, const Register& rm);
352 inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
353 inline void Mrs(const Register& rt, SystemRegister sysreg);
354 inline void Msr(SystemRegister sysreg, const Register& rt);
355 inline void Msub(const Register& rd,
356 const Register& rn,
357 const Register& rm,
358 const Register& ra);
359 inline void Mul(const Register& rd, const Register& rn, const Register& rm);
360 inline void Nop() { nop(); }
361 inline void Rbit(const Register& rd, const Register& rn);
362 inline void Ret(const Register& xn = lr);
363 inline void Rev(const Register& rd, const Register& rn);
364 inline void Rev16(const Register& rd, const Register& rn);
365 inline void Rev32(const Register& rd, const Register& rn);
366 inline void Ror(const Register& rd, const Register& rs, unsigned shift);
367 inline void Ror(const Register& rd, const Register& rn, const Register& rm);
368 inline void Sbfiz(const Register& rd,
369 const Register& rn,
370 unsigned lsb,
371 unsigned width);
372 inline void Sbfx(const Register& rd,
373 const Register& rn,
374 unsigned lsb,
375 unsigned width);
376 inline void Scvtf(const FPRegister& fd,
377 const Register& rn,
378 unsigned fbits = 0);
379 inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
380 inline void Smaddl(const Register& rd,
381 const Register& rn,
382 const Register& rm,
383 const Register& ra);
384 inline void Smsubl(const Register& rd,
385 const Register& rn,
386 const Register& rm,
387 const Register& ra);
388 inline void Smull(const Register& rd,
389 const Register& rn,
390 const Register& rm);
391 inline void Smulh(const Register& rd,
392 const Register& rn,
393 const Register& rm);
394 inline void Stnp(const CPURegister& rt,
395 const CPURegister& rt2,
396 const MemOperand& dst);
397 inline void Stp(const CPURegister& rt,
398 const CPURegister& rt2,
399 const MemOperand& dst);
400 inline void Sxtb(const Register& rd, const Register& rn);
401 inline void Sxth(const Register& rd, const Register& rn);
402 inline void Sxtw(const Register& rd, const Register& rn);
403 inline void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
404 inline void Tbz(const Register& rt, unsigned bit_pos, Label* label);
405 inline void Ubfiz(const Register& rd,
406 const Register& rn,
407 unsigned lsb,
408 unsigned width);
409 inline void Ubfx(const Register& rd,
410 const Register& rn,
411 unsigned lsb,
412 unsigned width);
413 inline void Ucvtf(const FPRegister& fd,
414 const Register& rn,
415 unsigned fbits = 0);
416 inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
417 inline void Umaddl(const Register& rd,
418 const Register& rn,
419 const Register& rm,
420 const Register& ra);
421 inline void Umsubl(const Register& rd,
422 const Register& rn,
423 const Register& rm,
424 const Register& ra);
425 inline void Unreachable();
426 inline void Uxtb(const Register& rd, const Register& rn);
427 inline void Uxth(const Register& rd, const Register& rn);
428 inline void Uxtw(const Register& rd, const Register& rn);
429
430 // Pseudo-instructions ------------------------------------------------------
431
432 // Compute rd = abs(rm).
433 // This function clobbers the condition flags.
434 //
435 // If rm is the minimum representable value, the result is not representable.
436 // Handlers for each case can be specified using the relevant labels.
437 void Abs(const Register& rd, const Register& rm,
438 Label* is_not_representable = NULL,
439 Label* is_representable = NULL);
440
441 // Push or pop up to 4 registers of the same width to or from the stack,
442 // using the current stack pointer as set by SetStackPointer.
443 //
444 // If an argument register is 'NoReg', all further arguments are also assumed
445 // to be 'NoReg', and are thus not pushed or popped.
446 //
447 // Arguments are ordered such that "Push(a, b);" is functionally equivalent
448 // to "Push(a); Push(b);".
449 //
450 // It is valid to push the same register more than once, and there is no
451 // restriction on the order in which registers are specified.
452 //
453 // It is not valid to pop into the same register more than once in one
454 // operation, not even into the zero register.
455 //
456 // If the current stack pointer (as set by SetStackPointer) is csp, then it
457 // must be aligned to 16 bytes on entry and the total size of the specified
458 // registers must also be a multiple of 16 bytes.
459 //
460 // Even if the current stack pointer is not the system stack pointer (csp),
461 // Push (and derived methods) will still modify the system stack pointer in
462 // order to comply with ABI rules about accessing memory below the system
463 // stack pointer.
464 //
465 // Other than the registers passed into Pop, the stack pointer and (possibly)
466 // the system stack pointer, these methods do not modify any other registers.
467 // Scratch registers such as Tmp0() and Tmp1() are preserved.
468 void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
469 const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
470 void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
471 const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
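
A usage sketch under the ordering rules above (illustrative only):

    __ Push(x0, x1);  // Functionally equivalent to: __ Push(x0); __ Push(x1);
    __ Pop(x1, x0);   // Restores both values; no register appears twice.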
472
473 // Alternative forms of Push and Pop, taking a RegList or CPURegList that
474 // specifies the registers that are to be pushed or popped. Higher-numbered
475 // registers are associated with higher memory addresses (as in the A32 push
476 // and pop instructions).
477 //
478 // (Push|Pop)SizeRegList allow you to specify the register size as a
479 // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
480 // supported.
481 //
482 // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
483 void PushCPURegList(CPURegList registers);
484 void PopCPURegList(CPURegList registers);
485
486 inline void PushSizeRegList(RegList registers, unsigned reg_size,
487 CPURegister::RegisterType type = CPURegister::kRegister) {
488 PushCPURegList(CPURegList(type, reg_size, registers));
489 }
490 inline void PopSizeRegList(RegList registers, unsigned reg_size,
491 CPURegister::RegisterType type = CPURegister::kRegister) {
492 PopCPURegList(CPURegList(type, reg_size, registers));
493 }
494 inline void PushXRegList(RegList regs) {
495 PushSizeRegList(regs, kXRegSize);
496 }
497 inline void PopXRegList(RegList regs) {
498 PopSizeRegList(regs, kXRegSize);
499 }
500 inline void PushWRegList(RegList regs) {
501 PushSizeRegList(regs, kWRegSize);
502 }
503 inline void PopWRegList(RegList regs) {
504 PopSizeRegList(regs, kWRegSize);
505 }
506 inline void PushDRegList(RegList regs) {
507 PushSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
508 }
509 inline void PopDRegList(RegList regs) {
510 PopSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
511 }
512 inline void PushSRegList(RegList regs) {
513 PushSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
514 }
515 inline void PopSRegList(RegList regs) {
516 PopSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
517 }
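
For example, saving and restoring a few X registers in one call (illustrative; Bit() yields a register's RegList bit):

    __ PushXRegList(x19.Bit() | x20.Bit() | x21.Bit());
    // ... code that clobbers x19-x21 ...
    __ PopXRegList(x19.Bit() | x20.Bit() | x21.Bit());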
518
519 // Push the specified register 'count' times.
520 void PushMultipleTimes(int count, Register src);
521
522 // This is a convenience method for pushing a single Handle<Object>.
523 inline void Push(Handle<Object> handle);
524 void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
525
526 // Aliases of Push and Pop, required for V8 compatibility.
527 inline void push(Register src) {
528 Push(src);
529 }
530 inline void pop(Register dst) {
531 Pop(dst);
532 }
533
534 // Poke 'src' onto the stack. The offset is in bytes.
535 //
536 // If the current stack pointer (according to StackPointer()) is csp, then
537 // csp must be aligned to 16 bytes.
538 void Poke(const CPURegister& src, const Operand& offset);
539
540 // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
541 //
542 // If the current stack pointer (according to StackPointer()) is csp, then
543 // csp must be aligned to 16 bytes.
544 void Peek(const CPURegister& dst, const Operand& offset);
545
546 // Poke 'src1' and 'src2' onto the stack. The values written will be adjacent
547 // with 'src2' at a higher address than 'src1'. The offset is in bytes.
548 //
549 // If the current stack pointer (according to StackPointer()) is csp, then
550 // csp must be aligned to 16 bytes.
551 void PokePair(const CPURegister& src1, const CPURegister& src2, int offset);
552
553 // Peek at two values on the stack, and put them in 'dst1' and 'dst2'. The
554 // values peeked will be adjacent, with the value in 'dst2' being from a
555 // higher address than 'dst1'. The offset is in bytes.
556 //
557 // If the current stack pointer (according to StackPointer()) is csp, then
558 // csp must be aligned to 16 bytes.
559 void PeekPair(const CPURegister& dst1, const CPURegister& dst2, int offset);
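
A sketch of pairing Poke/Peek around clobbering code (illustrative; assumes the two slots were already claimed):

    __ PokePair(x0, x1, 0);  // x0 -> [StackPointer()], x1 -> [StackPointer() + 8].
    // ... code that clobbers x0 and x1 ...
    __ PeekPair(x0, x1, 0);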
560
561 // Claim or drop stack space without actually accessing memory.
562 //
563 // In debug mode, both of these will write invalid data into the claimed or
564 // dropped space.
565 //
566 // If the current stack pointer (according to StackPointer()) is csp, then it
567 // must be aligned to 16 bytes and the size claimed or dropped must be a
568 // multiple of 16 bytes.
569 //
570 // Note that unit_size must be specified in bytes. For variants which take a
571 // Register count, the unit size must be a power of two.
572 inline void Claim(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
573 inline void Claim(const Register& count,
574 uint64_t unit_size = kXRegSizeInBytes);
575 inline void Drop(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
576 inline void Drop(const Register& count,
577 uint64_t unit_size = kXRegSizeInBytes);
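
For example, with the default X-register unit size (illustrative):

    __ Claim(4);  // StackPointer() -= 4 * kXRegSizeInBytes (32 bytes).
    __ Drop(4);   // StackPointer() += 4 * kXRegSizeInBytes.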
578
579 // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
580 // register.
581 inline void ClaimBySMI(const Register& count_smi,
582 uint64_t unit_size = kXRegSizeInBytes);
583 inline void DropBySMI(const Register& count_smi,
584 uint64_t unit_size = kXRegSizeInBytes);
585
586 // Compare a register with an operand, and branch to label depending on the
587 // condition. May corrupt the status flags.
588 inline void CompareAndBranch(const Register& lhs,
589 const Operand& rhs,
590 Condition cond,
591 Label* label);
592
593 // Test the bits of register defined by bit_pattern, and branch if ANY of
594 // those bits are set. May corrupt the status flags.
595 inline void TestAndBranchIfAnySet(const Register& reg,
596 const uint64_t bit_pattern,
597 Label* label);
598
599 // Test the bits of register defined by bit_pattern, and branch if ALL of
600 // those bits are clear (i.e. not set). May corrupt the status flags.
601 inline void TestAndBranchIfAllClear(const Register& reg,
602 const uint64_t bit_pattern,
603 Label* label);
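
A usage sketch (illustrative; these typically lower to TBZ/TBNZ or TST plus a branch):

    __ TestAndBranchIfAnySet(x0, kSmiTagMask, &not_smi);   // Tag bit set: not a smi.
    __ TestAndBranchIfAllClear(x0, kSmiTagMask, &is_smi);  // Tag bit clear: a smi.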
604
605 // Insert one or more instructions into the instruction stream that encode
606 // some caller-defined data. The instructions used will be executable with no
607 // side effects.
608 inline void InlineData(uint64_t data);
609
610 // Insert an instrumentation enable marker into the instruction stream.
611 inline void EnableInstrumentation();
612
613 // Insert an instrumentation disable marker into the instruction stream.
614 inline void DisableInstrumentation();
615
616 // Insert an instrumentation event marker into the instruction stream. These
617 // will be picked up by the instrumentation system to annotate an instruction
618 // profile. The argument marker_name must be a printable two-character string;
619 // it will be encoded in the event marker.
620 inline void AnnotateInstrumentation(const char* marker_name);
621
622 // If emit_debug_code() is true, emit a run-time check to ensure that
623 // StackPointer() does not point below the system stack pointer.
624 //
625 // Whilst it is architecturally legal for StackPointer() to point below csp,
626 // it can be evidence of a potential bug because the ABI forbids accesses
627 // below csp.
628 //
629 // If emit_debug_code() is false, this emits no code.
630 //
631 // If StackPointer() is the system stack pointer, this emits no code.
632 void AssertStackConsistency();
633
634 // Preserve the callee-saved registers (as defined by AAPCS64).
635 //
636 // Higher-numbered registers are pushed before lower-numbered registers, and
637 // thus get higher addresses.
638 // Floating-point registers are pushed before general-purpose registers, and
639 // thus get higher addresses.
640 //
641 // Note that registers are not checked for invalid values. Use this method
642 // only if you know that the GC won't try to examine the values on the stack.
643 //
644 // This method must not be called unless the current stack pointer (as set by
645 // SetStackPointer) is the system stack pointer (csp), and is aligned to
646 // ActivationFrameAlignment().
647 void PushCalleeSavedRegisters();
648
649 // Restore the callee-saved registers (as defined by AAPCS64).
650 //
651 // Higher-numbered registers are popped after lower-numbered registers, and
652 // thus come from higher addresses.
653 // Floating-point registers are popped after general-purpose registers, and
654 // thus come from higher addresses.
655 //
656 // This method must not be called unless the current stack pointer (as set by
657 // SetStackPointer) is the system stack pointer (csp), and is aligned to
658 // ActivationFrameAlignment().
659 void PopCalleeSavedRegisters();
660
661 // Set the current stack pointer, but don't generate any code.
662 inline void SetStackPointer(const Register& stack_pointer) {
663 ASSERT(!AreAliased(stack_pointer, Tmp0(), Tmp1()));
664 sp_ = stack_pointer;
665 }
666
667 // Return the current stack pointer, as set by SetStackPointer.
668 inline const Register& StackPointer() const {
669 return sp_;
670 }
671
672 // Align csp for a frame, as per ActivationFrameAlignment, and make it the
673 // current stack pointer.
674 inline void AlignAndSetCSPForFrame() {
675 int sp_alignment = ActivationFrameAlignment();
676 // AAPCS64 mandates at least 16-byte alignment.
677 ASSERT(sp_alignment >= 16);
678 ASSERT(IsPowerOf2(sp_alignment));
679 Bic(csp, StackPointer(), sp_alignment - 1);
680 SetStackPointer(csp);
681 }
682
683 // Push the system stack pointer (csp) down to allow the same to be done to
684 // the current stack pointer (according to StackPointer()). This must be
685 // called _before_ accessing the memory.
686 //
687 // This is necessary when pushing or otherwise adding things to the stack, to
688 // satisfy the AAPCS64 constraint that the memory below the system stack
689 // pointer is not accessed.
690 //
691 // This method asserts that StackPointer() is not csp, since the call does
692 // not make sense in that context.
693 //
694 // TODO(jbramley): Currently, this method can only accept values of 'space'
695 // that can be encoded in one instruction. Refer to the implementation for
696 // details.
697 inline void BumpSystemStackPointer(const Operand& space);
698
699 // Helpers ------------------------------------------------------------------
700 // Root register.
701 inline void InitializeRootRegister();
702
703 // Load an object from the root table.
704 void LoadRoot(Register destination,
705 Heap::RootListIndex index);
706 // Store an object to the root table.
707 void StoreRoot(Register source,
708 Heap::RootListIndex index);
709
710 // Load both TrueValue and FalseValue roots.
711 void LoadTrueFalseRoots(Register true_root, Register false_root);
712
713 void LoadHeapObject(Register dst, Handle<HeapObject> object);
714
715 void LoadObject(Register result, Handle<Object> object) {
716 AllowDeferredHandleDereference heap_object_check;
717 if (object->IsHeapObject()) {
718 LoadHeapObject(result, Handle<HeapObject>::cast(object));
719 } else {
720 ASSERT(object->IsSmi());
721 Mov(result, Operand(object));
722 }
723 }
724
725 static int SafepointRegisterStackIndex(int reg_code);
726
727 // This is required for compatibility with architecture-independent code.
728 // Remove if not needed.
729 inline void Move(Register dst, Register src) { Mov(dst, src); }
730
731 void LoadInstanceDescriptors(Register map,
732 Register descriptors);
733 void EnumLengthUntagged(Register dst, Register map);
734 void EnumLengthSmi(Register dst, Register map);
735 void NumberOfOwnDescriptors(Register dst, Register map);
736
737 template<typename Field>
738 void DecodeField(Register reg) {
739 static const uint64_t shift = Field::kShift + kSmiShift;
740 static const uint64_t setbits = CountSetBits(Field::kMask, 32);
741 Ubfx(reg, reg, shift, setbits);
742 }
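
A usage sketch (illustrative; Field is any BitField<> instantiation, and the register value is expected to be smi-tagged since kSmiShift is folded into the extract):

    // Extract Map::EnumLengthBits from the smi-tagged bit field in x3, in place.
    __ DecodeField<Map::EnumLengthBits>(x3);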
743
744 // ---- SMI and Number Utilities ----
745
746 inline void SmiTag(Register dst, Register src);
747 inline void SmiTag(Register smi);
748 inline void SmiUntag(Register dst, Register src);
749 inline void SmiUntag(Register smi);
750 inline void SmiUntagToDouble(FPRegister dst,
751 Register src,
752 UntagMode mode = kNotSpeculativeUntag);
753 inline void SmiUntagToFloat(FPRegister dst,
754 Register src,
755 UntagMode mode = kNotSpeculativeUntag);
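
On A64, V8 keeps smis in the upper 32 bits of the tagged word (kSmiShift is 32), so these are single-shift operations; a sketch (illustrative):

    __ SmiTag(x0, x1);            // x0 = x1 << kSmiShift
    __ SmiUntag(x1, x0);          // x1 = x0 >> kSmiShift (arithmetic shift)
    __ SmiUntagToDouble(d0, x0);  // Untag and convert to double in one macro.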
756
757 // Compute the absolute value of 'smi' and leave the result in 'smi'
758 // register. If 'smi' is the most negative SMI, the absolute value cannot
759 // be represented as a SMI and a jump to 'slow' is done.
760 void SmiAbs(const Register& smi, Label* slow);
761
762 inline void JumpIfSmi(Register value,
763 Label* smi_label,
764 Label* not_smi_label = NULL);
765 inline void JumpIfNotSmi(Register value, Label* not_smi_label);
766 inline void JumpIfBothSmi(Register value1,
767 Register value2,
768 Label* both_smi_label,
769 Label* not_smi_label = NULL);
770 inline void JumpIfEitherSmi(Register value1,
771 Register value2,
772 Label* either_smi_label,
773 Label* not_smi_label = NULL);
774 inline void JumpIfEitherNotSmi(Register value1,
775 Register value2,
776 Label* not_smi_label);
777 inline void JumpIfBothNotSmi(Register value1,
778 Register value2,
779 Label* not_smi_label);
780
781 // Abort execution if argument is a smi, enabled via --debug-code.
782 void AssertNotSmi(Register object, BailoutReason reason = kOperandIsASmi);
783 void AssertSmi(Register object, BailoutReason reason = kOperandIsNotASmi);
784
785 // Abort execution if argument is not a name, enabled via --debug-code.
786 void AssertName(Register object);
787
788 // Abort execution if argument is not a string, enabled via --debug-code.
789 void AssertString(Register object);
790
791 void JumpForHeapNumber(Register object,
792 Register heap_number_map,
793 Label* on_heap_number,
794 Label* on_not_heap_number = NULL);
795 void JumpIfHeapNumber(Register object,
796 Label* on_heap_number,
797 Register heap_number_map = NoReg);
798 void JumpIfNotHeapNumber(Register object,
799 Label* on_not_heap_number,
800 Register heap_number_map = NoReg);
801
802 // Jump to label if the input double register contains -0.0.
803 void JumpIfMinusZero(DoubleRegister input, Label* on_negative_zero);
804
805 // Generate code to do a lookup in the number string cache. If the number in
806 // the register object is found in the cache the generated code falls through
807 // with the result in the result register. The object and the result register
808 // can be the same. If the number is not found in the cache the code jumps to
809 // the label not_found, leaving only the contents of the object register unchanged.
810 void LookupNumberStringCache(Register object,
811 Register result,
812 Register scratch1,
813 Register scratch2,
814 Register scratch3,
815 Label* not_found);
816
817 // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
818 // output.
819 void ClampInt32ToUint8(Register in_out);
820 void ClampInt32ToUint8(Register output, Register input);
821
822 // Saturate a double in input to an unsigned 8-bit integer in output.
823 void ClampDoubleToUint8(Register output,
824 DoubleRegister input,
825 DoubleRegister dbl_scratch);
826
827 // Try to convert a double to a signed 32-bit int.
828 // This succeeds if the result compares equal to the input, so inputs of -0.0
829 // are converted to 0 and handled as a success.
830 void TryConvertDoubleToInt32(Register as_int,
831 FPRegister value,
832 FPRegister scratch_d,
833 Label* on_successful_conversion,
834 Label* on_failed_conversion = NULL) {
835 ASSERT(as_int.Is32Bits());
836 TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
837 on_failed_conversion);
838 }
839
840 // Try to convert a double to a signed 64-bit int.
841 // This succeeds if the result compares equal to the input, so inputs of -0.0
842 // are converted to 0 and handled as a success.
843 void TryConvertDoubleToInt64(Register as_int,
844 FPRegister value,
845 FPRegister scratch_d,
846 Label* on_successful_conversion,
847 Label* on_failed_conversion = NULL) {
848 ASSERT(as_int.Is64Bits());
849 TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
850 on_failed_conversion);
851 }
852
853 // ---- Object Utilities ----
854
855 // Copy fields from 'src' to 'dst', where both are tagged objects.
856 // The 'temps' list is a list of X registers which can be used for scratch
857 // values. The temps list must include at least one register, and it must not
858 // contain Tmp0() or Tmp1().
859 //
860 // Currently, CopyFields cannot make use of more than three registers from
861 // the 'temps' list.
862 //
863 // As with several MacroAssembler methods, Tmp0() and Tmp1() will be used.
864 void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
865
866 // Copies a number of bytes from src to dst. All passed registers are
867 // clobbered. On exit src and dst will point to the place just after where the
868 // last byte was read or written and length will be zero. Hint may be used to
869 // determine which is the most efficient algorithm to use for copying.
870 void CopyBytes(Register dst,
871 Register src,
872 Register length,
873 Register scratch,
874 CopyHint hint = kCopyUnknown);
875
876 // Initialize fields with filler values. Fields starting at start_offset not
877 // including end_offset are overwritten with the value in filler. At the end
878 // of the loop, start_offset takes the value of end_offset.
879 void InitializeFieldsWithFiller(Register start_offset,
880 Register end_offset,
881 Register filler);
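
A sketch of the usual post-allocation pattern (illustrative; x10-x12 stand in for caller-chosen registers, and x0/x1 are assumed to hold the object start and size):

    __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);  // The filler value.
    __ Add(x11, x0, JSObject::kHeaderSize);            // First field to fill.
    __ Add(x12, x0, x1);                               // End of the object.
    __ InitializeFieldsWithFiller(x11, x12, x10);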
882
883 // ---- String Utilities ----
884
885
886 // Jump to label if either object is not a sequential ASCII string.
887 // Optionally perform a smi check on the objects first.
888 void JumpIfEitherIsNotSequentialAsciiStrings(
889 Register first,
890 Register second,
891 Register scratch1,
892 Register scratch2,
893 Label* failure,
894 SmiCheckType smi_check = DO_SMI_CHECK);
895
896 // Check if instance type is sequential ASCII string and jump to label if
897 // it is not.
898 void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
899 Register scratch,
900 Label* failure);
901
902 // Checks if both instance types are sequential ASCII strings and jumps to
903 // label if either is not.
904 void JumpIfEitherInstanceTypeIsNotSequentialAscii(
905 Register first_object_instance_type,
906 Register second_object_instance_type,
907 Register scratch1,
908 Register scratch2,
909 Label* failure);
910
911 // Checks if both instance types are sequential ASCII strings and jumps to
912 // label if either is not.
913 void JumpIfBothInstanceTypesAreNotSequentialAscii(
914 Register first_object_instance_type,
915 Register second_object_instance_type,
916 Register scratch1,
917 Register scratch2,
918 Label* failure);
919
920 void JumpIfNotUniqueName(Register type, Label* not_unique_name);
921
922 // ---- Calling / Jumping helpers ----
923
924 // This is required for compatibility with architecture-independent code.
925 inline void jmp(Label* L) { B(L); }
926
927 // Passes the thrown value to the handler at the top of the try handler chain.
928 // Register value must be x0.
929 void Throw(Register value,
930 Register scratch1,
931 Register scratch2,
932 Register scratch3,
933 Register scratch4);
934
935 // Propagates an uncatchable exception to the top of the current JS stack's
936 // handler chain. Register value must be x0.
937 void ThrowUncatchable(Register value,
938 Register scratch1,
939 Register scratch2,
940 Register scratch3,
941 Register scratch4);
942
943 // Throw a message string as an exception.
944 void Throw(BailoutReason reason);
945
946 // Throw a message string as an exception if a condition is not true.
947 void ThrowIf(Condition cc, BailoutReason reason);
948
949 // Throw a message string as an exception if the value is a smi.
950 void ThrowIfSmi(const Register& value, BailoutReason reason);
951
952 void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
953 void TailCallStub(CodeStub* stub);
954
955 void CallRuntime(const Runtime::Function* f,
956 int num_arguments,
957 SaveFPRegsMode save_doubles = kDontSaveFPRegs);
958
959 void CallRuntime(Runtime::FunctionId id,
960 int num_arguments,
961 SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
962 CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
963 }
964
965 // TODO(all): Why does this variant save FP regs unconditionally?
966 void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
967 const Runtime::Function* function = Runtime::FunctionForId(id);
968 CallRuntime(function, function->nargs, kSaveFPRegs);
969 }
970
971 void TailCallRuntime(Runtime::FunctionId fid,
972 int num_arguments,
973 int result_size);
974
975 int ActivationFrameAlignment();
976
977 // Calls a C function.
978 // The called function is not allowed to trigger a
979 // garbage collection, since that might move the code and invalidate the
980 // return address (unless this is somehow accounted for by the called
981 // function).
982 void CallCFunction(ExternalReference function,
983 int num_reg_arguments);
984 void CallCFunction(ExternalReference function,
985 int num_reg_arguments,
986 int num_double_arguments);
987 void CallCFunction(Register function,
988 int num_reg_arguments,
989 int num_double_arguments);
990
991 // Calls an API function. Allocates HandleScope, extracts returned value
992 // from handle and propagates exceptions.
993 // 'stack_space' is the space to be unwound on exit (includes the call JS
994 // arguments space and the additional space allocated for the fast call).
995 // 'spill_offset' is the offset from the stack pointer where
996 // CallApiFunctionAndReturn can spill registers.
997 void CallApiFunctionAndReturn(Register function_address,
998 ExternalReference thunk_ref,
999 int stack_space,
1000 int spill_offset,
1001 MemOperand return_value_operand,
1002 MemOperand* context_restore_operand);
1003
1004 // The number of registers that CallApiFunctionAndReturn will need to save on
1005 // the stack. The space for these registers needs to be allocated in the
1006 // ExitFrame before calling CallApiFunctionAndReturn.
1007 static const int kCallApiFunctionSpillSpace = 4;
1008
1009 // Jump to a runtime routine.
1010 void JumpToExternalReference(const ExternalReference& builtin);
1011 // Tail call of a runtime routine (jump).
1012 // Like JumpToExternalReference, but also takes care of passing the number
1013 // of parameters.
1014 void TailCallExternalReference(const ExternalReference& ext,
1015 int num_arguments,
1016 int result_size);
1017 void CallExternalReference(const ExternalReference& ext,
1018 int num_arguments);
1019
1020
1021 // Invoke specified builtin JavaScript function. Adds an entry to
1022 // the unresolved list if the name does not resolve.
1023 void InvokeBuiltin(Builtins::JavaScript id,
1024 InvokeFlag flag,
1025 const CallWrapper& call_wrapper = NullCallWrapper());
1026
1027 // Store the code object for the given builtin in the target register and
1028 // set up the function in x1.
1029 // TODO(all): Can we use another register than x1?
1030 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
1031
1032 // Store the function for the given builtin in the target register.
1033 void GetBuiltinFunction(Register target, Builtins::JavaScript id);
1034
1035 void Jump(Register target);
1036 void Jump(Address target, RelocInfo::Mode rmode);
1037 void Jump(Handle<Code> code, RelocInfo::Mode rmode);
1038 void Jump(intptr_t target, RelocInfo::Mode rmode);
1039
1040 void Call(Register target);
1041 void Call(Label* target);
1042 void Call(Address target, RelocInfo::Mode rmode);
1043 void Call(Handle<Code> code,
1044 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
1045 TypeFeedbackId ast_id = TypeFeedbackId::None());
1046
1047 // For every Call variant, there is a matching CallSize function that returns
1048 // the size (in bytes) of the call sequence.
1049 static int CallSize(Register target);
1050 static int CallSize(Label* target);
1051 static int CallSize(Address target, RelocInfo::Mode rmode);
1052 static int CallSize(Handle<Code> code,
1053 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
1054 TypeFeedbackId ast_id = TypeFeedbackId::None());
1055
1056 // Registers used through the invocation chain are hard-coded.
1057 // We force passing the parameters to ensure the contracts are correctly
1058 // honoured by the caller.
1059 // 'function' must be x1.
1060 // 'actual' must use an immediate or x0.
1061 // 'expected' must use an immediate or x2.
1062 // 'call_kind' must be x5.
1063 void InvokePrologue(const ParameterCount& expected,
1064 const ParameterCount& actual,
1065 Handle<Code> code_constant,
1066 Register code_reg,
1067 Label* done,
1068 InvokeFlag flag,
1069 bool* definitely_mismatches,
1070 const CallWrapper& call_wrapper);
1071 void InvokeCode(Register code,
1072 const ParameterCount& expected,
1073 const ParameterCount& actual,
1074 InvokeFlag flag,
1075 const CallWrapper& call_wrapper);
1076 // Invoke the JavaScript function in the given register.
1077 // Changes the current context to the context in the function before invoking.
1078 void InvokeFunction(Register function,
1079 const ParameterCount& actual,
1080 InvokeFlag flag,
1081 const CallWrapper& call_wrapper);
1082 void InvokeFunction(Register function,
1083 const ParameterCount& expected,
1084 const ParameterCount& actual,
1085 InvokeFlag flag,
1086 const CallWrapper& call_wrapper);
1087 void InvokeFunction(Handle<JSFunction> function,
1088 const ParameterCount& expected,
1089 const ParameterCount& actual,
1090 InvokeFlag flag,
1091 const CallWrapper& call_wrapper);
1092
1093
1094 // ---- Floating point helpers ----
1095
1096 enum ECMA262ToInt32Result {
1097 // Provide an untagged int32_t which can be read using result.W(). That is,
1098 // the upper 32 bits of result are undefined.
1099 INT32_IN_W,
1100
1101 // Provide an untagged int32_t which can be read using the 64-bit result
1102 // register. The int32_t result is sign-extended.
1103 INT32_IN_X,
1104
1105 // Tag the int32_t result as a smi.
1106 SMI
1107 };
1108
1109 // Applies ECMA-262 ToInt32 (see section 9.5) to a double value.
1110 void ECMA262ToInt32(Register result,
1111 DoubleRegister input,
1112 Register scratch1,
1113 Register scratch2,
1114 ECMA262ToInt32Result format = INT32_IN_X);
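
A usage sketch of the three output formats (illustrative):

    __ ECMA262ToInt32(x0, d0, x10, x11, MacroAssembler::INT32_IN_W);  // Read via w0.
    __ ECMA262ToInt32(x0, d0, x10, x11, MacroAssembler::INT32_IN_X);  // Sign-extended.
    __ ECMA262ToInt32(x0, d0, x10, x11, MacroAssembler::SMI);         // Tagged smi.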
1115
1116 // As ECMA262ToInt32, but operate on a HeapNumber.
1117 void HeapNumberECMA262ToInt32(Register result,
1118 Register heap_number,
1119 Register scratch1,
1120 Register scratch2,
1121 DoubleRegister double_scratch,
1122 ECMA262ToInt32Result format = INT32_IN_X);
1123
1124 // ---- Code generation helpers ----
1125
1126 void set_generating_stub(bool value) { generating_stub_ = value; }
1127 bool generating_stub() const { return generating_stub_; }
1128 #if DEBUG
1129 void set_allow_macro_instructions(bool value) {
1130 allow_macro_instructions_ = value;
1131 }
1132 bool allow_macro_instructions() const { return allow_macro_instructions_; }
1133 #endif
1134 bool use_real_aborts() const { return use_real_aborts_; }
1135 void set_has_frame(bool value) { has_frame_ = value; }
1136 bool has_frame() const { return has_frame_; }
1137 bool AllowThisStubCall(CodeStub* stub);
1138
1139 class NoUseRealAbortsScope {
1140 public:
1141 explicit NoUseRealAbortsScope(MacroAssembler* masm) :
1142 saved_(masm->use_real_aborts_), masm_(masm) {
1143 masm_->use_real_aborts_ = false;
1144 }
1145 ~NoUseRealAbortsScope() {
1146 masm_->use_real_aborts_ = saved_;
1147 }
1148 private:
1149 bool saved_;
1150 MacroAssembler* masm_;
1151 };
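
A usage sketch (illustrative): the scope clears use_real_aborts_ for the emitted region and restores the previous value on destruction.

    {
      MacroAssembler::NoUseRealAbortsScope no_real_aborts(masm);
      // Code emitted here takes the debug abort path instead of a real abort.
    }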
1152
1153 #ifdef ENABLE_DEBUGGER_SUPPORT
1154 // ---------------------------------------------------------------------------
1155 // Debugger Support
1156
1157 void DebugBreak();
1158 #endif
1159 // ---------------------------------------------------------------------------
1160 // Exception handling
1161
1162 // Push a new try handler and link into try handler chain.
1163 void PushTryHandler(StackHandler::Kind kind, int handler_index);
1164
1165 // Unlink the stack handler on top of the stack from the try handler chain.
1166 // Must preserve the result register.
1167 void PopTryHandler();
1168
1169
1170 // ---------------------------------------------------------------------------
1171 // Allocation support
1172
1173 // Allocate an object in new space or old pointer space. The object_size is
1174 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
1175 // is passed. The allocated object is returned in result.
1176 //
1177 // If the new space is exhausted control continues at the gc_required label.
1178 // In this case, the result and scratch registers may still be clobbered.
1179 // If flags includes TAG_OBJECT, the result is tagged as a heap object.
1180 void Allocate(Register object_size,
1181 Register result,
1182 Register scratch1,
1183 Register scratch2,
1184 Label* gc_required,
1185 AllocationFlags flags);
1186
1187 void Allocate(int object_size,
1188 Register result,
1189 Register scratch1,
1190 Register scratch2,
1191 Label* gc_required,
1192 AllocationFlags flags);
1193
1194 // Undo allocation in new space. The object passed and objects allocated after
1195 // it will no longer be allocated. The caller must make sure that no pointers
1196 // are left to the object(s) no longer allocated as they would be invalid when
1197 // allocation is undone.
1198 void UndoAllocationInNewSpace(Register object, Register scratch);
1199
1200 void AllocateTwoByteString(Register result,
1201 Register length,
1202 Register scratch1,
1203 Register scratch2,
1204 Register scratch3,
1205 Label* gc_required);
1206 void AllocateAsciiString(Register result,
1207 Register length,
1208 Register scratch1,
1209 Register scratch2,
1210 Register scratch3,
1211 Label* gc_required);
1212 void AllocateTwoByteConsString(Register result,
1213 Register length,
1214 Register scratch1,
1215 Register scratch2,
1216 Label* gc_required);
1217 void AllocateAsciiConsString(Register result,
1218 Register length,
1219 Register scratch1,
1220 Register scratch2,
1221 Label* gc_required);
1222 void AllocateTwoByteSlicedString(Register result,
1223 Register length,
1224 Register scratch1,
1225 Register scratch2,
1226 Label* gc_required);
1227 void AllocateAsciiSlicedString(Register result,
1228 Register length,
1229 Register scratch1,
1230 Register scratch2,
1231 Label* gc_required);
1232
1233 // Allocates a heap number or jumps to the gc_required label if the young
1234 // space is full and a scavenge is needed.
1235 // All registers are clobbered.
1236 // If no heap_number_map register is provided, the function will take care of
1237 // loading it.
1238 void AllocateHeapNumber(Register result,
1239 Label* gc_required,
1240 Register scratch1,
1241 Register scratch2,
1242 Register heap_number_map = NoReg);
1243 void AllocateHeapNumberWithValue(Register result,
1244 DoubleRegister value,
1245 Label* gc_required,
1246 Register scratch1,
1247 Register scratch2,
1248 Register heap_number_map = NoReg);
1249
1250 // ---------------------------------------------------------------------------
1251 // Support functions.
1252
1253 // Try to get the function prototype of a function and put the value in the
1254 // result register. Checks that the function really is a function and jumps
1255 // to the miss label if the fast checks fail. The function register will be
1256 // untouched; the other registers may be clobbered.
1257 enum BoundFunctionAction {
1258 kMissOnBoundFunction,
1259 kDontMissOnBoundFunction
1260 };
1261
1262 void TryGetFunctionPrototype(Register function,
1263 Register result,
1264 Register scratch,
1265 Label* miss,
1266 BoundFunctionAction action =
1267 kDontMissOnBoundFunction);
1268
1269 // Compare object type for heap object. heap_object contains a non-Smi
1270 // whose object type should be compared with the given type. This both
1271 // sets the flags and leaves the object type in the type_reg register.
1272 // It leaves the map in the map register (unless the type_reg and map register
1273 // are the same register). It leaves the heap object in the heap_object
1274 // register unless the heap_object register is the same register as one of the
1275 // other registers.
1276 void CompareObjectType(Register heap_object,
1277 Register map,
1278 Register type_reg,
1279 InstanceType type);
1280
1281
1282 // Compare object type for heap object, and branch if equal (or not.)
1283 // heap_object contains a non-Smi whose object type should be compared with
1284 // the given type. This both sets the flags and leaves the object type in
1285 // the type_reg register. It leaves the map in the map register (unless the
1286 // type_reg and map register are the same register). It leaves the heap
1287 // object in the heap_object register unless the heap_object register is the
1288 // same register as one of the other registers.
1289 void JumpIfObjectType(Register object,
1290 Register map,
1291 Register type_reg,
1292 InstanceType type,
1293 Label* if_cond_pass,
1294 Condition cond = eq);
1295
1296 void JumpIfNotObjectType(Register object,
1297 Register map,
1298 Register type_reg,
1299 InstanceType type,
1300 Label* if_not_object);
1301
1302 // Compare instance type in a map. map contains a valid map object whose
1303 // object type should be compared with the given type. This both
1304 // sets the flags and leaves the object type in the type_reg register.
1305 void CompareInstanceType(Register map,
1306 Register type_reg,
1307 InstanceType type);
1308
1309 // Compare an object's map with the specified map and its transitioned
1310 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
1311 // set with the result of the map compare. If multiple map compares are
1312 // required, the compare sequence branches to early_success.
1313 void CompareMap(Register obj,
1314 Register scratch,
1315 Handle<Map> map,
1316 Label* early_success = NULL);
1317
1318 // As above, but the map of the object is already loaded into the register
1319 // which is preserved by the code generated.
1320 void CompareMap(Register obj_map,
1321 Handle<Map> map,
1322 Label* early_success = NULL);
1323
1324 // Check if the map of an object is equal to a specified map and branch to
1325 // label if not. Skip the smi check if not required (object is known to be a
1326 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
1327 // against maps that are ElementsKind transition maps of the specified map.
1328 void CheckMap(Register obj,
1329 Register scratch,
1330 Handle<Map> map,
1331 Label* fail,
1332 SmiCheckType smi_check_type);
1333
1334
1335 void CheckMap(Register obj,
1336 Register scratch,
1337 Heap::RootListIndex index,
1338 Label* fail,
1339 SmiCheckType smi_check_type);
1340
1341 // As above, but the map of the object is already loaded into obj_map, and is
1342 // preserved.
1343 void CheckMap(Register obj_map,
1344 Handle<Map> map,
1345 Label* fail,
1346 SmiCheckType smi_check_type);
1347
1348 // Check if the map of an object is equal to a specified map and branch to a
1349 // specified target if equal. Skip the smi check if not required (object is
1350 // known to be a heap object)
1351 void DispatchMap(Register obj,
1352 Register scratch,
1353 Handle<Map> map,
1354 Handle<Code> success,
1355 SmiCheckType smi_check_type);
1356
1357 // Test the bitfield of the heap object map with mask and set the condition
1358 // flags. The object register is preserved.
1359 void TestMapBitfield(Register object, uint64_t mask);
1360
1361 // Load the elements kind field of an object, and return it in the result
1362 // register.
1363 void LoadElementsKind(Register result, Register object);
1364
1365 // Compare the object in a register to a value from the root list.
1366 // Uses the Tmp0() register as scratch.
1367 void CompareRoot(const Register& obj, Heap::RootListIndex index);
1368
1369 // Compare the object in a register to a value and jump if they are equal.
1370 void JumpIfRoot(const Register& obj,
1371 Heap::RootListIndex index,
1372 Label* if_equal);
1373
1374 // Compare the object in a register to a value and jump if they are not equal.
1375 void JumpIfNotRoot(const Register& obj,
1376 Heap::RootListIndex index,
1377 Label* if_not_equal);
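
For example (illustrative):

    __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &is_undefined);
    __ JumpIfNotRoot(x0, Heap::kTrueValueRootIndex, &not_true);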
1378
1379 // Load and check the instance type of an object for being a unique name.
1380 // Loads the type into the second argument register.
1381 // The object and type arguments can be the same register; in that case it
1382 // will be overwritten with the type.
1383 // Falls through if the object was a name, and jumps to fail otherwise.
1384 inline void IsObjectNameType(Register object, Register type, Label* fail);
1385
1386 inline void IsObjectJSObjectType(Register heap_object,
1387 Register map,
1388 Register scratch,
1389 Label* fail);
1390
1391 // Check the instance type in the given map to see if it corresponds to a
1392 // JS object type. Jump to the fail label if this is not the case and fall
1393 // through otherwise. However, if the fail label is NULL, no branch will be
1394 // performed and the flags will be updated. You can then test for the "le"
1395 // condition to check whether the type is a valid JS object type.
1396 inline void IsInstanceJSObjectType(Register map,
1397 Register scratch,
1398 Label* fail);
1399
1400 // Load and check the instance type of an object for being a string.
1401 // Loads the type into the second argument register.
1402 // The object and type arguments can be the same register; in that case it
1403 // will be overwritten with the type.
1404 // Jumps to not_string or string as appropriate. If the appropriate label is
1405 // NULL, fall through.
1406 inline void IsObjectJSStringType(Register object, Register type,
1407 Label* not_string, Label* string = NULL);
1408
1409 // Compare the contents of a register with an operand, and branch to true,
1410 // false or fall through, depending on condition.
1411 void CompareAndSplit(const Register& lhs,
1412 const Operand& rhs,
1413 Condition cond,
1414 Label* if_true,
1415 Label* if_false,
1416 Label* fall_through);
1417
1418 // Test the bits of the register selected by bit_pattern, and branch to
1419 // if_any_set, if_all_clear or fall_through accordingly.
1420 void TestAndSplit(const Register& reg,
1421 uint64_t bit_pattern,
1422 Label* if_all_clear,
1423 Label* if_any_set,
1424 Label* fall_through);
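
// A minimal usage sketch (not part of this interface): assuming that the
// 'fall_through' label is the one bound immediately after the call, passing
// it for one of the branch targets makes that case fall through. Register,
// bit and label choices below are invented for illustration.
//
//   Label if_set, fall_through;
//   __ TestAndSplit(x0, 1 << 3, &fall_through, &if_set, &fall_through);
//   __ Bind(&fall_through);  // Reached when bit 3 of x0 is clear.
//   ...
//   __ Bind(&if_set);        // Reached when bit 3 of x0 is set.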
1425
1426 // Check if a map for a JSObject indicates that the object has fast elements.
1427 // Jump to the specified label if it does not.
1428 void CheckFastElements(Register map,
1429 Register scratch,
1430 Label* fail);
1431
1432 // Check if a map for a JSObject indicates that the object can have both smi
1433 // and HeapObject elements. Jump to the specified label if it does not.
1434 void CheckFastObjectElements(Register map,
1435 Register scratch,
1436 Label* fail);
1437
1438 // Check if a map for a JSObject indicates that the object has fast smi only
1439 // elements. Jump to the specified label if it does not.
1440 void CheckFastSmiElements(Register map, Register scratch, Label* fail);
1441
1442 // Check to see if number can be stored as a double in FastDoubleElements.
1443 // If it can, store it at the index specified by key_reg in the array,
1444 // otherwise jump to fail.
1445 void StoreNumberToDoubleElements(Register value_reg,
1446 Register key_reg,
1447 Register elements_reg,
1448 Register scratch1,
1449 FPRegister fpscratch1,
1450 FPRegister fpscratch2,
1451 Label* fail,
1452 int elements_offset = 0);
1453
1454 // Picks out an array index from the hash field.
1455 // Register use:
1456 // hash - holds the index's hash. Clobbered.
1457 // index - holds the overwritten index on exit.
1458 void IndexFromHash(Register hash, Register index);
1459
1460 // ---------------------------------------------------------------------------
1461 // Inline caching support.
1462
1463 void EmitSeqStringSetCharCheck(Register string,
1464 Register index,
1465 SeqStringSetCharCheckIndexType index_type,
1466 Register scratch,
1467 uint32_t encoding_mask);
1468
1469 // Generate code for checking access rights - used for security checks
1470 // on access to global objects across environments. The holder register
1471 // is left untouched, whereas both scratch registers are clobbered.
1472 void CheckAccessGlobalProxy(Register holder_reg,
1473 Register scratch,
1474 Label* miss);
1475
1476 // Hash the integer value in the 'key' register.
1477 // It uses the same algorithm as ComputeIntegerHash in utils.h.
1478 void GetNumberHash(Register key, Register scratch);
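
// For reference, a sketch of the hash being matched, following the
// ComputeIntegerHash bit-mixing sequence used elsewhere in V8 (the seed
// handling shown here is an assumption):
//
//   uint32_t hash = key ^ seed;
//   hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
//   hash = hash ^ (hash >> 12);
//   hash = hash + (hash << 2);
//   hash = hash ^ (hash >> 4);
//   hash = hash * 2057;           // hash += (hash << 3) + (hash << 11);
//   hash = hash ^ (hash >> 16);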
1479
1480 // Load value from the dictionary.
1481 //
1482 // elements - holds the slow-case elements of the receiver on entry.
1483 // Unchanged unless 'result' is the same register.
1484 //
1485 // key - holds the smi key on entry.
1486 // Unchanged unless 'result' is the same register.
1487 //
1488 // result - holds the result on exit if the load succeeded.
1489 // Allowed to be the same as 'key' or 'elements'.
1490 // Unchanged on bailout so 'key' or 'result' can be used
1491 // in further computation.
1492 void LoadFromNumberDictionary(Label* miss,
1493 Register elements,
1494 Register key,
1495 Register result,
1496 Register scratch0,
1497 Register scratch1,
1498 Register scratch2,
1499 Register scratch3);
1500
1501 // ---------------------------------------------------------------------------
1502 // Frames.
1503
1504 // Activation support.
1505 // Note that Tmp0() and Tmp1() are used as scratch registers. This is safe
1506 // because these methods are not used in Crankshaft.
1507 void EnterFrame(StackFrame::Type type);
1508 void LeaveFrame(StackFrame::Type type);
1509
1510 // Returns map with validated enum cache in object register.
1511 void CheckEnumCache(Register object,
1512 Register null_value,
1513 Register scratch0,
1514 Register scratch1,
1515 Register scratch2,
1516 Register scratch3,
1517 Label* call_runtime);
1518
1519 // AllocationMemento support. Arrays may have an associated
1520 // AllocationMemento object that can be checked for in order to pretransition
1521 // to another type.
1522 // On entry, receiver should point to the array object.
1523 // If allocation info is present, the Z flag is set (so that the eq
1524 // condition will pass).
1525 void TestJSArrayForAllocationMemento(Register receiver,
1526 Register scratch1,
1527 Register scratch2,
1528 Label* no_memento_found);
1529
1530 void JumpIfJSArrayHasAllocationMemento(Register receiver,
1531 Register scratch1,
1532 Register scratch2,
1533 Label* memento_found) {
1534 Label no_memento_found;
1535 TestJSArrayForAllocationMemento(receiver, scratch1, scratch2,
1536 &no_memento_found);
1537 B(eq, memento_found);
1538 Bind(&no_memento_found);
1539 }
1540
1541 // The stack pointer has to switch between csp and jssp when setting up and
1542 // destroying the exit frame. Hence preserving/restoring the registers is
1543 // slightly more complicated than simple push/pop operations.
1544 void ExitFramePreserveFPRegs();
1545 void ExitFrameRestoreFPRegs();
1546
1547 // Generates function and stub prologue code.
1548 void Prologue(PrologueFrameMode frame_mode);
1549
1550 // Enter exit frame. Exit frames are used when calling C code from generated
1551 // (JavaScript) code.
1552 //
1553 // The stack pointer must be jssp on entry, and will be set to csp by this
1554 // function. The frame pointer is also configured, but the only other
1555 // registers modified by this function are the provided scratch register, and
1556 // jssp.
1557 //
1558 // The 'extra_space' argument can be used to allocate some space in the exit
1559 // frame that will be ignored by the GC. This space will be reserved in the
1560 // bottom of the frame immediately above the return address slot.
1561 //
1562 // Set up a stack frame and registers as follows:
1563 // fp[8]: CallerPC (lr)
1564 // fp -> fp[0]: CallerFP (old fp)
1565 // fp[-8]: SPOffset (new csp)
1566 // fp[-16]: CodeObject()
1567 // fp[-16 - fp-size]: Saved doubles, if save_doubles is true.
1568 // csp[8]: Memory reserved for the caller if extra_space != 0.
1569 // Alignment padding, if necessary.
1570 // csp -> csp[0]: Space reserved for the return address.
1571 //
1572 // This function also stores the new frame information in the top frame, so
1573 // that the new frame becomes the current frame.
1574 void EnterExitFrame(bool save_doubles,
1575 const Register& scratch,
1576 int extra_space = 0);
1577
1578 // Leave the current exit frame, after a C function has returned to generated
1579 // (JavaScript) code.
1580 //
1581 // This effectively unwinds the operation of EnterExitFrame:
1582 // * Preserved doubles are restored (if restore_doubles is true).
1583 // * The frame information is removed from the top frame.
1584 // * The exit frame is dropped.
1585 // * The stack pointer is reset to jssp.
1586 //
1587 // The stack pointer must be csp on entry.
1588 void LeaveExitFrame(bool save_doubles,
1589 const Register& scratch,
1590 bool restore_context);
1591
1592 void LoadContext(Register dst, int context_chain_length);
1593
1594 // ---------------------------------------------------------------------------
1595 // StatsCounter support
1596
1597 void SetCounter(StatsCounter* counter, int value, Register scratch1,
1598 Register scratch2);
1599 void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1600 Register scratch2);
1601 void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1602 Register scratch2);
1603
1604 // ---------------------------------------------------------------------------
1605 // Garbage collector support (GC).
1606
1607 enum RememberedSetFinalAction {
1608 kReturnAtEnd,
1609 kFallThroughAtEnd
1610 };
1611
1612 // Record in the remembered set the fact that we have a pointer to new space
1613 // at the address pointed to by the addr register. Only works if addr is not
1614 // in new space.
1615 void RememberedSetHelper(Register object, // Used for debug code.
1616 Register addr,
1617 Register scratch,
1618 SaveFPRegsMode save_fp,
1619 RememberedSetFinalAction and_then);
1620
1621 // Push and pop the registers that can hold pointers, as defined by the
1622 // RegList constant kSafepointSavedRegisters.
1623 void PushSafepointRegisters();
1624 void PopSafepointRegisters();
1625
1626 void PushSafepointFPRegisters();
1627 void PopSafepointFPRegisters();
1628
1629 // Store value in register src in the safepoint stack slot for register dst.
1630 void StoreToSafepointRegisterSlot(Register src, Register dst) {
1631 Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
1632 }
1633
1634 // Load the value of the src register from its safepoint stack slot
1635 // into register dst.
1636 void LoadFromSafepointRegisterSlot(Register dst, Register src) {
1637 Peek(dst, SafepointRegisterStackIndex(src.code()) * kPointerSize);
1638 }
1639
1640 void CheckPageFlagSet(const Register& object,
1641 const Register& scratch,
1642 int mask,
1643 Label* if_any_set);
1644
1645 void CheckPageFlagClear(const Register& object,
1646 const Register& scratch,
1647 int mask,
1648 Label* if_all_clear);
1649
1650 void CheckMapDeprecated(Handle<Map> map,
1651 Register scratch,
1652 Label* if_deprecated);
1653
1654 // Check if object is in new space and jump accordingly.
1655 // Register 'object' is preserved.
1656 void JumpIfNotInNewSpace(Register object,
1657 Label* branch) {
1658 InNewSpace(object, ne, branch);
1659 }
1660
1661 void JumpIfInNewSpace(Register object,
1662 Label* branch) {
1663 InNewSpace(object, eq, branch);
1664 }
1665
1666 // Notify the garbage collector that we wrote a pointer into an object.
1667 // |object| is the object being stored into, |value| is the object being
1668 // stored. value and scratch registers are clobbered by the operation.
1669 // The offset is the offset from the start of the object, not the offset from
1670 // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
1671 void RecordWriteField(
1672 Register object,
1673 int offset,
1674 Register value,
1675 Register scratch,
1676 LinkRegisterStatus lr_status,
1677 SaveFPRegsMode save_fp,
1678 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
1679 SmiCheck smi_check = INLINE_SMI_CHECK);
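
// A hypothetical store-plus-barrier pairing (illustrative only; the field
// offset and registers are invented). Note the untagged offset, matching
// FieldMemOperand:
//
//   __ Str(value, FieldMemOperand(object, JSObject::kElementsOffset));
//   __ RecordWriteField(object, JSObject::kElementsOffset, value, scratch,
//                       kLRHasNotBeenSaved, kDontSaveFPRegs);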
1680
1681 // As above, but the offset has the tag presubtracted. For use with
1682 // MemOperand(reg, off).
1683 inline void RecordWriteContextSlot(
1684 Register context,
1685 int offset,
1686 Register value,
1687 Register scratch,
1688 LinkRegisterStatus lr_status,
1689 SaveFPRegsMode save_fp,
1690 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
1691 SmiCheck smi_check = INLINE_SMI_CHECK) {
1692 RecordWriteField(context,
1693 offset + kHeapObjectTag,
1694 value,
1695 scratch,
1696 lr_status,
1697 save_fp,
1698 remembered_set_action,
1699 smi_check);
1700 }
1701
1702 // For a given |object| notify the garbage collector that the slot |address|
1703 // has been written. |value| is the object being stored. The value and
1704 // address registers are clobbered by the operation.
1705 void RecordWrite(
1706 Register object,
1707 Register address,
1708 Register value,
1709 LinkRegisterStatus lr_status,
1710 SaveFPRegsMode save_fp,
1711 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
1712 SmiCheck smi_check = INLINE_SMI_CHECK);
1713
1714 // Checks the color of an object. If the object is already grey or black
1715 // then we just fall through, since it is already live. If it is white and
1716 // we can determine that it doesn't need to be scanned, then we just mark it
1717 // black and fall through. For the rest we jump to the label so the
1718 // incremental marker can fix its assumptions.
1719 void EnsureNotWhite(Register object,
1720 Register scratch1,
1721 Register scratch2,
1722 Register scratch3,
1723 Register scratch4,
1724 Label* object_is_white_and_not_data);
1725
1726 // Detects conservatively whether an object is data-only, i.e. whether it does
1727 // not need to be scanned by the garbage collector.
1728 void JumpIfDataObject(Register value,
1729 Register scratch,
1730 Label* not_data_object);
1731
1732 // Helper for finding the mark bits for an address.
1733 // Note that the behaviour slightly differs from other architectures.
1734 // On exit:
1735 // - addr_reg is unchanged.
1736 // - The bitmap register points at the word with the mark bits.
1737 // - The shift register contains the index of the first color bit for this
1738 // object in the bitmap.
1739 inline void GetMarkBits(Register addr_reg,
1740 Register bitmap_reg,
1741 Register shift_reg);
1742
1743 // Check if an object has a given incremental marking color.
1744 void HasColor(Register object,
1745 Register scratch0,
1746 Register scratch1,
1747 Label* has_color,
1748 int first_bit,
1749 int second_bit);
1750
1751 void JumpIfBlack(Register object,
1752 Register scratch0,
1753 Register scratch1,
1754 Label* on_black);
1755
1756
1757 // Get the location of a relocated constant (its address in the constant pool)
1758 // from its load site.
1759 void GetRelocatedValueLocation(Register ldr_location,
1760 Register result);
1761
1762
1763 // ---------------------------------------------------------------------------
1764 // Debugging.
1765
1766 // Calls Abort(reason) if the condition cond is not satisfied.
1767 // Use --debug_code to enable.
1768 void Assert(Condition cond, BailoutReason reason);
1769 void AssertRegisterIsClear(Register reg, BailoutReason reason);
1770 void AssertRegisterIsRoot(
1771 Register reg,
1772 Heap::RootListIndex index,
1773 BailoutReason reason = kRegisterDidNotMatchExpectedRoot);
1774 void AssertFastElements(Register elements);
1775
1776 // Abort if the specified register contains the invalid color bit pattern.
1777 // The pattern must be in bits [1:0] of the 'reg' register.
1778 //
1779 // If emit_debug_code() is false, this emits no code.
1780 void AssertHasValidColor(const Register& reg);
1781
1782 // Abort if 'object' register doesn't point to a string object.
1783 //
1784 // If emit_debug_code() is false, this emits no code.
1785 void AssertIsString(const Register& object);
1786
1787 // Like Assert(), but always enabled.
1788 void Check(Condition cond, BailoutReason reason);
1789 void CheckRegisterIsClear(Register reg, BailoutReason reason);
1790
1791 // Print a message to stderr and abort execution.
1792 void Abort(BailoutReason reason);
1793
1794 // Conditionally load the cached Array transitioned map of type
1795 // transitioned_kind from the native context if the map in register
1796 // map_in_out is the cached Array map in the native context of
1797 // expected_kind.
1798 void LoadTransitionedArrayMapConditional(
1799 ElementsKind expected_kind,
1800 ElementsKind transitioned_kind,
1801 Register map_in_out,
1802 Register scratch,
1803 Label* no_map_match);
1804
1805 // Load the initial map for new Arrays from a JSFunction.
1806 void LoadInitialArrayMap(Register function_in,
1807 Register scratch,
1808 Register map_out,
1809 ArrayHasHoles holes);
1810
1811 void LoadArrayFunction(Register function);
1812 void LoadGlobalFunction(int index, Register function);
1813
1814 // Load the initial map from the global function. The registers function and
1815 // map can be the same; function is then overwritten.
1816 void LoadGlobalFunctionInitialMap(Register function,
1817 Register map,
1818 Register scratch);
1819
1820 // --------------------------------------------------------------------------
1821 // Set the registers used internally by the MacroAssembler as scratch
1822 // registers. These registers are used to implement behaviours which are not
1823 // directly supported by A64, and where an intermediate result is required.
1824 //
1825 // Both tmp0 and tmp1 may be set to any X register except for xzr, sp,
1826 // and StackPointer(). Also, they must not be the same register (though they
1827 // may both be NoReg).
1828 //
1829 // It is valid to set either or both of these registers to NoReg if you don't
1830 // want the MacroAssembler to use any scratch registers. In a debug build, the
1831 // Assembler will assert that any registers it uses are valid. Be aware that
1832 // this check is not present in release builds. If this is a problem, use the
1833 // Assembler directly.
1834 void SetScratchRegisters(const Register& tmp0, const Register& tmp1) {
1835 // V8 assumes the macro assembler uses ip0 and ip1 as temp registers.
1836 ASSERT(tmp0.IsNone() || tmp0.Is(ip0));
1837 ASSERT(tmp1.IsNone() || tmp1.Is(ip1));
1838
1839 ASSERT(!AreAliased(xzr, csp, tmp0, tmp1));
1840 ASSERT(!AreAliased(StackPointer(), tmp0, tmp1));
1841 tmp0_ = tmp0;
1842 tmp1_ = tmp1;
1843 }
1844
1845 const Register& Tmp0() const {
1846 return tmp0_;
1847 }
1848
1849 const Register& Tmp1() const {
1850 return tmp1_;
1851 }
1852
1853 const Register WTmp0() const {
1854 return Register::Create(tmp0_.code(), kWRegSize);
1855 }
1856
1857 const Register WTmp1() const {
1858 return Register::Create(tmp1_.code(), kWRegSize);
1859 }
1860
1861 void SetFPScratchRegister(const FPRegister& fptmp0) {
1862 fptmp0_ = fptmp0;
1863 }
1864
1865 const FPRegister& FPTmp0() const {
1866 return fptmp0_;
1867 }
1868
1869 const Register AppropriateTempFor(
1870 const Register& target,
1871 const CPURegister& forbidden = NoCPUReg) const {
1872 Register candidate = forbidden.Is(Tmp0()) ? Tmp1() : Tmp0();
1873 ASSERT(!candidate.Is(target));
1874 return Register::Create(candidate.code(), target.SizeInBits());
1875 }
1876
1877 const FPRegister AppropriateTempFor(
1878 const FPRegister& target,
1879 const CPURegister& forbidden = NoCPUReg) const {
1880 USE(forbidden);
1881 FPRegister candidate = FPTmp0();
1882 ASSERT(!candidate.Is(forbidden));
1883 ASSERT(!candidate.Is(target));
1884 return FPRegister::Create(candidate.code(), target.SizeInBits());
1885 }
1886
1887 // Like printf, but print at run-time from generated code.
1888 //
1889 // The caller must ensure that arguments for floating-point placeholders
1890 // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
1891 // placeholders are Registers.
1892 //
1893 // A maximum of four arguments may be given to any single Printf call. The
1894 // arguments must be of the same type, but they do not need to have the same
1895 // size.
1896 //
1897 // The following registers cannot be printed:
1898 // Tmp0(), Tmp1(), StackPointer(), csp.
1899 //
1900 // This function automatically preserves caller-saved registers so that
1901 // calling code can use Printf at any point without having to worry about
1902 // corruption. The preservation mechanism generates a lot of code. If this is
1903 // a problem, preserve the important registers manually and then call
1904 // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
1905 // implicitly preserved.
1906 //
1907 // Unlike many MacroAssembler functions, x8 and x9 are guaranteed to be
1908 // preserved, and can be printed. This allows Printf to be used in debug
1909 // code.
1910 //
1911 // This function assumes (and asserts) that the current stack pointer is
1912 // callee-saved, not caller-saved. This is most likely the case anyway, as a
1913 // caller-saved stack pointer doesn't make a lot of sense.
1914 void Printf(const char * format,
1915 const CPURegister& arg0 = NoCPUReg,
1916 const CPURegister& arg1 = NoCPUReg,
1917 const CPURegister& arg2 = NoCPUReg,
1918 const CPURegister& arg3 = NoCPUReg);
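
// A minimal usage sketch (registers and format string invented for
// illustration; note that all arguments to one call must be the same type,
// so integer and floating-point values cannot be mixed):
//
//   __ Fmov(d0, 1.5);
//   __ Printf("d0: %g, d1: %g\n", d0, d1);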
1919
1920 // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
1921 //
1922 // The return code from the system printf call will be returned in x0.
1923 void PrintfNoPreserve(const char * format,
1924 const CPURegister& arg0 = NoCPUReg,
1925 const CPURegister& arg1 = NoCPUReg,
1926 const CPURegister& arg2 = NoCPUReg,
1927 const CPURegister& arg3 = NoCPUReg);
1928
1929 // Code ageing support functions.
1930
1931 // Code ageing on A64 works similarly to on ARM. When V8 wants to mark a
1932 // function as old, it replaces some of the function prologue (generated by
1933 // FullCodeGenerator::Generate) with a call to a special stub (ultimately
1934 // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
1935 // function prologue to its initial young state (indicating that it has been
1936 // recently run) and continues. A young function is therefore one which has a
1937 // normal frame setup sequence, and an old function has a code age sequence
1938 // which calls a code ageing stub.
1939
1940 // Set up a basic stack frame for young code (or code exempt from ageing) with
1941 // type FUNCTION. It may be patched later for code ageing support. This is
1942 // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
1943 //
1944 // This function takes an Assembler so it can be called from either a
1945 // MacroAssembler or a PatchingAssembler context.
1946 static void EmitFrameSetupForCodeAgePatching(Assembler* assm);
1947
1948 // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
1949 void EmitFrameSetupForCodeAgePatching();
1950
1951 // Emit a code age sequence that calls the relevant code age stub. The code
1952 // generated by this sequence is expected to replace the code generated by
1953 // EmitFrameSetupForCodeAgePatching, and represents an old function.
1954 //
1955 // If stub is NULL, this function generates the code age sequence but omits
1956 // the stub address that is normally embedded in the instruction stream. This
1957 // can be used by debug code to verify code age sequences.
1958 static void EmitCodeAgeSequence(Assembler* assm, Code* stub);
1959
1960 // Call EmitCodeAgeSequence from a MacroAssembler context.
1961 void EmitCodeAgeSequence(Code* stub);
1962
1963 // Return true if the sequence is a young sequence generated by
1964 // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
1965 // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
1966 static bool IsYoungSequence(byte* sequence);
1967
1968 #ifdef DEBUG
1969 // Return true if the sequence is a code age sequence generated by
1970 // EmitCodeAgeSequence.
1971 static bool IsCodeAgeSequence(byte* sequence);
1972 #endif
1973
1974 // Jumps to the 'found' label if a prototype map has dictionary elements.
1975 void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
1976 Register scratch1, Label* found);
1977
1978 private:
1979 // Helpers for CopyFields.
1980 // These each implement CopyFields in a different way.
1981 void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
1982 Register scratch1, Register scratch2,
1983 Register scratch3);
1984 void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
1985 Register scratch1, Register scratch2);
1986 void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
1987 Register scratch1);
1988
1989 // The actual Push and Pop implementations. These don't generate any code
1990 // other than that required for the push or pop. This allows
1991 // (Push|Pop)CPURegList to bundle together run-time assertions for a large
1992 // block of registers.
1993 //
1994 // Note that size is per register, and is specified in bytes.
1995 void PushHelper(int count, int size,
1996 const CPURegister& src0, const CPURegister& src1,
1997 const CPURegister& src2, const CPURegister& src3);
1998 void PopHelper(int count, int size,
1999 const CPURegister& dst0, const CPURegister& dst1,
2000 const CPURegister& dst2, const CPURegister& dst3);
2001
2002 // Perform necessary maintenance operations before a push or pop.
2003 //
2004 // Note that size is per register, and is specified in bytes.
2005 void PrepareForPush(int count, int size);
2006 void PrepareForPop(int count, int size);
2007
2008 // Call Printf. On a native build, a simple call will be generated, but if the
2009 // simulator is being used then a suitable pseudo-instruction is used. The
2010 // arguments and stack (csp) must be prepared by the caller as for a normal
2011 // AAPCS64 call to 'printf'.
2012 //
2013 // The 'type' argument specifies the type of the optional arguments.
2014 void CallPrintf(CPURegister::RegisterType type = CPURegister::kNoRegister);
2015
2016 // Helper for throwing exceptions. Compute a handler address and jump to
2017 // it. See the implementation for register usage.
2018 void JumpToHandlerEntry(Register exception,
2019 Register object,
2020 Register state,
2021 Register scratch1,
2022 Register scratch2);
2023
2024 // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
2025 void InNewSpace(Register object,
2026 Condition cond, // eq for new space, ne otherwise.
2027 Label* branch);
2028
2029 // Try to convert a double to an int so that integer fast-paths may be
2030 // used. Not every valid integer value is guaranteed to be caught.
2031 // It supports both 32-bit and 64-bit integers, depending on whether 'as_int'
2032 // is a W or X register.
2033 //
2034 // This does not distinguish between +0 and -0, so if this distinction is
2035 // important it must be checked separately.
2036 void TryConvertDoubleToInt(Register as_int,
2037 FPRegister value,
2038 FPRegister scratch_d,
2039 Label* on_successful_conversion,
2040 Label* on_failed_conversion = NULL);
2041
2042 bool generating_stub_;
2043 #ifdef DEBUG
2044 // Tells whether any of the macro instructions can be used. When false, the
2045 // MacroAssembler will assert if a method which can emit a variable number
2046 // of instructions is called.
2047 bool allow_macro_instructions_;
2048 #endif
2049 bool has_frame_;
2050
2051 // The Abort method should call a V8 runtime function, but the CallRuntime
2052 // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
2053 // use a simpler abort mechanism that doesn't depend on CEntryStub.
2054 //
2055 // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
2056 // being generated.
2057 bool use_real_aborts_;
2058
2059 // This handle will be patched with the code object on installation.
2060 Handle<Object> code_object_;
2061
2062 // The register to use as a stack pointer for stack operations.
2063 Register sp_;
2064
2065 // Scratch registers used internally by the MacroAssembler.
2066 Register tmp0_;
2067 Register tmp1_;
2068 FPRegister fptmp0_;
2069
2070 void InitializeNewString(Register string,
2071 Register length,
2072 Heap::RootListIndex map_index,
2073 Register scratch1,
2074 Register scratch2);
2075 };
2076
2077
2078 // Use this scope when you need a one-to-one mapping between methods and
2079 // instructions. This scope prevents the MacroAssembler from being called and
2080 // literal pools from being emitted. It also asserts the number of instructions
2081 // emitted is what you specified when creating the scope.
2082 class InstructionAccurateScope BASE_EMBEDDED {
2083 public:
2084 explicit InstructionAccurateScope(MacroAssembler* masm)
2085 : masm_(masm), size_(0) {
2086 masm_->StartBlockConstPool();
2087 #ifdef DEBUG
2088 previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
2089 masm_->set_allow_macro_instructions(false);
2090 #endif
2091 }
2092
2093 InstructionAccurateScope(MacroAssembler* masm, size_t count)
2094 : masm_(masm), size_(count * kInstructionSize) {
2095 masm_->StartBlockConstPool();
2096 #ifdef DEBUG
2097 masm_->bind(&start_);
2098 previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
2099 masm_->set_allow_macro_instructions(false);
2100 #endif
2101 }
2102
2103 ~InstructionAccurateScope() {
2104 masm_->EndBlockConstPool();
2105 #ifdef DEBUG
2106 if (start_.is_bound()) {
2107 ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
2108 }
2109 masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
2110 #endif
2111 }
2112
2113 private:
2114 MacroAssembler* masm_;
2115 size_t size_;
2116 #ifdef DEBUG
2117 Label start_;
2118 bool previous_allow_macro_instructions_;
2119 #endif
2120 };
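
// A minimal usage sketch (invented for illustration): only raw assembler
// mnemonics may be used inside the scope, and in debug builds the declared
// instruction count is checked when the scope is destroyed:
//
//   {
//     InstructionAccurateScope scope(masm, 2);  // Expect exactly 2 instructions.
//     __ add(x0, x0, x1);
//     __ ret(lr);
//   }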
2121
2122
2123 inline MemOperand ContextMemOperand(Register context, int index) {
2124 return MemOperand(context, Context::SlotOffset(index));
2125 }
2126
2127 inline MemOperand GlobalObjectMemOperand() {
2128 return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
2129 }
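
// A hypothetical use of the helpers above (assuming cp holds the current
// context, as elsewhere in V8); both loads fetch the global object:
//
//   __ Ldr(x10, GlobalObjectMemOperand());
//   __ Ldr(x11, ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX));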
2130
2131
2132 // Encode and decode information about patchable inline SMI checks.
2133 class InlineSmiCheckInfo {
2134 public:
2135 explicit InlineSmiCheckInfo(Address info);
2136
2137 bool HasSmiCheck() const {
2138 return smi_check_ != NULL;
2139 }
2140
2141 const Register& SmiRegister() const {
2142 return reg_;
2143 }
2144
2145 Instruction* SmiCheck() const {
2146 return smi_check_;
2147 }
2148
2149 // Use MacroAssembler::InlineData to emit information about patchable inline
2150 // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'smi_check'
2151 // to indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
2152 //
2153 // The generated patch information can be read using the InlineSmiCheckInfo
2154 // class.
2155 static void Emit(MacroAssembler* masm, const Register& reg,
2156 const Label* smi_check);
2157
2158 // Emit information to indicate that there is no inline SMI check.
2159 static void EmitNotInlined(MacroAssembler* masm) {
2160 Label unbound;
2161 Emit(masm, NoReg, &unbound);
2162 }
2163
2164 private:
2165 Register reg_;
2166 Instruction* smi_check_;
2167
2168 // Fields in the data encoded by InlineData.
2169
2170 // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
2171 // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
2172 // used in a patchable check. The Emit() method checks this.
2173 //
2174 // Note that the total size of the fields is restricted by the underlying
2175 // storage size handled by the BitField class, which is a uint32_t.
2176 class RegisterBits : public BitField<unsigned, 0, 5> {};
2177 class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
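
// For illustration, a sketch of how a payload word could be packed and
// unpacked with these fields ('delta' being the encoded distance to the
// check site; the variable names are invented):
//
//   uint32_t payload = RegisterBits::encode(reg.code()) |
//                      DeltaBits::encode(delta);
//   unsigned reg_code = RegisterBits::decode(payload);
//   uint32_t delta_out = DeltaBits::decode(payload);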
2178 };
2179
2180 } } // namespace v8::internal
2181
2182 #ifdef GENERATED_CODE_COVERAGE
2183 #error "Unsupported option"
2184 #define CODE_COVERAGE_STRINGIFY(x) #x
2185 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
2186 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
2187 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
2188 #else
2189 #define ACCESS_MASM(masm) masm->
2190 #endif
2191
2192 #endif // V8_A64_MACRO_ASSEMBLER_A64_H_