Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(132)

Side by Side Diff: src/a64/macro-assembler-a64.h

Issue 144963003: A64: add missing files. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/a64/lithium-gap-resolver-a64.cc ('k') | src/a64/macro-assembler-a64.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #ifndef V8_A64_MACRO_ASSEMBLER_A64_H_
29 #define V8_A64_MACRO_ASSEMBLER_A64_H_
30
31 #include "v8globals.h"
32 #include "globals.h"
33
34 #include "a64/assembler-a64-inl.h"
35
36 namespace v8 {
37 namespace internal {
38
// List of the load/store macro helpers. Each entry expands V with:
//   V(macro name, register argument type, register argument name,
//     load/store opcode for the access)
// The opcode expression may inspect the register argument (e.g. to choose
// the 32-bit or 64-bit form), so it is evaluated at the expansion site.
#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)
50
51 // ----------------------------------------------------------------------------
52 // Static helper functions
53
// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset);
// As FieldMemOperand, but for a field holding a SMI; the operand is adjusted
// so the load yields the untagged value. (See the implementation for the
// exact offset adjustment.)
inline MemOperand UntagSmiFieldMemOperand(Register object, int offset);

// Generate a MemOperand for loading the untagged value of a SMI from memory.
inline MemOperand UntagSmiMemOperand(Register object, int offset);
60
61
62 // ----------------------------------------------------------------------------
63 // MacroAssembler
64
// Whether a stub may, or must, already have been pregenerated.
enum PregenExpectation { MAYBE_PREGENERATED, EXPECT_PREGENERATED };
// Whether a write-barrier sequence updates the remembered set.
enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
// Whether a SMI check is emitted inline or omitted (caller guarantees it).
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
// Records whether the link register has been saved (and so may be clobbered).
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
// Whether a call-target address may be inlined in the instruction stream.
enum TargetAddressStorageMode {
  CAN_INLINE_TARGET_ADDRESS,
  NEVER_INLINE_TARGET_ADDRESS
};
// Passed to SmiUntagToDouble/SmiUntagToFloat: a speculative untag is one
// where the input is not known to be a SMI.
enum UntagMode { kNotSpeculativeUntag, kSpeculativeUntag };
// Whether an array being copied may contain holes.
enum ArrayHasHoles { kArrayCantHaveHoles, kArrayCanHaveHoles };
// Hint about the expected length of a copy, used to select a code path.
enum CopyHint { kCopyUnknown, kCopyShort, kCopyLong };
// Passed to Mov: whether a move may be elided when source and destination
// are the same W register.
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
77
78 class MacroAssembler : public Assembler {
79 public:
80 MacroAssembler(Isolate* isolate, byte * buffer, unsigned buffer_size);
81
82 inline Handle<Object> CodeObject();
83
84 // Instruction set functions ------------------------------------------------
85 // Logical macros.
86 inline void And(const Register& rd,
87 const Register& rn,
88 const Operand& operand);
89 inline void Ands(const Register& rd,
90 const Register& rn,
91 const Operand& operand);
92 inline void Bic(const Register& rd,
93 const Register& rn,
94 const Operand& operand);
95 inline void Bics(const Register& rd,
96 const Register& rn,
97 const Operand& operand);
98 inline void Orr(const Register& rd,
99 const Register& rn,
100 const Operand& operand);
101 inline void Orn(const Register& rd,
102 const Register& rn,
103 const Operand& operand);
104 inline void Eor(const Register& rd,
105 const Register& rn,
106 const Operand& operand);
107 inline void Eon(const Register& rd,
108 const Register& rn,
109 const Operand& operand);
110 inline void Tst(const Register& rn, const Operand& operand);
111 void LogicalMacro(const Register& rd,
112 const Register& rn,
113 const Operand& operand,
114 LogicalOp op);
115
116 // Add and sub macros.
117 inline void Add(const Register& rd,
118 const Register& rn,
119 const Operand& operand);
120 inline void Adds(const Register& rd,
121 const Register& rn,
122 const Operand& operand);
123 inline void Sub(const Register& rd,
124 const Register& rn,
125 const Operand& operand);
126 inline void Subs(const Register& rd,
127 const Register& rn,
128 const Operand& operand);
129 inline void Cmn(const Register& rn, const Operand& operand);
130 inline void Cmp(const Register& rn, const Operand& operand);
131 inline void Neg(const Register& rd,
132 const Operand& operand);
133 inline void Negs(const Register& rd,
134 const Operand& operand);
135
136 void AddSubMacro(const Register& rd,
137 const Register& rn,
138 const Operand& operand,
139 FlagsUpdate S,
140 AddSubOp op);
141
142 // Add/sub with carry macros.
143 inline void Adc(const Register& rd,
144 const Register& rn,
145 const Operand& operand);
146 inline void Adcs(const Register& rd,
147 const Register& rn,
148 const Operand& operand);
149 inline void Sbc(const Register& rd,
150 const Register& rn,
151 const Operand& operand);
152 inline void Sbcs(const Register& rd,
153 const Register& rn,
154 const Operand& operand);
155 inline void Ngc(const Register& rd,
156 const Operand& operand);
157 inline void Ngcs(const Register& rd,
158 const Operand& operand);
159 void AddSubWithCarryMacro(const Register& rd,
160 const Register& rn,
161 const Operand& operand,
162 FlagsUpdate S,
163 AddSubWithCarryOp op);
164
165 // Move macros.
166 void Mov(const Register& rd,
167 const Operand& operand,
168 DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
169 void Mov(const Register& rd, uint64_t imm);
170 inline void Mvn(const Register& rd, uint64_t imm);
171 void Mvn(const Register& rd, const Operand& operand);
172 static bool IsImmMovn(uint64_t imm, unsigned reg_size);
173 static bool IsImmMovz(uint64_t imm, unsigned reg_size);
174 static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
175
176 // Conditional macros.
177 inline void Ccmp(const Register& rn,
178 const Operand& operand,
179 StatusFlags nzcv,
180 Condition cond);
181 inline void Ccmn(const Register& rn,
182 const Operand& operand,
183 StatusFlags nzcv,
184 Condition cond);
185 void ConditionalCompareMacro(const Register& rn,
186 const Operand& operand,
187 StatusFlags nzcv,
188 Condition cond,
189 ConditionalCompareOp op);
190 void Csel(const Register& rd,
191 const Register& rn,
192 const Operand& operand,
193 Condition cond);
194
195 // Load/store macros.
196 #define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
197 inline void FN(const REGTYPE REG, const MemOperand& addr);
198 LS_MACRO_LIST(DECLARE_FUNCTION)
199 #undef DECLARE_FUNCTION
200
201 void LoadStoreMacro(const CPURegister& rt,
202 const MemOperand& addr,
203 LoadStoreOp op);
204
205 // Remaining instructions are simple pass-through calls to the assembler.
206 inline void Adr(const Register& rd, Label* label);
207 inline void Asr(const Register& rd, const Register& rn, unsigned shift);
208 inline void Asr(const Register& rd, const Register& rn, const Register& rm);
209 inline void B(Label* label);
210 inline void B(Condition cond, Label* label);
211 inline void B(Label* label, Condition cond);
212 inline void Bfi(const Register& rd,
213 const Register& rn,
214 unsigned lsb,
215 unsigned width);
216 inline void Bfxil(const Register& rd,
217 const Register& rn,
218 unsigned lsb,
219 unsigned width);
220 inline void Bind(Label* label);
221 inline void Bl(Label* label);
222 inline void Blr(const Register& xn);
223 inline void Br(const Register& xn);
224 inline void Brk(int code);
225 inline void Cbnz(const Register& rt, Label* label);
226 inline void Cbz(const Register& rt, Label* label);
227 inline void Cinc(const Register& rd, const Register& rn, Condition cond);
228 inline void Cinv(const Register& rd, const Register& rn, Condition cond);
229 inline void Cls(const Register& rd, const Register& rn);
230 inline void Clz(const Register& rd, const Register& rn);
231 inline void Cneg(const Register& rd, const Register& rn, Condition cond);
232 inline void CzeroX(const Register& rd, Condition cond);
233 inline void CmovX(const Register& rd, const Register& rn, Condition cond);
234 inline void Cset(const Register& rd, Condition cond);
235 inline void Csetm(const Register& rd, Condition cond);
236 inline void Csinc(const Register& rd,
237 const Register& rn,
238 const Register& rm,
239 Condition cond);
240 inline void Csinv(const Register& rd,
241 const Register& rn,
242 const Register& rm,
243 Condition cond);
244 inline void Csneg(const Register& rd,
245 const Register& rn,
246 const Register& rm,
247 Condition cond);
248 inline void Dmb(BarrierDomain domain, BarrierType type);
249 inline void Dsb(BarrierDomain domain, BarrierType type);
250 inline void Debug(const char* message, uint32_t code, Instr params = BREAK);
251 inline void Extr(const Register& rd,
252 const Register& rn,
253 const Register& rm,
254 unsigned lsb);
255 inline void Fabs(const FPRegister& fd, const FPRegister& fn);
256 inline void Fadd(const FPRegister& fd,
257 const FPRegister& fn,
258 const FPRegister& fm);
259 inline void Fccmp(const FPRegister& fn,
260 const FPRegister& fm,
261 StatusFlags nzcv,
262 Condition cond);
263 inline void Fcmp(const FPRegister& fn, const FPRegister& fm);
264 inline void Fcmp(const FPRegister& fn, double value);
265 inline void Fcsel(const FPRegister& fd,
266 const FPRegister& fn,
267 const FPRegister& fm,
268 Condition cond);
269 inline void Fcvt(const FPRegister& fd, const FPRegister& fn);
270 inline void Fcvtas(const Register& rd, const FPRegister& fn);
271 inline void Fcvtau(const Register& rd, const FPRegister& fn);
272 inline void Fcvtms(const Register& rd, const FPRegister& fn);
273 inline void Fcvtmu(const Register& rd, const FPRegister& fn);
274 inline void Fcvtns(const Register& rd, const FPRegister& fn);
275 inline void Fcvtnu(const Register& rd, const FPRegister& fn);
276 inline void Fcvtzs(const Register& rd, const FPRegister& fn);
277 inline void Fcvtzu(const Register& rd, const FPRegister& fn);
278 inline void Fdiv(const FPRegister& fd,
279 const FPRegister& fn,
280 const FPRegister& fm);
281 inline void Fmadd(const FPRegister& fd,
282 const FPRegister& fn,
283 const FPRegister& fm,
284 const FPRegister& fa);
285 inline void Fmax(const FPRegister& fd,
286 const FPRegister& fn,
287 const FPRegister& fm);
288 inline void Fmaxnm(const FPRegister& fd,
289 const FPRegister& fn,
290 const FPRegister& fm);
291 inline void Fmin(const FPRegister& fd,
292 const FPRegister& fn,
293 const FPRegister& fm);
294 inline void Fminnm(const FPRegister& fd,
295 const FPRegister& fn,
296 const FPRegister& fm);
297 inline void Fmov(FPRegister fd, FPRegister fn);
298 inline void Fmov(FPRegister fd, Register rn);
299 inline void Fmov(FPRegister fd, double imm);
300 inline void Fmov(Register rd, FPRegister fn);
301 inline void Fmsub(const FPRegister& fd,
302 const FPRegister& fn,
303 const FPRegister& fm,
304 const FPRegister& fa);
305 inline void Fmul(const FPRegister& fd,
306 const FPRegister& fn,
307 const FPRegister& fm);
308 inline void Fneg(const FPRegister& fd, const FPRegister& fn);
309 inline void Fnmadd(const FPRegister& fd,
310 const FPRegister& fn,
311 const FPRegister& fm,
312 const FPRegister& fa);
313 inline void Fnmsub(const FPRegister& fd,
314 const FPRegister& fn,
315 const FPRegister& fm,
316 const FPRegister& fa);
317 inline void Frinta(const FPRegister& fd, const FPRegister& fn);
318 inline void Frintn(const FPRegister& fd, const FPRegister& fn);
319 inline void Frintz(const FPRegister& fd, const FPRegister& fn);
320 inline void Fsqrt(const FPRegister& fd, const FPRegister& fn);
321 inline void Fsub(const FPRegister& fd,
322 const FPRegister& fn,
323 const FPRegister& fm);
324 inline void Hint(SystemHint code);
325 inline void Hlt(int code);
326 inline void Isb();
327 inline void Ldnp(const CPURegister& rt,
328 const CPURegister& rt2,
329 const MemOperand& src);
330 inline void Ldp(const CPURegister& rt,
331 const CPURegister& rt2,
332 const MemOperand& src);
333 inline void Ldpsw(const Register& rt,
334 const Register& rt2,
335 const MemOperand& src);
336 inline void Ldr(const FPRegister& ft, double imm);
337 inline void Ldr(const Register& rt, uint64_t imm);
338 inline void Lsl(const Register& rd, const Register& rn, unsigned shift);
339 inline void Lsl(const Register& rd, const Register& rn, const Register& rm);
340 inline void Lsr(const Register& rd, const Register& rn, unsigned shift);
341 inline void Lsr(const Register& rd, const Register& rn, const Register& rm);
342 inline void Madd(const Register& rd,
343 const Register& rn,
344 const Register& rm,
345 const Register& ra);
346 inline void Mneg(const Register& rd, const Register& rn, const Register& rm);
347 inline void Mov(const Register& rd, const Register& rm);
348 inline void Movk(const Register& rd, uint64_t imm, int shift = -1);
349 inline void Mrs(const Register& rt, SystemRegister sysreg);
350 inline void Msr(SystemRegister sysreg, const Register& rt);
351 inline void Msub(const Register& rd,
352 const Register& rn,
353 const Register& rm,
354 const Register& ra);
355 inline void Mul(const Register& rd, const Register& rn, const Register& rm);
356 inline void Nop() { nop(); }
357 inline void Rbit(const Register& rd, const Register& rn);
358 inline void Ret(const Register& xn = lr);
359 inline void Rev(const Register& rd, const Register& rn);
360 inline void Rev16(const Register& rd, const Register& rn);
361 inline void Rev32(const Register& rd, const Register& rn);
362 inline void Ror(const Register& rd, const Register& rs, unsigned shift);
363 inline void Ror(const Register& rd, const Register& rn, const Register& rm);
364 inline void Sbfiz(const Register& rd,
365 const Register& rn,
366 unsigned lsb,
367 unsigned width);
368 inline void Sbfx(const Register& rd,
369 const Register& rn,
370 unsigned lsb,
371 unsigned width);
372 inline void Scvtf(const FPRegister& fd,
373 const Register& rn,
374 unsigned fbits = 0);
375 inline void Sdiv(const Register& rd, const Register& rn, const Register& rm);
376 inline void Smaddl(const Register& rd,
377 const Register& rn,
378 const Register& rm,
379 const Register& ra);
380 inline void Smsubl(const Register& rd,
381 const Register& rn,
382 const Register& rm,
383 const Register& ra);
384 inline void Smull(const Register& rd,
385 const Register& rn,
386 const Register& rm);
387 inline void Smulh(const Register& rd,
388 const Register& rn,
389 const Register& rm);
390 inline void Stnp(const CPURegister& rt,
391 const CPURegister& rt2,
392 const MemOperand& dst);
393 inline void Stp(const CPURegister& rt,
394 const CPURegister& rt2,
395 const MemOperand& dst);
396 inline void Sxtb(const Register& rd, const Register& rn);
397 inline void Sxth(const Register& rd, const Register& rn);
398 inline void Sxtw(const Register& rd, const Register& rn);
399 inline void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
400 inline void Tbz(const Register& rt, unsigned bit_pos, Label* label);
401 inline void Ubfiz(const Register& rd,
402 const Register& rn,
403 unsigned lsb,
404 unsigned width);
405 inline void Ubfx(const Register& rd,
406 const Register& rn,
407 unsigned lsb,
408 unsigned width);
409 inline void Ucvtf(const FPRegister& fd,
410 const Register& rn,
411 unsigned fbits = 0);
412 inline void Udiv(const Register& rd, const Register& rn, const Register& rm);
413 inline void Umaddl(const Register& rd,
414 const Register& rn,
415 const Register& rm,
416 const Register& ra);
417 inline void Umsubl(const Register& rd,
418 const Register& rn,
419 const Register& rm,
420 const Register& ra);
421 inline void Unreachable();
422 inline void Uxtb(const Register& rd, const Register& rn);
423 inline void Uxth(const Register& rd, const Register& rn);
424 inline void Uxtw(const Register& rd, const Register& rn);
425
426 // Pseudo-instructions ------------------------------------------------------
427
428 // Compute rd = abs(rm).
429 // This function clobbers the condition flags.
430 //
431 // If rm is the minimum representable value, the result is not representable.
432 // Handlers for each case can be specified using the relevant labels.
433 void Abs(const Register& rd, const Register& rm,
434 Label * is_not_representable = NULL,
435 Label * is_representable = NULL);
436
437 // Push or pop up to 4 registers of the same width to or from the stack,
438 // using the current stack pointer as set by SetStackPointer.
439 //
440 // If an argument register is 'NoReg', all further arguments are also assumed
441 // to be 'NoReg', and are thus not pushed or popped.
442 //
443 // Arguments are ordered such that "Push(a, b);" is functionally equivalent
444 // to "Push(a); Push(b);".
445 //
446 // It is valid to push the same register more than once, and there is no
447 // restriction on the order in which registers are specified.
448 //
449 // It is not valid to pop into the same register more than once in one
450 // operation, not even into the zero register.
451 //
452 // If the current stack pointer (as set by SetStackPointer) is csp, then it
453 // must be aligned to 16 bytes on entry and the total size of the specified
454 // registers must also be a multiple of 16 bytes.
455 //
456 // Even if the current stack pointer is not the system stack pointer (csp),
457 // Push (and derived methods) will still modify the system stack pointer in
458 // order to comply with ABI rules about accessing memory below the system
459 // stack pointer.
460 //
461 // Other than the registers passed into Pop, the stack pointer and (possibly)
462 // the system stack pointer, these methods do not modify any other registers.
463 // Scratch registers such as Tmp0() and Tmp1() are preserved.
464 void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
465 const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
466 void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
467 const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
468
469 // Alternative forms of Push and Pop, taking a RegList or CPURegList that
470 // specifies the registers that are to be pushed or popped. Higher-numbered
471 // registers are associated with higher memory addresses (as in the A32 push
472 // and pop instructions).
473 //
474 // (Push|Pop)SizeRegList allow you to specify the register size as a
475 // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
476 // supported.
477 //
478 // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
479 void PushCPURegList(CPURegList registers);
480 void PopCPURegList(CPURegList registers);
481
482 inline void PushSizeRegList(RegList registers, unsigned reg_size,
483 CPURegister::RegisterType type = CPURegister::kRegister) {
484 PushCPURegList(CPURegList(type, reg_size, registers));
485 }
486 inline void PopSizeRegList(RegList registers, unsigned reg_size,
487 CPURegister::RegisterType type = CPURegister::kRegister) {
488 PopCPURegList(CPURegList(type, reg_size, registers));
489 }
490 inline void PushXRegList(RegList regs) {
491 PushSizeRegList(regs, kXRegSize);
492 }
493 inline void PopXRegList(RegList regs) {
494 PopSizeRegList(regs, kXRegSize);
495 }
496 inline void PushWRegList(RegList regs) {
497 PushSizeRegList(regs, kWRegSize);
498 }
499 inline void PopWRegList(RegList regs) {
500 PopSizeRegList(regs, kWRegSize);
501 }
502 inline void PushDRegList(RegList regs) {
503 PushSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
504 }
505 inline void PopDRegList(RegList regs) {
506 PopSizeRegList(regs, kDRegSize, CPURegister::kFPRegister);
507 }
508 inline void PushSRegList(RegList regs) {
509 PushSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
510 }
511 inline void PopSRegList(RegList regs) {
512 PopSizeRegList(regs, kSRegSize, CPURegister::kFPRegister);
513 }
514
  // Push the specified register 'count' times.
  void PushMultipleTimes(int count, Register src);

  // This is a convenience method for pushing a single Handle<Object>.
  inline void Push(Handle<Object> handle);
  // Push a SMI by wrapping it in a Handle and forwarding to the
  // Handle<Object> overload.
  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }

  // Aliases of Push and Pop, required for V8 compatibility.
  inline void push(Register src) {
    Push(src);
  }
  inline void pop(Register dst) {
    Pop(dst);
  }
529
530 // Poke 'src' onto the stack. The offset is in bytes.
531 //
532 // If the current stack pointer (according to StackPointer()) is csp, then
533 // csp must be aligned to 16 bytes.
534 void Poke(const Register& src, const Operand& offset);
535
536 // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
537 //
538 // If the current stack pointer (according to StackPointer()) is csp, then
539 // csp must be aligned to 16 bytes.
540 void Peek(const Register& dst, const Operand& offset);
541
542 // Claim or drop stack space without actually accessing memory.
543 //
544 // In debug mode, both of these will write invalid data into the claimed or
545 // dropped space.
546 //
547 // If the current stack pointer (according to StackPointer()) is csp, then it
548 // must be aligned to 16 bytes and the size claimed or dropped must be a
549 // multiple of 16 bytes.
550 //
551 // Note that unit_size must be specified in bytes. For variants which take a
552 // Register count, the unit size must be a power of two.
553 inline void Claim(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
554 inline void Claim(const Register& count,
555 uint64_t unit_size = kXRegSizeInBytes);
556 inline void Drop(uint64_t count, uint64_t unit_size = kXRegSizeInBytes);
557 inline void Drop(const Register& count,
558 uint64_t unit_size = kXRegSizeInBytes);
559
560 // Variants of Claim and Drop, where the 'count' parameter is a SMI held in a
561 // register.
562 inline void ClaimBySMI(const Register& count_smi,
563 uint64_t unit_size = kXRegSizeInBytes);
564 inline void DropBySMI(const Register& count_smi,
565 uint64_t unit_size = kXRegSizeInBytes);
566
567 // Compare a register with an operand, and branch to label depending on the
568 // condition. May corrupt the status flags.
569 inline void CompareAndBranch(const Register& lhs,
570 const Operand& rhs,
571 Condition cond,
572 Label* label);
573
574 // Test the bits of register defined by bit_pattern, and branch if ANY of
575 // those bits are set. May corrupt the status flags.
576 inline void TestAndBranchIfAnySet(const Register& reg,
577 const uint64_t bit_pattern,
578 Label* label);
579
580 // Test the bits of register defined by bit_pattern, and branch if ALL of
581 // those bits are clear (ie. not set.) May corrupt the status flags.
582 inline void TestAndBranchIfAllClear(const Register& reg,
583 const uint64_t bit_pattern,
584 Label* label);
585
586 // Insert one or more instructions into the instruction stream that encode
587 // some caller-defined data. The instructions used will be executable with no
588 // side effects.
589 inline void InlineData(uint64_t data);
590
591 // Insert an instrumentation enable marker into the instruction stream.
592 inline void EnableInstrumentation();
593
594 // Insert an instrumentation disable marker into the instruction stream.
595 inline void DisableInstrumentation();
596
597 // Insert an instrumentation event marker into the instruction stream. These
598 // will be picked up by the instrumentation system to annotate an instruction
599 // profile. The argument marker_name must be a printable two character string;
600 // it will be encoded in the event marker.
601 inline void AnnotateInstrumentation(const char* marker_name);
602
603 // If emit_debug_code() is true, emit a run-time check to ensure that
604 // StackPointer() does not point below the system stack pointer.
605 //
606 // Whilst it is architecturally legal for StackPointer() to point below csp,
607 // it can be evidence of a potential bug because the ABI forbids accesses
608 // below csp.
609 //
610 // If emit_debug_code() is false, this emits no code.
611 //
612 // If StackPointer() is the system stack pointer, this emits no code.
613 void AssertStackConsistency();
614
615 // Preserve the callee-saved registers (as defined by AAPCS64).
616 //
617 // Higher-numbered registers are pushed before lower-numbered registers, and
618 // thus get higher addresses.
619 // Floating-point registers are pushed before general-purpose registers, and
620 // thus get higher addresses.
621 //
622 // Note that registers are not checked for invalid values. Use this method
623 // only if you know that the GC won't try to examine the values on the stack.
624 //
625 // This method must not be called unless the current stack pointer (as set by
626 // SetStackPointer) is the system stack pointer (csp), and is aligned to
627 // ActivationFrameAlignment().
628 void PushCalleeSavedRegisters();
629
630 // Restore the callee-saved registers (as defined by AAPCS64).
631 //
632 // Higher-numbered registers are popped after lower-numbered registers, and
633 // thus come from higher addresses.
634 // Floating-point registers are popped after general-purpose registers, and
635 // thus come from higher addresses.
636 //
637 // This method must not be called unless the current stack pointer (as set by
638 // SetStackPointer) is the system stack pointer (csp), and is aligned to
639 // ActivationFrameAlignment().
640 void PopCalleeSavedRegisters();
641
  // Set the current stack pointer, but don't generate any code.
  // The new stack pointer must not alias the scratch registers Tmp0() or
  // Tmp1(), since the macro assembler may clobber those at any time.
  inline void SetStackPointer(const Register& stack_pointer) {
    ASSERT(!AreAliased(stack_pointer, Tmp0(), Tmp1()));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  inline const Register& StackPointer() const {
    return sp_;
  }
652
653 // Align csp for a frame, as per ActivationFrameAlignment, and make it the
654 // current stack pointer.
655 inline void AlignAndSetCSPForFrame() {
656 int sp_alignment = ActivationFrameAlignment();
657 // AAPCS64 mandates at least 16-byte alignment.
658 ASSERT(sp_alignment >= 16);
659 ASSERT(IsPowerOf2(sp_alignment));
660 Bic(csp, StackPointer(), sp_alignment - 1);
661 SetStackPointer(csp);
662 }
663
664 // Push the system stack pointer (csp) down to allow the same to be done to
665 // the current stack pointer (according to StackPointer()). This must be
666 // called _before_ accessing the memory.
667 //
668 // This is necessary when pushing or otherwise adding things to the stack, to
669 // satisfy the AAPCS64 constraint that the memory below the system stack
670 // pointer is not accessed.
671 //
672 // This method asserts that StackPointer() is not csp, since the call does
673 // not make sense in that context.
674 //
675 // TODO(jbramley): Currently, this method can only accept values of 'space'
676 // that can be encoded in one instruction. Refer to the implementation for
677 // details.
678 inline void BumpSystemStackPointer(const Operand& space);
679
680 // Helpers ------------------------------------------------------------------
681 // Root register.
682 inline void InitializeRootRegister();
683
684 // Load an object from the root table.
685 void LoadRoot(Register destination,
686 Heap::RootListIndex index);
687 // Store an object to the root table.
688 void StoreRoot(Register source,
689 Heap::RootListIndex index);
690
691 // Load both TrueValue and FalseValue roots.
692 void LoadTrueFalseRoots(Register true_root, Register false_root);
693
694 void LoadHeapObject(Register dst, Handle<HeapObject> object);
695
696 void LoadObject(Register result, Handle<Object> object) {
697 AllowDeferredHandleDereference heap_object_check;
698 if (object->IsHeapObject()) {
699 LoadHeapObject(result, Handle<HeapObject>::cast(object));
700 } else {
701 Mov(result, Operand(object));
702 }
703 }
704
705 static int SafepointRegisterStackIndex(int reg_code);
706
707 void CheckForInvalidValuesInCalleeSavedRegs(RegList list);
708
  // This is required for compatibility with architecture-independent code.
  // Remove if not needed.
  inline void Move(Register dst, Register src) { Mov(dst, src); }
712
713 void LoadInstanceDescriptors(Register map,
714 Register descriptors);
715 void EnumLengthUntagged(Register dst, Register map);
716 void EnumLengthSmi(Register dst, Register map);
717 void NumberOfOwnDescriptors(Register dst, Register map);
718
  // Extract the bit field described by the static Field type (kShift, kMask)
  // from a SMI-tagged value in 'reg', leaving the raw field value in 'reg'.
  template<typename Field>
  void DecodeField(Register reg) {
    // The value is SMI-tagged, so the field sits kSmiShift bits above its
    // untagged position.
    static const uint64_t shift = Field::kShift + kSmiShift;
    // Field width in bits: the number of set bits in the 32-bit mask.
    static const uint64_t setbits = CountSetBits(Field::kMask, 32);
    Ubfx(reg, reg, shift, setbits);
  }
725
726 // ---- SMI and Number Utilities ----
727
728 inline void SmiTag(Register dst, Register src);
729 inline void SmiTag(Register smi);
730 inline void SmiUntag(Register dst, Register src);
731 inline void SmiUntag(Register smi);
732 inline void SmiUntagToDouble(FPRegister dst,
733 Register src,
734 UntagMode mode = kNotSpeculativeUntag);
735 inline void SmiUntagToFloat(FPRegister dst,
736 Register src,
737 UntagMode mode = kNotSpeculativeUntag);
738
739 // Compute the absolute value of 'smi' and leave the result in 'smi'
740 // register. If 'smi' is the most negative SMI, the absolute value cannot
741 // be represented as a SMI and a jump to 'slow' is done.
742 void SmiAbs(Register smi, Register scratch, Label *slow);
743
744 inline void JumpIfSmi(Register value,
745 Label* smi_label,
746 Label* not_smi_label = NULL);
747 inline void JumpIfNotSmi(Register value, Label* not_smi_label);
748 inline void JumpIfBothSmi(Register value1,
749 Register value2,
750 Label* both_smi_label,
751 Label* not_smi_label = NULL);
752 inline void JumpIfEitherSmi(Register value1,
753 Register value2,
754 Label* either_smi_label,
755 Label* not_smi_label = NULL);
756 inline void JumpIfEitherNotSmi(Register value1,
757 Register value2,
758 Label* not_smi_label);
759 inline void JumpIfBothNotSmi(Register value1,
760 Register value2,
761 Label* not_smi_label);
762
763 // Abort execution if argument is a smi, enabled via --debug-code.
764 void AssertNotSmi(Register object,
765 const char* fail_message = "Operand is a smi");
766 void AssertSmi(Register object,
767 const char* fail_message = "Operand is not a smi");
768
769 // Abort execution if argument is not a name, enabled via --debug-code.
770 void AssertName(Register object);
771
772 // Abort execution if argument is not a string, enabled via --debug-code.
773 void AssertString(Register object);
774
775 // Abort execution if argument is not the root value with the given index,
776 // enabled via --debug-code.
777 void AssertRootValue(Register src,
778 Heap::RootListIndex root_value_index,
779 const char* message);
780
781 void JumpForHeapNumber(Register object,
782 Register heap_number_map,
783 Label* on_heap_number,
784 Label* on_not_heap_number = NULL);
785 void JumpIfHeapNumber(Register object,
786 Label* on_heap_number,
787 Register heap_number_map = NoReg);
788 void JumpIfNotHeapNumber(Register object,
789 Label* on_not_heap_number,
790 Register heap_number_map = NoReg);
791
792 // Saturate a signed 32-bit integer in input to an unsigned 8-bit integer in
793 // output.
794 void ClampInt32ToUint8(Register in_out);
795 void ClampInt32ToUint8(Register output, Register input);
796
797 // Saturate a double in input to an unsigned 8-bit integer in output.
798 void ClampDoubleToUint8(Register output,
799 DoubleRegister input,
800 DoubleRegister dbl_scratch);
801
802 // Try to convert a double to a signed 32-bit int.
803 // This succeeds if the result compares equal to the input, so inputs of -0.0
804 // are converted to 0 and handled as a success.
  // 32-bit variant: 'as_int' must be a W register. Forwards to the shared
  // TryConvertDoubleToInt helper; on failure, falls through if
  // 'on_failed_conversion' is NULL.
  void TryConvertDoubleToInt32(Register as_int,
                               FPRegister value,
                               FPRegister scratch_d,
                               Label* on_successful_conversion,
                               Label* on_failed_conversion = NULL) {
    // Guard against accidentally requesting a 64-bit destination; use
    // TryConvertDoubleToInt64 for that.
    ASSERT(as_int.Is32Bits());
    TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
                          on_failed_conversion);
  }
814
815 // Try to convert a double to a signed 64-bit int.
816 // This succeeds if the result compares equal to the input, so inputs of -0.0
817 // are converted to 0 and handled as a success.
  // 64-bit variant: 'as_int' must be an X register. Forwards to the shared
  // TryConvertDoubleToInt helper; on failure, falls through if
  // 'on_failed_conversion' is NULL.
  void TryConvertDoubleToInt64(Register as_int,
                               FPRegister value,
                               FPRegister scratch_d,
                               Label* on_successful_conversion,
                               Label* on_failed_conversion = NULL) {
    // Guard against accidentally requesting a 32-bit destination; use
    // TryConvertDoubleToInt32 for that.
    ASSERT(as_int.Is64Bits());
    TryConvertDoubleToInt(as_int, value, scratch_d, on_successful_conversion,
                          on_failed_conversion);
  }
827
828 // ---- Object Utilities ----
829
830 // Copy fields from 'src' to 'dst', where both are tagged objects.
831 // The 'temps' list is a list of X registers which can be used for scratch
832 // values. The temps list must include at least one register, and it must not
833 // contain Tmp0() or Tmp1().
834 //
835 // Currently, CopyFields cannot make use of more than three registers from
836 // the 'temps' list.
837 //
838 // As with several MacroAssembler methods, Tmp0() and Tmp1() will be used.
839 void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
840
841 // Copies a number of bytes from src to dst. All passed registers are
842 // clobbered. On exit src and dst will point to the place just after where the
843 // last byte was read or written and length will be zero. Hint may be used to
844 // determine which is the most efficient algorithm to use for copying.
845 void CopyBytes(Register dst,
846 Register src,
847 Register length,
848 Register scratch,
849 CopyHint hint = kCopyUnknown);
850
851 // Initialize fields with filler values. Fields starting at start_offset not
852 // including end_offset are overwritten with the value in filler. At the end
853 // of the loop, start_offset takes the value of end_offset.
854 void InitializeFieldsWithFiller(Register start_offset,
855 Register end_offset,
856 Register filler);
857
858 // ---- String Utilities ----
859
860
861 // Jump to label if either object is not a sequential ASCII string.
862 // Optionally perform a smi check on the objects first.
863 void JumpIfEitherIsNotSequentialAsciiStrings(
864 Register first,
865 Register second,
866 Register scratch1,
867 Register scratch2,
868 Label* failure,
869 SmiCheckType smi_check = DO_SMI_CHECK);
870
871 // Check if instance type is sequential ASCII string and jump to label if
872 // it is not.
873 void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
874 Register scratch,
875 Label* failure);
876
877 // Checks if both instance types are sequential ASCII strings and jumps to
878 // label if either is not.
879 void JumpIfEitherInstanceTypeIsNotSequentialAscii(
880 Register first_object_instance_type,
881 Register second_object_instance_type,
882 Register scratch1,
883 Register scratch2,
884 Label* failure);
885
886 // Checks if both instance types are sequential ASCII strings and jumps to
887 // label if either is not.
888 void JumpIfBothInstanceTypesAreNotSequentialAscii(
889 Register first_object_instance_type,
890 Register second_object_instance_type,
891 Register scratch1,
892 Register scratch2,
893 Label* failure);
894
895 // ---- Calling / Jumping helpers ----
896
  // This is required for compatibility with architecture-independent code.
  // On A64 an unconditional 'jmp' is simply an unconditional branch, B.
  inline void jmp(Label* L) { B(L); }
899
900 // Passes thrown value to the handler of top of the try handler chain.
901 // Register value must be x0.
902 void Throw(Register value,
903 Register scratch1,
904 Register scratch2,
905 Register scratch3,
906 Register scratch4);
907
908 // Propagates an uncatchable exception to the top of the current JS stack's
909 // handler chain. Register value must be x0.
910 void ThrowUncatchable(Register value,
911 Register scratch1,
912 Register scratch2,
913 Register scratch3,
914 Register scratch4);
915
916 void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None());
917 void TailCallStub(CodeStub* stub);
918
919 void CallRuntime(const Runtime::Function* f, int num_arguments);
920 void CallRuntime(Runtime::FunctionId fid, int num_arguments);
921 void TailCallRuntime(Runtime::FunctionId fid,
922 int num_arguments,
923 int result_size);
924 void CallRuntimeSaveDoubles(Runtime::FunctionId id);
925
926 int ActivationFrameAlignment();
927
928 // Calls a C function.
929 // The called function is not allowed to trigger a
930 // garbage collection, since that might move the code and invalidate the
931 // return address (unless this is somehow accounted for by the called
932 // function).
933 void CallCFunction(ExternalReference function,
934 int num_reg_arguments);
935 void CallCFunction(ExternalReference function,
936 int num_reg_arguments,
937 int num_double_arguments);
938 void CallCFunction(Register function,
939 int num_reg_arguments,
940 int num_double_arguments);
941
942 // Calls an API function. Allocates HandleScope, extracts returned value
943 // from handle and propagates exceptions.
944 // 'stack_space' is the space to be unwound on exit (includes the call JS
945 // arguments space and the additional space allocated for the fast call).
946 // 'spill_offset' is the offset from the stack pointer where
947 // CallApiFunctionAndReturn can spill registers.
948 void CallApiFunctionAndReturn(ExternalReference function,
949 int stack_space,
950 int spill_offset,
951 bool returns_handle,
952 int return_value_offset_from_fp);
953
  // The number of registers that CallApiFunctionAndReturn will need to save on
  // the stack. The space for these registers needs to be allocated in the
  // ExitFrame before calling CallApiFunctionAndReturn.
957 static const int kCallApiFunctionSpillSpace = 4;
958
959 // Jump to a runtime routine.
960 void JumpToExternalReference(const ExternalReference& builtin);
961 // Tail call of a runtime routine (jump).
962 // Like JumpToExternalReference, but also takes care of passing the number
963 // of parameters.
964 void TailCallExternalReference(const ExternalReference& ext,
965 int num_arguments,
966 int result_size);
967 void CallExternalReference(const ExternalReference& ext,
968 int num_arguments);
969
970
971 // Invoke specified builtin JavaScript function. Adds an entry to
972 // the unresolved list if the name does not resolve.
973 void InvokeBuiltin(Builtins::JavaScript id,
974 InvokeFlag flag,
975 const CallWrapper& call_wrapper = NullCallWrapper());
976
977 // Store the code object for the given builtin in the target register and
978 // setup the function in x1.
979 // TODO(all): Can we use another register than x1?
980 void GetBuiltinEntry(Register target, Builtins::JavaScript id);
981
982 // Store the function for the given builtin in the target register.
983 void GetBuiltinFunction(Register target, Builtins::JavaScript id);
984
985 void Jump(Register target);
986 void Jump(Address target, RelocInfo::Mode rmode);
987 void Jump(Handle<Code> code, RelocInfo::Mode rmode);
988 void Jump(intptr_t target, RelocInfo::Mode rmode);
989
990 void Call(Register target);
991 void Call(Label* target);
992 void Call(Address target, RelocInfo::Mode rmode);
993 void Call(Handle<Code> code,
994 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
995 TypeFeedbackId ast_id = TypeFeedbackId::None());
996
997 // For every Call variant, there is a matching CallSize function that returns
998 // the size (in bytes) of the call sequence.
999 static int CallSize(Register target);
1000 static int CallSize(Label* target);
1001 static int CallSize(Address target, RelocInfo::Mode rmode);
1002 static int CallSize(Handle<Code> code,
1003 RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
1004 TypeFeedbackId ast_id = TypeFeedbackId::None());
1005
1006 // Set up call kind marking in x5. The method takes x5 as an
1007 // explicit first parameter to make the code more readable at the
1008 // call sites.
1009 void SetCallKind(Register dst, CallKind kind);
1010
1011 // Registers used through the invocation chain are hard-coded.
1012 // We force passing the parameters to ensure the contracts are correctly
1013 // honoured by the caller.
1014 // 'function' must be x1.
1015 // 'actual' must use an immediate or x0.
1016 // 'expected' must use an immediate or x2.
1017 // 'call_kind' must be x5.
1018 void InvokePrologue(const ParameterCount& expected,
1019 const ParameterCount& actual,
1020 Handle<Code> code_constant,
1021 Register code_reg,
1022 Label* done,
1023 InvokeFlag flag,
1024 bool* definitely_mismatches,
1025 const CallWrapper& call_wrapper,
1026 CallKind call_kind);
1027 void InvokeCode(Register code,
1028 const ParameterCount& expected,
1029 const ParameterCount& actual,
1030 InvokeFlag flag,
1031 const CallWrapper& call_wrapper,
1032 CallKind call_kind);
1033 void InvokeCode(Handle<Code> code,
1034 const ParameterCount& expected,
1035 const ParameterCount& actual,
1036 RelocInfo::Mode rmode,
1037 InvokeFlag flag,
1038 CallKind call_kind);
1039 // Invoke the JavaScript function in the given register.
1040 // Changes the current context to the context in the function before invoking.
1041 void InvokeFunction(Register function,
1042 const ParameterCount& actual,
1043 InvokeFlag flag,
1044 const CallWrapper& call_wrapper,
1045 CallKind call_kind);
1046 void InvokeFunction(Handle<JSFunction> function,
1047 const ParameterCount& expected,
1048 const ParameterCount& actual,
1049 InvokeFlag flag,
1050 const CallWrapper& call_wrapper,
1051 CallKind call_kind,
1052 Register function_reg = NoReg);
1053
1054
1055 // ---- Floating point helpers ----
1056
1057 enum ECMA262ToInt32Result {
1058 // Provide an untagged int32_t which can be read using result.W(). That is,
1059 // the upper 32 bits of result are undefined.
1060 INT32_IN_W,
1061
1062 // Provide an untagged int32_t which can be read using the 64-bit result
1063 // register. The int32_t result is sign-extended.
1064 INT32_IN_X,
1065
1066 // Tag the int32_t result as a smi.
1067 SMI
1068 };
1069
1070 // Applies ECMA-262 ToInt32 (see section 9.5) to a double value.
1071 void ECMA262ToInt32(Register result,
1072 DoubleRegister input,
1073 Register scratch1,
1074 Register scratch2,
1075 ECMA262ToInt32Result format = INT32_IN_X);
1076
1077 // As ECMA262ToInt32, but operate on a HeapNumber.
1078 void HeapNumberECMA262ToInt32(Register result,
1079 Register heap_number,
1080 Register scratch1,
1081 Register scratch2,
1082 DoubleRegister double_scratch,
1083 ECMA262ToInt32Result format = INT32_IN_X);
1084
1085 // ---- Code generation helpers ----
1086
1087 // Generate a runtime call or jump for a unary operation.
1088 // Caller-saved registers are not preserved.
1089 // Expected on entry:
1090 // x0: operand
1091 // Returns with:
1092 // x0: result
1093 // sp on exit == sp before entry.
1094 void GenerateNumberUnaryOperation(Token::Value op, InvokeFlag flag);
1095
1096 // Generate a runtime call or jump for a binary operation.
1097 // Caller-saved registers are not preserved.
1098 // Expected on entry:
1099 // x0: right
1100 // x1: left
1101 // Returns with:
1102 // x0: result
1103 // sp on exit == sp before entry.
1104 void GenerateNumberNumberBinaryOperation(Token::Value op, InvokeFlag flag);
1105
  // Flag recording whether the macro assembler is currently generating code
  // for a stub.
  void set_generating_stub(bool value) { generating_stub_ = value; }
  bool generating_stub() const { return generating_stub_; }
  // Flag controlling whether stub calls may be emitted (consulted by
  // AllowThisStubCall).
  void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
  bool allow_stub_calls() const { return allow_stub_calls_; }
#ifdef DEBUG
  // Debug-only flag: whether macro (pseudo) instructions may be emitted.
  // Note: use #ifdef rather than #if — '#if DEBUG' fails to preprocess when
  // DEBUG is defined with an empty value, and #ifdef matches the convention
  // used elsewhere in this file (e.g. ENABLE_DEBUGGER_SUPPORT).
  void set_allow_macro_instructions(bool value) {
    allow_macro_instructions_ = value;
  }
  bool allow_macro_instructions() const { return allow_macro_instructions_; }
#endif
  // Whether Abort sequences should be emitted for real.
  // NOTE(review): semantics inferred from the name — confirm against the
  // Abort() implementation in macro-assembler-a64.cc.
  void set_use_real_aborts(bool value) { use_real_aborts_ = value; }
  bool use_real_aborts() const { return use_real_aborts_; }
  // Whether the generated code currently runs inside a stack frame.
  void set_has_frame(bool value) { has_frame_ = value; }
  bool has_frame() const { return has_frame_; }
  // Check whether a call to the given stub is permitted in the current
  // context.
  bool AllowThisStubCall(CodeStub* stub);
1121
1122 #ifdef ENABLE_DEBUGGER_SUPPORT
1123 // ---------------------------------------------------------------------------
1124 // Debugger Support
1125
1126 void DebugBreak();
1127 #endif
1128 // ---------------------------------------------------------------------------
1129 // Exception handling
1130
1131 // Push a new try handler and link into try handler chain.
1132 void PushTryHandler(StackHandler::Kind kind, int handler_index);
1133
1134 // Unlink the stack handler on top of the stack from the try handler chain.
1135 // Must preserve the result register.
1136 void PopTryHandler();
1137
1138
1139 // ---------------------------------------------------------------------------
1140 // Allocation support
1141
1142 // Allocate an object in new space or old pointer space. The object_size is
1143 // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
1144 // is passed. The allocated object is returned in result.
1145 //
1146 // If the new space is exhausted control continues at the gc_required label.
1147 // In this case, the result and scratch registers may still be clobbered.
  // If flags includes TAG_OBJECT, the result is tagged as a heap object.
1149 void Allocate(Register object_size,
1150 Register result,
1151 Register scratch1,
1152 Register scratch2,
1153 Label* gc_required,
1154 AllocationFlags flags);
1155
1156 void Allocate(int object_size,
1157 Register result,
1158 Register scratch1,
1159 Register scratch2,
1160 Label* gc_required,
1161 AllocationFlags flags);
1162
1163 // Undo allocation in new space. The object passed and objects allocated after
1164 // it will no longer be allocated. The caller must make sure that no pointers
1165 // are left to the object(s) no longer allocated as they would be invalid when
1166 // allocation is undone.
1167 void UndoAllocationInNewSpace(Register object, Register scratch);
1168
1169 void AllocateTwoByteString(Register result,
1170 Register length,
1171 Register scratch1,
1172 Register scratch2,
1173 Register scratch3,
1174 Label* gc_required);
1175 void AllocateAsciiString(Register result,
1176 Register length,
1177 Register scratch1,
1178 Register scratch2,
1179 Register scratch3,
1180 Label* gc_required);
1181 void AllocateTwoByteConsString(Register result,
1182 Register length,
1183 Register scratch1,
1184 Register scratch2,
1185 Label* gc_required);
1186 void AllocateAsciiConsString(Register result,
1187 Register length,
1188 Register scratch1,
1189 Register scratch2,
1190 Label* gc_required);
1191 void AllocateTwoByteSlicedString(Register result,
1192 Register length,
1193 Register scratch1,
1194 Register scratch2,
1195 Label* gc_required);
1196 void AllocateAsciiSlicedString(Register result,
1197 Register length,
1198 Register scratch1,
1199 Register scratch2,
1200 Label* gc_required);
1201
1202 // Allocates a heap number or jumps to the gc_required label if the young
1203 // space is full and a scavenge is needed.
1204 // All registers are clobbered.
1205 // If no heap_number_map register is provided, the function will take care of
1206 // loading it.
1207 void AllocateHeapNumber(Register result,
1208 Label* gc_required,
1209 Register scratch1,
1210 Register scratch2,
1211 Register heap_number_map = NoReg);
1212 void AllocateHeapNumberWithValue(Register result,
1213 DoubleRegister value,
1214 Label* gc_required,
1215 Register scratch1,
1216 Register scratch2,
1217 Register heap_number_map = NoReg);
1218
1219 // ---------------------------------------------------------------------------
1220 // Support functions.
1221
1222 // Try to get function prototype of a function and puts the value in the
1223 // result register. Checks that the function really is a function and jumps
1224 // to the miss label if the fast checks fail. The function register will be
1225 // untouched; the other registers may be clobbered.
1226 enum BoundFunctionAction {
1227 kMissOnBoundFunction,
1228 kDontMissOnBoundFunction
1229 };
1230
1231 void TryGetFunctionPrototype(Register function,
1232 Register result,
1233 Register scratch,
1234 Label* miss,
1235 BoundFunctionAction action =
1236 kDontMissOnBoundFunction);
1237
1238 // Compare object type for heap object. heap_object contains a non-Smi
1239 // whose object type should be compared with the given type. This both
1240 // sets the flags and leaves the object type in the type_reg register.
1241 // It leaves the map in the map register (unless the type_reg and map register
1242 // are the same register). It leaves the heap object in the heap_object
1243 // register unless the heap_object register is the same register as one of the
1244 // other registers.
1245 void CompareObjectType(Register heap_object,
1246 Register map,
1247 Register type_reg,
1248 InstanceType type);
1249
1250
1251 // Compare object type for heap object, and branch if equal (or not.)
1252 // heap_object contains a non-Smi whose object type should be compared with
1253 // the given type. This both sets the flags and leaves the object type in
1254 // the type_reg register. It leaves the map in the map register (unless the
1255 // type_reg and map register are the same register). It leaves the heap
1256 // object in the heap_object register unless the heap_object register is the
1257 // same register as one of the other registers.
1258 void JumpIfObjectType(Register object,
1259 Register map,
1260 Register type_reg,
1261 InstanceType type,
1262 Label* if_cond_pass,
1263 Condition cond = eq);
1264
1265 void JumpIfNotObjectType(Register object,
1266 Register map,
1267 Register type_reg,
1268 InstanceType type,
1269 Label* if_not_object);
1270
1271 // Compare instance type in a map. map contains a valid map object whose
1272 // object type should be compared with the given type. This both
1273 // sets the flags and leaves the object type in the type_reg register.
1274 void CompareInstanceType(Register map,
1275 Register type_reg,
1276 InstanceType type);
1277
1278 // Compare an object's map with the specified map and its transitioned
1279 // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
1280 // set with result of map compare. If multiple map compares are required, the
1281 // compare sequences branches to early_success.
1282 void CompareMap(Register obj,
1283 Register scratch,
1284 Handle<Map> map,
1285 Label* early_success = NULL);
1286
1287 // As above, but the map of the object is already loaded into the register
1288 // which is preserved by the code generated.
1289 void CompareMap(Register obj_map,
1290 Handle<Map> map,
1291 Label* early_success = NULL);
1292
1293 // Check if the map of an object is equal to a specified map and branch to
1294 // label if not. Skip the smi check if not required (object is known to be a
1295 // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
1296 // against maps that are ElementsKind transition maps of the specified map.
1297 void CheckMap(Register obj,
1298 Register scratch,
1299 Handle<Map> map,
1300 Label* fail,
1301 SmiCheckType smi_check_type);
1302
1303
1304 void CheckMap(Register obj,
1305 Register scratch,
1306 Heap::RootListIndex index,
1307 Label* fail,
1308 SmiCheckType smi_check_type);
1309
1310 // As above, but the map of the object is already loaded into obj_map, and is
1311 // preserved.
1312 void CheckMap(Register obj_map,
1313 Handle<Map> map,
1314 Label* fail,
1315 SmiCheckType smi_check_type);
1316
1317 // Check if the map of an object is equal to a specified map and branch to a
1318 // specified target if equal. Skip the smi check if not required (object is
1319 // known to be a heap object)
1320 void DispatchMap(Register obj,
1321 Register scratch,
1322 Handle<Map> map,
1323 Handle<Code> success,
1324 SmiCheckType smi_check_type);
1325
1326 // Test the bitfield of the heap object map with mask and set the condition
1327 // flags. The object register is preserved.
1328 void TestMapBitfield(Register object, uint64_t mask);
1329
1330 // Load the elements kind field of an object, and return it in the result
1331 // register.
1332 void LoadElementsKind(Register result, Register object);
1333
1334 // Compare the object in a register to a value from the root list.
1335 // Uses the Tmp0() register as scratch.
1336 void CompareRoot(const Register& obj, Heap::RootListIndex index);
1337
1338 // Compare the object in a register to a value and jump if they are equal.
1339 void JumpIfRoot(const Register& obj,
1340 Heap::RootListIndex index,
1341 Label* if_equal);
1342
1343 // Compare the object in a register to a value and jump if they are not equal.
1344 void JumpIfNotRoot(const Register& obj,
1345 Heap::RootListIndex index,
1346 Label* if_not_equal);
1347
1348 // Load and check the instance type of an object for being a unique name.
1349 // Loads the type into the second argument register.
1350 // The object and type arguments can be the same register; in that case it
1351 // will be overwritten with the type.
1352 // Fall-through if the object was a string and jump on fail otherwise.
1353 inline void IsObjectNameType(Register object, Register type, Label* fail);
1354
1355 inline void IsObjectJSObjectType(Register heap_object,
1356 Register map,
1357 Register scratch,
1358 Label* fail);
1359
1360 // Check the instance type in the given map to see if it corresponds to a
1361 // JS object type. Jump to the fail label if this is not the case and fall
1362 // through otherwise. However if fail label is NULL, no branch will be
1363 // performed and the flag will be updated. You can test the flag for "le"
1364 // condition to test if it is a valid JS object type.
1365 inline void IsInstanceJSObjectType(Register map,
1366 Register scratch,
1367 Label* fail);
1368
1369 // Load and check the instance type of an object for being a string.
1370 // Loads the type into the second argument register.
1371 // The object and type arguments can be the same register; in that case it
1372 // will be overwritten with the type.
1373 // Jumps to not_string or string appropriate. If the appropriate label is
1374 // NULL, fall through.
1375 inline void IsObjectJSStringType(Register object, Register type,
1376 Label* not_string, Label* string = NULL);
1377
1378 // Compare the contents of a register with an operand, and branch to true,
1379 // false or fall through, depending on condition.
1380 void CompareAndSplit(const Register& lhs,
1381 const Operand& rhs,
1382 Condition cond,
1383 Label* if_true,
1384 Label* if_false,
1385 Label* fall_through);
1386
1387 // Test the bits of register defined by bit_pattern, and branch to
1388 // if_any_set, if_all_clear or fall_through accordingly.
1389 void TestAndSplit(const Register& reg,
1390 uint64_t bit_pattern,
1391 Label* if_all_clear,
1392 Label* if_any_set,
1393 Label* fall_through);
1394
1395 // Check if a map for a JSObject indicates that the object has fast elements.
1396 // Jump to the specified label if it does not.
1397 void CheckFastElements(Register map,
1398 Register scratch,
1399 Label* fail);
1400
1401 // Check if a map for a JSObject indicates that the object can have both smi
1402 // and HeapObject elements. Jump to the specified label if it does not.
1403 void CheckFastObjectElements(Register map,
1404 Register scratch,
1405 Label* fail);
1406
1407 // Check if a map for a JSObject indicates that the object has fast smi only
1408 // elements. Jump to the specified label if it does not.
1409 void CheckFastSmiElements(Register map, Register scratch, Label* fail);
1410
1411 // Check to see if number can be stored as a double in FastDoubleElements.
1412 // If it can, store it at the index specified by key_reg in the array,
1413 // otherwise jump to fail.
1414 void StoreNumberToDoubleElements(Register value_reg,
1415 Register key_reg,
1416 Register elements_reg,
1417 Register scratch1,
1418 FPRegister fpscratch1,
1419 FPRegister fpscratch2,
1420 Label* fail);
1421
1422 // Picks out an array index from the hash field.
1423 // Register use:
1424 // hash - holds the index's hash. Clobbered.
1425 // index - holds the overwritten index on exit.
1426 void IndexFromHash(Register hash, Register index);
1427
1428 // ---------------------------------------------------------------------------
1429 // Inline caching support.
1430
1431 // Generate code for checking access rights - used for security checks
1432 // on access to global objects across environments. The holder register
1433 // is left untouched, whereas both scratch registers are clobbered.
1434 void CheckAccessGlobalProxy(Register holder_reg,
1435 Register scratch,
1436 Label* miss);
1437
  // Hash the integer value in the 'key' register.
1439 // It uses the same algorithm as ComputeIntegerHash in utils.h.
1440 void GetNumberHash(Register key, Register scratch);
1441
1442 // Load value from the dictionary.
1443 //
1444 // elements - holds the slow-case elements of the receiver on entry.
1445 // Unchanged unless 'result' is the same register.
1446 //
1447 // key - holds the smi key on entry.
1448 // Unchanged unless 'result' is the same register.
1449 //
1450 // result - holds the result on exit if the load succeeded.
1451 // Allowed to be the same as 'key' or 'result'.
1452 // Unchanged on bailout so 'key' or 'result' can be used
1453 // in further computation.
1454 void LoadFromNumberDictionary(Label* miss,
1455 Register elements,
1456 Register key,
1457 Register result,
1458 Register scratch0,
1459 Register scratch1,
1460 Register scratch2,
1461 Register scratch3);
1462
1463 // ---------------------------------------------------------------------------
1464 // Frames.
1465
1466 // Activation support.
  // Note that Tmp0() and Tmp1() are used as scratch registers. This is safe
1468 // because these methods are not used in Crankshaft.
1469 void EnterFrame(StackFrame::Type type);
1470 void LeaveFrame(StackFrame::Type type);
1471
1472 // Returns map with validated enum cache in object register.
1473 void CheckEnumCache(Register object,
1474 Register null_value,
1475 Register scratch0,
1476 Register scratch1,
1477 Register scratch2,
1478 Register scratch3,
1479 Label* call_runtime);
1480
1481 // AllocationSiteInfo support. Arrays may have an associated
1482 // AllocationSiteInfo object that can be checked for in order to pretransition
1483 // to another type.
1484 // On entry, receiver should point to the array object.
1485 // If allocation info is present, the Z flag is set (so that the eq
1486 // condition will pass).
1487 void TestJSArrayForAllocationSiteInfo(Register receiver,
1488 Register scratch1,
1489 Register scratch2);
1490
1491 // Enter exit frame. Exit frames are used when calling C code from generated
1492 // (JavaScript) code.
1493 //
1494 // The stack pointer must be jssp on entry, and will be set to csp by this
1495 // function. The frame pointer is also configured, but the only other
1496 // registers modified by this function are the provided scratch register, and
1497 // jssp.
1498 //
1499 // The 'extra_space' argument can be used to allocate some space in the exit
1500 // frame that will be ignored by the GC. This space will be reserved in the
1501 // bottom of the frame immediately above the return address slot.
1502 //
1503 // Set up a stack frame and registers as follows:
1504 // fp[8]: CallerPC (lr)
1505 // fp -> fp[0]: CallerFP (old fp)
1506 // fp[-8]: SPOffset (new csp)
1507 // fp[-16]: CodeObject()
1508 // csp[...]: Saved doubles, if saved_doubles is true.
1509 // csp[8]: Memory reserved for the caller if extra_space != 0.
1510 // Alignment padding, if necessary.
1511 // csp -> csp[0]: Space reserved for the return address.
1512 //
1513 // This function also stores the new frame information in the top frame, so
1514 // that the new frame becomes the current frame.
1515 void EnterExitFrame(bool save_doubles,
1516 const Register& scratch,
1517 int extra_space = 0);
1518
1519 // Leave the current exit frame, after a C function has returned to generated
1520 // (JavaScript) code.
1521 //
1522 // This effectively unwinds the operation of EnterExitFrame:
1523 // * Preserved doubles are restored (if restore_doubles is true).
1524 // * The frame information is removed from the top frame.
1525 // * The exit frame is dropped.
1526 // * The stack pointer is reset to jssp.
1527 //
1528 // The stack pointer must be csp on entry.
1529 void LeaveExitFrame(bool restore_doubles, const Register& scratch);
1530
1531 void LoadContext(Register dst, int context_chain_length);
1532
1533 // ---------------------------------------------------------------------------
1534 // StatsCounter support
1535
1536 void SetCounter(StatsCounter* counter, int value, Register scratch1,
1537 Register scratch2);
1538 void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
1539 Register scratch2);
1540 void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
1541 Register scratch2);
1542
1543 // ---------------------------------------------------------------------------
1544 // Garbage collector support (GC).
1545
1546 enum RememberedSetFinalAction {
1547 kReturnAtEnd,
1548 kFallThroughAtEnd
1549 };
1550
1551 // Record in the remembered set the fact that we have a pointer to new space
1552 // at the address pointed to by the addr register. Only works if addr is not
1553 // in new space.
1554 void RememberedSetHelper(Register object, // Used for debug code.
1555 Register addr,
1556 Register scratch,
1557 SaveFPRegsMode save_fp,
1558 RememberedSetFinalAction and_then);
1559
1560 // Push and pop the registers that can hold pointers, as defined by the
1561 // RegList constant kSafepointSavedRegisters.
1562 void PushSafepointRegisters();
1563 void PopSafepointRegisters();
1564
1565 // Store value in register src in the safepoint stack slot for register dst.
  void StoreToSafepointRegisterSlot(Register src, Register dst) {
    // The slot offset is derived from dst's register code via
    // SafepointRegisterStackIndex, scaled to bytes.
    Poke(src, SafepointRegisterStackIndex(dst.code()) * kPointerSize);
  }
1569
1570 void CheckPageFlagSet(const Register& object,
1571 const Register& scratch,
1572 int mask,
1573 Label* if_any_set);
1574
1575 void CheckPageFlagClear(const Register& object,
1576 const Register& scratch,
1577 int mask,
1578 Label* if_all_clear);
1579
1580 void CheckMapDeprecated(Handle<Map> map,
1581 Register scratch,
1582 Label* if_deprecated);
1583
1584 // Check if object is in new space and jump accordingly.
1585 // Register 'object' is preserved.
1586 void JumpIfNotInNewSpace(Register object,
1587 Label* branch) {
1588 InNewSpace(object, ne, branch);
1589 }
1590
1591 void JumpIfInNewSpace(Register object,
1592 Label* branch) {
1593 InNewSpace(object, eq, branch);
1594 }
1595
  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
  //
  // The trailing arguments default to the common case: emit the remembered-set
  // update, perform the SMI check inline, and make no pregeneration assumption.
  void RecordWriteField(
      Register object,
      int offset,
      Register value,
      Register scratch,
      LinkRegisterStatus lr_status,
      SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK,
      PregenExpectation pregen_expectation = MAYBE_PREGENERATED);
1611
1612 // As above, but the offset has the tag presubtracted. For use with
1613 // MemOperand(reg, off).
1614 inline void RecordWriteContextSlot(
1615 Register context,
1616 int offset,
1617 Register value,
1618 Register scratch,
1619 LinkRegisterStatus lr_status,
1620 SaveFPRegsMode save_fp,
1621 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
1622 SmiCheck smi_check = INLINE_SMI_CHECK,
1623 PregenExpectation pregen_expectation = MAYBE_PREGENERATED) {
1624 RecordWriteField(context,
1625 offset + kHeapObjectTag,
1626 value,
1627 scratch,
1628 lr_status,
1629 save_fp,
1630 remembered_set_action,
1631 smi_check,
1632 pregen_expectation);
1633 }
1634
1635 // For a given |object| notify the garbage collector that the slot |address|
1636 // has been written. |value| is the object being stored. The value and
1637 // address registers are clobbered by the operation.
1638 void RecordWrite(
1639 Register object,
1640 Register address,
1641 Register value,
1642 LinkRegisterStatus lr_status,
1643 SaveFPRegsMode save_fp,
1644 RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
1645 SmiCheck smi_check = INLINE_SMI_CHECK,
1646 PregenExpectation pregen_expecation = MAYBE_PREGENERATED);
1647
  // Checks the color of an object. If the object is already grey or black
  // then we just fall through, since it is already live. If it is white and
  // we can determine that it doesn't need to be scanned, then we just mark it
  // black and fall through. For the rest we jump to the label so the
  // incremental marker can fix its assumptions.
  void EnsureNotWhite(Register object,
                      Register scratch1,
                      Register scratch2,
                      Register scratch3,
                      Register scratch4,
                      Label* object_is_white_and_not_data);

  // Detects conservatively whether an object is data-only, i.e. it does not
  // need to be scanned by the garbage collector.
  void JumpIfDataObject(Register value,
                        Register scratch,
                        Label* not_data_object);

  // Helper for finding the mark bits for an address.
  // Note that the behaviour slightly differs from other architectures.
  // On exit:
  //  - addr_reg is unchanged.
  //  - The bitmap register points at the word with the mark bits.
  //  - The shift register contains the index of the first color bit for this
  //    object in the bitmap.
  inline void GetMarkBits(Register addr_reg,
                          Register bitmap_reg,
                          Register shift_reg);

  // Check if an object has a given incremental marking color, where the color
  // is described by the pair (first_bit, second_bit).
  void HasColor(Register object,
                Register scratch0,
                Register scratch1,
                Label* has_color,
                int first_bit,
                int second_bit);

  // Jump to |on_black| if the object's incremental marking color is black.
  void JumpIfBlack(Register object,
                   Register scratch0,
                   Register scratch1,
                   Label* on_black);
1689
1690
  // ---------------------------------------------------------------------------
  // Debugging.

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, const char* msg);
  // Calls Abort(msg) if |reg| is not clear (i.e. not zero); debug code only.
  void AssertRegisterIsClear(Register reg, const char* msg);
  // Aborts if |reg| does not hold the root value at |index|; debug code only.
  void AssertRegisterIsRoot(Register reg, Heap::RootListIndex index);
  // Aborts if |elements| does not look like a fast-elements backing store;
  // debug code only.
  void AssertFastElements(Register elements);

  // Abort if the specified register contains the invalid color bit pattern.
  // The pattern must be in bits [1:0] of 'reg' register.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertHasValidColor(const Register& reg);

  // Abort if 'object' register doesn't point to a string object.
  //
  // If emit_debug_code() is false, this emits no code.
  void AssertIsString(const Register& object);

  // Like Assert(), but always enabled.
  void Check(Condition cond, const char* msg);
  // Like AssertRegisterIsClear(), but always enabled.
  void CheckRegisterIsClear(Register reg, const char* msg);

  // Print a message to stdout and abort execution.
  void Abort(const char* msg);
1718
  // Conditionally load the cached Array transitioned map of type
  // transitioned_kind from the native context if the map in register
  // map_in_out is the cached Array map in the native context of
  // expected_kind. Jumps to |no_map_match| otherwise.
  void LoadTransitionedArrayMapConditional(
      ElementsKind expected_kind,
      ElementsKind transitioned_kind,
      Register map_in_out,
      Register scratch,
      Label* no_map_match);

  // Load the initial map for new Arrays from a JSFunction; |holes| selects
  // between the holey and packed variants.
  void LoadInitialArrayMap(Register function_in,
                           Register scratch,
                           Register map_out,
                           ArrayHasHoles holes);

  // Load the Array function into |function|.
  void LoadArrayFunction(Register function);
  // Load the global function with the given |index| into |function|.
  void LoadGlobalFunction(int index, Register function);

  // Load the initial map from the global function. The registers function and
  // map can be the same, function is then overwritten.
  void LoadGlobalFunctionInitialMap(Register function,
                                    Register map,
                                    Register scratch);
1744
  // --------------------------------------------------------------------------
  // Set the registers used internally by the MacroAssembler as scratch
  // registers. These registers are used to implement behaviours which are not
  // directly supported by A64, and where an intermediate result is required.
  //
  // Both tmp0 and tmp1 may be set to any X register except for xzr, sp,
  // and StackPointer(). Also, they must not be the same register (though they
  // may both be NoReg).
  //
  // It is valid to set either or both of these registers to NoReg if you don't
  // want the MacroAssembler to use any scratch registers. In a debug build, the
  // Assembler will assert that any registers it uses are valid. Be aware that
  // this check is not present in release builds. If this is a problem, use the
  // Assembler directly.
  void SetScratchRegisters(const Register& tmp0, const Register& tmp1) {
    // V8 assumes the macro assembler uses ip0 and ip1 as temp registers.
    ASSERT(tmp0.IsNone() || tmp0.Is(ip0));
    ASSERT(tmp1.IsNone() || tmp1.Is(ip1));

    // The temps must not alias each other, the zero register, or either stack
    // pointer.
    ASSERT(!AreAliased(xzr, csp, tmp0, tmp1));
    ASSERT(!AreAliased(StackPointer(), tmp0, tmp1));
    tmp0_ = tmp0;
    tmp1_ = tmp1;
  }
1769
  // The X-sized scratch registers currently configured by SetScratchRegisters.
  const Register& Tmp0() const {
    return tmp0_;
  }

  const Register& Tmp1() const {
    return tmp1_;
  }

  // W-sized (32-bit) views of the same scratch registers.
  const Register WTmp0() const {
    return Register(tmp0_.code(), kWRegSize);
  }

  const Register WTmp1() const {
    return Register(tmp1_.code(), kWRegSize);
  }

  // Set or get the FP scratch register used internally by the MacroAssembler.
  void SetFPScratchRegister(const FPRegister& fptmp0) {
    fptmp0_ = fptmp0;
  }

  const FPRegister& FPTmp0() const {
    return fptmp0_;
  }
1793
1794 const Register AppropriateTempFor(
1795 const Register& target,
1796 const CPURegister& forbidden = NoCPUReg) const {
1797 Register candidate = forbidden.Is(Tmp0()) ? Tmp1() : Tmp0();
1798 ASSERT(!candidate.Is(target));
1799 return Register(candidate.code(), target.SizeInBits());
1800 }
1801
1802 const FPRegister AppropriateTempFor(
1803 const FPRegister& target,
1804 const CPURegister& forbidden = NoCPUReg) const {
1805 USE(forbidden);
1806 FPRegister candidate = FPTmp0();
1807 ASSERT(!candidate.Is(forbidden));
1808 ASSERT(!candidate.Is(target));
1809 return FPRegister(candidate.code(), target.SizeInBits());
1810 }
1811
  // Like printf, but print at run-time from generated code.
  //
  // The caller must ensure that arguments for floating-point placeholders
  // (such as %e, %f or %g) are FPRegisters, and that arguments for integer
  // placeholders are Registers.
  //
  // A maximum of four arguments may be given to any single Printf call. The
  // arguments must be of the same type, but they do not need to have the same
  // size. Unused argument slots default to NoCPUReg and are ignored.
  //
  // The following registers cannot be printed:
  //    Tmp0(), Tmp1(), StackPointer(), csp.
  //
  // This function automatically preserves caller-saved registers so that
  // calling code can use Printf at any point without having to worry about
  // corruption. The preservation mechanism generates a lot of code. If this is
  // a problem, preserve the important registers manually and then call
  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
  // implicitly preserved.
  //
  // Unlike many MacroAssembler functions, x8 and x9 are guaranteed to be
  // preserved, and can be printed. This allows Printf to be used during debug
  // code.
  //
  // This function assumes (and asserts) that the current stack pointer is
  // callee-saved, not caller-saved. This is most likely the case anyway, as a
  // caller-saved stack pointer doesn't make a lot of sense.
  void Printf(const char * format,
              const CPURegister& arg0 = NoCPUReg,
              const CPURegister& arg1 = NoCPUReg,
              const CPURegister& arg2 = NoCPUReg,
              const CPURegister& arg3 = NoCPUReg);

  // Like Printf, but don't preserve any caller-saved registers, not even 'lr'.
  //
  // The return code from the system printf call will be returned in x0.
  void PrintfNoPreserve(const char * format,
                        const CPURegister& arg0 = NoCPUReg,
                        const CPURegister& arg1 = NoCPUReg,
                        const CPURegister& arg2 = NoCPUReg,
                        const CPURegister& arg3 = NoCPUReg);
1853
  // Code ageing support functions.

  // Code ageing on A64 works similarly to on ARM. When V8 wants to mark a
  // function as old, it replaces some of the function prologue (generated by
  // FullCodeGenerator::Generate) with a call to a special stub (ultimately
  // generated by GenerateMakeCodeYoungAgainCommon). The stub restores the
  // function prologue to its initial young state (indicating that it has been
  // recently run) and continues. A young function is therefore one which has a
  // normal frame setup sequence, and an old function has a code age sequence
  // which calls a code ageing stub.

  // Set up a basic stack frame for young code (or code exempt from ageing) with
  // type FUNCTION. It may be patched later for code ageing support. This is
  // done by Code::PatchPlatformCodeAge and EmitCodeAgeSequence.
  //
  // This function takes an Assembler so it can be called from either a
  // MacroAssembler or a PatchingAssembler context.
  static void EmitFrameSetupForCodeAgePatching(Assembler * assm);

  // Call EmitFrameSetupForCodeAgePatching from a MacroAssembler context.
  void EmitFrameSetupForCodeAgePatching();

  // Emit a code age sequence that calls the relevant code age stub. The code
  // generated by this sequence is expected to replace the code generated by
  // EmitFrameSetupForCodeAgePatching, and represents an old function.
  //
  // It never makes sense to call this other than in a patching context, so this
  // method only accepts a PatchingAssembler.
  //
  // If stub is NULL, this function generates the code age sequence but omits
  // the stub address that is normally embedded in the instruction stream. This
  // can be used by debug code to verify code age sequences.
  static void EmitCodeAgeSequence(PatchingAssembler * assm, Code * stub);

  // Return true if the sequence is a young sequence generated by
  // EmitFrameSetupForCodeAgePatching. Otherwise, this method asserts that the
  // sequence is a code age sequence (emitted by EmitCodeAgeSequence).
  static bool IsYoungSequence(byte* sequence);

#ifdef DEBUG
  // Return true if the sequence is a code age sequence generated by
  // EmitCodeAgeSequence.
  static bool IsCodeAgeSequence(byte* sequence);
#endif
1898
 private:
  // Helpers for CopyFields.
  // These each implement CopyFields in a different way.
  void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
                                 Register scratch1, Register scratch2,
                                 Register scratch3);
  void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
                                     Register scratch1, Register scratch2);
  void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
                                Register scratch1);

  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together run-time assertions for a large
  // block of registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count, int size,
                  const CPURegister& src0, const CPURegister& src1,
                  const CPURegister& src2, const CPURegister& src3);
  void PopHelper(int count, int size,
                 const CPURegister& dst0, const CPURegister& dst1,
                 const CPURegister& dst2, const CPURegister& dst3);

  // Perform necessary maintenance operations before a push or pop.
  //
  // Note that size is per register, and is specified in bytes.
  void PrepareForPush(int count, int size);
  void PrepareForPop(int count, int size);

  // Call Printf. On a native build, a simple call will be generated, but if the
  // simulator is being used then a suitable pseudo-instruction is used. The
  // arguments and stack (csp) must be prepared by the caller as for a normal
  // AAPCS64 call to 'printf'.
  //
  // The 'type' argument specifies the type of the optional arguments.
  void CallPrintf(CPURegister::RegisterType type = CPURegister::kNoRegister);

  // Helper for throwing exceptions. Compute a handler address and jump to
  // it. See the implementation for register usage.
  void JumpToHandlerEntry(Register exception,
                          Register object,
                          Register state,
                          Register scratch1,
                          Register scratch2);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object,
                  Condition cond,  // eq for new space, ne otherwise.
                  Label* branch);

  // Try to convert a double to an int so that integer fast-paths may be
  // used. Not every valid integer value is guaranteed to be caught.
  // It supports both 32-bit and 64-bit integers depending whether 'as_int'
  // is a W or X register.
  //
  // This does not distinguish between +0 and -0, so if this distinction is
  // important it must be checked separately.
  //
  // 'on_failed_conversion' defaults to NULL — presumably meaning no branch is
  // taken on failure; see the implementation to confirm.
  void TryConvertDoubleToInt(Register as_int,
                             FPRegister value,
                             FPRegister scratch_d,
                             Label* on_successful_conversion,
                             Label* on_failed_conversion = NULL);
1962
1963 bool generating_stub_;
1964 bool allow_stub_calls_;
1965 #if DEBUG
1966 // Tell whether any of the macro instruction can be used. When false the
1967 // MacroAssembler will assert if a method which can emit a variable number
1968 // of instructions is called.
1969 bool allow_macro_instructions_;
1970 #endif
1971 bool has_frame_;
1972
1973 // The Abort method should call a V8 runtime function, but the CallRuntime
1974 // mechanism depends on CEntryStub. If use_real_aborts is false, Abort will
1975 // use a simpler abort mechanism that doesn't depend on CEntryStub.
1976 //
1977 // The purpose of this is to allow Aborts to be compiled whilst CEntryStub is
1978 // being generated.
1979 bool use_real_aborts_;
1980
1981 // This handle will be patched with the code object on installation.
1982 Handle<Object> code_object_;
1983
1984 // The register to use as a stack pointer for stack operations.
1985 Register sp_;
1986
1987 // Scratch registers used internally by the MacroAssembler.
1988 Register tmp0_;
1989 Register tmp1_;
1990 FPRegister fptmp0_;
1991
1992 void InitializeNewString(Register string,
1993 Register length,
1994 Heap::RootListIndex map_index,
1995 Register scratch1,
1996 Register scratch2);
1997 };
1998
1999
// Use this scope when you need a one-to-one mapping between methods and
2001 // instructions. This scope prevents the MacroAssembler from being called and
2002 // literal pools from being emitted. It also asserts the number of instructions
2003 // emitted is what you specified when creating the scope.
class InstructionAccurateScope BASE_EMBEDDED {
 public:
  // Scope without a size check: blocks constant pool emission and, in debug
  // builds, forbids macro instructions for the scope's lifetime.
  explicit InstructionAccurateScope(MacroAssembler* masm)
      : masm_(masm), size_(0) {
    masm_->StartBlockConstPool();
#ifdef DEBUG
    previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
    masm_->set_allow_macro_instructions(false);
#endif
  }

  // Scope expecting exactly |count| instructions to be emitted; the size is
  // verified (debug builds only) when the scope is destroyed.
  InstructionAccurateScope(MacroAssembler* masm, size_t count)
      : masm_(masm), size_(count * kInstructionSize) {
    masm_->StartBlockConstPool();
#ifdef DEBUG
    masm_->bind(&start_);
    previous_allow_macro_instructions_ = masm_->allow_macro_instructions();
    masm_->set_allow_macro_instructions(false);
#endif
  }

  ~InstructionAccurateScope() {
    masm_->EndBlockConstPool();
#ifdef DEBUG
    // start_ is only bound by the size-checking constructor above.
    if (start_.is_bound()) {
      ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
    }
    masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
#endif
  }

 private:
  MacroAssembler* masm_;
  size_t size_;  // Expected code size in bytes; 0 when no check was requested.
#ifdef DEBUG
  Label start_;
  bool previous_allow_macro_instructions_;
#endif
};
2043
2044
2045 inline MemOperand ContextMemOperand(Register context, int index) {
2046 return MemOperand(context, Context::SlotOffset(index));
2047 }
2048
2049 inline MemOperand GlobalObjectMemOperand() {
2050 return ContextMemOperand(cp, Context::GLOBAL_OBJECT_INDEX);
2051 }
2052
2053
2054 // Encode and decode information about patchable inline SMI checks.
class InlineSmiCheckInfo {
 public:
  // Decode the patch information located at |info|.
  explicit InlineSmiCheckInfo(Address info);

  // True if the site includes a patchable inline SMI check.
  inline bool HasSmiCheck() const {
    return smi_check_ != NULL;
  }

  // The register tested by the inline SMI check.
  inline const Register& SmiRegister() const {
    return reg_;
  }

  // The instruction performing the inline SMI check (NULL if there is none).
  inline Instruction* SmiCheck() const {
    return smi_check_;
  }

  // Use MacroAssembler::InlineData to emit information about patchable inline
  // SMI checks. The caller may specify 'reg' as NoReg and an unbound 'site' to
  // indicate that there is no inline SMI check. Note that 'reg' cannot be csp.
  //
  // The generated patch information can be read using the InlineSmiCheckInfo
  // class.
  static void Emit(MacroAssembler* masm, const Register& reg,
                   const Label* smi_check);

  // Emit information to indicate that there is no inline SMI check.
  static void EmitNotInlined(MacroAssembler* masm) {
    // An unbound label encodes "no check" (see the comment on Emit above).
    Label unbound;
    Emit(masm, NoReg, &unbound);
  }

 private:
  Register reg_;
  Instruction* smi_check_;

  // Fields in the data encoded by InlineData.

  // A width of 5 (Rd_width) for the SMI register precludes the use of csp,
  // since kSPRegInternalCode is 63. However, csp should never hold a SMI or be
  // used in a patchable check. The Emit() method checks this.
  //
  // Note that the total size of the fields is restricted by the underlying
  // storage size handled by the BitField class, which is a uint32_t.
  class RegisterBits : public BitField<unsigned, 0, 5> {};
  class DeltaBits : public BitField<uint32_t, 5, 32-5> {};
};
2101
2102 } } // namespace v8::internal
2103
2104 #ifdef GENERATED_CODE_COVERAGE
2105 #error "Unsupported option"
2106 #define CODE_COVERAGE_STRINGIFY(x) #x
2107 #define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
2108 #define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
2109 #define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
2110 #else
2111 #define ACCESS_MASM(masm) masm->
2112 #endif
2113
2114 #endif // V8_A64_MACRO_ASSEMBLER_A64_H_
OLDNEW
« no previous file with comments | « src/a64/lithium-gap-resolver-a64.cc ('k') | src/a64/macro-assembler-a64.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698