Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1119)

Side by Side Diff: src/a64/macro-assembler-a64-inl.h

Issue 148293020: Merge experimental/a64 to bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove ARM from OWNERS Created 6 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/a64/macro-assembler-a64.cc ('k') | src/a64/regexp-macro-assembler-a64.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #ifndef V8_A64_MACRO_ASSEMBLER_A64_INL_H_
29 #define V8_A64_MACRO_ASSEMBLER_A64_INL_H_
30
31 #include <ctype.h>
32
33 #include "v8globals.h"
34 #include "globals.h"
35
36 #include "a64/assembler-a64.h"
37 #include "a64/assembler-a64-inl.h"
38 #include "a64/macro-assembler-a64.h"
39 #include "a64/instrument-a64.h"
40
41
42 namespace v8 {
43 namespace internal {
44
45
46 MemOperand FieldMemOperand(Register object, int offset) {
47 return MemOperand(object, offset - kHeapObjectTag);
48 }
49
50
51 MemOperand UntagSmiFieldMemOperand(Register object, int offset) {
52 return UntagSmiMemOperand(object, offset - kHeapObjectTag);
53 }
54
55
// Builds a MemOperand that reads only the payload half of a Smi in memory,
// skipping the tag half entirely so no shift instruction is needed.
MemOperand UntagSmiMemOperand(Register object, int offset) {
  // Assumes that Smis are shifted by 32 bits and little endianness.
  // On little-endian, the payload occupies the high 4 bytes of the 8-byte
  // Smi, i.e. it starts kSmiShift / kBitsPerByte bytes past |offset|.
  STATIC_ASSERT(kSmiShift == 32);
  return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
}
61
62
// Returns the handle to the code object being generated; must only be called
// once the handle has been initialized.
Handle<Object> MacroAssembler::CodeObject() {
  ASSERT(!code_object_.is_null());
  return code_object_;
}
67
68
// Bitwise AND: rd = rn & operand. The zero register is rejected as a
// destination because writing to xzr/wzr discards the result.
void MacroAssembler::And(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, AND);
}
76
77
// Bitwise AND, setting the condition flags: rd = rn & operand.
void MacroAssembler::Ands(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, ANDS);
}
85
86
// Test bits: sets flags from rn & operand, discarding the result by using
// the appropriately-sized zero register as the destination.
void MacroAssembler::Tst(const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
}
92
93
// Bit clear: rd = rn & ~operand.
void MacroAssembler::Bic(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, BIC);
}
101
102
// Bit clear, setting the condition flags: rd = rn & ~operand.
void MacroAssembler::Bics(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, BICS);
}
110
111
// Bitwise OR: rd = rn | operand.
void MacroAssembler::Orr(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, ORR);
}
119
120
// Bitwise OR NOT: rd = rn | ~operand.
void MacroAssembler::Orn(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, ORN);
}
128
129
// Bitwise exclusive OR: rd = rn ^ operand.
void MacroAssembler::Eor(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, EOR);
}
137
138
// Bitwise exclusive OR NOT: rd = rn ^ ~operand.
void MacroAssembler::Eon(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, EON);
}
146
147
148 void MacroAssembler::Ccmp(const Register& rn,
149 const Operand& operand,
150 StatusFlags nzcv,
151 Condition cond) {
152 ASSERT(allow_macro_instructions_);
153 if (operand.IsImmediate() && (operand.immediate() < 0)) {
154 ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
155 } else {
156 ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
157 }
158 }
159
160
161 void MacroAssembler::Ccmn(const Register& rn,
162 const Operand& operand,
163 StatusFlags nzcv,
164 Condition cond) {
165 ASSERT(allow_macro_instructions_);
166 if (operand.IsImmediate() && (operand.immediate() < 0)) {
167 ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
168 } else {
169 ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
170 }
171 }
172
173
174 void MacroAssembler::Add(const Register& rd,
175 const Register& rn,
176 const Operand& operand) {
177 ASSERT(allow_macro_instructions_);
178 if (operand.IsImmediate() && (operand.immediate() < 0)) {
179 AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
180 } else {
181 AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
182 }
183 }
184
185 void MacroAssembler::Adds(const Register& rd,
186 const Register& rn,
187 const Operand& operand) {
188 ASSERT(allow_macro_instructions_);
189 if (operand.IsImmediate() && (operand.immediate() < 0)) {
190 AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
191 } else {
192 AddSubMacro(rd, rn, operand, SetFlags, ADD);
193 }
194 }
195
196
197 void MacroAssembler::Sub(const Register& rd,
198 const Register& rn,
199 const Operand& operand) {
200 ASSERT(allow_macro_instructions_);
201 if (operand.IsImmediate() && (operand.immediate() < 0)) {
202 AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
203 } else {
204 AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
205 }
206 }
207
208
209 void MacroAssembler::Subs(const Register& rd,
210 const Register& rn,
211 const Operand& operand) {
212 ASSERT(allow_macro_instructions_);
213 if (operand.IsImmediate() && (operand.immediate() < 0)) {
214 AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
215 } else {
216 AddSubMacro(rd, rn, operand, SetFlags, SUB);
217 }
218 }
219
220
// Compare negative: sets flags from (rn + operand), discarding the result
// into the appropriately-sized zero register.
void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  Adds(AppropriateZeroRegFor(rn), rn, operand);
}
225
226
// Compare: sets flags from (rn - operand), discarding the result into the
// appropriately-sized zero register.
void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  Subs(AppropriateZeroRegFor(rn), rn, operand);
}
231
232
233 void MacroAssembler::Neg(const Register& rd,
234 const Operand& operand) {
235 ASSERT(allow_macro_instructions_);
236 ASSERT(!rd.IsZero());
237 if (operand.IsImmediate()) {
238 Mov(rd, -operand.immediate());
239 } else {
240 Sub(rd, AppropriateZeroRegFor(rd), operand);
241 }
242 }
243
244
// Negate, setting the condition flags: rd = -operand, implemented as a
// flag-setting subtraction from the zero register.
void MacroAssembler::Negs(const Register& rd,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  Subs(rd, AppropriateZeroRegFor(rd), operand);
}
250
251
// Add with carry: rd = rn + operand + C, flags unchanged.
void MacroAssembler::Adc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
}
259
260
// Add with carry, setting the condition flags: rd = rn + operand + C.
void MacroAssembler::Adcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
}
268
269
// Subtract with carry: rd = rn - operand - 1 + C, flags unchanged.
void MacroAssembler::Sbc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
}
277
278
// Subtract with carry, setting the condition flags:
// rd = rn - operand - 1 + C.
void MacroAssembler::Sbcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
}
286
287
// Negate with carry: rd = -operand - 1 + C, implemented as a subtract-with-
// carry from the appropriately-sized zero register. Flags unchanged.
void MacroAssembler::Ngc(const Register& rd,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  Register zr = AppropriateZeroRegFor(rd);
  Sbc(rd, zr, operand);
}
295
296
// Negate with carry, setting the condition flags: rd = -operand - 1 + C.
void MacroAssembler::Ngcs(const Register& rd,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  Register zr = AppropriateZeroRegFor(rd);
  Sbcs(rd, zr, operand);
}
304
305
// Move the bitwise NOT of a 64-bit immediate into rd, delegating to the Mov
// macro so the best available move/immediate sequence is chosen.
void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  Mov(rd, ~imm);
}
311
312
// Generates the load/store macro wrappers (one per entry in LS_MACRO_LIST,
// e.g. Ldr/Str and friends). Each forwards to LoadStoreMacro with the
// matching low-level op after checking macro instructions are allowed.
#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
  void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
    ASSERT(allow_macro_instructions_); \
    LoadStoreMacro(REG, addr, OP); \
  }
LS_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION
320
321
// Compute the PC-relative address of |label| into rd.
void MacroAssembler::Adr(const Register& rd, Label* label) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  adr(rd, label);
}
327
328
// Arithmetic shift right by an immediate amount: rd = rn >> shift (signed).
void MacroAssembler::Asr(const Register& rd,
                         const Register& rn,
                         unsigned shift) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  asr(rd, rn, shift);
}
336
337
// Arithmetic shift right by a register amount (ASRV): rd = rn >> rm (signed).
void MacroAssembler::Asr(const Register& rd,
                         const Register& rn,
                         const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  asrv(rd, rn, rm);
}
345
346
347 void MacroAssembler::B(Label* label) {
348 b(label);
349 }
350
351
// Conditional branch, argument-order convenience overload; delegates to
// B(label, cond), which performs the al/nv checks.
void MacroAssembler::B(Condition cond, Label* label) {
  ASSERT(allow_macro_instructions_);
  B(label, cond);
}
356
357
// Conditional branch to |label| when |cond| holds. The al and nv conditions
// are rejected: use the unconditional B(label) overload instead.
void MacroAssembler::B(Label* label, Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT((cond != al) && (cond != nv));
  b(label, cond);
}
363
364
365 void MacroAssembler::Bfi(const Register& rd,
366 const Register& rn,
367 unsigned lsb,
368 unsigned width) {
369 ASSERT(allow_macro_instructions_);
370 ASSERT(!rd.IsZero());
371 bfi(rd, rn, lsb, width);
372 }
373
374
375 void MacroAssembler::Bfxil(const Register& rd,
376 const Register& rn,
377 unsigned lsb,
378 unsigned width) {
379 ASSERT(allow_macro_instructions_);
380 ASSERT(!rd.IsZero());
381 bfxil(rd, rn, lsb, width);
382 }
383
384
385 void MacroAssembler::Bind(Label* label) {
386 ASSERT(allow_macro_instructions_);
387 bind(label);
388 }
389
390
391 void MacroAssembler::Bl(Label* label) {
392 ASSERT(allow_macro_instructions_);
393 bl(label);
394 }
395
396
397 void MacroAssembler::Blr(const Register& xn) {
398 ASSERT(allow_macro_instructions_);
399 ASSERT(!xn.IsZero());
400 blr(xn);
401 }
402
403
404 void MacroAssembler::Br(const Register& xn) {
405 ASSERT(allow_macro_instructions_);
406 ASSERT(!xn.IsZero());
407 br(xn);
408 }
409
410
411 void MacroAssembler::Brk(int code) {
412 ASSERT(allow_macro_instructions_);
413 brk(code);
414 }
415
416
// Compare and branch to |label| if rt is non-zero.
void MacroAssembler::Cbnz(const Register& rt, Label* label) {
  ASSERT(allow_macro_instructions_);
  cbnz(rt, label);
}
421
422
// Compare and branch to |label| if rt is zero.
void MacroAssembler::Cbz(const Register& rt, Label* label) {
  ASSERT(allow_macro_instructions_);
  cbz(rt, label);
}
427
428
429 void MacroAssembler::Cinc(const Register& rd,
430 const Register& rn,
431 Condition cond) {
432 ASSERT(allow_macro_instructions_);
433 ASSERT(!rd.IsZero());
434 ASSERT((cond != al) && (cond != nv));
435 cinc(rd, rn, cond);
436 }
437
438
439 void MacroAssembler::Cinv(const Register& rd,
440 const Register& rn,
441 Condition cond) {
442 ASSERT(allow_macro_instructions_);
443 ASSERT(!rd.IsZero());
444 ASSERT((cond != al) && (cond != nv));
445 cinv(rd, rn, cond);
446 }
447
448
449 void MacroAssembler::Cls(const Register& rd, const Register& rn) {
450 ASSERT(allow_macro_instructions_);
451 ASSERT(!rd.IsZero());
452 cls(rd, rn);
453 }
454
455
456 void MacroAssembler::Clz(const Register& rd, const Register& rn) {
457 ASSERT(allow_macro_instructions_);
458 ASSERT(!rd.IsZero());
459 clz(rd, rn);
460 }
461
462
463 void MacroAssembler::Cneg(const Register& rd,
464 const Register& rn,
465 Condition cond) {
466 ASSERT(allow_macro_instructions_);
467 ASSERT(!rd.IsZero());
468 ASSERT((cond != al) && (cond != nv));
469 cneg(rd, rn, cond);
470 }
471
472
473 // Conditionally zero the destination register. Only X registers are supported
474 // due to the truncation side-effect when used on W registers.
// Conditionally zero the destination register. Only X registers are supported
// due to the truncation side-effect when used on W registers.
// Emits csel(rd, xzr, rd, cond): rd = (cond) ? 0 : rd.
void MacroAssembler::CzeroX(const Register& rd,
                            Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsSP() && rd.Is64Bits());
  ASSERT((cond != al) && (cond != nv));
  csel(rd, xzr, rd, cond);
}
482
483
484 // Conditionally move a value into the destination register. Only X registers
485 // are supported due to the truncation side-effect when used on W registers.
// Conditionally move a value into the destination register. Only X registers
// are supported due to the truncation side-effect when used on W registers.
// Emits csel(rd, rn, rd, cond): rd = (cond) ? rn : rd. When rd and rn are
// the same register the csel would be a no-op, so nothing is emitted.
void MacroAssembler::CmovX(const Register& rd,
                           const Register& rn,
                           Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsSP());
  ASSERT(rd.Is64Bits() && rn.Is64Bits());
  ASSERT((cond != al) && (cond != nv));
  if (!rd.is(rn)) {
    csel(rd, rn, rd, cond);
  }
}
497
498
499 void MacroAssembler::Cset(const Register& rd, Condition cond) {
500 ASSERT(allow_macro_instructions_);
501 ASSERT(!rd.IsZero());
502 ASSERT((cond != al) && (cond != nv));
503 cset(rd, cond);
504 }
505
506
507 void MacroAssembler::Csetm(const Register& rd, Condition cond) {
508 ASSERT(allow_macro_instructions_);
509 ASSERT(!rd.IsZero());
510 ASSERT((cond != al) && (cond != nv));
511 csetm(rd, cond);
512 }
513
514
515 void MacroAssembler::Csinc(const Register& rd,
516 const Register& rn,
517 const Register& rm,
518 Condition cond) {
519 ASSERT(allow_macro_instructions_);
520 ASSERT(!rd.IsZero());
521 ASSERT((cond != al) && (cond != nv));
522 csinc(rd, rn, rm, cond);
523 }
524
525
526 void MacroAssembler::Csinv(const Register& rd,
527 const Register& rn,
528 const Register& rm,
529 Condition cond) {
530 ASSERT(allow_macro_instructions_);
531 ASSERT(!rd.IsZero());
532 ASSERT((cond != al) && (cond != nv));
533 csinv(rd, rn, rm, cond);
534 }
535
536
537 void MacroAssembler::Csneg(const Register& rd,
538 const Register& rn,
539 const Register& rm,
540 Condition cond) {
541 ASSERT(allow_macro_instructions_);
542 ASSERT(!rd.IsZero());
543 ASSERT((cond != al) && (cond != nv));
544 csneg(rd, rn, rm, cond);
545 }
546
547
548 void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
549 ASSERT(allow_macro_instructions_);
550 dmb(domain, type);
551 }
552
553
554 void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
555 ASSERT(allow_macro_instructions_);
556 dsb(domain, type);
557 }
558
559
560 void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
561 ASSERT(allow_macro_instructions_);
562 debug(message, code, params);
563 }
564
565
566 void MacroAssembler::Extr(const Register& rd,
567 const Register& rn,
568 const Register& rm,
569 unsigned lsb) {
570 ASSERT(allow_macro_instructions_);
571 ASSERT(!rd.IsZero());
572 extr(rd, rn, rm, lsb);
573 }
574
575
576 void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
577 ASSERT(allow_macro_instructions_);
578 fabs(fd, fn);
579 }
580
581
582 void MacroAssembler::Fadd(const FPRegister& fd,
583 const FPRegister& fn,
584 const FPRegister& fm) {
585 ASSERT(allow_macro_instructions_);
586 fadd(fd, fn, fm);
587 }
588
589
590 void MacroAssembler::Fccmp(const FPRegister& fn,
591 const FPRegister& fm,
592 StatusFlags nzcv,
593 Condition cond) {
594 ASSERT(allow_macro_instructions_);
595 ASSERT((cond != al) && (cond != nv));
596 fccmp(fn, fm, nzcv, cond);
597 }
598
599
600 void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
601 ASSERT(allow_macro_instructions_);
602 fcmp(fn, fm);
603 }
604
605
// Compare fn against a double immediate. The fcmp instruction only supports
// an immediate of zero, so non-zero values are first materialized into a
// temporary FP register. Note (value != 0.0) is false for -0.0 as well, so
// both signed zeros take the direct fcmp path.
void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
  ASSERT(allow_macro_instructions_);
  if (value != 0.0) {
    FPRegister tmp = AppropriateTempFor(fn);
    Fmov(tmp, value);
    fcmp(fn, tmp);
  } else {
    fcmp(fn, value);
  }
}
616
617
618 void MacroAssembler::Fcsel(const FPRegister& fd,
619 const FPRegister& fn,
620 const FPRegister& fm,
621 Condition cond) {
622 ASSERT(allow_macro_instructions_);
623 ASSERT((cond != al) && (cond != nv));
624 fcsel(fd, fn, fm, cond);
625 }
626
627
628 void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
629 ASSERT(allow_macro_instructions_);
630 fcvt(fd, fn);
631 }
632
633
634 void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
635 ASSERT(allow_macro_instructions_);
636 ASSERT(!rd.IsZero());
637 fcvtas(rd, fn);
638 }
639
640
641 void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
642 ASSERT(allow_macro_instructions_);
643 ASSERT(!rd.IsZero());
644 fcvtau(rd, fn);
645 }
646
647
648 void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
649 ASSERT(allow_macro_instructions_);
650 ASSERT(!rd.IsZero());
651 fcvtms(rd, fn);
652 }
653
654
655 void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
656 ASSERT(allow_macro_instructions_);
657 ASSERT(!rd.IsZero());
658 fcvtmu(rd, fn);
659 }
660
661
662 void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
663 ASSERT(allow_macro_instructions_);
664 ASSERT(!rd.IsZero());
665 fcvtns(rd, fn);
666 }
667
668
669 void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
670 ASSERT(allow_macro_instructions_);
671 ASSERT(!rd.IsZero());
672 fcvtnu(rd, fn);
673 }
674
675
676 void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
677 ASSERT(allow_macro_instructions_);
678 ASSERT(!rd.IsZero());
679 fcvtzs(rd, fn);
680 }
// Convert FP to unsigned integer, rounding toward zero.
void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtzu(rd, fn);
}
686
687
688 void MacroAssembler::Fdiv(const FPRegister& fd,
689 const FPRegister& fn,
690 const FPRegister& fm) {
691 ASSERT(allow_macro_instructions_);
692 fdiv(fd, fn, fm);
693 }
694
695
696 void MacroAssembler::Fmadd(const FPRegister& fd,
697 const FPRegister& fn,
698 const FPRegister& fm,
699 const FPRegister& fa) {
700 ASSERT(allow_macro_instructions_);
701 fmadd(fd, fn, fm, fa);
702 }
703
704
705 void MacroAssembler::Fmax(const FPRegister& fd,
706 const FPRegister& fn,
707 const FPRegister& fm) {
708 ASSERT(allow_macro_instructions_);
709 fmax(fd, fn, fm);
710 }
711
712
713 void MacroAssembler::Fmaxnm(const FPRegister& fd,
714 const FPRegister& fn,
715 const FPRegister& fm) {
716 ASSERT(allow_macro_instructions_);
717 fmaxnm(fd, fn, fm);
718 }
719
720
721 void MacroAssembler::Fmin(const FPRegister& fd,
722 const FPRegister& fn,
723 const FPRegister& fm) {
724 ASSERT(allow_macro_instructions_);
725 fmin(fd, fn, fm);
726 }
727
728
729 void MacroAssembler::Fminnm(const FPRegister& fd,
730 const FPRegister& fn,
731 const FPRegister& fm) {
732 ASSERT(allow_macro_instructions_);
733 fminnm(fd, fn, fm);
734 }
735
736
// FP register-to-register move, eliding the instruction when it would be a
// true no-op.
void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
  ASSERT(allow_macro_instructions_);
  // Only emit an instruction if fd and fn are different, and they are both D
  // registers. fmov(s0, s0) is not a no-op because it clears the top word of
  // d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
  // top of q0, but FPRegister does not currently support Q registers.
  if (!fd.Is(fn) || !fd.Is64Bits()) {
    fmov(fd, fn);
  }
}
747
748
749 void MacroAssembler::Fmov(FPRegister fd, Register rn) {
750 ASSERT(allow_macro_instructions_);
751 fmov(fd, rn);
752 }
753
754
// Load a double immediate into an FP register. Immediates encodable by the
// fmov instruction (and positive zero) are emitted directly; anything else
// is moved through an integer scratch register as raw bits.
void MacroAssembler::Fmov(FPRegister fd, double imm) {
  ASSERT(allow_macro_instructions_);
  // The copysign check distinguishes +0.0 (directly encodable) from -0.0
  // (which needs the raw-bits path to preserve the sign bit).
  if ((fd.Is64Bits() && IsImmFP64(imm)) ||
      (fd.Is32Bits() && IsImmFP32(imm)) ||
      ((imm == 0.0) && (copysign(1.0, imm) == 1.0))) {
    // These cases can be handled by the Assembler.
    fmov(fd, imm);
  } else {
    // TODO(all): The Assembler would try to relocate the immediate with
    // Assembler::ldr(const FPRegister& ft, double imm) but it is not
    // implemented yet.
    if (fd.SizeInBits() == kDRegSize) {
      Mov(Tmp0(), double_to_rawbits(imm));
      Fmov(fd, Tmp0());
    } else {
      ASSERT(fd.SizeInBits() == kSRegSize);
      // Narrowing to float here; presumably callers of the S-register form
      // only pass values representable as float — TODO confirm.
      Mov(WTmp0(), float_to_rawbits(static_cast<float>(imm)));
      Fmov(fd, WTmp0());
    }
  }
}
776
777
778 void MacroAssembler::Fmov(Register rd, FPRegister fn) {
779 ASSERT(allow_macro_instructions_);
780 ASSERT(!rd.IsZero());
781 fmov(rd, fn);
782 }
783
784
785 void MacroAssembler::Fmsub(const FPRegister& fd,
786 const FPRegister& fn,
787 const FPRegister& fm,
788 const FPRegister& fa) {
789 ASSERT(allow_macro_instructions_);
790 fmsub(fd, fn, fm, fa);
791 }
792
793
794 void MacroAssembler::Fmul(const FPRegister& fd,
795 const FPRegister& fn,
796 const FPRegister& fm) {
797 ASSERT(allow_macro_instructions_);
798 fmul(fd, fn, fm);
799 }
800
801
802 void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
803 ASSERT(allow_macro_instructions_);
804 fneg(fd, fn);
805 }
806
807
808 void MacroAssembler::Fnmadd(const FPRegister& fd,
809 const FPRegister& fn,
810 const FPRegister& fm,
811 const FPRegister& fa) {
812 ASSERT(allow_macro_instructions_);
813 fnmadd(fd, fn, fm, fa);
814 }
815
816
817 void MacroAssembler::Fnmsub(const FPRegister& fd,
818 const FPRegister& fn,
819 const FPRegister& fm,
820 const FPRegister& fa) {
821 ASSERT(allow_macro_instructions_);
822 fnmsub(fd, fn, fm, fa);
823 }
824
825
826 void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
827 ASSERT(allow_macro_instructions_);
828 frinta(fd, fn);
829 }
830
831
832 void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
833 ASSERT(allow_macro_instructions_);
834 frintn(fd, fn);
835 }
836
837
838 void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
839 ASSERT(allow_macro_instructions_);
840 frintz(fd, fn);
841 }
842
843
844 void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
845 ASSERT(allow_macro_instructions_);
846 fsqrt(fd, fn);
847 }
848
849
850 void MacroAssembler::Fsub(const FPRegister& fd,
851 const FPRegister& fn,
852 const FPRegister& fm) {
853 ASSERT(allow_macro_instructions_);
854 fsub(fd, fn, fm);
855 }
856
857
858 void MacroAssembler::Hint(SystemHint code) {
859 ASSERT(allow_macro_instructions_);
860 hint(code);
861 }
862
863
864 void MacroAssembler::Hlt(int code) {
865 ASSERT(allow_macro_instructions_);
866 hlt(code);
867 }
868
869
870 void MacroAssembler::Isb() {
871 ASSERT(allow_macro_instructions_);
872 isb();
873 }
874
875
876 void MacroAssembler::Ldnp(const CPURegister& rt,
877 const CPURegister& rt2,
878 const MemOperand& src) {
879 ASSERT(allow_macro_instructions_);
880 ASSERT(!AreAliased(rt, rt2));
881 ldnp(rt, rt2, src);
882 }
883
884
885 void MacroAssembler::Ldp(const CPURegister& rt,
886 const CPURegister& rt2,
887 const MemOperand& src) {
888 ASSERT(allow_macro_instructions_);
889 ASSERT(!AreAliased(rt, rt2));
890 ldp(rt, rt2, src);
891 }
892
893
894 void MacroAssembler::Ldpsw(const Register& rt,
895 const Register& rt2,
896 const MemOperand& src) {
897 ASSERT(allow_macro_instructions_);
898 ASSERT(!rt.IsZero());
899 ASSERT(!rt2.IsZero());
900 ldpsw(rt, rt2, src);
901 }
902
903
904 void MacroAssembler::Ldr(const FPRegister& ft, double imm) {
905 ASSERT(allow_macro_instructions_);
906 ldr(ft, imm);
907 }
908
909
910 void MacroAssembler::Ldr(const Register& rt, uint64_t imm) {
911 ASSERT(allow_macro_instructions_);
912 ASSERT(!rt.IsZero());
913 ldr(rt, imm);
914 }
915
916
917 void MacroAssembler::Lsl(const Register& rd,
918 const Register& rn,
919 unsigned shift) {
920 ASSERT(allow_macro_instructions_);
921 ASSERT(!rd.IsZero());
922 lsl(rd, rn, shift);
923 }
924
925
926 void MacroAssembler::Lsl(const Register& rd,
927 const Register& rn,
928 const Register& rm) {
929 ASSERT(allow_macro_instructions_);
930 ASSERT(!rd.IsZero());
931 lslv(rd, rn, rm);
932 }
933
934
935 void MacroAssembler::Lsr(const Register& rd,
936 const Register& rn,
937 unsigned shift) {
938 ASSERT(allow_macro_instructions_);
939 ASSERT(!rd.IsZero());
940 lsr(rd, rn, shift);
941 }
942
943
944 void MacroAssembler::Lsr(const Register& rd,
945 const Register& rn,
946 const Register& rm) {
947 ASSERT(allow_macro_instructions_);
948 ASSERT(!rd.IsZero());
949 lsrv(rd, rn, rm);
950 }
951
952
953 void MacroAssembler::Madd(const Register& rd,
954 const Register& rn,
955 const Register& rm,
956 const Register& ra) {
957 ASSERT(allow_macro_instructions_);
958 ASSERT(!rd.IsZero());
959 madd(rd, rn, rm, ra);
960 }
961
962
963 void MacroAssembler::Mneg(const Register& rd,
964 const Register& rn,
965 const Register& rm) {
966 ASSERT(allow_macro_instructions_);
967 ASSERT(!rd.IsZero());
968 mneg(rd, rn, rm);
969 }
970
971
// Integer register-to-register move, eliding the instruction when it would
// be a true no-op.
void MacroAssembler::Mov(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  // Emit a register move only if the registers are distinct, or if they are
  // not X registers. Note that mov(w0, w0) is not a no-op because it clears
  // the top word of x0.
  if (!rd.Is(rn) || !rd.Is64Bits()) {
    Assembler::mov(rd, rn);
  }
}
982
983
984 void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
985 ASSERT(allow_macro_instructions_);
986 ASSERT(!rd.IsZero());
987 movk(rd, imm, shift);
988 }
989
990
991 void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
992 ASSERT(allow_macro_instructions_);
993 ASSERT(!rt.IsZero());
994 mrs(rt, sysreg);
995 }
996
997
998 void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
999 ASSERT(allow_macro_instructions_);
1000 ASSERT(!rt.IsZero());
1001 msr(sysreg, rt);
1002 }
1003
1004
1005 void MacroAssembler::Msub(const Register& rd,
1006 const Register& rn,
1007 const Register& rm,
1008 const Register& ra) {
1009 ASSERT(allow_macro_instructions_);
1010 ASSERT(!rd.IsZero());
1011 msub(rd, rn, rm, ra);
1012 }
1013
1014
1015 void MacroAssembler::Mul(const Register& rd,
1016 const Register& rn,
1017 const Register& rm) {
1018 ASSERT(allow_macro_instructions_);
1019 ASSERT(!rd.IsZero());
1020 mul(rd, rn, rm);
1021 }
1022
1023
1024 void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
1025 ASSERT(allow_macro_instructions_);
1026 ASSERT(!rd.IsZero());
1027 rbit(rd, rn);
1028 }
1029
1030
1031 void MacroAssembler::Ret(const Register& xn) {
1032 ASSERT(allow_macro_instructions_);
1033 ASSERT(!xn.IsZero());
1034 ret(xn);
1035 }
1036
1037
1038 void MacroAssembler::Rev(const Register& rd, const Register& rn) {
1039 ASSERT(allow_macro_instructions_);
1040 ASSERT(!rd.IsZero());
1041 rev(rd, rn);
1042 }
1043
1044
1045 void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
1046 ASSERT(allow_macro_instructions_);
1047 ASSERT(!rd.IsZero());
1048 rev16(rd, rn);
1049 }
1050
1051
1052 void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
1053 ASSERT(allow_macro_instructions_);
1054 ASSERT(!rd.IsZero());
1055 rev32(rd, rn);
1056 }
1057
1058
1059 void MacroAssembler::Ror(const Register& rd,
1060 const Register& rs,
1061 unsigned shift) {
1062 ASSERT(allow_macro_instructions_);
1063 ASSERT(!rd.IsZero());
1064 ror(rd, rs, shift);
1065 }
1066
1067
1068 void MacroAssembler::Ror(const Register& rd,
1069 const Register& rn,
1070 const Register& rm) {
1071 ASSERT(allow_macro_instructions_);
1072 ASSERT(!rd.IsZero());
1073 rorv(rd, rn, rm);
1074 }
1075
1076
1077 void MacroAssembler::Sbfiz(const Register& rd,
1078 const Register& rn,
1079 unsigned lsb,
1080 unsigned width) {
1081 ASSERT(allow_macro_instructions_);
1082 ASSERT(!rd.IsZero());
1083 sbfiz(rd, rn, lsb, width);
1084 }
1085
1086
1087 void MacroAssembler::Sbfx(const Register& rd,
1088 const Register& rn,
1089 unsigned lsb,
1090 unsigned width) {
1091 ASSERT(allow_macro_instructions_);
1092 ASSERT(!rd.IsZero());
1093 sbfx(rd, rn, lsb, width);
1094 }
1095
1096
1097 void MacroAssembler::Scvtf(const FPRegister& fd,
1098 const Register& rn,
1099 unsigned fbits) {
1100 ASSERT(allow_macro_instructions_);
1101 scvtf(fd, rn, fbits);
1102 }
1103
1104
1105 void MacroAssembler::Sdiv(const Register& rd,
1106 const Register& rn,
1107 const Register& rm) {
1108 ASSERT(allow_macro_instructions_);
1109 ASSERT(!rd.IsZero());
1110 sdiv(rd, rn, rm);
1111 }
1112
1113
1114 void MacroAssembler::Smaddl(const Register& rd,
1115 const Register& rn,
1116 const Register& rm,
1117 const Register& ra) {
1118 ASSERT(allow_macro_instructions_);
1119 ASSERT(!rd.IsZero());
1120 smaddl(rd, rn, rm, ra);
1121 }
1122
1123
1124 void MacroAssembler::Smsubl(const Register& rd,
1125 const Register& rn,
1126 const Register& rm,
1127 const Register& ra) {
1128 ASSERT(allow_macro_instructions_);
1129 ASSERT(!rd.IsZero());
1130 smsubl(rd, rn, rm, ra);
1131 }
1132
1133
1134 void MacroAssembler::Smull(const Register& rd,
1135 const Register& rn,
1136 const Register& rm) {
1137 ASSERT(allow_macro_instructions_);
1138 ASSERT(!rd.IsZero());
1139 smull(rd, rn, rm);
1140 }
1141
1142
1143 void MacroAssembler::Smulh(const Register& rd,
1144 const Register& rn,
1145 const Register& rm) {
1146 ASSERT(allow_macro_instructions_);
1147 ASSERT(!rd.IsZero());
1148 smulh(rd, rn, rm);
1149 }
1150
1151
1152 void MacroAssembler::Stnp(const CPURegister& rt,
1153 const CPURegister& rt2,
1154 const MemOperand& dst) {
1155 ASSERT(allow_macro_instructions_);
1156 stnp(rt, rt2, dst);
1157 }
1158
1159
1160 void MacroAssembler::Stp(const CPURegister& rt,
1161 const CPURegister& rt2,
1162 const MemOperand& dst) {
1163 ASSERT(allow_macro_instructions_);
1164 stp(rt, rt2, dst);
1165 }
1166
1167
1168 void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
1169 ASSERT(allow_macro_instructions_);
1170 ASSERT(!rd.IsZero());
1171 sxtb(rd, rn);
1172 }
1173
1174
1175 void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
1176 ASSERT(allow_macro_instructions_);
1177 ASSERT(!rd.IsZero());
1178 sxth(rd, rn);
1179 }
1180
1181
1182 void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
1183 ASSERT(allow_macro_instructions_);
1184 ASSERT(!rd.IsZero());
1185 sxtw(rd, rn);
1186 }
1187
1188
1189 void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
1190 ASSERT(allow_macro_instructions_);
1191 tbnz(rt, bit_pos, label);
1192 }
1193
1194
1195 void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
1196 ASSERT(allow_macro_instructions_);
1197 tbz(rt, bit_pos, label);
1198 }
1199
1200
// Unsigned bitfield insert in zero: copies 'width' low bits of rn into rd at
// position 'lsb', zeroing all other bits of rd.
void MacroAssembler::Ubfiz(const Register& rd,
                           const Register& rn,
                           unsigned lsb,
                           unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ubfiz(rd, rn, lsb, width);
}
1209
1210
// Unsigned bitfield extract: copies 'width' bits of rn starting at 'lsb' into
// the low bits of rd, zero-extending the result.
void MacroAssembler::Ubfx(const Register& rd,
                          const Register& rn,
                          unsigned lsb,
                          unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ubfx(rd, rn, lsb, width);
}
1219
1220
// Convert the unsigned integer in rn to floating point in fd. A non-zero
// 'fbits' treats rn as fixed-point with that many fractional bits.
void MacroAssembler::Ucvtf(const FPRegister& fd,
                           const Register& rn,
                           unsigned fbits) {
  ASSERT(allow_macro_instructions_);
  ucvtf(fd, rn, fbits);
}
1227
1228
// Unsigned divide: rd = rn / rm.
void MacroAssembler::Udiv(const Register& rd,
                          const Register& rn,
                          const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  udiv(rd, rn, rm);
}
1236
1237
// Unsigned multiply-add long: rd = ra + (rn * rm), with 32-bit sources
// zero-extended to 64 bits.
void MacroAssembler::Umaddl(const Register& rd,
                            const Register& rn,
                            const Register& rm,
                            const Register& ra) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  umaddl(rd, rn, rm, ra);
}
1246
1247
// Unsigned multiply-subtract long: rd = ra - (rn * rm), with 32-bit sources
// zero-extended to 64 bits.
void MacroAssembler::Umsubl(const Register& rd,
                            const Register& rn,
                            const Register& rm,
                            const Register& ra) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  umsubl(rd, rn, rm, ra);
}
1256
1257
// Mark a code path that must never be executed: emits a halt with the
// dedicated "unreachable" immediate so the failure is identifiable.
void MacroAssembler::Unreachable() {
  ASSERT(allow_macro_instructions_);
  hlt(kImmExceptionIsUnreachable);
}
1262
1263
// Zero-extend the low byte of rn into rd.
void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  uxtb(rd, rn);
}
1269
1270
// Zero-extend the low half-word (16 bits) of rn into rd.
void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  uxth(rd, rn);
}
1276
1277
// Zero-extend the low word (32 bits) of rn into rd.
void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  uxtw(rd, rn);
}
1283
1284
// Lower the system stack pointer: csp = StackPointer() - space. Only valid
// when csp is not the active stack pointer.
void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
  ASSERT(!csp.Is(sp_));
  // TODO(jbramley): Several callers rely on this not using scratch registers,
  // so we use the assembler directly here. However, this means that large
  // immediate values of 'space' cannot be handled. Once we merge with V8, we
  // should try to use the new scope that controls scratch register usage.
  InstructionAccurateScope scope(this);
  if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
    // The subtract instruction supports a 12-bit immediate, shifted left by
    // zero or 12 bits. So, in two instructions, we can subtract any immediate
    // between zero and (1 << 24) - 1.
    int64_t imm = space.immediate();
    ASSERT(is_uint24(imm));

    // First subtract the top 12 bits (shifted into place), then whatever
    // remains in the low 12 bits.
    int64_t imm_top_12_bits = imm >> 12;
    sub(csp, StackPointer(), imm_top_12_bits << 12);
    imm -= imm_top_12_bits << 12;
    if (imm > 0) {
      sub(csp, csp, imm);
    }
  } else {
    // Either a register operand or an immediate that fits in 12 bits: a
    // single subtract suffices.
    sub(csp, StackPointer(), space);
  }
}
1309
1310
// Load the address of the isolate's roots array into the dedicated root
// register.
void MacroAssembler::InitializeRootRegister() {
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  Mov(root, Operand(roots_array_start));
}
1316
1317
// Tag the integer in src as a smi by shifting it left by kSmiShift into dst.
void MacroAssembler::SmiTag(Register dst, Register src) {
  ASSERT(dst.Is64Bits() && src.Is64Bits());
  Lsl(dst, src, kSmiShift);
}
1322
1323
// In-place variant: tag 'smi' in its own register.
void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
1325
1326
// Untag the smi in src into dst with an arithmetic right shift by kSmiShift.
// With slow asserts enabled, first verify that src really holds a smi.
void MacroAssembler::SmiUntag(Register dst, Register src) {
  ASSERT(dst.Is64Bits() && src.Is64Bits());
  if (FLAG_enable_slow_asserts) {
    AssertSmi(src);
  }
  Asr(dst, src, kSmiShift);
}
1334
1335
// In-place variant: untag 'smi' in its own register.
void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
1337
1338
// Convert the smi in src directly to a double in dst. Scvtf with kSmiShift
// fractional bits performs the untag and the int-to-double conversion in one
// instruction. The smi check is skipped for speculative untagging.
void MacroAssembler::SmiUntagToDouble(FPRegister dst,
                                      Register src,
                                      UntagMode mode) {
  ASSERT(dst.Is64Bits() && src.Is64Bits());
  if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
    AssertSmi(src);
  }
  Scvtf(dst, src, kSmiShift);
}
1348
1349
// Convert the smi in src directly to a single-precision float in dst, using
// the same fused untag-and-convert trick as SmiUntagToDouble.
void MacroAssembler::SmiUntagToFloat(FPRegister dst,
                                     Register src,
                                     UntagMode mode) {
  ASSERT(dst.Is32Bits() && src.Is64Bits());
  if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
    AssertSmi(src);
  }
  Scvtf(dst, src, kSmiShift);
}
1359
1360
// Branch to smi_label if value is a smi, or to not_smi_label otherwise.
// Either label (but not both) may be NULL, in which case execution falls
// through for that case.
void MacroAssembler::JumpIfSmi(Register value,
                               Label* smi_label,
                               Label* not_smi_label) {
  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
  // Check if the tag bit is set.
  if (smi_label) {
    Tbz(value, 0, smi_label);
    if (not_smi_label) {
      B(not_smi_label);
    }
  } else {
    ASSERT(not_smi_label);
    Tbnz(value, 0, not_smi_label);
  }
}
1376
1377
// Branch to not_smi_label if value is not a smi; fall through otherwise.
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
  JumpIfSmi(value, NULL, not_smi_label);
}
1381
1382
// Branch to both_smi_label if value1 and value2 are both smis. The OR of the
// two values has a clear tag bit only if both tag bits are clear.
void MacroAssembler::JumpIfBothSmi(Register value1,
                                   Register value2,
                                   Label* both_smi_label,
                                   Label* not_smi_label) {
  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
  // Check if both tag bits are clear.
  Orr(Tmp0(), value1, value2);
  JumpIfSmi(Tmp0(), both_smi_label, not_smi_label);
}
1392
1393
// Branch to either_smi_label if at least one of value1/value2 is a smi. The
// AND of the two values has a clear tag bit if either tag bit is clear.
void MacroAssembler::JumpIfEitherSmi(Register value1,
                                     Register value2,
                                     Label* either_smi_label,
                                     Label* not_smi_label) {
  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
  // Check if either tag bit is clear.
  And(Tmp0(), value1, value2);
  JumpIfSmi(Tmp0(), either_smi_label, not_smi_label);
}
1403
1404
// Branch to not_smi_label if at least one of value1/value2 is not a smi
// (i.e. unless both are smis).
void MacroAssembler::JumpIfEitherNotSmi(Register value1,
                                        Register value2,
                                        Label* not_smi_label) {
  JumpIfBothSmi(value1, value2, NULL, not_smi_label);
}
1410
1411
// Branch to not_smi_label only if neither value1 nor value2 is a smi.
void MacroAssembler::JumpIfBothNotSmi(Register value1,
                                      Register value2,
                                      Label* not_smi_label) {
  JumpIfEitherSmi(value1, value2, NULL, not_smi_label);
}
1417
1418
// Branch to 'fail' if 'object' is not a name type. Clobbers 'type' with the
// object's instance type.
void MacroAssembler::IsObjectNameType(Register object,
                                      Register type,
                                      Label* fail) {
  CompareObjectType(object, type, type, LAST_NAME_TYPE);
  // Instance types above LAST_NAME_TYPE are not names.
  B(hi, fail);
}
1425
1426
// Load heap_object's map into 'map' and branch to 'fail' if it is not a
// JSObject type; see IsInstanceJSObjectType for the flag behavior.
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}
1434
1435
// Check whether the instance type in 'map' lies in the non-callable
// spec-object range. Branches to 'fail' when out of range, or (if 'fail' is
// NULL) leaves the condition flags set for the caller to test (gt = fail).
void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  // If cmp result is lt, the following ccmp will clear all flags.
  // Z == 0, N == V implies gt condition.
  Cmp(scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
  Ccmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE, NoFlag, ge);

  // If we didn't get a valid label object just fall through and leave the
  // flags updated.
  if (fail != NULL) {
    B(gt, fail);
  }
}
1451
1452
// Branch to 'string' if 'object' is a string, or to 'not_string' otherwise.
// Either label (but not both) may be NULL for fall-through. Clobbers 'type'
// with the object's map and instance type.
void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register type,
                                          Label* not_string,
                                          Label* string) {
  Ldr(type, FieldMemOperand(object, HeapObject::kMapOffset));
  Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));

  STATIC_ASSERT(kStringTag == 0);
  ASSERT((string != NULL) || (not_string != NULL));
  if (string == NULL) {
    TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
  } else if (not_string == NULL) {
    TestAndBranchIfAllClear(type.W(), kIsNotStringMask, string);
  } else {
    TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
    B(string);
  }
}
1471
1472
// Push a handle onto the stack, materializing it through the scratch
// register Tmp0().
void MacroAssembler::Push(Handle<Object> handle) {
  Mov(Tmp0(), Operand(handle));
  Push(Tmp0());
}
1477
1478
// Claim (allocate) count * unit_size bytes on the current stack. When csp is
// the active stack pointer the total must keep it 16-byte aligned; otherwise
// csp is bumped first so it tracks the JS stack pointer.
void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
  uint64_t size = count * unit_size;

  if (size == 0) {
    return;
  }

  if (csp.Is(StackPointer())) {
    ASSERT(size % 16 == 0);
  } else {
    BumpSystemStackPointer(size);
  }

  Sub(StackPointer(), StackPointer(), size);
}
1494
1495
1496 void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
1497 ASSERT(IsPowerOf2(unit_size));
1498
1499 if (unit_size == 0) {
1500 return;
1501 }
1502
1503 const int shift = CountTrailingZeros(unit_size, kXRegSize);
1504 const Operand size(count, LSL, shift);
1505
1506 if (size.IsZero()) {
1507 return;
1508 }
1509
1510 if (!csp.Is(StackPointer())) {
1511 BumpSystemStackPointer(size);
1512 }
1513
1514 Sub(StackPointer(), StackPointer(), size);
1515 }
1516
1517
// Claim (allocate) stack space for a smi-tagged count. The shift combines
// smi-untagging with the unit_size scaling; a negative net shift becomes a
// logical right shift.
void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
  ASSERT(IsPowerOf2(unit_size));
  const int shift = CountTrailingZeros(unit_size, kXRegSize) - kSmiShift;
  const Operand size(count_smi,
                     (shift >= 0) ? (LSL) : (LSR),
                     (shift >= 0) ? (shift) : (-shift));

  if (size.IsZero()) {
    return;
  }

  // Keep csp tracking the JS stack pointer when they differ.
  if (!csp.Is(StackPointer())) {
    BumpSystemStackPointer(size);
  }

  Sub(StackPointer(), StackPointer(), size);
}
1535
1536
// Drop (free) count * unit_size bytes from the current stack. When csp is
// the active stack pointer the total must keep it 16-byte aligned.
void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
  uint64_t size = count * unit_size;

  if (size == 0) {
    return;
  }

  Add(StackPointer(), StackPointer(), size);

  if (csp.Is(StackPointer())) {
    ASSERT(size % 16 == 0);
  } else if (emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    Mov(csp, StackPointer());
  }
}
1555
1556
1557 void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
1558 ASSERT(IsPowerOf2(unit_size));
1559
1560 if (unit_size == 0) {
1561 return;
1562 }
1563
1564 const int shift = CountTrailingZeros(unit_size, kXRegSize);
1565 const Operand size(count, LSL, shift);
1566
1567 if (size.IsZero()) {
1568 return;
1569 }
1570
1571 Add(StackPointer(), StackPointer(), size);
1572
1573 if (!csp.Is(StackPointer()) && emit_debug_code()) {
1574 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1575 // but if we keep it matching StackPointer, the simulator can detect memory
1576 // accesses in the now-free part of the stack.
1577 Mov(csp, StackPointer());
1578 }
1579 }
1580
1581
// Drop (free) stack space for a smi-tagged count. The shift combines
// smi-untagging with the unit_size scaling; a negative net shift becomes a
// logical right shift.
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
  ASSERT(IsPowerOf2(unit_size));
  const int shift = CountTrailingZeros(unit_size, kXRegSize) - kSmiShift;
  const Operand size(count_smi,
                     (shift >= 0) ? (LSL) : (LSR),
                     (shift >= 0) ? (shift) : (-shift));

  if (size.IsZero()) {
    return;
  }

  Add(StackPointer(), StackPointer(), size);

  if (!csp.Is(StackPointer()) && emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    Mov(csp, StackPointer());
  }
}
1602
1603
1604 void MacroAssembler::CompareAndBranch(const Register& lhs,
1605 const Operand& rhs,
1606 Condition cond,
1607 Label* label) {
1608 if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
1609 ((cond == eq) || (cond == ne))) {
1610 if (cond == eq) {
1611 Cbz(lhs, label);
1612 } else {
1613 Cbnz(lhs, label);
1614 }
1615 } else {
1616 Cmp(lhs, rhs);
1617 B(cond, label);
1618 }
1619 }
1620
1621
1622 void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
1623 const uint64_t bit_pattern,
1624 Label* label) {
1625 int bits = reg.SizeInBits();
1626 ASSERT(CountSetBits(bit_pattern, bits) > 0);
1627 if (CountSetBits(bit_pattern, bits) == 1) {
1628 Tbnz(reg, MaskToBit(bit_pattern), label);
1629 } else {
1630 Tst(reg, bit_pattern);
1631 B(ne, label);
1632 }
1633 }
1634
1635
1636 void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
1637 const uint64_t bit_pattern,
1638 Label* label) {
1639 int bits = reg.SizeInBits();
1640 ASSERT(CountSetBits(bit_pattern, bits) > 0);
1641 if (CountSetBits(bit_pattern, bits) == 1) {
1642 Tbz(reg, MaskToBit(bit_pattern), label);
1643 } else {
1644 Tst(reg, bit_pattern);
1645 B(eq, label);
1646 }
1647 }
1648
1649
// Embed 16 bits of data in the instruction stream as a movz to xzr, which
// executes as a no-op.
void MacroAssembler::InlineData(uint64_t data) {
  ASSERT(is_uint16(data));
  InstructionAccurateScope scope(this, 1);
  movz(xzr, data);
}
1655
1656
// Emit the instrumentation-enable marker: a no-op movn to xzr whose
// immediate is recognized by the instrumentation tooling.
void MacroAssembler::EnableInstrumentation() {
  InstructionAccurateScope scope(this, 1);
  movn(xzr, InstrumentStateEnable);
}
1661
1662
// Emit the instrumentation-disable marker: a no-op movn to xzr whose
// immediate is recognized by the instrumentation tooling.
void MacroAssembler::DisableInstrumentation() {
  InstructionAccurateScope scope(this, 1);
  movn(xzr, InstrumentStateDisable);
}
1667
1668
// Emit a two-character instrumentation annotation, packed little-endian into
// the immediate of a no-op movn to xzr.
void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
  ASSERT(strlen(marker_name) == 2);

  // We allow only printable characters in the marker names. Unprintable
  // characters are reserved for controlling features of the instrumentation.
  ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));

  InstructionAccurateScope scope(this, 1);
  movn(xzr, (marker_name[1] << 8) | marker_name[0]);
}
1679
1680 } } // namespace v8::internal
1681
1682 #endif // V8_A64_MACRO_ASSEMBLER_A64_INL_H_
OLDNEW
« no previous file with comments | « src/a64/macro-assembler-a64.cc ('k') | src/a64/regexp-macro-assembler-a64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698