Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(245)

Side by Side Diff: src/a64/macro-assembler-a64-inl.h

Issue 178223011: Reset trunk to 3.24.35.4 (Closed) Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 6 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/a64/macro-assembler-a64.cc ('k') | src/a64/regexp-macro-assembler-a64.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
(Empty)
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #ifndef V8_A64_MACRO_ASSEMBLER_A64_INL_H_
29 #define V8_A64_MACRO_ASSEMBLER_A64_INL_H_
30
31 #include <ctype.h>
32
33 #include "v8globals.h"
34 #include "globals.h"
35
36 #include "a64/assembler-a64.h"
37 #include "a64/assembler-a64-inl.h"
38 #include "a64/macro-assembler-a64.h"
39 #include "a64/instrument-a64.h"
40
41
42 namespace v8 {
43 namespace internal {
44
45
// Builds a MemOperand addressing a field of a heap object: the heap-object
// tag is subtracted from the field offset so a tagged pointer can be used
// directly as the base register.
MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}
49
50
// Like FieldMemOperand, but additionally adjusts the offset so a load reads
// the untagged (payload) half of a Smi field.
MemOperand UntagSmiFieldMemOperand(Register object, int offset) {
  return UntagSmiMemOperand(object, offset - kHeapObjectTag);
}
54
55
// Adjusts an offset so that a (narrower) load reads the Smi payload directly.
// Assumes that Smis are shifted by 32 bits and little endianness: the payload
// lives in the upper 4 bytes, so the offset is advanced by kSmiShift / 8.
MemOperand UntagSmiMemOperand(Register object, int offset) {
  STATIC_ASSERT(kSmiShift == 32);
  return MemOperand(object, offset + (kSmiShift / kBitsPerByte));
}
61
62
// Returns the handle to the code object being generated; asserts it was set.
Handle<Object> MacroAssembler::CodeObject() {
  ASSERT(!code_object_.is_null());
  return code_object_;
}
67
68
// Bitwise AND: rd = rn & operand. LogicalMacro synthesizes extra instructions
// for immediates that cannot be encoded in a single AND.
void MacroAssembler::And(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, AND);
}
76
77
// Bitwise AND, setting condition flags: rd = rn & operand.
void MacroAssembler::Ands(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, ANDS);
}
85
86
// Test bits: sets flags from rn & operand, discarding the result (ANDS into
// the zero register of matching width).
void MacroAssembler::Tst(const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
}
92
93
// Bit clear: rd = rn & ~operand.
void MacroAssembler::Bic(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, BIC);
}
101
102
// Bit clear, setting condition flags: rd = rn & ~operand.
void MacroAssembler::Bics(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, BICS);
}
110
111
// Bitwise OR: rd = rn | operand.
void MacroAssembler::Orr(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, ORR);
}
119
120
// Bitwise OR NOT: rd = rn | ~operand.
void MacroAssembler::Orn(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, ORN);
}
128
129
// Bitwise exclusive OR: rd = rn ^ operand.
void MacroAssembler::Eor(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, EOR);
}
137
138
// Bitwise exclusive OR NOT: rd = rn ^ ~operand.
void MacroAssembler::Eon(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, EON);
}
146
147
// Conditional compare: if cond holds, flags are set from comparing rn with
// operand; otherwise flags are set to nzcv. A negative immediate is rewritten
// as CCMN with the negated value so it can be encoded.
// NOTE(review): negating INT64_MIN would overflow; presumably callers never
// pass it — confirm.
void MacroAssembler::Ccmp(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond) {
  ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
  } else {
    ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
  }
}
159
160
// Conditional compare negative: the CCMN counterpart of Ccmp. A negative
// immediate is flipped to a CCMP with the negated value (see Ccmp; the same
// INT64_MIN negation caveat applies).
void MacroAssembler::Ccmn(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond) {
  ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
  } else {
    ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
  }
}
172
173
// Add: rd = rn + operand. A negative immediate is emitted as a subtraction of
// the negated value so it fits the add/sub immediate encoding.
// NOTE(review): negating INT64_MIN would overflow; presumably callers never
// pass it — confirm.
void MacroAssembler::Add(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
  } else {
    AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
  }
}
184
// Add, setting condition flags: rd = rn + operand. Negative immediates are
// rewritten as flag-setting subtractions (see Add).
void MacroAssembler::Adds(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
  } else {
    AddSubMacro(rd, rn, operand, SetFlags, ADD);
  }
}
195
196
// Subtract: rd = rn - operand. Negative immediates are rewritten as additions
// of the negated value (see Add for the INT64_MIN caveat).
void MacroAssembler::Sub(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
  } else {
    AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
  }
}
207
208
// Subtract, setting condition flags: rd = rn - operand. Negative immediates
// are rewritten as flag-setting additions (see Add).
void MacroAssembler::Subs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
  } else {
    AddSubMacro(rd, rn, operand, SetFlags, SUB);
  }
}
219
220
// Compare negative: sets flags from rn + operand, discarding the result.
void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  Adds(AppropriateZeroRegFor(rn), rn, operand);
}
225
226
// Compare: sets flags from rn - operand, discarding the result.
void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  Subs(AppropriateZeroRegFor(rn), rn, operand);
}
231
232
// Negate: rd = -operand. An immediate is materialized directly via Mov;
// otherwise this is a subtraction from the zero register.
void MacroAssembler::Neg(const Register& rd,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  if (operand.IsImmediate()) {
    Mov(rd, -operand.immediate());
  } else {
    Sub(rd, AppropriateZeroRegFor(rd), operand);
  }
}
243
244
// Negate, setting condition flags: rd = -operand (zero-register minus
// operand). Unlike Neg, rd may be the zero register (discard-result form).
void MacroAssembler::Negs(const Register& rd,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  Subs(rd, AppropriateZeroRegFor(rd), operand);
}
250
251
// Add with carry: rd = rn + operand + C.
void MacroAssembler::Adc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
}
259
260
// Add with carry, setting condition flags: rd = rn + operand + C.
void MacroAssembler::Adcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
}
268
269
// Subtract with carry (borrow): rd = rn - operand - (1 - C).
void MacroAssembler::Sbc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
}
277
278
// Subtract with carry, setting condition flags: rd = rn - operand - (1 - C).
void MacroAssembler::Sbcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
}
286
287
// Negate with carry: rd = -operand - (1 - C), i.e. SBC from the zero register.
void MacroAssembler::Ngc(const Register& rd,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  Register zr = AppropriateZeroRegFor(rd);
  Sbc(rd, zr, operand);
}
295
296
// Negate with carry, setting condition flags (SBCS from the zero register).
void MacroAssembler::Ngcs(const Register& rd,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  Register zr = AppropriateZeroRegFor(rd);
  Sbcs(rd, zr, operand);
}
304
305
// Move inverted immediate: rd = ~imm, materialized via the Mov macro.
void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  Mov(rd, ~imm);
}
311
312
// Generates the load/store macro wrappers (one per LS_MACRO_LIST entry). Each
// checks that macro emission is allowed, then defers to LoadStoreMacro, which
// handles addressing modes the raw instruction cannot encode.
#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP)                          \
  void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
    ASSERT(allow_macro_instructions_);                                 \
    LoadStoreMacro(REG, addr, OP);                                     \
  }
LS_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION
320
321
// PC-relative address of a label: rd = &label.
void MacroAssembler::Adr(const Register& rd, Label* label) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  adr(rd, label);
}
327
328
// Arithmetic shift right by an immediate: rd = rn >> shift (sign-filling).
void MacroAssembler::Asr(const Register& rd,
                         const Register& rn,
                         unsigned shift) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  asr(rd, rn, shift);
}
336
337
// Arithmetic shift right by a register amount (ASRV form).
void MacroAssembler::Asr(const Register& rd,
                         const Register& rn,
                         const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  asrv(rd, rn, rm);
}
345
346
347 void MacroAssembler::B(Label* label) {
348 b(label);
349 CheckVeneers(false);
350 }
351
352
// Conditional branch to a label; forwards to the (label, cond) overload.
void MacroAssembler::B(Condition cond, Label* label) {
  ASSERT(allow_macro_instructions_);
  B(label, cond);
}
357
358
// Bitfield insert: copies the low `width` bits of rn into rd at bit `lsb`,
// leaving the other bits of rd unchanged.
void MacroAssembler::Bfi(const Register& rd,
                         const Register& rn,
                         unsigned lsb,
                         unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  bfi(rd, rn, lsb, width);
}
367
368
// Bitfield extract and insert low: copies `width` bits of rn starting at
// `lsb` into the low bits of rd, leaving the other bits of rd unchanged.
void MacroAssembler::Bfxil(const Register& rd,
                           const Register& rn,
                           unsigned lsb,
                           unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  bfxil(rd, rn, lsb, width);
}
377
378
// Binds the label to the current position in the instruction stream.
void MacroAssembler::Bind(Label* label) {
  ASSERT(allow_macro_instructions_);
  bind(label);
}
383
384
// Branch with link (call) to a label.
void MacroAssembler::Bl(Label* label) {
  ASSERT(allow_macro_instructions_);
  bl(label);
}
389
390
// Branch with link (call) to the address in register xn.
void MacroAssembler::Blr(const Register& xn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!xn.IsZero());
  blr(xn);
}
396
397
// Indirect branch to the address in register xn.
void MacroAssembler::Br(const Register& xn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!xn.IsZero());
  br(xn);
}
403
404
// Software breakpoint with an immediate code.
void MacroAssembler::Brk(int code) {
  ASSERT(allow_macro_instructions_);
  brk(code);
}
409
410
// Conditional increment: rd = cond ? rn + 1 : rn. al/nv are rejected because
// the underlying CSINC encoding inverts the condition.
void MacroAssembler::Cinc(const Register& rd,
                          const Register& rn,
                          Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  cinc(rd, rn, cond);
}
419
420
// Conditional invert: rd = cond ? ~rn : rn.
void MacroAssembler::Cinv(const Register& rd,
                          const Register& rn,
                          Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  cinv(rd, rn, cond);
}
429
430
// Count leading sign bits of rn into rd.
void MacroAssembler::Cls(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  cls(rd, rn);
}
436
437
// Count leading zero bits of rn into rd.
void MacroAssembler::Clz(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  clz(rd, rn);
}
443
444
// Conditional negate: rd = cond ? -rn : rn.
void MacroAssembler::Cneg(const Register& rd,
                          const Register& rn,
                          Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  cneg(rd, rn, cond);
}
453
454
// Conditionally zero the destination register: rd = cond ? 0 : rd. Only X
// registers are supported due to the truncation side-effect when used on
// W registers.
void MacroAssembler::CzeroX(const Register& rd,
                            Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsSP() && rd.Is64Bits());
  ASSERT((cond != al) && (cond != nv));
  csel(rd, xzr, rd, cond);
}
464
465
// Conditionally move a value into the destination register: rd = cond ? rn
// : rd. Only X registers are supported due to the truncation side-effect
// when used on W registers. If rd and rn alias, the CSEL would be a no-op,
// so no instruction is emitted.
void MacroAssembler::CmovX(const Register& rd,
                           const Register& rn,
                           Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsSP());
  ASSERT(rd.Is64Bits() && rn.Is64Bits());
  ASSERT((cond != al) && (cond != nv));
  if (!rd.is(rn)) {
    csel(rd, rn, rd, cond);
  }
}
479
480
// Conditional set: rd = cond ? 1 : 0.
void MacroAssembler::Cset(const Register& rd, Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  cset(rd, cond);
}
487
488
// Conditional set mask: rd = cond ? all-ones : 0.
void MacroAssembler::Csetm(const Register& rd, Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  csetm(rd, cond);
}
495
496
// Conditional select increment: rd = cond ? rn : rm + 1.
void MacroAssembler::Csinc(const Register& rd,
                           const Register& rn,
                           const Register& rm,
                           Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  csinc(rd, rn, rm, cond);
}
506
507
// Conditional select invert: rd = cond ? rn : ~rm.
void MacroAssembler::Csinv(const Register& rd,
                           const Register& rn,
                           const Register& rm,
                           Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  csinv(rd, rn, rm, cond);
}
517
518
// Conditional select negate: rd = cond ? rn : -rm.
void MacroAssembler::Csneg(const Register& rd,
                           const Register& rn,
                           const Register& rm,
                           Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  csneg(rd, rn, rm, cond);
}
528
529
// Data memory barrier with the given domain and type.
void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
  ASSERT(allow_macro_instructions_);
  dmb(domain, type);
}
534
535
// Data synchronization barrier with the given domain and type.
void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
  ASSERT(allow_macro_instructions_);
  dsb(domain, type);
}
540
541
// Emits a debug marker/trap carrying a message, code and parameters.
void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
  ASSERT(allow_macro_instructions_);
  debug(message, code, params);
}
546
547
// Extract: rd = bits of the rn:rm concatenation starting at bit `lsb`.
void MacroAssembler::Extr(const Register& rd,
                          const Register& rn,
                          const Register& rm,
                          unsigned lsb) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  extr(rd, rn, rm, lsb);
}
556
557
// Floating-point absolute value: fd = |fn|.
void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  fabs(fd, fn);
}
562
563
// Floating-point add: fd = fn + fm.
void MacroAssembler::Fadd(const FPRegister& fd,
                          const FPRegister& fn,
                          const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fadd(fd, fn, fm);
}
570
571
// Floating-point conditional compare: if cond holds, flags are set from
// comparing fn with fm; otherwise flags are set to nzcv.
void MacroAssembler::Fccmp(const FPRegister& fn,
                           const FPRegister& fm,
                           StatusFlags nzcv,
                           Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT((cond != al) && (cond != nv));
  fccmp(fn, fm, nzcv, cond);
}
580
581
// Floating-point compare of two registers, setting condition flags.
void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fcmp(fn, fm);
}
586
587
// Floating-point compare against a double constant. Zero uses the dedicated
// compare-with-zero encoding; any other value must first be materialized in
// a temporary register of fn's size.
void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
  ASSERT(allow_macro_instructions_);
  if (value != 0.0) {
    FPRegister tmp = AppropriateTempFor(fn);
    Fmov(tmp, value);
    fcmp(fn, tmp);
  } else {
    // Only 0.0 can be encoded as an immediate in the fcmp instruction.
    fcmp(fn, value);
  }
}
598
599
// Floating-point conditional select: fd = cond ? fn : fm.
void MacroAssembler::Fcsel(const FPRegister& fd,
                           const FPRegister& fn,
                           const FPRegister& fm,
                           Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT((cond != al) && (cond != nv));
  fcsel(fd, fn, fm, cond);
}
608
609
// Floating-point precision conversion between S and D registers.
void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  fcvt(fd, fn);
}
614
615
// FP to signed integer, rounding to nearest with ties away from zero.
void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtas(rd, fn);
}
621
622
// FP to unsigned integer, rounding to nearest with ties away from zero.
void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtau(rd, fn);
}
628
629
// FP to signed integer, rounding toward minus infinity.
void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtms(rd, fn);
}
635
636
// FP to unsigned integer, rounding toward minus infinity.
void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtmu(rd, fn);
}
642
643
// FP to signed integer, rounding to nearest with ties to even.
void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtns(rd, fn);
}
649
650
// FP to unsigned integer, rounding to nearest with ties to even.
void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtnu(rd, fn);
}
656
657
// FP to signed integer, rounding toward zero (truncation).
void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtzs(rd, fn);
}
// FP to unsigned integer, rounding toward zero (truncation).
void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtzu(rd, fn);
}
668
669
// Floating-point divide: fd = fn / fm.
void MacroAssembler::Fdiv(const FPRegister& fd,
                          const FPRegister& fn,
                          const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fdiv(fd, fn, fm);
}
676
677
// Floating-point fused multiply-add: fd = fa + (fn * fm).
void MacroAssembler::Fmadd(const FPRegister& fd,
                           const FPRegister& fn,
                           const FPRegister& fm,
                           const FPRegister& fa) {
  ASSERT(allow_macro_instructions_);
  fmadd(fd, fn, fm, fa);
}
685
686
// Floating-point maximum: fd = max(fn, fm).
void MacroAssembler::Fmax(const FPRegister& fd,
                          const FPRegister& fn,
                          const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fmax(fd, fn, fm);
}
693
694
// Floating-point maximum number (IEEE maxNum NaN handling).
void MacroAssembler::Fmaxnm(const FPRegister& fd,
                            const FPRegister& fn,
                            const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fmaxnm(fd, fn, fm);
}
701
702
// Floating-point minimum: fd = min(fn, fm).
void MacroAssembler::Fmin(const FPRegister& fd,
                          const FPRegister& fn,
                          const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fmin(fd, fn, fm);
}
709
710
// Floating-point minimum number (IEEE minNum NaN handling).
void MacroAssembler::Fminnm(const FPRegister& fd,
                            const FPRegister& fn,
                            const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fminnm(fd, fn, fm);
}
717
718
// FP register-to-register move.
void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
  ASSERT(allow_macro_instructions_);
  // Only emit an instruction if fd and fn are different, and they are both D
  // registers. fmov(s0, s0) is not a no-op because it clears the top word of
  // d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
  // top of q0, but FPRegister does not currently support Q registers.
  if (!fd.Is(fn) || !fd.Is64Bits()) {
    fmov(fd, fn);
  }
}
729
730
// Bit-for-bit move from a general-purpose register into an FP register.
void MacroAssembler::Fmov(FPRegister fd, Register rn) {
  ASSERT(allow_macro_instructions_);
  fmov(fd, rn);
}
735
736
// Loads a double constant into an FP register. Encodable immediates (and
// positive zero) go straight to the fmov instruction; anything else is
// materialized as raw bits through a temporary general-purpose register.
void MacroAssembler::Fmov(FPRegister fd, double imm) {
  ASSERT(allow_macro_instructions_);
  if ((fd.Is64Bits() && IsImmFP64(imm)) ||
      (fd.Is32Bits() && IsImmFP32(imm)) ||
      ((imm == 0.0) && (copysign(1.0, imm) == 1.0))) {
    // These cases can be handled by the Assembler.
    fmov(fd, imm);
  } else {
    // TODO(all): The Assembler would try to relocate the immediate with
    // Assembler::ldr(const FPRegister& ft, double imm) but it is not
    // implemented yet.
    if (fd.SizeInBits() == kDRegSize) {
      Mov(Tmp0(), double_to_rawbits(imm));
      Fmov(fd, Tmp0());
    } else {
      ASSERT(fd.SizeInBits() == kSRegSize);
      // Narrowing the immediate to float may lose precision; presumably
      // callers pass S-register-representable values here — confirm.
      Mov(WTmp0(), float_to_rawbits(static_cast<float>(imm)));
      Fmov(fd, WTmp0());
    }
  }
}
758
759
// Bit-for-bit move from an FP register into a general-purpose register.
void MacroAssembler::Fmov(Register rd, FPRegister fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fmov(rd, fn);
}
765
766
// Floating-point fused multiply-subtract: fd = fa - (fn * fm).
void MacroAssembler::Fmsub(const FPRegister& fd,
                           const FPRegister& fn,
                           const FPRegister& fm,
                           const FPRegister& fa) {
  ASSERT(allow_macro_instructions_);
  fmsub(fd, fn, fm, fa);
}
774
775
// Floating-point multiply: fd = fn * fm.
void MacroAssembler::Fmul(const FPRegister& fd,
                          const FPRegister& fn,
                          const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fmul(fd, fn, fm);
}
782
783
// Floating-point negate: fd = -fn.
void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  fneg(fd, fn);
}
788
789
// Floating-point negated fused multiply-add: fd = -fa - (fn * fm).
void MacroAssembler::Fnmadd(const FPRegister& fd,
                            const FPRegister& fn,
                            const FPRegister& fm,
                            const FPRegister& fa) {
  ASSERT(allow_macro_instructions_);
  fnmadd(fd, fn, fm, fa);
}
797
798
// Floating-point negated fused multiply-subtract: fd = -fa + (fn * fm).
void MacroAssembler::Fnmsub(const FPRegister& fd,
                            const FPRegister& fn,
                            const FPRegister& fm,
                            const FPRegister& fa) {
  ASSERT(allow_macro_instructions_);
  fnmsub(fd, fn, fm, fa);
}
806
807
// Round to integral value, ties away from zero.
void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  frinta(fd, fn);
}
812
813
// Round to integral value, ties to even.
void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  frintn(fd, fn);
}
818
819
// Round to integral value, toward zero (truncation).
void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  frintz(fd, fn);
}
824
825
// Floating-point square root: fd = sqrt(fn).
void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  fsqrt(fd, fn);
}
830
831
// Floating-point subtract: fd = fn - fm.
void MacroAssembler::Fsub(const FPRegister& fd,
                          const FPRegister& fn,
                          const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fsub(fd, fn, fm);
}
838
839
// Emits a HINT instruction (e.g. NOP-class hints).
void MacroAssembler::Hint(SystemHint code) {
  ASSERT(allow_macro_instructions_);
  hint(code);
}
844
845
// Halt instruction with an immediate code.
void MacroAssembler::Hlt(int code) {
  ASSERT(allow_macro_instructions_);
  hlt(code);
}
850
851
// Instruction synchronization barrier.
void MacroAssembler::Isb() {
  ASSERT(allow_macro_instructions_);
  isb();
}
856
857
// Load pair with non-temporal hint; the two targets must not alias.
void MacroAssembler::Ldnp(const CPURegister& rt,
                          const CPURegister& rt2,
                          const MemOperand& src) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!AreAliased(rt, rt2));
  ldnp(rt, rt2, src);
}
865
866
// Load pair of registers; the two targets must not alias.
void MacroAssembler::Ldp(const CPURegister& rt,
                         const CPURegister& rt2,
                         const MemOperand& src) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!AreAliased(rt, rt2));
  ldp(rt, rt2, src);
}
874
875
// Load pair of sign-extended words into X registers.
void MacroAssembler::Ldpsw(const Register& rt,
                           const Register& rt2,
                           const MemOperand& src) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rt.IsZero());
  ASSERT(!rt2.IsZero());
  ldpsw(rt, rt2, src);
}
884
885
// Loads a double literal into an FP register (PC-relative literal load).
void MacroAssembler::Ldr(const FPRegister& ft, double imm) {
  ASSERT(allow_macro_instructions_);
  ldr(ft, imm);
}
890
891
// Loads a 64-bit literal into a register (PC-relative literal load).
void MacroAssembler::Ldr(const Register& rt, uint64_t imm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rt.IsZero());
  ldr(rt, imm);
}
897
898
// Logical shift left by an immediate: rd = rn << shift.
void MacroAssembler::Lsl(const Register& rd,
                         const Register& rn,
                         unsigned shift) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  lsl(rd, rn, shift);
}
906
907
// Logical shift left by a register amount (LSLV form).
void MacroAssembler::Lsl(const Register& rd,
                         const Register& rn,
                         const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  lslv(rd, rn, rm);
}
915
916
// Logical shift right by an immediate: rd = rn >> shift (zero-filling).
void MacroAssembler::Lsr(const Register& rd,
                         const Register& rn,
                         unsigned shift) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  lsr(rd, rn, shift);
}
924
925
// Logical shift right by a register amount (LSRV form).
void MacroAssembler::Lsr(const Register& rd,
                         const Register& rn,
                         const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  lsrv(rd, rn, rm);
}
933
934
// Multiply-add: rd = ra + (rn * rm).
void MacroAssembler::Madd(const Register& rd,
                          const Register& rn,
                          const Register& rm,
                          const Register& ra) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  madd(rd, rn, rm, ra);
}
943
944
// Multiply-negate: rd = -(rn * rm).
void MacroAssembler::Mneg(const Register& rd,
                          const Register& rn,
                          const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  mneg(rd, rn, rm);
}
952
953
// Register-to-register move.
void MacroAssembler::Mov(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  // Emit a register move only if the registers are distinct, or if they are
  // not X registers. Note that mov(w0, w0) is not a no-op because it clears
  // the top word of x0.
  if (!rd.Is(rn) || !rd.Is64Bits()) {
    Assembler::mov(rd, rn);
  }
}
964
965
// Move with keep: inserts a 16-bit immediate at the given shift position,
// preserving the other bits of rd.
void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  movk(rd, imm, shift);
}
971
972
// Reads a system register into rt.
void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rt.IsZero());
  mrs(rt, sysreg);
}
978
979
// Writes rt into a system register.
void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rt.IsZero());
  msr(sysreg, rt);
}
985
986
// Multiply-subtract: rd = ra - (rn * rm).
void MacroAssembler::Msub(const Register& rd,
                          const Register& rn,
                          const Register& rm,
                          const Register& ra) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  msub(rd, rn, rm, ra);
}
995
996
// Multiply: rd = rn * rm.
void MacroAssembler::Mul(const Register& rd,
                         const Register& rn,
                         const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  mul(rd, rn, rm);
}
1004
1005
// Reverses the bit order of rn into rd.
void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  rbit(rd, rn);
}
1011
1012
// Return to the address in xn, then checks whether branch veneers need to
// be emitted (like B, this is a point where control leaves the block).
void MacroAssembler::Ret(const Register& xn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!xn.IsZero());
  ret(xn);
  CheckVeneers(false);
}
1019
1020
// Reverses the byte order of the whole register.
void MacroAssembler::Rev(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  rev(rd, rn);
}
1026
1027
// Reverses the byte order within each 16-bit halfword.
void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  rev16(rd, rn);
}
1033
1034
// Reverses the byte order within each 32-bit word.
void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  rev32(rd, rn);
}
1040
1041
// Rotate right by an immediate amount.
void MacroAssembler::Ror(const Register& rd,
                         const Register& rs,
                         unsigned shift) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ror(rd, rs, shift);
}
1049
1050
// Rotate right by a register amount (RORV form).
void MacroAssembler::Ror(const Register& rd,
                         const Register& rn,
                         const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  rorv(rd, rn, rm);
}
1058
1059
// Signed bitfield insert in zero: places the low `width` bits of rn at bit
// `lsb` of rd, sign-extending; other bits are zeroed.
void MacroAssembler::Sbfiz(const Register& rd,
                           const Register& rn,
                           unsigned lsb,
                           unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  sbfiz(rd, rn, lsb, width);
}
1068
1069
// Signed bitfield extract: extracts `width` bits of rn starting at `lsb`
// into the low bits of rd, sign-extending.
void MacroAssembler::Sbfx(const Register& rd,
                          const Register& rn,
                          unsigned lsb,
                          unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  sbfx(rd, rn, lsb, width);
}
1078
1079
// Signed integer (fixed-point with `fbits` fraction bits) to floating-point.
void MacroAssembler::Scvtf(const FPRegister& fd,
                           const Register& rn,
                           unsigned fbits) {
  ASSERT(allow_macro_instructions_);
  scvtf(fd, rn, fbits);
}
1086
1087
// Signed divide: rd = rn / rm.
void MacroAssembler::Sdiv(const Register& rd,
                          const Register& rn,
                          const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  sdiv(rd, rn, rm);
}
1095
1096
// Signed multiply-add long: rd(X) = ra + (rn(W) * rm(W)).
void MacroAssembler::Smaddl(const Register& rd,
                            const Register& rn,
                            const Register& rm,
                            const Register& ra) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  smaddl(rd, rn, rm, ra);
}
1105
1106
// Signed multiply-subtract long: rd(X) = ra - (rn(W) * rm(W)).
void MacroAssembler::Smsubl(const Register& rd,
                            const Register& rn,
                            const Register& rm,
                            const Register& ra) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  smsubl(rd, rn, rm, ra);
}
1115
1116
// Signed multiply long: rd(X) = rn(W) * rm(W).
void MacroAssembler::Smull(const Register& rd,
                           const Register& rn,
                           const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  smull(rd, rn, rm);
}
1124
1125
// Signed multiply high: rd = upper 64 bits of the 128-bit product rn * rm.
void MacroAssembler::Smulh(const Register& rd,
                           const Register& rn,
                           const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  smulh(rd, rn, rm);
}
1133
1134
// Store pair with non-temporal hint.
void MacroAssembler::Stnp(const CPURegister& rt,
                          const CPURegister& rt2,
                          const MemOperand& dst) {
  ASSERT(allow_macro_instructions_);
  stnp(rt, rt2, dst);
}
1141
1142
// Store pair of registers.
void MacroAssembler::Stp(const CPURegister& rt,
                         const CPURegister& rt2,
                         const MemOperand& dst) {
  ASSERT(allow_macro_instructions_);
  stp(rt, rt2, dst);
}
1149
1150
// Sign-extend byte: rd = sign-extended low 8 bits of rn.
void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  sxtb(rd, rn);
}
1156
1157
// Sign-extend halfword: rd = sign-extended low 16 bits of rn.
void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  sxth(rd, rn);
}
1163
1164
// Sign-extend word: rd = sign-extended low 32 bits of rn.
void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  sxtw(rd, rn);
}
1170
1171
// Unsigned bitfield insert in zero: places the low `width` bits of rn at bit
// `lsb` of rd; other bits are zeroed.
void MacroAssembler::Ubfiz(const Register& rd,
                           const Register& rn,
                           unsigned lsb,
                           unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ubfiz(rd, rn, lsb, width);
}
1180
1181
// Unsigned bitfield extract: extracts `width` bits of rn starting at `lsb`
// into the low bits of rd, zero-extending.
void MacroAssembler::Ubfx(const Register& rd,
                          const Register& rn,
                          unsigned lsb,
                          unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ubfx(rd, rn, lsb, width);
}
1190
1191
// Unsigned integer (fixed-point with `fbits` fraction bits) to floating-point.
void MacroAssembler::Ucvtf(const FPRegister& fd,
                           const Register& rn,
                           unsigned fbits) {
  ASSERT(allow_macro_instructions_);
  ucvtf(fd, rn, fbits);
}
1198
1199
// Unsigned divide: rd = rn / rm.
void MacroAssembler::Udiv(const Register& rd,
                          const Register& rn,
                          const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  udiv(rd, rn, rm);
}
1207
1208
// Unsigned multiply-add long: rd(X) = ra + (rn(W) * rm(W)).
void MacroAssembler::Umaddl(const Register& rd,
                            const Register& rn,
                            const Register& rm,
                            const Register& ra) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  umaddl(rd, rn, rm, ra);
}
1217
1218
1219 void MacroAssembler::Umsubl(const Register& rd,
1220 const Register& rn,
1221 const Register& rm,
1222 const Register& ra) {
1223 ASSERT(allow_macro_instructions_);
1224 ASSERT(!rd.IsZero());
1225 umsubl(rd, rn, rm, ra);
1226 }
1227
1228
// Zero-extend the least-significant byte of rn into rd.
void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  // The instruction writes rd, so the zero register is not a valid target.
  ASSERT(!rd.IsZero());
  uxtb(rd, rn);
}
1234
1235
// Zero-extend the least-significant half-word of rn into rd.
void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  // The instruction writes rd, so the zero register is not a valid target.
  ASSERT(!rd.IsZero());
  uxth(rd, rn);
}
1241
1242
// Zero-extend the least-significant word of rn into rd.
void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  // The instruction writes rd, so the zero register is not a valid target.
  ASSERT(!rd.IsZero());
  uxtw(rd, rn);
}
1248
1249
// Lower csp by 'space' below the current stack pointer, without using any
// scratch registers. Only valid when csp is not the stack pointer in use.
void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
  ASSERT(!csp.Is(sp_));
  // TODO(jbramley): Several callers rely on this not using scratch registers,
  // so we use the assembler directly here. However, this means that large
  // immediate values of 'space' cannot be handled cleanly. Once we implement
  // our flexible scratch register idea, we could greatly simplify this
  // function.
  InstructionAccurateScope scope(this);
  if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
    // The subtract instruction supports a 12-bit immediate, shifted left by
    // zero or 12 bits. So, in two instructions, we can subtract any immediate
    // between zero and (1 << 24) - 1.
    int64_t imm = space.immediate();
    ASSERT(is_uint24(imm));

    // First subtract the top 12 bits (shifted into place), ...
    int64_t imm_top_12_bits = imm >> 12;
    sub(csp, StackPointer(), imm_top_12_bits << 12);
    // ... then subtract whatever remains in the low 12 bits.
    imm -= imm_top_12_bits << 12;
    if (imm > 0) {
      sub(csp, csp, imm);
    }
  } else {
    // Either a 12-bit immediate or a register operand; one sub suffices.
    sub(csp, StackPointer(), space);
  }
}
1275
1276
// Load the address of the isolate's roots array into the root register.
void MacroAssembler::InitializeRootRegister() {
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  Mov(root, Operand(roots_array_start));
}
1282
1283
// Tag the integer in 'src' as a smi by shifting it left by kSmiShift,
// writing the result to 'dst'. Both registers must be 64-bit.
void MacroAssembler::SmiTag(Register dst, Register src) {
  ASSERT(dst.Is64Bits() && src.Is64Bits());
  Lsl(dst, src, kSmiShift);
}
1288
1289
1290 void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); }
1291
1292
// Untag the smi in 'src' by an arithmetic right shift of kSmiShift,
// writing the (sign-extended) integer to 'dst'.
void MacroAssembler::SmiUntag(Register dst, Register src) {
  ASSERT(dst.Is64Bits() && src.Is64Bits());
  // With slow asserts enabled, verify at run time that src really is a smi.
  if (FLAG_enable_slow_asserts) {
    AssertSmi(src);
  }
  Asr(dst, src, kSmiShift);
}
1300
1301
1302 void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); }
1303
1304
1305 void MacroAssembler::SmiUntagToDouble(FPRegister dst,
1306 Register src,
1307 UntagMode mode) {
1308 ASSERT(dst.Is64Bits() && src.Is64Bits());
1309 if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
1310 AssertSmi(src);
1311 }
1312 Scvtf(dst, src, kSmiShift);
1313 }
1314
1315
1316 void MacroAssembler::SmiUntagToFloat(FPRegister dst,
1317 Register src,
1318 UntagMode mode) {
1319 ASSERT(dst.Is32Bits() && src.Is64Bits());
1320 if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
1321 AssertSmi(src);
1322 }
1323 Scvtf(dst, src, kSmiShift);
1324 }
1325
1326
1327 void MacroAssembler::JumpIfSmi(Register value,
1328 Label* smi_label,
1329 Label* not_smi_label) {
1330 STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
1331 // Check if the tag bit is set.
1332 if (smi_label) {
1333 Tbz(value, 0, smi_label);
1334 if (not_smi_label) {
1335 B(not_smi_label);
1336 }
1337 } else {
1338 ASSERT(not_smi_label);
1339 Tbnz(value, 0, not_smi_label);
1340 }
1341 }
1342
1343
1344 void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
1345 JumpIfSmi(value, NULL, not_smi_label);
1346 }
1347
1348
1349 void MacroAssembler::JumpIfBothSmi(Register value1,
1350 Register value2,
1351 Label* both_smi_label,
1352 Label* not_smi_label) {
1353 STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
1354 // Check if both tag bits are clear.
1355 Orr(Tmp0(), value1, value2);
1356 JumpIfSmi(Tmp0(), both_smi_label, not_smi_label);
1357 }
1358
1359
1360 void MacroAssembler::JumpIfEitherSmi(Register value1,
1361 Register value2,
1362 Label* either_smi_label,
1363 Label* not_smi_label) {
1364 STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
1365 // Check if either tag bit is clear.
1366 And(Tmp0(), value1, value2);
1367 JumpIfSmi(Tmp0(), either_smi_label, not_smi_label);
1368 }
1369
1370
1371 void MacroAssembler::JumpIfEitherNotSmi(Register value1,
1372 Register value2,
1373 Label* not_smi_label) {
1374 JumpIfBothSmi(value1, value2, NULL, not_smi_label);
1375 }
1376
1377
1378 void MacroAssembler::JumpIfBothNotSmi(Register value1,
1379 Register value2,
1380 Label* not_smi_label) {
1381 JumpIfEitherSmi(value1, value2, NULL, not_smi_label);
1382 }
1383
1384
1385 void MacroAssembler::IsObjectNameType(Register object,
1386 Register type,
1387 Label* fail) {
1388 CompareObjectType(object, type, type, LAST_NAME_TYPE);
1389 B(hi, fail);
1390 }
1391
1392
1393 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
1394 Register map,
1395 Register scratch,
1396 Label* fail) {
1397 Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1398 IsInstanceJSObjectType(map, scratch, fail);
1399 }
1400
1401
// Branch to 'fail' unless the instance type recorded in 'map' lies in the
// range [FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, LAST_NONCALLABLE_SPEC_OBJECT_
// TYPE]. If 'fail' is NULL, only the condition flags are set: after this,
// 'gt' indicates the type is outside the range.
void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  // If cmp result is lt, the following ccmp will clear all flags.
  // Z == 0, N == V implies gt condition.
  Cmp(scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
  Ccmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE, NoFlag, ge);

  // If we didn't get a valid label object just fall through and leave the
  // flags updated.
  if (fail != NULL) {
    B(gt, fail);
  }
}
1417
1418
// Branch according to whether 'object' is a string: to 'string' if it is,
// to 'not_string' if it is not. Either label (but not both) may be NULL,
// in which case that outcome falls through. Clobbers 'type' with the
// object's instance type.
void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register type,
                                          Label* not_string,
                                          Label* string) {
  Ldr(type, FieldMemOperand(object, HeapObject::kMapOffset));
  Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));

  // String types have all kIsNotStringMask bits clear.
  STATIC_ASSERT(kStringTag == 0);
  ASSERT((string != NULL) || (not_string != NULL));
  if (string == NULL) {
    TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
  } else if (not_string == NULL) {
    TestAndBranchIfAllClear(type.W(), kIsNotStringMask, string);
  } else {
    TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
    B(string);
  }
}
1437
1438
// Materialize 'handle' in Tmp0() and push it onto the stack.
void MacroAssembler::Push(Handle<Object> handle) {
  Mov(Tmp0(), Operand(handle));
  Push(Tmp0());
}
1443
1444
// Claim 'count * unit_size' bytes on the stack by lowering the current
// stack pointer. When csp is not the stack pointer in use, csp is bumped
// first so that it always covers the claimed area.
void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
  uint64_t size = count * unit_size;

  if (size == 0) {
    return;
  }

  if (csp.Is(StackPointer())) {
    // The system stack pointer must stay 16-byte aligned.
    ASSERT(size % 16 == 0);
  } else {
    BumpSystemStackPointer(size);
  }

  Sub(StackPointer(), StackPointer(), size);
}
1460
1461
// Claim 'count * unit_size' bytes on the stack, where 'count' is a
// register and 'unit_size' a power-of-two immediate (applied as a shift).
// When csp is not the stack pointer in use, csp is bumped first.
void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
  ASSERT(IsPowerOf2(unit_size));

  if (unit_size == 0) {
    return;
  }

  // Express the multiplication as a left shift of 'count'.
  const int shift = CountTrailingZeros(unit_size, kXRegSize);
  const Operand size(count, LSL, shift);

  if (size.IsZero()) {
    return;
  }

  if (!csp.Is(StackPointer())) {
    BumpSystemStackPointer(size);
  }

  Sub(StackPointer(), StackPointer(), size);
}
1482
1483
// Claim 'count_smi * unit_size' bytes on the stack, where 'count_smi' holds
// a smi-tagged count. The smi tag and the power-of-two unit size are folded
// into a single net shift of the register.
void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
  ASSERT(IsPowerOf2(unit_size));
  // Net shift = log2(unit_size) - kSmiShift; negative means shift right.
  const int shift = CountTrailingZeros(unit_size, kXRegSize) - kSmiShift;
  const Operand size(count_smi,
                     (shift >= 0) ? (LSL) : (LSR),
                     (shift >= 0) ? (shift) : (-shift));

  if (size.IsZero()) {
    return;
  }

  if (!csp.Is(StackPointer())) {
    BumpSystemStackPointer(size);
  }

  Sub(StackPointer(), StackPointer(), size);
}
1501
1502
// Release 'count * unit_size' bytes from the stack by raising the current
// stack pointer.
void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
  uint64_t size = count * unit_size;

  if (size == 0) {
    return;
  }

  Add(StackPointer(), StackPointer(), size);

  if (csp.Is(StackPointer())) {
    // The system stack pointer must stay 16-byte aligned.
    ASSERT(size % 16 == 0);
  } else if (emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    Mov(csp, StackPointer());
  }
}
1521
1522
// Release 'count * unit_size' bytes from the stack, where 'count' is a
// register and 'unit_size' a power-of-two immediate (applied as a shift).
void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
  ASSERT(IsPowerOf2(unit_size));

  if (unit_size == 0) {
    return;
  }

  // Express the multiplication as a left shift of 'count'.
  const int shift = CountTrailingZeros(unit_size, kXRegSize);
  const Operand size(count, LSL, shift);

  if (size.IsZero()) {
    return;
  }

  Add(StackPointer(), StackPointer(), size);

  if (!csp.Is(StackPointer()) && emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    Mov(csp, StackPointer());
  }
}
1546
1547
// Release 'count_smi * unit_size' bytes from the stack, where 'count_smi'
// holds a smi-tagged count. The smi tag and the power-of-two unit size are
// folded into a single net shift of the register.
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
  ASSERT(IsPowerOf2(unit_size));
  // Net shift = log2(unit_size) - kSmiShift; negative means shift right.
  const int shift = CountTrailingZeros(unit_size, kXRegSize) - kSmiShift;
  const Operand size(count_smi,
                     (shift >= 0) ? (LSL) : (LSR),
                     (shift >= 0) ? (shift) : (-shift));

  if (size.IsZero()) {
    return;
  }

  Add(StackPointer(), StackPointer(), size);

  if (!csp.Is(StackPointer()) && emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    Mov(csp, StackPointer());
  }
}
1568
1569
1570 void MacroAssembler::CompareAndBranch(const Register& lhs,
1571 const Operand& rhs,
1572 Condition cond,
1573 Label* label) {
1574 if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
1575 ((cond == eq) || (cond == ne))) {
1576 if (cond == eq) {
1577 Cbz(lhs, label);
1578 } else {
1579 Cbnz(lhs, label);
1580 }
1581 } else {
1582 Cmp(lhs, rhs);
1583 B(cond, label);
1584 }
1585 }
1586
1587
1588 void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
1589 const uint64_t bit_pattern,
1590 Label* label) {
1591 int bits = reg.SizeInBits();
1592 ASSERT(CountSetBits(bit_pattern, bits) > 0);
1593 if (CountSetBits(bit_pattern, bits) == 1) {
1594 Tbnz(reg, MaskToBit(bit_pattern), label);
1595 } else {
1596 Tst(reg, bit_pattern);
1597 B(ne, label);
1598 }
1599 }
1600
1601
1602 void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
1603 const uint64_t bit_pattern,
1604 Label* label) {
1605 int bits = reg.SizeInBits();
1606 ASSERT(CountSetBits(bit_pattern, bits) > 0);
1607 if (CountSetBits(bit_pattern, bits) == 1) {
1608 Tbz(reg, MaskToBit(bit_pattern), label);
1609 } else {
1610 Tst(reg, bit_pattern);
1611 B(eq, label);
1612 }
1613 }
1614
1615
// Embed a 16-bit payload in the instruction stream as a single movz to
// xzr. Writing to the zero register discards the value, so this executes
// as a no-op while remaining visible to tools scanning the code.
void MacroAssembler::InlineData(uint64_t data) {
  ASSERT(is_uint16(data));
  InstructionAccurateScope scope(this, 1);
  movz(xzr, data);
}
1621
1622
// Emit the instrumentation-enable marker: a single movn to xzr carrying
// InstrumentStateEnable. Executes as a no-op; presumably recognized by the
// instrumentation tooling.
void MacroAssembler::EnableInstrumentation() {
  InstructionAccurateScope scope(this, 1);
  movn(xzr, InstrumentStateEnable);
}
1627
1628
// Emit the instrumentation-disable marker: a single movn to xzr carrying
// InstrumentStateDisable. Executes as a no-op; presumably recognized by
// the instrumentation tooling.
void MacroAssembler::DisableInstrumentation() {
  InstructionAccurateScope scope(this, 1);
  movn(xzr, InstrumentStateDisable);
}
1633
1634
// Emit an instrumentation annotation: a single movn to xzr whose immediate
// packs a two-character marker name (first character in the low byte).
void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
  ASSERT(strlen(marker_name) == 2);

  // We allow only printable characters in the marker names. Unprintable
  // characters are reserved for controlling features of the instrumentation.
  ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));

  InstructionAccurateScope scope(this, 1);
  movn(xzr, (marker_name[1] << 8) | marker_name[0]);
}
1645
1646 } } // namespace v8::internal
1647
1648 #endif // V8_A64_MACRO_ASSEMBLER_A64_INL_H_
OLDNEW
« no previous file with comments | « src/a64/macro-assembler-a64.cc ('k') | src/a64/regexp-macro-assembler-a64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698