OLD | NEW |
| (Empty) |
1 // Copyright 2013 the V8 project authors. All rights reserved. | |
2 // Redistribution and use in source and binary forms, with or without | |
3 // modification, are permitted provided that the following conditions are | |
4 // met: | |
5 // | |
6 // * Redistributions of source code must retain the above copyright | |
7 // notice, this list of conditions and the following disclaimer. | |
8 // * Redistributions in binary form must reproduce the above | |
9 // copyright notice, this list of conditions and the following | |
10 // disclaimer in the documentation and/or other materials provided | |
11 // with the distribution. | |
12 // * Neither the name of Google Inc. nor the names of its | |
13 // contributors may be used to endorse or promote products derived | |
14 // from this software without specific prior written permission. | |
15 // | |
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
27 | |
28 #ifndef V8_A64_MACRO_ASSEMBLER_A64_INL_H_ | |
29 #define V8_A64_MACRO_ASSEMBLER_A64_INL_H_ | |
30 | |
31 #include <ctype.h> | |
32 | |
33 #include "v8globals.h" | |
34 #include "globals.h" | |
35 | |
36 #include "a64/assembler-a64.h" | |
37 #include "a64/assembler-a64-inl.h" | |
38 #include "a64/macro-assembler-a64.h" | |
39 #include "a64/instrument-a64.h" | |
40 | |
41 | |
42 namespace v8 { | |
43 namespace internal { | |
44 | |
45 | |
46 MemOperand FieldMemOperand(Register object, int offset) { | |
47 return MemOperand(object, offset - kHeapObjectTag); | |
48 } | |
49 | |
50 | |
51 MemOperand UntagSmiFieldMemOperand(Register object, int offset) { | |
52 return UntagSmiMemOperand(object, offset - kHeapObjectTag); | |
53 } | |
54 | |
55 | |
56 MemOperand UntagSmiMemOperand(Register object, int offset) { | |
57 // Assumes that Smis are shifted by 32 bits and little endianness. | |
58 STATIC_ASSERT(kSmiShift == 32); | |
59 return MemOperand(object, offset + (kSmiShift / kBitsPerByte)); | |
60 } | |
61 | |
62 | |
// Returns the handle to the code object currently being generated. Asserts
// that the handle has been set before it is used.
Handle<Object> MacroAssembler::CodeObject() {
  ASSERT(!code_object_.is_null());
  return code_object_;
}
67 | |
68 | |
// Bitwise logical macro-instructions. Each forwards to LogicalMacro(), which
// is responsible for synthesizing immediates that the underlying instruction
// cannot encode directly. All check allow_macro_instructions_, and those
// that write a result also check that rd is not the zero register.


void MacroAssembler::And(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, AND);
}


void MacroAssembler::Ands(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, ANDS);
}


void MacroAssembler::Tst(const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  // Tst is Ands with the result discarded into the zero register.
  LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
}


void MacroAssembler::Bic(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, BIC);
}


void MacroAssembler::Bics(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, BICS);
}


void MacroAssembler::Orr(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, ORR);
}


void MacroAssembler::Orn(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, ORN);
}


void MacroAssembler::Eor(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, EOR);
}


void MacroAssembler::Eon(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  LogicalMacro(rd, rn, operand, EON);
}
146 | |
147 | |
// Conditional compare. A negative immediate operand is rewritten as the
// complementary instruction (CCMP <-> CCMN) with the negated immediate, so
// that it has a chance of being encodable.
// NOTE(review): `-operand.immediate()` overflows (UB) if the immediate is
// INT64_MIN — presumably no caller passes it; confirm.
void MacroAssembler::Ccmp(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond) {
  ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
  } else {
    ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
  }
}


// See Ccmp above; same negative-immediate rewriting, in the other direction.
void MacroAssembler::Ccmn(const Register& rn,
                          const Operand& operand,
                          StatusFlags nzcv,
                          Condition cond) {
  ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
  } else {
    ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
  }
}
172 | |
173 | |
// Add/subtract macro-instructions. A negative immediate is rewritten as the
// opposite operation (ADD <-> SUB) on the negated immediate so it can be
// encoded. AddSubMacro() synthesizes anything still unencodable.
// NOTE(review): `-operand.immediate()` overflows (UB) for INT64_MIN —
// presumably unreachable here; confirm callers.
void MacroAssembler::Add(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, SUB);
  } else {
    AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
  }
}


// As Add, but sets the condition flags.
void MacroAssembler::Adds(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, SUB);
  } else {
    AddSubMacro(rd, rn, operand, SetFlags, ADD);
  }
}


void MacroAssembler::Sub(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), LeaveFlags, ADD);
  } else {
    AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
  }
}


// As Sub, but sets the condition flags.
void MacroAssembler::Subs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  if (operand.IsImmediate() && (operand.immediate() < 0)) {
    AddSubMacro(rd, rn, -operand.immediate(), SetFlags, ADD);
  } else {
    AddSubMacro(rd, rn, operand, SetFlags, SUB);
  }
}
219 | |
220 | |
221 void MacroAssembler::Cmn(const Register& rn, const Operand& operand) { | |
222 ASSERT(allow_macro_instructions_); | |
223 Adds(AppropriateZeroRegFor(rn), rn, operand); | |
224 } | |
225 | |
226 | |
227 void MacroAssembler::Cmp(const Register& rn, const Operand& operand) { | |
228 ASSERT(allow_macro_instructions_); | |
229 Subs(AppropriateZeroRegFor(rn), rn, operand); | |
230 } | |
231 | |
232 | |
233 void MacroAssembler::Neg(const Register& rd, | |
234 const Operand& operand) { | |
235 ASSERT(allow_macro_instructions_); | |
236 ASSERT(!rd.IsZero()); | |
237 if (operand.IsImmediate()) { | |
238 Mov(rd, -operand.immediate()); | |
239 } else { | |
240 Sub(rd, AppropriateZeroRegFor(rd), operand); | |
241 } | |
242 } | |
243 | |
244 | |
245 void MacroAssembler::Negs(const Register& rd, | |
246 const Operand& operand) { | |
247 ASSERT(allow_macro_instructions_); | |
248 Subs(rd, AppropriateZeroRegFor(rd), operand); | |
249 } | |
250 | |
251 | |
// Add/subtract-with-carry macro-instructions, forwarding to
// AddSubWithCarryMacro() with the appropriate flag-update mode.


void MacroAssembler::Adc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
}


void MacroAssembler::Adcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
}


void MacroAssembler::Sbc(const Register& rd,
                         const Register& rn,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
}


void MacroAssembler::Sbcs(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
}


// Negate-with-carry: subtract-with-carry from the zero register.
void MacroAssembler::Ngc(const Register& rd,
                         const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  Register zr = AppropriateZeroRegFor(rd);
  Sbc(rd, zr, operand);
}


// As Ngc, but sets the condition flags.
void MacroAssembler::Ngcs(const Register& rd,
                          const Operand& operand) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  Register zr = AppropriateZeroRegFor(rd);
  Sbcs(rd, zr, operand);
}
304 | |
305 | |
306 void MacroAssembler::Mvn(const Register& rd, uint64_t imm) { | |
307 ASSERT(allow_macro_instructions_); | |
308 ASSERT(!rd.IsZero()); | |
309 Mov(rd, ~imm); | |
310 } | |
311 | |
312 | |
// Defines one load/store macro-instruction (e.g. Ldr, Str) per entry in
// LS_MACRO_LIST. Each forwards to LoadStoreMacro(), which takes care of
// addressing modes the raw instruction cannot encode.
#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
  ASSERT(allow_macro_instructions_); \
  LoadStoreMacro(REG, addr, OP); \
}
LS_MACRO_LIST(DEFINE_FUNCTION)
#undef DEFINE_FUNCTION
320 | |
321 | |
// Computes the address of the given label into rd.
void MacroAssembler::Adr(const Register& rd, Label* label) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  adr(rd, label);
}


// Arithmetic shift right by an immediate shift amount.
void MacroAssembler::Asr(const Register& rd,
                         const Register& rn,
                         unsigned shift) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  asr(rd, rn, shift);
}


// Arithmetic shift right by a register-held (variable) shift amount.
void MacroAssembler::Asr(const Register& rd,
                         const Register& rn,
                         const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  asrv(rd, rn, rm);
}
345 | |
346 | |
347 void MacroAssembler::B(Label* label) { | |
348 b(label); | |
349 CheckVeneerPool(false, false); | |
350 } | |
351 | |
352 | |
// Conditional branch; forwards to the (label, cond) overload declared
// elsewhere in this class.
void MacroAssembler::B(Condition cond, Label* label) {
  ASSERT(allow_macro_instructions_);
  B(label, cond);
}
357 | |
358 | |
// Bitfield insert: copies `width` low bits of rn into rd at bit `lsb`.
void MacroAssembler::Bfi(const Register& rd,
                         const Register& rn,
                         unsigned lsb,
                         unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  bfi(rd, rn, lsb, width);
}


// Bitfield extract and insert low.
void MacroAssembler::Bfxil(const Register& rd,
                           const Register& rn,
                           unsigned lsb,
                           unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  bfxil(rd, rn, lsb, width);
}


// Binds the label to the current position in the instruction stream.
void MacroAssembler::Bind(Label* label) {
  ASSERT(allow_macro_instructions_);
  bind(label);
}


// Branch with link (call) to a label.
void MacroAssembler::Bl(Label* label) {
  ASSERT(allow_macro_instructions_);
  bl(label);
}


// Branch with link (call) to the address held in xn.
void MacroAssembler::Blr(const Register& xn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!xn.IsZero());
  blr(xn);
}


// Branch to the address held in xn.
void MacroAssembler::Br(const Register& xn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!xn.IsZero());
  br(xn);
}


// Emits a breakpoint instruction with the given immediate code.
void MacroAssembler::Brk(int code) {
  ASSERT(allow_macro_instructions_);
  brk(code);
}
409 | |
410 | |
// Conditional-select aliases. The `al` and `nv` conditions are disallowed
// because these aliases encode the inverted condition, which does not exist
// for al/nv.


// Conditional increment: rd = cond ? rn + 1 : rn.
void MacroAssembler::Cinc(const Register& rd,
                          const Register& rn,
                          Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  cinc(rd, rn, cond);
}


// Conditional invert: rd = cond ? ~rn : rn.
void MacroAssembler::Cinv(const Register& rd,
                          const Register& rn,
                          Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  cinv(rd, rn, cond);
}


// Count leading sign bits.
void MacroAssembler::Cls(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  cls(rd, rn);
}


// Count leading zero bits.
void MacroAssembler::Clz(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  clz(rd, rn);
}


// Conditional negate: rd = cond ? -rn : rn.
void MacroAssembler::Cneg(const Register& rd,
                          const Register& rn,
                          Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  cneg(rd, rn, cond);
}
453 | |
454 | |
// Conditionally zero the destination register. Only X registers are supported
// due to the truncation side-effect when used on W registers.
void MacroAssembler::CzeroX(const Register& rd,
                            Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsSP() && rd.Is64Bits());
  ASSERT((cond != al) && (cond != nv));
  // rd = cond ? xzr : rd.
  csel(rd, xzr, rd, cond);
}


// Conditionally move a value into the destination register. Only X registers
// are supported due to the truncation side-effect when used on W registers.
void MacroAssembler::CmovX(const Register& rd,
                           const Register& rn,
                           Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsSP());
  ASSERT(rd.Is64Bits() && rn.Is64Bits());
  ASSERT((cond != al) && (cond != nv));
  // csel with identical source and destination would be a no-op; skip it.
  if (!rd.is(rn)) {
    csel(rd, rn, rd, cond);
  }
}
479 | |
480 | |
// Conditional set: rd = cond ? 1 : 0.
void MacroAssembler::Cset(const Register& rd, Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  cset(rd, cond);
}


// Conditional set mask: rd = cond ? -1 : 0.
void MacroAssembler::Csetm(const Register& rd, Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  csetm(rd, cond);
}


// Conditional select increment: rd = cond ? rn : rm + 1.
void MacroAssembler::Csinc(const Register& rd,
                           const Register& rn,
                           const Register& rm,
                           Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  csinc(rd, rn, rm, cond);
}


// Conditional select invert: rd = cond ? rn : ~rm.
void MacroAssembler::Csinv(const Register& rd,
                           const Register& rn,
                           const Register& rm,
                           Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  csinv(rd, rn, rm, cond);
}


// Conditional select negate: rd = cond ? rn : -rm.
void MacroAssembler::Csneg(const Register& rd,
                           const Register& rn,
                           const Register& rm,
                           Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  csneg(rd, rn, rm, cond);
}
528 | |
529 | |
// Data memory barrier.
void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
  ASSERT(allow_macro_instructions_);
  dmb(domain, type);
}


// Data synchronization barrier.
void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
  ASSERT(allow_macro_instructions_);
  dsb(domain, type);
}


// Emits debug/trace output controlled by `params`, tagged with `code`.
void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
  ASSERT(allow_macro_instructions_);
  debug(message, code, params);
}
546 | |
547 | |
// Extracts a register-sized field, starting at bit `lsb`, from the
// concatenation rn:rm.
void MacroAssembler::Extr(const Register& rd,
                          const Register& rn,
                          const Register& rm,
                          unsigned lsb) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  extr(rd, rn, rm, lsb);
}
556 | |
557 | |
// Floating-point absolute value.
void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  fabs(fd, fn);
}


// Floating-point add.
void MacroAssembler::Fadd(const FPRegister& fd,
                          const FPRegister& fn,
                          const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fadd(fd, fn, fm);
}


// Floating-point conditional compare.
void MacroAssembler::Fccmp(const FPRegister& fn,
                           const FPRegister& fm,
                           StatusFlags nzcv,
                           Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT((cond != al) && (cond != nv));
  fccmp(fn, fm, nzcv, cond);
}


// Floating-point compare of two registers.
void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fcmp(fn, fm);
}


// Floating-point compare against a literal. fcmp can encode a comparison
// with 0.0 directly; any other value must first be materialized in a
// scratch FP register.
void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
  ASSERT(allow_macro_instructions_);
  if (value != 0.0) {
    UseScratchRegisterScope temps(this);
    FPRegister tmp = temps.AcquireSameSizeAs(fn);
    Fmov(tmp, value);
    fcmp(fn, tmp);
  } else {
    fcmp(fn, value);
  }
}


// Floating-point conditional select: fd = cond ? fn : fm.
void MacroAssembler::Fcsel(const FPRegister& fd,
                           const FPRegister& fn,
                           const FPRegister& fm,
                           Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT((cond != al) && (cond != nv));
  fcsel(fd, fn, fm, cond);
}
609 | |
610 | |
// Floating-point precision conversion (single <-> double).
void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  fcvt(fd, fn);
}


// FP -> integer conversions. Suffix: first letter is rounding mode
// (a = to-nearest-with-ties-away, m = toward-minus-infinity, n = to-nearest,
// z = toward-zero), second is signedness (s/u).


void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtas(rd, fn);
}


void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtau(rd, fn);
}


void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtms(rd, fn);
}


void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtmu(rd, fn);
}


void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtns(rd, fn);
}


void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtnu(rd, fn);
}


void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtzs(rd, fn);
}


void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fcvtzu(rd, fn);
}
669 | |
670 | |
// Floating-point divide.
void MacroAssembler::Fdiv(const FPRegister& fd,
                          const FPRegister& fn,
                          const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fdiv(fd, fn, fm);
}


// Fused multiply-add: fd = fa + fn * fm.
void MacroAssembler::Fmadd(const FPRegister& fd,
                           const FPRegister& fn,
                           const FPRegister& fm,
                           const FPRegister& fa) {
  ASSERT(allow_macro_instructions_);
  fmadd(fd, fn, fm, fa);
}


// Floating-point maximum.
void MacroAssembler::Fmax(const FPRegister& fd,
                          const FPRegister& fn,
                          const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fmax(fd, fn, fm);
}


// Floating-point maximum-number (maxNum semantics for NaN operands).
void MacroAssembler::Fmaxnm(const FPRegister& fd,
                            const FPRegister& fn,
                            const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fmaxnm(fd, fn, fm);
}


// Floating-point minimum.
void MacroAssembler::Fmin(const FPRegister& fd,
                          const FPRegister& fn,
                          const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fmin(fd, fn, fm);
}


// Floating-point minimum-number (minNum semantics for NaN operands).
void MacroAssembler::Fminnm(const FPRegister& fd,
                            const FPRegister& fn,
                            const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fminnm(fd, fn, fm);
}


// FP register-to-register move.
void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
  ASSERT(allow_macro_instructions_);
  // Only emit an instruction if fd and fn are different, and they are both D
  // registers. fmov(s0, s0) is not a no-op because it clears the top word of
  // d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
  // top of q0, but FPRegister does not currently support Q registers.
  if (!fd.Is(fn) || !fd.Is64Bits()) {
    fmov(fd, fn);
  }
}
730 | |
731 | |
// Move an integer register's bits into an FP register.
void MacroAssembler::Fmov(FPRegister fd, Register rn) {
  ASSERT(allow_macro_instructions_);
  fmov(fd, rn);
}


// Materialize a double immediate. Encodable immediates use fmov directly;
// positive zero moves from xzr; everything else goes via the raw bit
// pattern in an integer scratch register.
void MacroAssembler::Fmov(FPRegister fd, double imm) {
  ASSERT(allow_macro_instructions_);
  if (fd.Is32Bits()) {
    // Delegate to the float overload for S registers.
    Fmov(fd, static_cast<float>(imm));
    return;
  }

  ASSERT(fd.Is64Bits());
  if (IsImmFP64(imm)) {
    fmov(fd, imm);
  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
    // Exactly +0.0 (the copysign check excludes -0.0): move from xzr.
    fmov(fd, xzr);
  } else {
    UseScratchRegisterScope temps(this);
    Register tmp = temps.AcquireX();
    // TODO(all): Use Assembler::ldr(const FPRegister& ft, double imm).
    Mov(tmp, double_to_rawbits(imm));
    Fmov(fd, tmp);
  }
}


// Materialize a float immediate; mirror image of the double overload above.
void MacroAssembler::Fmov(FPRegister fd, float imm) {
  ASSERT(allow_macro_instructions_);
  if (fd.Is64Bits()) {
    Fmov(fd, static_cast<double>(imm));
    return;
  }

  ASSERT(fd.Is32Bits());
  if (IsImmFP32(imm)) {
    fmov(fd, imm);
  } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
    // Exactly +0.0: move from wzr.
    fmov(fd, wzr);
  } else {
    UseScratchRegisterScope temps(this);
    Register tmp = temps.AcquireW();
    // TODO(all): Use Assembler::ldr(const FPRegister& ft, float imm).
    Mov(tmp, float_to_rawbits(imm));
    Fmov(fd, tmp);
  }
}
780 | |
781 | |
// Move an FP register's bits into an integer register.
void MacroAssembler::Fmov(Register rd, FPRegister fn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  fmov(rd, fn);
}


// Fused multiply-subtract: fd = fa - fn * fm.
void MacroAssembler::Fmsub(const FPRegister& fd,
                           const FPRegister& fn,
                           const FPRegister& fm,
                           const FPRegister& fa) {
  ASSERT(allow_macro_instructions_);
  fmsub(fd, fn, fm, fa);
}


// Floating-point multiply.
void MacroAssembler::Fmul(const FPRegister& fd,
                          const FPRegister& fn,
                          const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fmul(fd, fn, fm);
}


// Floating-point negate.
void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  fneg(fd, fn);
}


// Fused negated multiply-add: fd = -fa - fn * fm.
void MacroAssembler::Fnmadd(const FPRegister& fd,
                            const FPRegister& fn,
                            const FPRegister& fm,
                            const FPRegister& fa) {
  ASSERT(allow_macro_instructions_);
  fnmadd(fd, fn, fm, fa);
}


// Fused negated multiply-subtract: fd = -fa + fn * fm.
void MacroAssembler::Fnmsub(const FPRegister& fd,
                            const FPRegister& fn,
                            const FPRegister& fm,
                            const FPRegister& fa) {
  ASSERT(allow_macro_instructions_);
  fnmsub(fd, fn, fm, fa);
}


// Round to integral, ties away from zero.
void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  frinta(fd, fn);
}


// Round to integral, to nearest with ties to even.
void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  frintn(fd, fn);
}


// Round to integral, toward zero.
void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  frintz(fd, fn);
}


// Floating-point square root.
void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
  ASSERT(allow_macro_instructions_);
  fsqrt(fd, fn);
}


// Floating-point subtract.
void MacroAssembler::Fsub(const FPRegister& fd,
                          const FPRegister& fn,
                          const FPRegister& fm) {
  ASSERT(allow_macro_instructions_);
  fsub(fd, fn, fm);
}
860 | |
861 | |
// Emits a hint instruction (e.g. NOP, YIELD).
void MacroAssembler::Hint(SystemHint code) {
  ASSERT(allow_macro_instructions_);
  hint(code);
}


// Emits a halt instruction with the given immediate code.
void MacroAssembler::Hlt(int code) {
  ASSERT(allow_macro_instructions_);
  hlt(code);
}


// Instruction synchronization barrier.
void MacroAssembler::Isb() {
  ASSERT(allow_macro_instructions_);
  isb();
}
878 | |
879 | |
// Load pair, non-temporal hint. The destinations must be distinct.
void MacroAssembler::Ldnp(const CPURegister& rt,
                          const CPURegister& rt2,
                          const MemOperand& src) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!AreAliased(rt, rt2));
  ldnp(rt, rt2, src);
}


// Load pair. The destinations must be distinct.
void MacroAssembler::Ldp(const CPURegister& rt,
                         const CPURegister& rt2,
                         const MemOperand& src) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!AreAliased(rt, rt2));
  ldp(rt, rt2, src);
}


// Load pair of sign-extended words.
void MacroAssembler::Ldpsw(const Register& rt,
                           const Register& rt2,
                           const MemOperand& src) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rt.IsZero());
  ASSERT(!rt2.IsZero());
  ldpsw(rt, rt2, src);
}


// PC-relative literal load of a double into an FP register.
void MacroAssembler::Ldr(const FPRegister& ft, double imm) {
  ASSERT(allow_macro_instructions_);
  ldr(ft, imm);
}


// PC-relative literal load of a 64-bit immediate into rt.
void MacroAssembler::Ldr(const Register& rt, uint64_t imm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rt.IsZero());
  ldr(rt, imm);
}
919 | |
920 | |
// Logical shift left by an immediate shift amount.
void MacroAssembler::Lsl(const Register& rd,
                         const Register& rn,
                         unsigned shift) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  lsl(rd, rn, shift);
}


// Logical shift left by a register-held (variable) shift amount.
void MacroAssembler::Lsl(const Register& rd,
                         const Register& rn,
                         const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  lslv(rd, rn, rm);
}


// Logical shift right by an immediate shift amount.
void MacroAssembler::Lsr(const Register& rd,
                         const Register& rn,
                         unsigned shift) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  lsr(rd, rn, shift);
}


// Logical shift right by a register-held (variable) shift amount.
void MacroAssembler::Lsr(const Register& rd,
                         const Register& rn,
                         const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  lsrv(rd, rn, rm);
}
955 | |
956 | |
// Multiply-add: rd = ra + rn * rm.
void MacroAssembler::Madd(const Register& rd,
                          const Register& rn,
                          const Register& rm,
                          const Register& ra) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  madd(rd, rn, rm, ra);
}


// Multiply-negate: rd = -(rn * rm).
void MacroAssembler::Mneg(const Register& rd,
                          const Register& rn,
                          const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  mneg(rd, rn, rm);
}


// Register-to-register move. Calls Assembler::mov directly to bypass the
// macro-level Mov overloads.
void MacroAssembler::Mov(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  // Emit a register move only if the registers are distinct, or if they are
  // not X registers. Note that mov(w0, w0) is not a no-op because it clears
  // the top word of x0.
  if (!rd.Is(rn) || !rd.Is64Bits()) {
    Assembler::mov(rd, rn);
  }
}
986 | |
987 | |
// Move 16-bit immediate into rd at the given shift, keeping other bits.
void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  movk(rd, imm, shift);
}


// Read a system register into rt.
void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rt.IsZero());
  mrs(rt, sysreg);
}


// Write rt to a system register.
void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rt.IsZero());
  msr(sysreg, rt);
}
1007 | |
1008 | |
// Multiply-subtract: rd = ra - rn * rm.
void MacroAssembler::Msub(const Register& rd,
                          const Register& rn,
                          const Register& rm,
                          const Register& ra) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  msub(rd, rn, rm, ra);
}


// Multiply: rd = rn * rm.
void MacroAssembler::Mul(const Register& rd,
                         const Register& rn,
                         const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  mul(rd, rn, rm);
}


// Reverse the bit order of rn into rd.
void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  rbit(rd, rn);
}


// Return to the address held in xn.
void MacroAssembler::Ret(const Register& xn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!xn.IsZero());
  ret(xn);
  // Emitting a branch may push the veneer pool past its emission point.
  CheckVeneerPool(false, false);
}
1041 | |
1042 | |
1043 void MacroAssembler::Rev(const Register& rd, const Register& rn) { | |
1044 ASSERT(allow_macro_instructions_); | |
1045 ASSERT(!rd.IsZero()); | |
1046 rev(rd, rn); | |
1047 } | |
1048 | |
1049 | |
1050 void MacroAssembler::Rev16(const Register& rd, const Register& rn) { | |
1051 ASSERT(allow_macro_instructions_); | |
1052 ASSERT(!rd.IsZero()); | |
1053 rev16(rd, rn); | |
1054 } | |
1055 | |
1056 | |
1057 void MacroAssembler::Rev32(const Register& rd, const Register& rn) { | |
1058 ASSERT(allow_macro_instructions_); | |
1059 ASSERT(!rd.IsZero()); | |
1060 rev32(rd, rn); | |
1061 } | |
1062 | |
1063 | |
// Rotate 'rs' right by the immediate 'shift' bit positions into 'rd'.
void MacroAssembler::Ror(const Register& rd,
                         const Register& rs,
                         unsigned shift) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ror(rd, rs, shift);
}
1071 | |
1072 | |
// Rotate 'rn' right by the variable amount held in 'rm' (RORV instruction).
void MacroAssembler::Ror(const Register& rd,
                         const Register& rn,
                         const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  rorv(rd, rn, rm);
}
1080 | |
1081 | |
// Signed bitfield insert in zeros: copy 'width' low bits of 'rn' into 'rd'
// at position 'lsb', sign-extending and zeroing the rest.
void MacroAssembler::Sbfiz(const Register& rd,
                           const Register& rn,
                           unsigned lsb,
                           unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  sbfiz(rd, rn, lsb, width);
}
1090 | |
1091 | |
// Signed bitfield extract: take 'width' bits of 'rn' starting at 'lsb' and
// sign-extend them into 'rd'.
void MacroAssembler::Sbfx(const Register& rd,
                          const Register& rn,
                          unsigned lsb,
                          unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  sbfx(rd, rn, lsb, width);
}
1100 | |
1101 | |
// Convert the signed fixed-point value in 'rn' (with 'fbits' fractional
// bits; 0 means plain integer) to floating point in 'fd'.
// Note: no IsZero() check here -- the zero register is a valid source.
void MacroAssembler::Scvtf(const FPRegister& fd,
                           const Register& rn,
                           unsigned fbits) {
  ASSERT(allow_macro_instructions_);
  scvtf(fd, rn, fbits);
}
1108 | |
1109 | |
// Signed divide: rd = rn / rm.
void MacroAssembler::Sdiv(const Register& rd,
                          const Register& rn,
                          const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  sdiv(rd, rn, rm);
}
1117 | |
1118 | |
// Signed multiply-add long: rd = ra + (rn * rm), with 32-bit source
// operands and a 64-bit result.
void MacroAssembler::Smaddl(const Register& rd,
                            const Register& rn,
                            const Register& rm,
                            const Register& ra) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  smaddl(rd, rn, rm, ra);
}
1127 | |
1128 | |
// Signed multiply-subtract long: rd = ra - (rn * rm), with 32-bit source
// operands and a 64-bit result.
void MacroAssembler::Smsubl(const Register& rd,
                            const Register& rn,
                            const Register& rm,
                            const Register& ra) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  smsubl(rd, rn, rm, ra);
}
1137 | |
1138 | |
// Signed multiply long: rd = rn * rm, 32-bit sources, 64-bit result.
void MacroAssembler::Smull(const Register& rd,
                           const Register& rn,
                           const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  smull(rd, rn, rm);
}
1146 | |
1147 | |
// Signed multiply high: rd = the upper 64 bits of the 128-bit product
// rn * rm.
void MacroAssembler::Smulh(const Register& rd,
                           const Register& rn,
                           const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  smulh(rd, rn, rm);
}
1155 | |
1156 | |
// Store the pair 'rt', 'rt2' to 'dst' with a non-temporal (streaming) hint.
void MacroAssembler::Stnp(const CPURegister& rt,
                          const CPURegister& rt2,
                          const MemOperand& dst) {
  ASSERT(allow_macro_instructions_);
  stnp(rt, rt2, dst);
}
1163 | |
1164 | |
// Store the pair 'rt', 'rt2' to the memory operand 'dst'.
void MacroAssembler::Stp(const CPURegister& rt,
                         const CPURegister& rt2,
                         const MemOperand& dst) {
  ASSERT(allow_macro_instructions_);
  stp(rt, rt2, dst);
}
1171 | |
1172 | |
// Sign-extend the low byte of 'rn' into 'rd'.
void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  sxtb(rd, rn);
}
1178 | |
1179 | |
// Sign-extend the low halfword of 'rn' into 'rd'.
void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  sxth(rd, rn);
}
1185 | |
1186 | |
// Sign-extend the low word (32 bits) of 'rn' into 'rd'.
void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  sxtw(rd, rn);
}
1192 | |
1193 | |
// Unsigned bitfield insert in zeros: copy 'width' low bits of 'rn' into
// 'rd' at position 'lsb', zeroing all other bits.
void MacroAssembler::Ubfiz(const Register& rd,
                           const Register& rn,
                           unsigned lsb,
                           unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ubfiz(rd, rn, lsb, width);
}
1202 | |
1203 | |
// Unsigned bitfield extract: take 'width' bits of 'rn' starting at 'lsb'
// and zero-extend them into 'rd'.
void MacroAssembler::Ubfx(const Register& rd,
                          const Register& rn,
                          unsigned lsb,
                          unsigned width) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ubfx(rd, rn, lsb, width);
}
1212 | |
1213 | |
// Convert the unsigned fixed-point value in 'rn' (with 'fbits' fractional
// bits; 0 means plain integer) to floating point in 'fd'.
// Note: no IsZero() check here -- the zero register is a valid source.
void MacroAssembler::Ucvtf(const FPRegister& fd,
                           const Register& rn,
                           unsigned fbits) {
  ASSERT(allow_macro_instructions_);
  ucvtf(fd, rn, fbits);
}
1220 | |
1221 | |
// Unsigned divide: rd = rn / rm.
void MacroAssembler::Udiv(const Register& rd,
                          const Register& rn,
                          const Register& rm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  udiv(rd, rn, rm);
}
1229 | |
1230 | |
// Unsigned multiply-add long: rd = ra + (rn * rm), with 32-bit source
// operands and a 64-bit result.
void MacroAssembler::Umaddl(const Register& rd,
                            const Register& rn,
                            const Register& rm,
                            const Register& ra) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  umaddl(rd, rn, rm, ra);
}
1239 | |
1240 | |
// Unsigned multiply-subtract long: rd = ra - (rn * rm), with 32-bit source
// operands and a 64-bit result.
void MacroAssembler::Umsubl(const Register& rd,
                            const Register& rn,
                            const Register& rm,
                            const Register& ra) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  umsubl(rd, rn, rm, ra);
}
1249 | |
1250 | |
// Zero-extend the low byte of 'rn' into 'rd'.
void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  uxtb(rd, rn);
}
1256 | |
1257 | |
// Zero-extend the low halfword of 'rn' into 'rd'.
void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  uxth(rd, rn);
}
1263 | |
1264 | |
// Zero-extend the low word (32 bits) of 'rn' into 'rd'.
void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  uxtw(rd, rn);
}
1270 | |
1271 | |
// Lower csp to StackPointer() - space, so the system stack pointer keeps
// covering memory that the (separate) JavaScript stack pointer is about to
// claim. Only valid when csp is not itself the active stack pointer.
void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
  ASSERT(!csp.Is(sp_));
  // TODO(jbramley): Several callers rely on this not using scratch registers,
  // so we use the assembler directly here. However, this means that large
  // immediate values of 'space' cannot be handled cleanly. (Only 24-bits
  // immediates or values of 'space' that can be encoded in one instruction are
  // accepted.) Once we implement our flexible scratch register idea, we could
  // greatly simplify this function.
  InstructionAccurateScope scope(this);
  if ((space.IsImmediate()) && !is_uint12(space.immediate())) {
    // The subtract instruction supports a 12-bit immediate, shifted left by
    // zero or 12 bits. So, in two instructions, we can subtract any immediate
    // between zero and (1 << 24) - 1.
    int64_t imm = space.immediate();
    ASSERT(is_uint24(imm));

    int64_t imm_top_12_bits = imm >> 12;
    sub(csp, StackPointer(), imm_top_12_bits << 12);
    // Subtract the remaining low 12 bits, if any.
    imm -= imm_top_12_bits << 12;
    if (imm > 0) {
      sub(csp, csp, imm);
    }
  } else {
    // Register operands and small immediates fit in a single sub.
    sub(csp, StackPointer(), space);
  }
}
1298 | |
1299 | |
// Point the dedicated root register at the isolate's roots array.
void MacroAssembler::InitializeRootRegister() {
  ExternalReference roots_array_start =
      ExternalReference::roots_array_start(isolate());
  Mov(root, Operand(roots_array_start));
}
1305 | |
1306 | |
// Tag the integer in 'src' as a smi by shifting it left by kSmiShift.
void MacroAssembler::SmiTag(Register dst, Register src) {
  ASSERT(dst.Is64Bits() && src.Is64Bits());
  Lsl(dst, src, kSmiShift);
}
1311 | |
1312 | |
1313 void MacroAssembler::SmiTag(Register smi) { SmiTag(smi, smi); } | |
1314 | |
1315 | |
// Untag the smi in 'src' into 'dst' with an arithmetic shift right, which
// preserves the sign. With slow asserts enabled, first verify that 'src'
// really holds a smi.
void MacroAssembler::SmiUntag(Register dst, Register src) {
  ASSERT(dst.Is64Bits() && src.Is64Bits());
  if (FLAG_enable_slow_asserts) {
    AssertSmi(src);
  }
  Asr(dst, src, kSmiShift);
}
1323 | |
1324 | |
1325 void MacroAssembler::SmiUntag(Register smi) { SmiUntag(smi, smi); } | |
1326 | |
1327 | |
// Convert the smi in 'src' directly to a double in 'dst'. Passing kSmiShift
// as the fixed-point fraction size lets scvtf untag and convert in a single
// instruction. In speculative mode the input may legitimately not be a smi,
// so the smi assertion is skipped.
void MacroAssembler::SmiUntagToDouble(FPRegister dst,
                                      Register src,
                                      UntagMode mode) {
  ASSERT(dst.Is64Bits() && src.Is64Bits());
  if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
    AssertSmi(src);
  }
  Scvtf(dst, src, kSmiShift);
}
1337 | |
1338 | |
// Convert the smi in 'src' directly to a single-precision float in 'dst'.
// As in SmiUntagToDouble, scvtf with kSmiShift fractional bits untags and
// converts in one instruction; the smi assertion is skipped when the untag
// is speculative.
void MacroAssembler::SmiUntagToFloat(FPRegister dst,
                                     Register src,
                                     UntagMode mode) {
  ASSERT(dst.Is32Bits() && src.Is64Bits());
  if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
    AssertSmi(src);
  }
  Scvtf(dst, src, kSmiShift);
}
1348 | |
1349 | |
// Branch to 'smi_label' if 'value' is a smi and to 'not_smi_label' if it is
// not. Either label (but not both) may be NULL, in which case that outcome
// simply falls through.
void MacroAssembler::JumpIfSmi(Register value,
                               Label* smi_label,
                               Label* not_smi_label) {
  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
  // Check if the tag bit is set. A clear bit 0 means smi (kSmiTag == 0).
  if (smi_label) {
    Tbz(value, 0, smi_label);
    if (not_smi_label) {
      B(not_smi_label);
    }
  } else {
    ASSERT(not_smi_label);
    Tbnz(value, 0, not_smi_label);
  }
}
1365 | |
1366 | |
// Branch to 'not_smi_label' if 'value' is not a smi; fall through otherwise.
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
  JumpIfSmi(value, NULL, not_smi_label);
}
1370 | |
1371 | |
// Branch to 'both_smi_label' if both values are smis, otherwise to
// 'not_smi_label'. Either label may be NULL (see JumpIfSmi).
void MacroAssembler::JumpIfBothSmi(Register value1,
                                   Register value2,
                                   Label* both_smi_label,
                                   Label* not_smi_label) {
  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
  UseScratchRegisterScope temps(this);
  Register tmp = temps.AcquireX();
  // Check if both tag bits are clear: since kSmiTag is 0, the OR of the two
  // values has a clear tag bit only if both values are smis.
  Orr(tmp, value1, value2);
  JumpIfSmi(tmp, both_smi_label, not_smi_label);
}
1383 | |
1384 | |
// Branch to 'either_smi_label' if at least one of the values is a smi,
// otherwise to 'not_smi_label'. Either label may be NULL (see JumpIfSmi).
void MacroAssembler::JumpIfEitherSmi(Register value1,
                                     Register value2,
                                     Label* either_smi_label,
                                     Label* not_smi_label) {
  STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
  UseScratchRegisterScope temps(this);
  Register tmp = temps.AcquireX();
  // Check if either tag bit is clear: the AND of the two values has a clear
  // tag bit if at least one of them is a smi.
  And(tmp, value1, value2);
  JumpIfSmi(tmp, either_smi_label, not_smi_label);
}
1396 | |
1397 | |
// Branch to 'not_smi_label' unless both values are smis.
void MacroAssembler::JumpIfEitherNotSmi(Register value1,
                                        Register value2,
                                        Label* not_smi_label) {
  JumpIfBothSmi(value1, value2, NULL, not_smi_label);
}
1403 | |
1404 | |
// Branch to 'not_smi_label' if neither value is a smi.
void MacroAssembler::JumpIfBothNotSmi(Register value1,
                                      Register value2,
                                      Label* not_smi_label) {
  JumpIfEitherSmi(value1, value2, NULL, not_smi_label);
}
1410 | |
1411 | |
// Branch to 'fail' unless 'object' has a name-like instance type (that is,
// an instance type <= LAST_NAME_TYPE). Uses 'type' as scratch.
void MacroAssembler::IsObjectNameType(Register object,
                                      Register type,
                                      Label* fail) {
  CompareObjectType(object, type, type, LAST_NAME_TYPE);
  B(hi, fail);
}
1418 | |
1419 | |
// Branch to 'fail' unless 'heap_object' is a JS object. Loads the object's
// map into 'map' and delegates the type-range check; 'scratch' is a
// temporary.
void MacroAssembler::IsObjectJSObjectType(Register heap_object,
                                          Register map,
                                          Register scratch,
                                          Label* fail) {
  Ldr(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
  IsInstanceJSObjectType(map, scratch, fail);
}
1427 | |
1428 | |
// Check whether 'map' has an instance type in the non-callable spec-object
// range [FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, LAST_NONCALLABLE_SPEC_OBJECT_TYPE].
// Branches to 'fail' when the type is out of range if a label is given;
// otherwise only the condition flags are updated ('gt' means out of range).
// 'scratch' receives the instance type.
void MacroAssembler::IsInstanceJSObjectType(Register map,
                                            Register scratch,
                                            Label* fail) {
  Ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  // If cmp result is lt, the following ccmp will clear all flags.
  // Z == 0, N == V implies gt condition.
  Cmp(scratch, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
  Ccmp(scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE, NoFlag, ge);

  // If we didn't get a valid label object just fall through and leave the
  // flags updated.
  if (fail != NULL) {
    B(gt, fail);
  }
}
1444 | |
1445 | |
// Branch on whether 'object' is a string: to 'string' if it is, and to
// 'not_string' if it is not. One of the two labels may be NULL, making that
// outcome fall through. 'type' is clobbered (map, then instance type).
void MacroAssembler::IsObjectJSStringType(Register object,
                                          Register type,
                                          Label* not_string,
                                          Label* string) {
  Ldr(type, FieldMemOperand(object, HeapObject::kMapOffset));
  Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));

  // String types have a zero tag, so a clear kIsNotStringMask means string.
  STATIC_ASSERT(kStringTag == 0);
  ASSERT((string != NULL) || (not_string != NULL));
  if (string == NULL) {
    TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
  } else if (not_string == NULL) {
    TestAndBranchIfAllClear(type.W(), kIsNotStringMask, string);
  } else {
    TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
    B(string);
  }
}
1464 | |
1465 | |
// Materialize the given handle in a scratch register and push it.
void MacroAssembler::Push(Handle<Object> handle) {
  UseScratchRegisterScope temps(this);
  Register tmp = temps.AcquireX();
  Mov(tmp, Operand(handle));
  Push(tmp);
}
1472 | |
1473 | |
// Claim 'count * unit_size' bytes on the stack by lowering the current
// stack pointer. When csp is not the active stack pointer it is bumped
// first so that it still covers the claimed area.
void MacroAssembler::Claim(uint64_t count, uint64_t unit_size) {
  uint64_t size = count * unit_size;

  // Claiming zero bytes is a no-op.
  if (size == 0) {
    return;
  }

  if (csp.Is(StackPointer())) {
    // csp must stay 16-byte aligned (architectural requirement on csp
    // memory accesses).
    ASSERT(size % 16 == 0);
  } else {
    BumpSystemStackPointer(size);
  }

  Sub(StackPointer(), StackPointer(), size);
}
1489 | |
1490 | |
// Claim 'count * unit_size' bytes on the stack, where 'count' is held in a
// register. 'unit_size' must be a power of two so the multiplication can be
// emitted as a shift.
void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
  ASSERT(IsPowerOf2(unit_size));

  // NOTE(review): a unit_size of 0 fails the IsPowerOf2 assert in debug
  // builds; this check makes release builds treat it as a no-op.
  if (unit_size == 0) {
    return;
  }

  const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
  const Operand size(count, LSL, shift);

  if (size.IsZero()) {
    return;
  }

  if (!csp.Is(StackPointer())) {
    BumpSystemStackPointer(size);
  }

  Sub(StackPointer(), StackPointer(), size);
}
1511 | |
1512 | |
// Claim 'count_smi * unit_size' bytes on the stack, where 'count_smi' is a
// smi-tagged count. The shift combines the unit-size multiplication with
// the smi untag; a negative net shift becomes a right shift.
void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
  ASSERT(IsPowerOf2(unit_size));
  const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
  const Operand size(count_smi,
                     (shift >= 0) ? (LSL) : (LSR),
                     (shift >= 0) ? (shift) : (-shift));

  if (size.IsZero()) {
    return;
  }

  // Keep csp covering the claimed area when it is not the stack pointer.
  if (!csp.Is(StackPointer())) {
    BumpSystemStackPointer(size);
  }

  Sub(StackPointer(), StackPointer(), size);
}
1530 | |
1531 | |
// Release 'count * unit_size' bytes from the stack by raising the current
// stack pointer.
void MacroAssembler::Drop(uint64_t count, uint64_t unit_size) {
  uint64_t size = count * unit_size;

  // Dropping zero bytes is a no-op.
  if (size == 0) {
    return;
  }

  Add(StackPointer(), StackPointer(), size);

  if (csp.Is(StackPointer())) {
    // csp must stay 16-byte aligned (architectural requirement on csp
    // memory accesses).
    ASSERT(size % 16 == 0);
  } else if (emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    Mov(csp, StackPointer());
  }
}
1550 | |
1551 | |
// Release 'count * unit_size' bytes from the stack, where 'count' is held
// in a register. 'unit_size' must be a power of two so the multiplication
// can be emitted as a shift.
void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
  ASSERT(IsPowerOf2(unit_size));

  // NOTE(review): a unit_size of 0 fails the IsPowerOf2 assert in debug
  // builds; this check makes release builds treat it as a no-op.
  if (unit_size == 0) {
    return;
  }

  const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
  const Operand size(count, LSL, shift);

  if (size.IsZero()) {
    return;
  }

  Add(StackPointer(), StackPointer(), size);

  if (!csp.Is(StackPointer()) && emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    Mov(csp, StackPointer());
  }
}
1575 | |
1576 | |
// Release 'count_smi * unit_size' bytes from the stack, where 'count_smi'
// is a smi-tagged count. The shift combines the unit-size multiplication
// with the smi untag; a negative net shift becomes a right shift.
void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
  ASSERT(IsPowerOf2(unit_size));
  const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
  const Operand size(count_smi,
                     (shift >= 0) ? (LSL) : (LSR),
                     (shift >= 0) ? (shift) : (-shift));

  if (size.IsZero()) {
    return;
  }

  Add(StackPointer(), StackPointer(), size);

  if (!csp.Is(StackPointer()) && emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    Mov(csp, StackPointer());
  }
}
1597 | |
1598 | |
// Compare 'lhs' with 'rhs' and branch to 'label' if 'cond' holds.
// Comparisons against zero for eq/ne are emitted as compact cbz/cbnz
// instructions, which do not set the condition flags.
void MacroAssembler::CompareAndBranch(const Register& lhs,
                                      const Operand& rhs,
                                      Condition cond,
                                      Label* label) {
  if (rhs.IsImmediate() && (rhs.immediate() == 0) &&
      ((cond == eq) || (cond == ne))) {
    if (cond == eq) {
      Cbz(lhs, label);
    } else {
      Cbnz(lhs, label);
    }
  } else {
    Cmp(lhs, rhs);
    B(cond, label);
  }
}
1615 | |
1616 | |
// Branch to 'label' if any bit selected by 'bit_pattern' is set in 'reg'.
// A single-bit pattern is emitted as tbnz (flags untouched); otherwise
// tst + b.ne.
void MacroAssembler::TestAndBranchIfAnySet(const Register& reg,
                                           const uint64_t bit_pattern,
                                           Label* label) {
  int bits = reg.SizeInBits();
  // An empty pattern would make the branch unconditional or dead.
  ASSERT(CountSetBits(bit_pattern, bits) > 0);
  if (CountSetBits(bit_pattern, bits) == 1) {
    Tbnz(reg, MaskToBit(bit_pattern), label);
  } else {
    Tst(reg, bit_pattern);
    B(ne, label);
  }
}
1629 | |
1630 | |
// Branch to 'label' if every bit selected by 'bit_pattern' is clear in
// 'reg'. A single-bit pattern is emitted as tbz (flags untouched);
// otherwise tst + b.eq.
void MacroAssembler::TestAndBranchIfAllClear(const Register& reg,
                                             const uint64_t bit_pattern,
                                             Label* label) {
  int bits = reg.SizeInBits();
  // An empty pattern would make the branch unconditional or dead.
  ASSERT(CountSetBits(bit_pattern, bits) > 0);
  if (CountSetBits(bit_pattern, bits) == 1) {
    Tbz(reg, MaskToBit(bit_pattern), label);
  } else {
    Tst(reg, bit_pattern);
    B(eq, label);
  }
}
1643 | |
1644 | |
// Embed 16 bits of data in the instruction stream as a 'movz xzr, #data'
// instruction. Writes to xzr are discarded, so this has no architectural
// effect; the encoding is intended to be recognized by tools that read the
// generated code.
void MacroAssembler::InlineData(uint64_t data) {
  ASSERT(is_uint16(data));
  InstructionAccurateScope scope(this, 1);
  movz(xzr, data);
}
1650 | |
1651 | |
// Emit a 'movn xzr, #InstrumentStateEnable' marker (architecturally a
// no-op), presumably recognized by the instrumentation listener to turn
// instrumentation on -- TODO confirm against the instrument implementation.
void MacroAssembler::EnableInstrumentation() {
  InstructionAccurateScope scope(this, 1);
  movn(xzr, InstrumentStateEnable);
}
1656 | |
1657 | |
// Emit a 'movn xzr, #InstrumentStateDisable' marker (architecturally a
// no-op), presumably recognized by the instrumentation listener to turn
// instrumentation off -- TODO confirm against the instrument implementation.
void MacroAssembler::DisableInstrumentation() {
  InstructionAccurateScope scope(this, 1);
  movn(xzr, InstrumentStateDisable);
}
1662 | |
1663 | |
// Emit an instrumentation marker carrying a two-character name, packed
// little-endian into the 16-bit immediate of a movn-to-xzr instruction
// (architecturally a no-op).
void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
  ASSERT(strlen(marker_name) == 2);

  // We allow only printable characters in the marker names. Unprintable
  // characters are reserved for controlling features of the instrumentation.
  ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));

  InstructionAccurateScope scope(this, 1);
  movn(xzr, (marker_name[1] << 8) | marker_name[0]);
}
1674 | |
1675 } } // namespace v8::internal | |
1676 | |
1677 #endif // V8_A64_MACRO_ASSEMBLER_A64_INL_H_ | |
OLD | NEW |