Chromium Code Reviews

Side by Side Diff: src/a64/macro-assembler-a64.cc

Issue 148293020: Merge experimental/a64 to bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove ARM from OWNERS Created 6 years, 10 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #include "v8.h"
29
30 #if V8_TARGET_ARCH_A64
31
32 #include "bootstrapper.h"
33 #include "codegen.h"
34 #include "cpu-profiler.h"
35 #include "debug.h"
36 #include "isolate-inl.h"
37 #include "runtime.h"
38
39 namespace v8 {
40 namespace internal {
41
42 // Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
43 #define __
44
45
46 MacroAssembler::MacroAssembler(Isolate* arg_isolate,
47 byte * buffer,
48 unsigned buffer_size)
49 : Assembler(arg_isolate, buffer, buffer_size),
50 generating_stub_(false),
51 #if DEBUG
52 allow_macro_instructions_(true),
53 #endif
54 has_frame_(false),
55 use_real_aborts_(true),
56 sp_(jssp), tmp0_(ip0), tmp1_(ip1), fptmp0_(fp_scratch) {
57 if (isolate() != NULL) {
58 code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
59 isolate());
60 }
61 }
62
63
64 void MacroAssembler::LogicalMacro(const Register& rd,
65 const Register& rn,
66 const Operand& operand,
67 LogicalOp op) {
68 if (operand.NeedsRelocation()) {
69 LoadRelocated(Tmp0(), operand);
70 Logical(rd, rn, Tmp0(), op);
71
72 } else if (operand.IsImmediate()) {
73 int64_t immediate = operand.immediate();
74 unsigned reg_size = rd.SizeInBits();
75 ASSERT(rd.Is64Bits() || is_uint32(immediate));
76
77 // If the operation is NOT, invert the operation and immediate.
78 if ((op & NOT) == NOT) {
79 op = static_cast<LogicalOp>(op & ~NOT);
80 immediate = ~immediate;
81 if (rd.Is32Bits()) {
82 immediate &= kWRegMask;
83 }
84 }
85
86 // Special cases for all set or all clear immediates.
87 if (immediate == 0) {
88 switch (op) {
89 case AND:
90 Mov(rd, 0);
91 return;
92 case ORR: // Fall through.
93 case EOR:
94 Mov(rd, rn);
95 return;
96 case ANDS: // Fall through.
97 case BICS:
98 break;
99 default:
100 UNREACHABLE();
101 }
102 } else if ((rd.Is64Bits() && (immediate == -1L)) ||
103 (rd.Is32Bits() && (immediate == 0xffffffffL))) {
104 switch (op) {
105 case AND:
106 Mov(rd, rn);
107 return;
108 case ORR:
109 Mov(rd, immediate);
110 return;
111 case EOR:
112 Mvn(rd, rn);
113 return;
114 case ANDS: // Fall through.
115 case BICS:
116 break;
117 default:
118 UNREACHABLE();
119 }
120 }
121
122 unsigned n, imm_s, imm_r;
123 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
124 // Immediate can be encoded in the instruction.
125 LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
126 } else {
127 // Immediate can't be encoded: synthesize using move immediate.
128 Register temp = AppropriateTempFor(rn);
129 Mov(temp, immediate);
130 if (rd.Is(csp)) {
131 // If rd is the stack pointer we cannot use it as the destination
132 // register so we use the temp register as an intermediate again.
133 Logical(temp, rn, temp, op);
134 Mov(csp, temp);
135 } else {
136 Logical(rd, rn, temp, op);
137 }
138 }
139
140 } else if (operand.IsExtendedRegister()) {
141 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
142 // Add/sub extended supports shift <= 4. We want to support exactly the
143 // same modes here.
144 ASSERT(operand.shift_amount() <= 4);
145 ASSERT(operand.reg().Is64Bits() ||
146 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
147 Register temp = AppropriateTempFor(rn, operand.reg());
148 EmitExtendShift(temp, operand.reg(), operand.extend(),
149 operand.shift_amount());
150 Logical(rd, rn, temp, op);
151
152 } else {
153 // The operand can be encoded in the instruction.
154 ASSERT(operand.IsShiftedRegister());
155 Logical(rd, rn, operand, op);
156 }
157 }
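To make the NOT-inversion and the all-set/all-clear shortcuts above concrete, here is a traced example (illustrative only, assuming BIC is encoded as AND | NOT, which is what the inversion branch implies; nothing extra is emitted):

// Bic(x0, x1, 0) enters LogicalMacro with op == BIC and immediate 0. The NOT
// branch rewrites this as AND with immediate ~0 (all bits set for a 64-bit rd),
// which then hits the "all set" shortcut and reduces to a plain Mov(x0, x1).
// Conversely, Orr(x0, x1, 0) hits the "all clear" shortcut and is also a Mov.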
158
159
160 void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
161 ASSERT(allow_macro_instructions_);
162 ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
163 ASSERT(!rd.IsZero());
164
165 // TODO(all) extend to support more immediates.
166 //
167 // Immediates on AArch64 can be produced using an initial value, and zero to
168 // three move-keep operations.
169 //
170 // Initial values can be generated with:
171 // 1. 64-bit move zero (movz).
172 // 2. 32-bit move inverted (movn).
173 // 3. 64-bit move inverted.
174 // 4. 32-bit orr immediate.
175 // 5. 64-bit orr immediate.
176 // Move-keep may then be used to modify each of the 16-bit half-words.
177 //
178 // The code below supports all five initial value generators, and
179 // applying move-keep operations to move-zero and move-inverted initial
180 // values.
181
182 unsigned reg_size = rd.SizeInBits();
183 unsigned n, imm_s, imm_r;
184 if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
185 // Immediate can be represented in a move zero instruction. Movz can't
186 // write to the stack pointer.
187 movz(rd, imm);
188 } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
189 // Immediate can be represented in a move inverted instruction. Movn can't
190 // write to the stack pointer.
191 movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
192 } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
193 // Immediate can be represented in a logical orr instruction.
194 LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
195 } else {
196 // Generic immediate case. Imm will be represented by
197 // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
198 // A move-zero or move-inverted is generated for the first non-zero or
199 // non-0xffff immX, and a move-keep for subsequent non-zero immX.
200
201 uint64_t ignored_halfword = 0;
202 bool invert_move = false;
203 // If the number of 0xffff halfwords is greater than the number of 0x0000
204 // halfwords, it's more efficient to use move-inverted.
205 if (CountClearHalfWords(~imm, reg_size) >
206 CountClearHalfWords(imm, reg_size)) {
207 ignored_halfword = 0xffffL;
208 invert_move = true;
209 }
210
211 // Mov instructions can't move value into the stack pointer, so set up a
212 // temporary register, if needed.
213 Register temp = rd.IsSP() ? AppropriateTempFor(rd) : rd;
214
215 // Iterate through the halfwords. Use movn/movz for the first non-ignored
216 // halfword, and movk for subsequent halfwords.
217 ASSERT((reg_size % 16) == 0);
218 bool first_mov_done = false;
219 for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
220 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
221 if (imm16 != ignored_halfword) {
222 if (!first_mov_done) {
223 if (invert_move) {
224 movn(temp, (~imm16) & 0xffffL, 16 * i);
225 } else {
226 movz(temp, imm16, 16 * i);
227 }
228 first_mov_done = true;
229 } else {
230 // Construct a wider constant.
231 movk(temp, imm16, 16 * i);
232 }
233 }
234 }
235 ASSERT(first_mov_done);
236
237 // Move the temporary if the original destination register was the stack
238 // pointer.
239 if (rd.IsSP()) {
240 mov(rd, temp);
241 }
242 }
243 }
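The comment block above describes the strategy; the following standalone sketch (plain C++, not the V8 macro-assembler API, and SynthesizeImm64 is a hypothetical name) shows the same halfword scan in isolation:

#include <cstdint>
#include <cstdio>

// Hypothetical standalone helper: decompose a 64-bit immediate into at most
// four 16-bit moves, mirroring the movz/movn + movk strategy above. Halfwords
// equal to the "ignored" pattern (0x0000 for movz, 0xffff for movn) need no
// instruction.
void SynthesizeImm64(uint64_t imm) {
  int zero_hw = 0, ones_hw = 0;
  for (int i = 0; i < 4; i++) {
    uint16_t hw = static_cast<uint16_t>(imm >> (16 * i));
    zero_hw += (hw == 0x0000);
    ones_hw += (hw == 0xffff);
  }
  // Prefer a movn base when more halfwords are all-ones than all-zero.
  bool invert = ones_hw > zero_hw;
  uint16_t ignored = invert ? 0xffff : 0x0000;

  bool first_done = false;
  for (int i = 0; i < 4; i++) {
    uint16_t hw = static_cast<uint16_t>(imm >> (16 * i));
    if (hw == ignored) continue;
    if (!first_done) {
      if (invert) {
        // movn writes NOT(imm16 << shift), so pass the inverted halfword.
        printf("movn x0, #0x%x, lsl #%d\n",
               static_cast<unsigned>(static_cast<uint16_t>(~hw)), 16 * i);
      } else {
        printf("movz x0, #0x%x, lsl #%d\n", static_cast<unsigned>(hw), 16 * i);
      }
      first_done = true;
    } else {
      // Keep the halfwords already written, patch in this one.
      printf("movk x0, #0x%x, lsl #%d\n", static_cast<unsigned>(hw), 16 * i);
    }
  }
  // imm == 0 or ~0 never reaches this path in the macro assembler (the movz /
  // movn fast paths above handle it), but cover it for completeness.
  if (!first_done) printf("%s x0, #0x0\n", invert ? "movn" : "movz");
}

For example, SynthesizeImm64(0x0000cafe00001234) prints a movz for 0x1234 and a single movk for 0xcafe at lsl #32, skipping the two zero halfwords, which matches what the generic path above emits for that constant.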
244
245
246 void MacroAssembler::Mov(const Register& rd,
247 const Operand& operand,
248 DiscardMoveMode discard_mode) {
249 ASSERT(allow_macro_instructions_);
250 ASSERT(!rd.IsZero());
251 // Provide a swap register for instructions that need to write into the
252 // system stack pointer (and can't do this inherently).
253 Register dst = (rd.Is(csp)) ? (Tmp1()) : (rd);
254
255 if (operand.NeedsRelocation()) {
256 LoadRelocated(dst, operand);
257
258 } else if (operand.IsImmediate()) {
259 // Call the macro assembler for generic immediates.
260 Mov(dst, operand.immediate());
261
262 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
263 // Emit a shift instruction if moving a shifted register. This operation
264 // could also be achieved using an orr instruction (like orn used by Mvn),
265 // but using a shift instruction makes the disassembly clearer.
266 EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
267
268 } else if (operand.IsExtendedRegister()) {
269 // Emit an extend instruction if moving an extended register. This handles
270 // extend with post-shift operations, too.
271 EmitExtendShift(dst, operand.reg(), operand.extend(),
272 operand.shift_amount());
273
274 } else {
275 // Otherwise, emit a register move only if the registers are distinct, or
276 // if they are not X registers.
277 //
278 // Note that mov(w0, w0) is not a no-op because it clears the top word of
279 // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
280 // registers is not required to clear the top word of the X register. In
281 // this case, the instruction is discarded.
282 //
283 // If csp is an operand, add #0 is emitted, otherwise, orr #0.
284 if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
285 (discard_mode == kDontDiscardForSameWReg))) {
286 Assembler::mov(rd, operand.reg());
287 }
288 // This case can handle writes into the system stack pointer directly.
289 dst = rd;
290 }
291
292 // Copy the result to the system stack pointer.
293 if (!dst.Is(rd)) {
294 ASSERT(rd.Is(csp));
295 ASSERT(dst.Is(Tmp1()));
296 Assembler::mov(rd, dst);
297 }
298 }
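Two concrete cases of the W-register note above (assuming the default discard mode is kDontDiscardForSameWReg, which the comment implies):

// Mov(w0, w0) still emits a 32-bit mov, because that instruction clears
// x0[63:32] and a caller may rely on it; passing kDiscardForSameWReg instead
// declares the zeroing unnecessary and the move is dropped. Mov(x0, x0) is
// always dropped, since an X-to-same-X move has no architectural effect.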
299
300
301 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
302 ASSERT(allow_macro_instructions_);
303
304 if (operand.NeedsRelocation()) {
305 LoadRelocated(Tmp0(), operand);
306 Mvn(rd, Tmp0());
307
308 } else if (operand.IsImmediate()) {
309 // Call the macro assembler for generic immediates.
310 Mov(rd, ~operand.immediate());
311
312 } else if (operand.IsExtendedRegister()) {
313 // Emit two instructions for the extend case. This differs from Mov, as
314 // the extend and invert can't be achieved in one instruction.
315 Register temp = AppropriateTempFor(rd, operand.reg());
316 EmitExtendShift(temp, operand.reg(), operand.extend(),
317 operand.shift_amount());
318 mvn(rd, temp);
319
320 } else {
321 // Otherwise, emit a register move only if the registers are distinct.
322 // If the jssp is an operand, add #0 is emitted, otherwise, orr #0.
323 mvn(rd, operand);
324 }
325 }
326
327
328 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
329 ASSERT((reg_size % 8) == 0);
330 int count = 0;
331 for (unsigned i = 0; i < (reg_size / 16); i++) {
332 if ((imm & 0xffff) == 0) {
333 count++;
334 }
335 imm >>= 16;
336 }
337 return count;
338 }
339
340
341 // The movz instruction can generate immediates containing an arbitrary 16-bit
342 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
343 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
344 ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
345 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
346 }
347
348
349 // The movn instruction can generate immediates containing an arbitrary 16-bit
350 // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
351 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
352 return IsImmMovz(~imm, reg_size);
353 }
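Restated as a standalone check (an equivalent formulation with a hypothetical name, not the V8 helper itself): a value is movz-encodable exactly when at most one 16-bit halfword is non-zero, and movn-encodable when at most one halfword differs from 0xffff.

#include <cstdint>

// Equivalent to CountClearHalfWords(imm, reg_size) >= (reg_size / 16) - 1.
bool IsMovzEncodable(uint64_t imm, unsigned reg_size_bits) {
  unsigned nonzero_halfwords = 0;
  for (unsigned i = 0; i < reg_size_bits / 16; i++) {
    nonzero_halfwords += ((imm >> (16 * i)) & 0xffff) != 0;
  }
  // e.g. 0x0000123400000000 -> true; 0x0000000100000001 -> false.
  return nonzero_halfwords <= 1;
}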
354
355
356 void MacroAssembler::ConditionalCompareMacro(const Register& rn,
357 const Operand& operand,
358 StatusFlags nzcv,
359 Condition cond,
360 ConditionalCompareOp op) {
361 ASSERT((cond != al) && (cond != nv));
362 if (operand.NeedsRelocation()) {
363 LoadRelocated(Tmp0(), operand);
364 ConditionalCompareMacro(rn, Tmp0(), nzcv, cond, op);
365
366 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
367 (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
368 // The immediate can be encoded in the instruction, or the operand is an
369 // unshifted register: call the assembler.
370 ConditionalCompare(rn, operand, nzcv, cond, op);
371
372 } else {
373 // The operand isn't directly supported by the instruction: perform the
374 // operation on a temporary register.
375 Register temp = AppropriateTempFor(rn);
376 Mov(temp, operand);
377 ConditionalCompare(rn, temp, nzcv, cond, op);
378 }
379 }
380
381
382 void MacroAssembler::Csel(const Register& rd,
383 const Register& rn,
384 const Operand& operand,
385 Condition cond) {
386 ASSERT(allow_macro_instructions_);
387 ASSERT(!rd.IsZero());
388 ASSERT((cond != al) && (cond != nv));
389 if (operand.IsImmediate()) {
390 // Immediate argument. Handle special cases of 0, 1 and -1 using zero
391 // register.
392 int64_t imm = operand.immediate();
393 Register zr = AppropriateZeroRegFor(rn);
394 if (imm == 0) {
395 csel(rd, rn, zr, cond);
396 } else if (imm == 1) {
397 csinc(rd, rn, zr, cond);
398 } else if (imm == -1) {
399 csinv(rd, rn, zr, cond);
400 } else {
401 Register temp = AppropriateTempFor(rn);
402 Mov(temp, operand.immediate());
403 csel(rd, rn, temp, cond);
404 }
405 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
406 // Unshifted register argument.
407 csel(rd, rn, operand.reg(), cond);
408 } else {
409 // All other arguments.
410 Register temp = AppropriateTempFor(rn);
411 Mov(temp, operand);
412 csel(rd, rn, temp, cond);
413 }
414 }
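The zero-register shortcuts above, traced case by case (a summary, not additional output):

// Csel(x0, x1, 0, eq)   ->  csel  x0, x1, xzr, eq    (xzr supplies the 0)
// Csel(x0, x1, 1, eq)   ->  csinc x0, x1, xzr, eq    (xzr + 1 yields 1)
// Csel(x0, x1, -1, eq)  ->  csinv x0, x1, xzr, eq    (~xzr yields -1)
// Any other immediate is first materialized into a temporary register.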
415
416
417 void MacroAssembler::AddSubMacro(const Register& rd,
418 const Register& rn,
419 const Operand& operand,
420 FlagsUpdate S,
421 AddSubOp op) {
422 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
423 !operand.NeedsRelocation() && (S == LeaveFlags)) {
424 // The instruction would be a nop. Avoid generating useless code.
425 return;
426 }
427
428 if (operand.NeedsRelocation()) {
429 LoadRelocated(Tmp0(), operand);
430 AddSubMacro(rd, rn, Tmp0(), S, op);
431 } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
432 (rn.IsZero() && !operand.IsShiftedRegister()) ||
433 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
434 Register temp = AppropriateTempFor(rn);
435 Mov(temp, operand);
436 AddSub(rd, rn, temp, S, op);
437 } else {
438 AddSub(rd, rn, operand, S, op);
439 }
440 }
441
442
443 void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
444 const Register& rn,
445 const Operand& operand,
446 FlagsUpdate S,
447 AddSubWithCarryOp op) {
448 ASSERT(rd.SizeInBits() == rn.SizeInBits());
449
450 if (operand.NeedsRelocation()) {
451 LoadRelocated(Tmp0(), operand);
452 AddSubWithCarryMacro(rd, rn, Tmp0(), S, op);
453
454 } else if (operand.IsImmediate() ||
455 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
456 // Add/sub with carry (immediate or ROR shifted register.)
457 Register temp = AppropriateTempFor(rn);
458 Mov(temp, operand);
459 AddSubWithCarry(rd, rn, temp, S, op);
460 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
461 // Add/sub with carry (shifted register).
462 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
463 ASSERT(operand.shift() != ROR);
464 ASSERT(is_uintn(operand.shift_amount(),
465 rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2));
466 Register temp = AppropriateTempFor(rn, operand.reg());
467 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
468 AddSubWithCarry(rd, rn, temp, S, op);
469
470 } else if (operand.IsExtendedRegister()) {
471 // Add/sub with carry (extended register).
472 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
473 // Add/sub extended supports a shift <= 4. We want to support exactly the
474 // same modes.
475 ASSERT(operand.shift_amount() <= 4);
476 ASSERT(operand.reg().Is64Bits() ||
477 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
478 Register temp = AppropriateTempFor(rn, operand.reg());
479 EmitExtendShift(temp, operand.reg(), operand.extend(),
480 operand.shift_amount());
481 AddSubWithCarry(rd, rn, temp, S, op);
482
483 } else {
484 // The addressing mode is directly supported by the instruction.
485 AddSubWithCarry(rd, rn, operand, S, op);
486 }
487 }
488
489
490 void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
491 const MemOperand& addr,
492 LoadStoreOp op) {
493 int64_t offset = addr.offset();
494 LSDataSize size = CalcLSDataSize(op);
495
496 // Check if an immediate offset fits in the immediate field of the
497 // appropriate instruction. If not, emit two instructions to perform
498 // the operation.
499 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
500 !IsImmLSUnscaled(offset)) {
501 // Immediate offset that can't be encoded using unsigned or unscaled
502 // addressing modes.
503 Register temp = AppropriateTempFor(addr.base());
504 Mov(temp, addr.offset());
505 LoadStore(rt, MemOperand(addr.base(), temp), op);
506 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
507 // Post-index beyond unscaled addressing range.
508 LoadStore(rt, MemOperand(addr.base()), op);
509 add(addr.base(), addr.base(), offset);
510 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
511 // Pre-index beyond unscaled addressing range.
512 add(addr.base(), addr.base(), offset);
513 LoadStore(rt, MemOperand(addr.base()), op);
514 } else {
515 // Encodable in one load/store instruction.
516 LoadStore(rt, addr, op);
517 }
518 }
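For a 64-bit load (8-byte scaled offsets), the branches above play out roughly as follows (illustrative; the exact encodable ranges come from the ARMv8 load/store immediate formats):

// Ldr(x0, MemOperand(x1, 32))       -> scaled unsigned-immediate form, 1 insn.
// Ldr(x0, MemOperand(x1, -16))      -> signed 9-bit unscaled form, 1 insn.
// Ldr(x0, MemOperand(x1, 1 << 20))  -> neither form fits: Mov the offset into a
//                                      temp and use a register-offset load.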
519
520
521 void MacroAssembler::Load(const Register& rt,
522 const MemOperand& addr,
523 Representation r) {
524 ASSERT(!r.IsDouble());
525
526 if (r.IsInteger8()) {
527 Ldrsb(rt, addr);
528 } else if (r.IsUInteger8()) {
529 Ldrb(rt, addr);
530 } else if (r.IsInteger16()) {
531 Ldrsh(rt, addr);
532 } else if (r.IsUInteger16()) {
533 Ldrh(rt, addr);
534 } else if (r.IsInteger32()) {
535 Ldr(rt.W(), addr);
536 } else {
537 ASSERT(rt.Is64Bits());
538 Ldr(rt, addr);
539 }
540 }
541
542
543 void MacroAssembler::Store(const Register& rt,
544 const MemOperand& addr,
545 Representation r) {
546 ASSERT(!r.IsDouble());
547
548 if (r.IsInteger8() || r.IsUInteger8()) {
549 Strb(rt, addr);
550 } else if (r.IsInteger16() || r.IsUInteger16()) {
551 Strh(rt, addr);
552 } else if (r.IsInteger32()) {
553 Str(rt.W(), addr);
554 } else {
555 ASSERT(rt.Is64Bits());
556 Str(rt, addr);
557 }
558 }
559
560
561 // Pseudo-instructions.
562
563
564 void MacroAssembler::Abs(const Register& rd, const Register& rm,
565 Label* is_not_representable,
566 Label* is_representable) {
567 ASSERT(allow_macro_instructions_);
568 ASSERT(AreSameSizeAndType(rd, rm));
569
570 Cmp(rm, 1);
571 Cneg(rd, rm, lt);
572
573 // If the comparison sets the v flag, the input was the smallest value
574 // representable by rm, and the mathematical result of abs(rm) is not
575 // representable using two's complement.
576 if ((is_not_representable != NULL) && (is_representable != NULL)) {
577 B(is_not_representable, vs);
578 B(is_representable);
579 } else if (is_not_representable != NULL) {
580 B(is_not_representable, vs);
581 } else if (is_representable != NULL) {
582 B(is_representable, vc);
583 }
584 }
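A worked case for the overflow comment above:

// If a 64-bit rm holds INT64_MIN (0x8000000000000000), Cmp(rm, 1) overflows and
// sets V, Cneg negates and wraps back to INT64_MIN, and the vs branch is taken.
// For any other input rm - 1 cannot overflow, so the vc path is taken and rd
// holds the true absolute value.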
585
586
587 // Abstracted stack operations.
588
589
590 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
591 const CPURegister& src2, const CPURegister& src3) {
592 ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
593 ASSERT(src0.IsValid());
594
595 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
596 int size = src0.SizeInBytes();
597
598 PrepareForPush(count, size);
599 PushHelper(count, size, src0, src1, src2, src3);
600 }
601
602
603 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
604 const CPURegister& dst2, const CPURegister& dst3) {
605 // It is not valid to pop into the same register more than once in one
606 // instruction, not even into the zero register.
607 ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
608 ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
609 ASSERT(dst0.IsValid());
610
611 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
612 int size = dst0.SizeInBytes();
613
614 PrepareForPop(count, size);
615 PopHelper(count, size, dst0, dst1, dst2, dst3);
616
617 if (!csp.Is(StackPointer()) && emit_debug_code()) {
618 // It is safe to leave csp where it is when unwinding the JavaScript stack,
619 // but if we keep it matching StackPointer, the simulator can detect memory
620 // accesses in the now-free part of the stack.
621 Mov(csp, StackPointer());
622 }
623 }
624
625
626 void MacroAssembler::PushCPURegList(CPURegList registers) {
627 int size = registers.RegisterSizeInBytes();
628
629 PrepareForPush(registers.Count(), size);
630 // Push up to four registers at a time because if the current stack pointer is
631 // csp and reg_size is 32, registers must be pushed in blocks of four in order
632 // to maintain the 16-byte alignment for csp.
633 while (!registers.IsEmpty()) {
634 int count_before = registers.Count();
635 const CPURegister& src0 = registers.PopHighestIndex();
636 const CPURegister& src1 = registers.PopHighestIndex();
637 const CPURegister& src2 = registers.PopHighestIndex();
638 const CPURegister& src3 = registers.PopHighestIndex();
639 int count = count_before - registers.Count();
640 PushHelper(count, size, src0, src1, src2, src3);
641 }
642 }
643
644
645 void MacroAssembler::PopCPURegList(CPURegList registers) {
646 int size = registers.RegisterSizeInBytes();
647
648 PrepareForPop(registers.Count(), size);
649 // Pop up to four registers at a time because if the current stack pointer is
650 // csp and reg_size is 32, registers must be popped in blocks of four in
651 // order to maintain the 16-byte alignment for csp.
652 while (!registers.IsEmpty()) {
653 int count_before = registers.Count();
654 const CPURegister& dst0 = registers.PopLowestIndex();
655 const CPURegister& dst1 = registers.PopLowestIndex();
656 const CPURegister& dst2 = registers.PopLowestIndex();
657 const CPURegister& dst3 = registers.PopLowestIndex();
658 int count = count_before - registers.Count();
659 PopHelper(count, size, dst0, dst1, dst2, dst3);
660 }
661
662 if (!csp.Is(StackPointer()) && emit_debug_code()) {
663 // It is safe to leave csp where it is when unwinding the JavaScript stack,
664 // but if we keep it matching StackPointer, the simulator can detect memory
665 // accesses in the now-free part of the stack.
666 Mov(csp, StackPointer());
667 }
668 }
669
670
671 void MacroAssembler::PushMultipleTimes(int count, Register src) {
672 int size = src.SizeInBytes();
673
674 PrepareForPush(count, size);
675
676 if (FLAG_optimize_for_size && count > 8) {
677 Label loop;
678 __ Mov(Tmp0(), count / 2);
679 __ Bind(&loop);
680 PushHelper(2, size, src, src, NoReg, NoReg);
681 __ Subs(Tmp0(), Tmp0(), 1);
682 __ B(ne, &loop);
683
684 count %= 2;
685 }
686
687 // Push up to four registers at a time if possible because if the current
688 // stack pointer is csp and the register size is 32, registers must be pushed
689 // in blocks of four in order to maintain the 16-byte alignment for csp.
690 while (count >= 4) {
691 PushHelper(4, size, src, src, src, src);
692 count -= 4;
693 }
694 if (count >= 2) {
695 PushHelper(2, size, src, src, NoReg, NoReg);
696 count -= 2;
697 }
698 if (count == 1) {
699 PushHelper(1, size, src, NoReg, NoReg, NoReg);
700 count -= 1;
701 }
702 ASSERT(count == 0);
703 }
704
705
706 void MacroAssembler::PushHelper(int count, int size,
707 const CPURegister& src0,
708 const CPURegister& src1,
709 const CPURegister& src2,
710 const CPURegister& src3) {
711 // Ensure that we don't unintentionally modify scratch or debug registers.
712 InstructionAccurateScope scope(this);
713
714 ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
715 ASSERT(size == src0.SizeInBytes());
716
717 // When pushing multiple registers, the store order is chosen such that
718 // Push(a, b) is equivalent to Push(a) followed by Push(b).
719 switch (count) {
720 case 1:
721 ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
722 str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
723 break;
724 case 2:
725 ASSERT(src2.IsNone() && src3.IsNone());
726 stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
727 break;
728 case 3:
729 ASSERT(src3.IsNone());
730 stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
731 str(src0, MemOperand(StackPointer(), 2 * size));
732 break;
733 case 4:
734 // Skip over 4 * size, then fill in the gap. This allows four W registers
735 // to be pushed using csp, whilst maintaining 16-byte alignment for csp
736 // at all times.
737 stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
738 stp(src1, src0, MemOperand(StackPointer(), 2 * size));
739 break;
740 default:
741 UNREACHABLE();
742 }
743 }
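The four-register case above, spelled out as a layout (illustrative, assuming 8-byte X registers; sp_old is the stack pointer before the push and sp_new == sp_old - 4 * size):

//   sp_old - 1 * size : a   \  written by the second stp (src1, src0) at
//   sp_old - 2 * size : b   /  offset 2 * size above sp_new
//   sp_old - 3 * size : c   \  written by the first, pre-indexed stp
//   sp_old - 4 * size : d   /  (src3, src2), which also lowers the stack pointer
// This matches Push(a); Push(b); Push(c); Push(d) byte for byte, the stack
// pointer never sits at a non-16-byte-aligned value, and a later Pop(d, c, b, a)
// restores all four original values.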
744
745
746 void MacroAssembler::PopHelper(int count, int size,
747 const CPURegister& dst0,
748 const CPURegister& dst1,
749 const CPURegister& dst2,
750 const CPURegister& dst3) {
751 // Ensure that we don't unintentionally modify scratch or debug registers.
752 InstructionAccurateScope scope(this);
753
754 ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
755 ASSERT(size == dst0.SizeInBytes());
756
757 // When popping multiple registers, the load order is chosen such that
758 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
759 switch (count) {
760 case 1:
761 ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
762 ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
763 break;
764 case 2:
765 ASSERT(dst2.IsNone() && dst3.IsNone());
766 ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
767 break;
768 case 3:
769 ASSERT(dst3.IsNone());
770 ldr(dst2, MemOperand(StackPointer(), 2 * size));
771 ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
772 break;
773 case 4:
774 // Load the higher addresses first, then load the lower addresses and
775 // skip the whole block in the second instruction. This allows four W
776 // registers to be popped using csp, whilst maintaining 16-byte alignment
777 // for csp at all times.
778 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
779 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
780 break;
781 default:
782 UNREACHABLE();
783 }
784 }
785
786
787 void MacroAssembler::PrepareForPush(int count, int size) {
788 // TODO(jbramley): Use AssertStackConsistency here, if possible. See the
789 // AssertStackConsistency for details of why we can't at the moment.
790 if (csp.Is(StackPointer())) {
791 // If the current stack pointer is csp, then it must be aligned to 16 bytes
792 // on entry and the total size of the specified registers must also be a
793 // multiple of 16 bytes.
794 ASSERT((count * size) % 16 == 0);
795 } else {
796 // Even if the current stack pointer is not the system stack pointer (csp),
797 // the system stack pointer will still be modified in order to comply with
798 // ABI rules about accessing memory below the system stack pointer.
799 BumpSystemStackPointer(count * size);
800 }
801 }
802
803
804 void MacroAssembler::PrepareForPop(int count, int size) {
805 AssertStackConsistency();
806 if (csp.Is(StackPointer())) {
807 // If the current stack pointer is csp, then it must be aligned to 16 bytes
808 // on entry and the total size of the specified registers must also be a
809 // multiple of 16 bytes.
810 ASSERT((count * size) % 16 == 0);
811 }
812 }
813
814
815 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
816 if (offset.IsImmediate()) {
817 ASSERT(offset.immediate() >= 0);
818 } else if (emit_debug_code()) {
819 Cmp(xzr, offset);
820 Check(le, kStackAccessBelowStackPointer);
821 }
822
823 Str(src, MemOperand(StackPointer(), offset));
824 }
825
826
827 void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
828 if (offset.IsImmediate()) {
829 ASSERT(offset.immediate() >= 0);
830 } else if (emit_debug_code()) {
831 Cmp(xzr, offset);
832 Check(le, kStackAccessBelowStackPointer);
833 }
834
835 Ldr(dst, MemOperand(StackPointer(), offset));
836 }
837
838
839 void MacroAssembler::PokePair(const CPURegister& src1,
840 const CPURegister& src2,
841 int offset) {
842 ASSERT(AreSameSizeAndType(src1, src2));
843 ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
844 Stp(src1, src2, MemOperand(StackPointer(), offset));
845 }
846
847
848 void MacroAssembler::PeekPair(const CPURegister& dst1,
849 const CPURegister& dst2,
850 int offset) {
851 ASSERT(AreSameSizeAndType(dst1, dst2));
852 ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
853 Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
854 }
855
856
857 void MacroAssembler::PushCalleeSavedRegisters() {
858 // Ensure that the macro-assembler doesn't use any scratch registers.
859 InstructionAccurateScope scope(this);
860
861 // This method must not be called unless the current stack pointer is the
862 // system stack pointer (csp).
863 ASSERT(csp.Is(StackPointer()));
864
865 MemOperand tos(csp, -2 * kXRegSizeInBytes, PreIndex);
866
867 stp(d14, d15, tos);
868 stp(d12, d13, tos);
869 stp(d10, d11, tos);
870 stp(d8, d9, tos);
871
872 stp(x29, x30, tos);
873 stp(x27, x28, tos); // x28 = jssp
874 stp(x25, x26, tos);
875 stp(x23, x24, tos);
876 stp(x21, x22, tos);
877 stp(x19, x20, tos);
878 }
879
880
881 void MacroAssembler::PopCalleeSavedRegisters() {
882 // Ensure that the macro-assembler doesn't use any scratch registers.
883 InstructionAccurateScope scope(this);
884
885 // This method must not be called unless the current stack pointer is the
886 // system stack pointer (csp).
887 ASSERT(csp.Is(StackPointer()));
888
889 MemOperand tos(csp, 2 * kXRegSizeInBytes, PostIndex);
890
891 ldp(x19, x20, tos);
892 ldp(x21, x22, tos);
893 ldp(x23, x24, tos);
894 ldp(x25, x26, tos);
895 ldp(x27, x28, tos); // x28 = jssp
896 ldp(x29, x30, tos);
897
898 ldp(d8, d9, tos);
899 ldp(d10, d11, tos);
900 ldp(d12, d13, tos);
901 ldp(d14, d15, tos);
902 }
903
904
905 void MacroAssembler::AssertStackConsistency() {
906 if (emit_debug_code() && !csp.Is(StackPointer())) {
907 if (csp.Is(StackPointer())) {
908 // TODO(jbramley): Check for csp alignment if it is the stack pointer.
909 } else {
910 // TODO(jbramley): Currently we cannot use this assertion in Push because
911 // some calling code assumes that the flags are preserved. For an example,
912 // look at Builtins::Generate_ArgumentsAdaptorTrampoline.
913 Cmp(csp, StackPointer());
914 Check(ls, kTheCurrentStackPointerIsBelowCsp);
915 }
916 }
917 }
918
919
920 void MacroAssembler::LoadRoot(Register destination,
921 Heap::RootListIndex index) {
922 // TODO(jbramley): Most root values are constants, and can be synthesized
923 // without a load. Refer to the ARM back end for details.
924 Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
925 }
926
927
928 void MacroAssembler::StoreRoot(Register source,
929 Heap::RootListIndex index) {
930 Str(source, MemOperand(root, index << kPointerSizeLog2));
931 }
932
933
934 void MacroAssembler::LoadTrueFalseRoots(Register true_root,
935 Register false_root) {
936 STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
937 Ldp(true_root, false_root,
938 MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
939 }
940
941
942 void MacroAssembler::LoadHeapObject(Register result,
943 Handle<HeapObject> object) {
944 AllowDeferredHandleDereference using_raw_address;
945 if (isolate()->heap()->InNewSpace(*object)) {
946 Handle<Cell> cell = isolate()->factory()->NewCell(object);
947 Mov(result, Operand(cell));
948 Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
949 } else {
950 Mov(result, Operand(object));
951 }
952 }
953
954
955 void MacroAssembler::LoadInstanceDescriptors(Register map,
956 Register descriptors) {
957 Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
958 }
959
960
961 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
962 Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
963 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
964 }
965
966
967 void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
968 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
969 Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset));
970 And(dst, dst, Map::EnumLengthBits::kMask);
971 }
972
973
974 void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
975 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
976 Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
977 And(dst, dst, Operand(Smi::FromInt(Map::EnumLengthBits::kMask)));
978 }
979
980
981 void MacroAssembler::CheckEnumCache(Register object,
982 Register null_value,
983 Register scratch0,
984 Register scratch1,
985 Register scratch2,
986 Register scratch3,
987 Label* call_runtime) {
988 ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
989 scratch3));
990
991 Register empty_fixed_array_value = scratch0;
992 Register current_object = scratch1;
993
994 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
995 Label next, start;
996
997 Mov(current_object, object);
998
999 // Check if the enum length field is properly initialized, indicating that
1000 // there is an enum cache.
1001 Register map = scratch2;
1002 Register enum_length = scratch3;
1003 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1004
1005 EnumLengthUntagged(enum_length, map);
1006 Cmp(enum_length, kInvalidEnumCacheSentinel);
1007 B(eq, call_runtime);
1008
1009 B(&start);
1010
1011 Bind(&next);
1012 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1013
1014 // For all objects but the receiver, check that the cache is empty.
1015 EnumLengthUntagged(enum_length, map);
1016 Cbnz(enum_length, call_runtime);
1017
1018 Bind(&start);
1019
1020 // Check that there are no elements. Register current_object contains the
1021 // current JS object we've reached through the prototype chain.
1022 Label no_elements;
1023 Ldr(current_object, FieldMemOperand(current_object,
1024 JSObject::kElementsOffset));
1025 Cmp(current_object, empty_fixed_array_value);
1026 B(eq, &no_elements);
1027
1028 // Second chance, the object may be using the empty slow element dictionary.
1029 CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
1030 B(ne, call_runtime);
1031
1032 Bind(&no_elements);
1033 Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
1034 Cmp(current_object, null_value);
1035 B(ne, &next);
1036 }
1037
1038
1039 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
1040 Register scratch1,
1041 Register scratch2,
1042 Label* no_memento_found) {
1043 ExternalReference new_space_start =
1044 ExternalReference::new_space_start(isolate());
1045 ExternalReference new_space_allocation_top =
1046 ExternalReference::new_space_allocation_top_address(isolate());
1047
1048 Add(scratch1, receiver,
1049 JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
1050 Cmp(scratch1, Operand(new_space_start));
1051 B(lt, no_memento_found);
1052
1053 Mov(scratch2, Operand(new_space_allocation_top));
1054 Ldr(scratch2, MemOperand(scratch2));
1055 Cmp(scratch1, scratch2);
1056 B(gt, no_memento_found);
1057
1058 Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize));
1059 Cmp(scratch1,
1060 Operand(isolate()->factory()->allocation_memento_map()));
1061 }
1062
1063
1064 void MacroAssembler::JumpToHandlerEntry(Register exception,
1065 Register object,
1066 Register state,
1067 Register scratch1,
1068 Register scratch2) {
1069 // Handler expects argument in x0.
1070 ASSERT(exception.Is(x0));
1071
1072 // Compute the handler entry address and jump to it. The handler table is
1073 // a fixed array of (smi-tagged) code offsets.
1074 Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
1075 Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
1076 STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
1077 Lsr(scratch2, state, StackHandler::kKindWidth);
1078 Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
1079 Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
1080 Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
1081 Br(scratch1);
1082 }
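Written out, the address computation above is (informal pseudocode summarizing the loads, not extra generated code):

// offset = UntagSmi(handler_table[state >> StackHandler::kKindWidth])
// entry  = code_object + Code::kHeaderSize - kHeapObjectTag + offset
// i.e. the high bits of 'state' index the table, and each table entry is a
// smi-tagged byte offset into the code object's instruction area.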
1083
1084
1085 void MacroAssembler::InNewSpace(Register object,
1086 Condition cond,
1087 Label* branch) {
1088 ASSERT(cond == eq || cond == ne);
1089 // Use Tmp1() to have a different destination register, as Tmp0() will be used
1090 // for relocation.
1091 And(Tmp1(), object, Operand(ExternalReference::new_space_mask(isolate())));
1092 Cmp(Tmp1(), Operand(ExternalReference::new_space_start(isolate())));
1093 B(cond, branch);
1094 }
1095
1096
1097 void MacroAssembler::Throw(Register value,
1098 Register scratch1,
1099 Register scratch2,
1100 Register scratch3,
1101 Register scratch4) {
1102 // Adjust this code if not the case.
1103 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1104 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1105 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1106 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1107 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1108 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1109
1110 // The handler expects the exception in x0.
1111 ASSERT(value.Is(x0));
1112
1113 // Drop the stack pointer to the top of the top handler.
1114 ASSERT(jssp.Is(StackPointer()));
1115 Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
1116 isolate())));
1117 Ldr(jssp, MemOperand(scratch1));
1118 // Restore the next handler.
1119 Pop(scratch2);
1120 Str(scratch2, MemOperand(scratch1));
1121
1122 // Get the code object and state. Restore the context and frame pointer.
1123 Register object = scratch1;
1124 Register state = scratch2;
1125 Pop(object, state, cp, fp);
1126
1127 // If the handler is a JS frame, restore the context to the frame.
1128 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
1129 // or cp.
1130 Label not_js_frame;
1131 Cbz(cp, &not_js_frame);
1132 Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1133 Bind(&not_js_frame);
1134
1135 JumpToHandlerEntry(value, object, state, scratch3, scratch4);
1136 }
1137
1138
1139 void MacroAssembler::ThrowUncatchable(Register value,
1140 Register scratch1,
1141 Register scratch2,
1142 Register scratch3,
1143 Register scratch4) {
1144 // Adjust this code if not the case.
1145 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1146 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1147 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1148 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1149 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1150 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1151
1152 // The handler expects the exception in x0.
1153 ASSERT(value.Is(x0));
1154
1155 // Drop the stack pointer to the top of the top stack handler.
1156 ASSERT(jssp.Is(StackPointer()));
1157 Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
1158 isolate())));
1159 Ldr(jssp, MemOperand(scratch1));
1160
1161 // Unwind the handlers until the ENTRY handler is found.
1162 Label fetch_next, check_kind;
1163 B(&check_kind);
1164 Bind(&fetch_next);
1165 Peek(jssp, StackHandlerConstants::kNextOffset);
1166
1167 Bind(&check_kind);
1168 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1169 Peek(scratch2, StackHandlerConstants::kStateOffset);
1170 TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next);
1171
1172 // Set the top handler address to next handler past the top ENTRY handler.
1173 Pop(scratch2);
1174 Str(scratch2, MemOperand(scratch1));
1175
1176 // Get the code object and state. Clear the context and frame pointer (0 was
1177 // saved in the handler).
1178 Register object = scratch1;
1179 Register state = scratch2;
1180 Pop(object, state, cp, fp);
1181
1182 JumpToHandlerEntry(value, object, state, scratch3, scratch4);
1183 }
1184
1185
1186 void MacroAssembler::Throw(BailoutReason reason) {
1187 Label throw_start;
1188 Bind(&throw_start);
1189 #ifdef DEBUG
1190 const char* msg = GetBailoutReason(reason);
1191 RecordComment("Throw message: ");
1192 RecordComment((msg != NULL) ? msg : "UNKNOWN");
1193 #endif
1194
1195 Mov(x0, Operand(Smi::FromInt(reason)));
1196 Push(x0);
1197
1198 // Disable stub call restrictions to always allow calls to throw.
1199 if (!has_frame_) {
1200 // We don't actually want to generate a pile of code for this, so just
1201 // claim there is a stack frame, without generating one.
1202 FrameScope scope(this, StackFrame::NONE);
1203 CallRuntime(Runtime::kThrowMessage, 1);
1204 } else {
1205 CallRuntime(Runtime::kThrowMessage, 1);
1206 }
1207 // ThrowMessage should not return here.
1208 Unreachable();
1209 }
1210
1211
1212 void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
1213 Label ok;
1214 B(InvertCondition(cc), &ok);
1215 Throw(reason);
1216 Bind(&ok);
1217 }
1218
1219
1220 void MacroAssembler::ThrowIfSmi(const Register& value, BailoutReason reason) {
1221 Label ok;
1222 JumpIfNotSmi(value, &ok);
1223 Throw(reason);
1224 Bind(&ok);
1225 }
1226
1227
1228 void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
1229 ASSERT(smi.Is64Bits());
1230 Abs(smi, smi, slow);
1231 }
1232
1233
1234 void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
1235 if (emit_debug_code()) {
1236 STATIC_ASSERT(kSmiTag == 0);
1237 Tst(object, kSmiTagMask);
1238 Check(eq, reason);
1239 }
1240 }
1241
1242
1243 void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
1244 if (emit_debug_code()) {
1245 STATIC_ASSERT(kSmiTag == 0);
1246 Tst(object, kSmiTagMask);
1247 Check(ne, reason);
1248 }
1249 }
1250
1251
1252 void MacroAssembler::AssertName(Register object) {
1253 if (emit_debug_code()) {
1254 STATIC_ASSERT(kSmiTag == 0);
1255 // TODO(jbramley): Add AbortIfSmi and related functions.
1256 Label not_smi;
1257 JumpIfNotSmi(object, &not_smi);
1258 Abort(kOperandIsASmiAndNotAName);
1259 Bind(&not_smi);
1260
1261 Ldr(Tmp1(), FieldMemOperand(object, HeapObject::kMapOffset));
1262 CompareInstanceType(Tmp1(), Tmp1(), LAST_NAME_TYPE);
1263 Check(ls, kOperandIsNotAName);
1264 }
1265 }
1266
1267
1268 void MacroAssembler::AssertString(Register object) {
1269 if (emit_debug_code()) {
1270 Register temp = Tmp1();
1271 STATIC_ASSERT(kSmiTag == 0);
1272 Tst(object, kSmiTagMask);
1273 Check(ne, kOperandIsASmiAndNotAString);
1274 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1275 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
1276 Check(lo, kOperandIsNotAString);
1277 }
1278 }
1279
1280
1281 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
1282 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
1283 Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id);
1284 }
1285
1286
1287 void MacroAssembler::TailCallStub(CodeStub* stub) {
1288 Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET);
1289 }
1290
1291
1292 void MacroAssembler::CallRuntime(const Runtime::Function* f,
1293 int num_arguments,
1294 SaveFPRegsMode save_doubles) {
1295 // All arguments must be on the stack before this function is called.
1296 // x0 holds the return value after the call.
1297
1298 // Check that the number of arguments matches what the function expects.
1299 // If f->nargs is -1, the function can accept a variable number of arguments.
1300 if (f->nargs >= 0 && f->nargs != num_arguments) {
1301 // Illegal operation: drop the stack arguments and return undefined.
1302 if (num_arguments > 0) {
1303 Drop(num_arguments);
1304 }
1305 LoadRoot(x0, Heap::kUndefinedValueRootIndex);
1306 return;
1307 }
1308
1309 // Place the necessary arguments.
1310 Mov(x0, num_arguments);
1311 Mov(x1, Operand(ExternalReference(f, isolate())));
1312
1313 CEntryStub stub(1, save_doubles);
1314 CallStub(&stub);
1315 }
1316
1317
1318 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
1319 return ref0.address() - ref1.address();
1320 }
1321
1322
1323 void MacroAssembler::CallApiFunctionAndReturn(
1324 Register function_address,
1325 ExternalReference thunk_ref,
1326 int stack_space,
1327 int spill_offset,
1328 MemOperand return_value_operand,
1329 MemOperand* context_restore_operand) {
1330 ASM_LOCATION("CallApiFunctionAndReturn");
1331 ExternalReference next_address =
1332 ExternalReference::handle_scope_next_address(isolate());
1333 const int kNextOffset = 0;
1334 const int kLimitOffset = AddressOffset(
1335 ExternalReference::handle_scope_limit_address(isolate()),
1336 next_address);
1337 const int kLevelOffset = AddressOffset(
1338 ExternalReference::handle_scope_level_address(isolate()),
1339 next_address);
1340
1341 ASSERT(function_address.is(x1) || function_address.is(x2));
1342
1343 Label profiler_disabled;
1344 Label end_profiler_check;
1345 bool* is_profiling_flag = isolate()->cpu_profiler()->is_profiling_address();
1346 STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
1347 Mov(x10, reinterpret_cast<uintptr_t>(is_profiling_flag));
1348 Ldrb(w10, MemOperand(x10));
1349 Cbz(w10, &profiler_disabled);
1350 Mov(x3, Operand(thunk_ref));
1351 B(&end_profiler_check);
1352
1353 Bind(&profiler_disabled);
1354 Mov(x3, function_address);
1355 Bind(&end_profiler_check);
1356
1357 // Save the callee-save registers we are going to use.
1358 // TODO(all): Is this necessary? ARM doesn't do it.
1359 STATIC_ASSERT(kCallApiFunctionSpillSpace == 4);
1360 Poke(x19, (spill_offset + 0) * kXRegSizeInBytes);
1361 Poke(x20, (spill_offset + 1) * kXRegSizeInBytes);
1362 Poke(x21, (spill_offset + 2) * kXRegSizeInBytes);
1363 Poke(x22, (spill_offset + 3) * kXRegSizeInBytes);
1364
1365 // Allocate HandleScope in callee-save registers.
1366 // We will need to restore the HandleScope after the call to the API function;
1367 // by allocating it in callee-save registers, it will be preserved by C code.
1368 Register handle_scope_base = x22;
1369 Register next_address_reg = x19;
1370 Register limit_reg = x20;
1371 Register level_reg = w21;
1372
1373 Mov(handle_scope_base, Operand(next_address));
1374 Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
1375 Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
1376 Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset));
1377 Add(level_reg, level_reg, 1);
1378 Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
1379
1380 if (FLAG_log_timer_events) {
1381 FrameScope frame(this, StackFrame::MANUAL);
1382 PushSafepointRegisters();
1383 Mov(x0, Operand(ExternalReference::isolate_address(isolate())));
1384 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
1385 PopSafepointRegisters();
1386 }
1387
1388 // Native call returns to the DirectCEntry stub which redirects to the
1389 // return address pushed on stack (could have moved after GC).
1390 // DirectCEntry stub itself is generated early and never moves.
1391 DirectCEntryStub stub;
1392 stub.GenerateCall(this, x3);
1393
1394 if (FLAG_log_timer_events) {
1395 FrameScope frame(this, StackFrame::MANUAL);
1396 PushSafepointRegisters();
1397 Mov(x0, Operand(ExternalReference::isolate_address(isolate())));
1398 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
1399 PopSafepointRegisters();
1400 }
1401
1402 Label promote_scheduled_exception;
1403 Label exception_handled;
1404 Label delete_allocated_handles;
1405 Label leave_exit_frame;
1406 Label return_value_loaded;
1407
1408 // Load value from ReturnValue.
1409 Ldr(x0, return_value_operand);
1410 Bind(&return_value_loaded);
1411 // No more valid handles (the result handle was the last one). Restore
1412 // previous handle scope.
1413 Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset));
1414 if (emit_debug_code()) {
1415 Ldr(w1, MemOperand(handle_scope_base, kLevelOffset));
1416 Cmp(w1, level_reg);
1417 Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
1418 }
1419 Sub(level_reg, level_reg, 1);
1420 Str(level_reg, MemOperand(handle_scope_base, kLevelOffset));
1421 Ldr(x1, MemOperand(handle_scope_base, kLimitOffset));
1422 Cmp(limit_reg, x1);
1423 B(ne, &delete_allocated_handles);
1424
1425 Bind(&leave_exit_frame);
1426 // Restore callee-saved registers.
1427 Peek(x19, (spill_offset + 0) * kXRegSizeInBytes);
1428 Peek(x20, (spill_offset + 1) * kXRegSizeInBytes);
1429 Peek(x21, (spill_offset + 2) * kXRegSizeInBytes);
1430 Peek(x22, (spill_offset + 3) * kXRegSizeInBytes);
1431
1432 // Check if the function scheduled an exception.
1433 Mov(x5, Operand(ExternalReference::scheduled_exception_address(isolate())));
1434 Ldr(x5, MemOperand(x5));
1435 JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception);
1436 Bind(&exception_handled);
1437
1438 bool restore_context = context_restore_operand != NULL;
1439 if (restore_context) {
1440 Ldr(cp, *context_restore_operand);
1441 }
1442
1443 LeaveExitFrame(false, x1, !restore_context);
1444 Drop(stack_space);
1445 Ret();
1446
1447 Bind(&promote_scheduled_exception);
1448 {
1449 FrameScope frame(this, StackFrame::INTERNAL);
1450 CallExternalReference(
1451 ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0);
1452 }
1453 B(&exception_handled);
1454
1455 // HandleScope limit has changed. Delete allocated extensions.
1456 Bind(&delete_allocated_handles);
1457 Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset));
1458 // Save the return value in a callee-save register.
1459 Register saved_result = x19;
1460 Mov(saved_result, x0);
1461 Mov(x0, Operand(ExternalReference::isolate_address(isolate())));
1462 CallCFunction(
1463 ExternalReference::delete_handle_scope_extensions(isolate()), 1);
1464 Mov(x0, saved_result);
1465 B(&leave_exit_frame);
1466 }
1467
1468
1469 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
1470 int num_arguments) {
1471 Mov(x0, num_arguments);
1472 Mov(x1, Operand(ext));
1473
1474 CEntryStub stub(1);
1475 CallStub(&stub);
1476 }
1477
1478
1479 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
1480 Mov(x1, Operand(builtin));
1481 CEntryStub stub(1);
1482 Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
1483 }
1484
1485
1486 void MacroAssembler::GetBuiltinFunction(Register target,
1487 Builtins::JavaScript id) {
1488 // Load the builtins object into target register.
1489 Ldr(target, GlobalObjectMemOperand());
1490 Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
1491 // Load the JavaScript builtin function from the builtins object.
1492 Ldr(target, FieldMemOperand(target,
1493 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
1494 }
1495
1496
1497 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
1498 ASSERT(!target.is(x1));
1499 GetBuiltinFunction(x1, id);
1500 // Load the code entry point from the builtins object.
1501 Ldr(target, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
1502 }
1503
1504
1505 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
1506 InvokeFlag flag,
1507 const CallWrapper& call_wrapper) {
1508 ASM_LOCATION("MacroAssembler::InvokeBuiltin");
1509 // You can't call a builtin without a valid frame.
1510 ASSERT(flag == JUMP_FUNCTION || has_frame());
1511
1512 GetBuiltinEntry(x2, id);
1513 if (flag == CALL_FUNCTION) {
1514 call_wrapper.BeforeCall(CallSize(x2));
1515 Call(x2);
1516 call_wrapper.AfterCall();
1517 } else {
1518 ASSERT(flag == JUMP_FUNCTION);
1519 Jump(x2);
1520 }
1521 }
1522
1523
1524 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
1525 int num_arguments,
1526 int result_size) {
1527 // TODO(1236192): Most runtime routines don't need the number of
1528 // arguments passed in because it is constant. At some point we
1529 // should remove this need and make the runtime routine entry code
1530 // smarter.
1531 Mov(x0, num_arguments);
1532 JumpToExternalReference(ext);
1533 }
1534
1535
1536 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
1537 int num_arguments,
1538 int result_size) {
1539 TailCallExternalReference(ExternalReference(fid, isolate()),
1540 num_arguments,
1541 result_size);
1542 }
1543
1544
1545 void MacroAssembler::InitializeNewString(Register string,
1546 Register length,
1547 Heap::RootListIndex map_index,
1548 Register scratch1,
1549 Register scratch2) {
1550 ASSERT(!AreAliased(string, length, scratch1, scratch2));
1551 LoadRoot(scratch2, map_index);
1552 SmiTag(scratch1, length);
1553 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1554
1555 Mov(scratch2, String::kEmptyHashField);
1556 Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1557 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
1558 }
1559
1560
1561 int MacroAssembler::ActivationFrameAlignment() {
1562 #if V8_HOST_ARCH_A64
1563 // Running on the real platform. Use the alignment as mandated by the local
1564 // environment.
1565 // Note: This will break if we ever start generating snapshots on one ARM
1566 // platform for another ARM platform with a different alignment.
1567 return OS::ActivationFrameAlignment();
1568 #else // V8_HOST_ARCH_A64
1569 // If we are using the simulator then we should always align to the expected
1570 // alignment. As the simulator is used to generate snapshots we do not know
1571 // if the target platform will need alignment, so this is controlled from a
1572 // flag.
1573 return FLAG_sim_stack_alignment;
1574 #endif // V8_HOST_ARCH_A64
1575 }
1576
1577
1578 void MacroAssembler::CallCFunction(ExternalReference function,
1579 int num_of_reg_args) {
1580 CallCFunction(function, num_of_reg_args, 0);
1581 }
1582
1583
1584 void MacroAssembler::CallCFunction(ExternalReference function,
1585 int num_of_reg_args,
1586 int num_of_double_args) {
1587 Mov(Tmp0(), Operand(function));
1588 CallCFunction(Tmp0(), num_of_reg_args, num_of_double_args);
1589 }
1590
1591
1592 void MacroAssembler::CallCFunction(Register function,
1593 int num_of_reg_args,
1594 int num_of_double_args) {
1595 ASSERT(has_frame());
1596 // We can pass 8 integer arguments in registers. If we need to pass more than
1597 // that, we'll need to implement support for passing them on the stack.
1598 ASSERT(num_of_reg_args <= 8);
1599
1600 // If we're passing doubles, we're limited to the following prototypes
1601 // (defined by ExternalReference::Type):
1602 // BUILTIN_COMPARE_CALL: int f(double, double)
1603 // BUILTIN_FP_FP_CALL: double f(double, double)
1604 // BUILTIN_FP_CALL: double f(double)
1605 // BUILTIN_FP_INT_CALL: double f(double, int)
1606 if (num_of_double_args > 0) {
1607 ASSERT(num_of_reg_args <= 1);
1608 ASSERT((num_of_double_args + num_of_reg_args) <= 2);
1609 }
1610
1611
1612 // If the stack pointer is not csp, we need to derive an aligned csp from the
1613 // current stack pointer.
1614 const Register old_stack_pointer = StackPointer();
1615 if (!csp.Is(old_stack_pointer)) {
1616 AssertStackConsistency();
1617
1618 int sp_alignment = ActivationFrameAlignment();
1619 // The ABI mandates at least 16-byte alignment.
1620 ASSERT(sp_alignment >= 16);
1621 ASSERT(IsPowerOf2(sp_alignment));
1622
1623 // The current stack pointer is a callee saved register, and is preserved
1624 // across the call.
1625 ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
1626
1627 // Align and synchronize the system stack pointer with jssp.
1628 Bic(csp, old_stack_pointer, sp_alignment - 1);
1629 SetStackPointer(csp);
1630 }
1631
1632 // Call directly. The function called cannot cause a GC, or allow preemption,
1633 // so the return address in the link register stays correct.
1634 Call(function);
1635
1636 if (!csp.Is(old_stack_pointer)) {
1637 if (emit_debug_code()) {
1638 // Because the stack pointer must be aligned on a 16-byte boundary, the
1639 // aligned csp can be up to 12 bytes below the jssp. This is the case
1640 // where we only pushed one W register on top of an aligned jssp.
1641 Register temp = Tmp1();
1642 ASSERT(ActivationFrameAlignment() == 16);
1643 Sub(temp, csp, old_stack_pointer);
1644 // We want temp <= 0 && temp >= -12.
1645 Cmp(temp, 0);
1646 Ccmp(temp, -12, NFlag, le);
1647 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
1648 }
1649 SetStackPointer(old_stack_pointer);
1650 }
1651 }
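
// Illustration only (not part of this patch): a minimal host-side sketch of
// the alignment performed by the Bic above, assuming a power-of-two ABI
// alignment as asserted. The helper name is hypothetical.
#include <cstdint>
static uint64_t AlignStackPointerDown(uint64_t sp, uint64_t alignment) {
  // Clearing the low bits rounds the pointer down to the previous multiple
  // of 'alignment'; this is what Bic(csp, old_stack_pointer, alignment - 1)
  // computes.
  return sp & ~(alignment - 1);
}
// With a 16-byte alignment and a single W register (4 bytes) pushed on an
// aligned jssp, the aligned csp ends up 12 bytes below jssp, which is the
// worst case accepted by the debug check above.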
1652
1653
1654 void MacroAssembler::Jump(Register target) {
1655 Br(target);
1656 }
1657
1658
1659 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
1660 Mov(Tmp0(), Operand(target, rmode));
1661 Br(Tmp0());
1662 }
1663
1664
1665 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
1666 ASSERT(!RelocInfo::IsCodeTarget(rmode));
1667 Jump(reinterpret_cast<intptr_t>(target), rmode);
1668 }
1669
1670
1671 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
1672 ASSERT(RelocInfo::IsCodeTarget(rmode));
1673 AllowDeferredHandleDereference embedding_raw_address;
1674 Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
1675 }
1676
1677
1678 void MacroAssembler::Call(Register target) {
1679 BlockConstPoolScope scope(this);
1680 #ifdef DEBUG
1681 Label start_call;
1682 Bind(&start_call);
1683 #endif
1684
1685 Blr(target);
1686
1687 #ifdef DEBUG
1688 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1689 #endif
1690 }
1691
1692
1693 void MacroAssembler::Call(Label* target) {
1694 BlockConstPoolScope scope(this);
1695 #ifdef DEBUG
1696 Label start_call;
1697 Bind(&start_call);
1698 #endif
1699
1700 Bl(target);
1701
1702 #ifdef DEBUG
1703 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1704 #endif
1705 }
1706
1707
1708 // MacroAssembler::CallSize is sensitive to changes in this function, as it
1709 // needs to know how many instructions are used to branch to the target.
1710 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
1711 BlockConstPoolScope scope(this);
1712 #ifdef DEBUG
1713 Label start_call;
1714 Bind(&start_call);
1715 #endif
1716 // Statement positions are expected to be recorded when the target
1717 // address is loaded.
1718 positions_recorder()->WriteRecordedPositions();
1719
1720 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
1721 ASSERT(rmode != RelocInfo::NONE32);
1722
1723 if (rmode == RelocInfo::NONE64) {
1724 uint64_t imm = reinterpret_cast<uint64_t>(target);
1725 movz(Tmp0(), (imm >> 0) & 0xffff, 0);
1726 movk(Tmp0(), (imm >> 16) & 0xffff, 16);
1727 movk(Tmp0(), (imm >> 32) & 0xffff, 32);
1728 movk(Tmp0(), (imm >> 48) & 0xffff, 48);
1729 } else {
1730 LoadRelocated(Tmp0(), Operand(reinterpret_cast<intptr_t>(target), rmode));
1731 }
1732 Blr(Tmp0());
1733 #ifdef DEBUG
1734 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
1735 #endif
1736 }
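
// Illustration only (not part of this patch): the movz/movk sequence above
// materializes a 64-bit address 16 bits at a time. A host-side sketch of the
// decomposition (hypothetical helper, assumes <cstdint>):
#include <cstdint>
static void SplitImmediateIntoHalfwords(uint64_t imm, uint16_t halfwords[4]) {
  for (int i = 0; i < 4; i++) {
    // Halfword i goes into bit position 16 * i: movz writes the first one and
    // zeroes the rest of the register, and each movk preserves the other bits.
    halfwords[i] = static_cast<uint16_t>(imm >> (16 * i));
  }
}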
1737
1738
1739 void MacroAssembler::Call(Handle<Code> code,
1740 RelocInfo::Mode rmode,
1741 TypeFeedbackId ast_id) {
1742 #ifdef DEBUG
1743 Label start_call;
1744 Bind(&start_call);
1745 #endif
1746
1747 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
1748 SetRecordedAstId(ast_id);
1749 rmode = RelocInfo::CODE_TARGET_WITH_ID;
1750 }
1751
1752 AllowDeferredHandleDereference embedding_raw_address;
1753 Call(reinterpret_cast<Address>(code.location()), rmode);
1754
1755 #ifdef DEBUG
1756 // Check the size of the code generated.
1757 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
1758 #endif
1759 }
1760
1761
1762 int MacroAssembler::CallSize(Register target) {
1763 USE(target);
1764 return kInstructionSize;
1765 }
1766
1767
1768 int MacroAssembler::CallSize(Label* target) {
1769 USE(target);
1770 return kInstructionSize;
1771 }
1772
1773
1774 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
1775 USE(target);
1776
1777 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
1778 ASSERT(rmode != RelocInfo::NONE32);
1779
1780 if (rmode == RelocInfo::NONE64) {
1781 return kCallSizeWithoutRelocation;
1782 } else {
1783 return kCallSizeWithRelocation;
1784 }
1785 }
1786
1787
1788 int MacroAssembler::CallSize(Handle<Code> code,
1789 RelocInfo::Mode rmode,
1790 TypeFeedbackId ast_id) {
1791 USE(code);
1792 USE(ast_id);
1793
1794 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
1795 ASSERT(rmode != RelocInfo::NONE32);
1796
1797 if (rmode == RelocInfo::NONE64) {
1798 return kCallSizeWithoutRelocation;
1799 } else {
1800 return kCallSizeWithRelocation;
1801 }
1802 }
1803
1804
1805
1806
1807
1808 void MacroAssembler::JumpForHeapNumber(Register object,
1809 Register heap_number_map,
1810 Label* on_heap_number,
1811 Label* on_not_heap_number) {
1812 ASSERT(on_heap_number || on_not_heap_number);
1813 // Tmp0() is used as a scratch register.
1814 ASSERT(!AreAliased(Tmp0(), heap_number_map));
1815 AssertNotSmi(object);
1816
1817 // Load the HeapNumber map if it is not passed.
1818 if (heap_number_map.Is(NoReg)) {
1819 heap_number_map = Tmp1();
1820 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1821 } else {
1822 // This assert clobbers Tmp0(), so do it before loading Tmp0() with the map.
1823 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1824 }
1825
1826 Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset));
1827 Cmp(Tmp0(), heap_number_map);
1828
1829 if (on_heap_number) {
1830 B(eq, on_heap_number);
1831 }
1832 if (on_not_heap_number) {
1833 B(ne, on_not_heap_number);
1834 }
1835 }
1836
1837
1838 void MacroAssembler::JumpIfHeapNumber(Register object,
1839 Label* on_heap_number,
1840 Register heap_number_map) {
1841 JumpForHeapNumber(object,
1842 heap_number_map,
1843 on_heap_number,
1844 NULL);
1845 }
1846
1847
1848 void MacroAssembler::JumpIfNotHeapNumber(Register object,
1849 Label* on_not_heap_number,
1850 Register heap_number_map) {
1851 JumpForHeapNumber(object,
1852 heap_number_map,
1853 NULL,
1854 on_not_heap_number);
1855 }
1856
1857
1858 void MacroAssembler::LookupNumberStringCache(Register object,
1859 Register result,
1860 Register scratch1,
1861 Register scratch2,
1862 Register scratch3,
1863 Label* not_found) {
1864 ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));
1865
1866 // Register usage: 'result' is used as a temporary.
1867 Register number_string_cache = result;
1868 Register mask = scratch3;
1869
1870 // Load the number string cache.
1871 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
1872
1873 // Make the hash mask from the length of the number string cache. It
1874 // contains two elements (number and string) for each cache entry.
1875 Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache,
1876 FixedArray::kLengthOffset));
1877 Asr(mask, mask, 1); // Divide length by two.
1878 Sub(mask, mask, 1); // Make mask.
1879
1880 // Calculate the entry in the number string cache. The hash value in the
1881 // number string cache for smis is just the smi value, and the hash for
1882 // doubles is the xor of the upper and lower words. See
1883 // Heap::GetNumberStringCache.
1884 Label is_smi;
1885 Label load_result_from_cache;
1886
1887 JumpIfSmi(object, &is_smi);
1888 CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
1889 DONT_DO_SMI_CHECK);
1890
1891 STATIC_ASSERT(kDoubleSize == (kWRegSizeInBytes * 2));
1892 Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
1893 Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1));
1894 Eor(scratch1, scratch1, scratch2);
1895 And(scratch1, scratch1, mask);
1896
1897 // Calculate address of entry in string cache: each entry consists of two
1898 // pointer sized fields.
1899 Add(scratch1, number_string_cache,
1900 Operand(scratch1, LSL, kPointerSizeLog2 + 1));
1901
1902 Register probe = mask;
1903 Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
1904 JumpIfSmi(probe, not_found);
1905 Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
1906 Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
1907 Fcmp(d0, d1);
1908 B(ne, not_found);
1909 B(&load_result_from_cache);
1910
1911 Bind(&is_smi);
1912 Register scratch = scratch1;
1913 And(scratch, mask, Operand::UntagSmi(object));
1914 // Calculate address of entry in string cache: each entry consists
1915 // of two pointer sized fields.
1916 Add(scratch, number_string_cache,
1917 Operand(scratch, LSL, kPointerSizeLog2 + 1));
1918
1919 // Check if the entry is the smi we are looking for.
1920 Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
1921 Cmp(object, probe);
1922 B(ne, not_found);
1923
1924 // Get the result from the cache.
1925 Bind(&load_result_from_cache);
1926 Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
1927 IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
1928 scratch1, scratch2);
1929 }
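
// Illustration only (not part of this patch): a host-side sketch of the hash
// computed above (see Heap::GetNumberStringCache). Assumes <cstdint> and
// <cstring>; the helper name is hypothetical.
#include <cstdint>
#include <cstring>
static uint32_t NumberStringCacheHashForDouble(double value,
                                               uint32_t cache_length) {
  // The cache stores (number, string) pairs, so it has cache_length / 2
  // entries; the generated code assumes that count is a power of two, so
  // subtracting one gives the index mask.
  uint32_t mask = (cache_length / 2) - 1;
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));
  // For doubles, the hash is the xor of the upper and lower 32-bit words.
  uint32_t hash =
      static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32);
  return hash & mask;
}
// For smis, the hash is simply the untagged value masked the same way.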
1930
1931
1932 void MacroAssembler::TryConvertDoubleToInt(Register as_int,
1933 FPRegister value,
1934 FPRegister scratch_d,
1935 Label* on_successful_conversion,
1936 Label* on_failed_conversion) {
1937 // Convert to an int and back again, then compare with the original value.
1938 Fcvtzs(as_int, value);
1939 Scvtf(scratch_d, as_int);
1940 Fcmp(value, scratch_d);
1941
1942 if (on_successful_conversion) {
1943 B(on_successful_conversion, eq);
1944 }
1945 if (on_failed_conversion) {
1946 B(on_failed_conversion, ne);
1947 }
1948 }
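
// Illustration only (not part of this patch): the test above is a round trip:
// truncate to an integer, convert back, and compare with the original value.
// A host-side sketch (hypothetical helper, assumes <cstdint>):
#include <cstdint>
static bool IsExactlyConvertibleToInt64(double value, int64_t* as_int) {
  // Reject values outside the int64_t range first: the generated Fcvtzs
  // saturates there, whereas a C++ cast would be undefined.
  if (!(value >= -9223372036854775808.0 && value < 9223372036854775808.0)) {
    return false;
  }
  *as_int = static_cast<int64_t>(value);          // Fcvtzs: truncate.
  return static_cast<double>(*as_int) == value;   // Scvtf + Fcmp: round trip.
}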
1949
1950
1951 void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
1952 Label* on_negative_zero) {
1953 // The bit pattern of floating point -0.0 is the most negative 64-bit
1954 // integer, so subtracting 1 (via Cmp) causes signed overflow.
1955 Fmov(Tmp0(), input);
1956 Cmp(Tmp0(), 1);
1957 B(vs, on_negative_zero);
1958 }
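
// Illustration only (not part of this patch): the check above relies on the
// bit pattern of -0.0 being just the sign bit, i.e. the most negative 64-bit
// integer, the only value for which subtracting 1 sets the overflow flag.
// Host-side sketch (assumes <cstdint> and <cstring>):
#include <cstdint>
#include <cstring>
static bool IsMinusZero(double value) {
  uint64_t bits;
  memcpy(&bits, &value, sizeof(bits));              // Fmov(Tmp0(), input).
  return bits == (static_cast<uint64_t>(1) << 63);  // Only -0.0 matches.
}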
1959
1960
1961 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
1962 // Clamp the value to [0..255].
1963 Cmp(input.W(), Operand(input.W(), UXTB));
1964 // If input < input & 0xff, it must be < 0, so saturate to 0.
1965 Csel(output.W(), wzr, input.W(), lt);
1966 // Create a constant 0xff.
1967 Mov(WTmp0(), 255);
1968 // If input > input & 0xff, it must be > 255, so saturate to 255.
1969 Csel(output.W(), WTmp0(), output.W(), gt);
1970 }
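
// Illustration only (not part of this patch): a host-side reference for the
// clamp above. Comparing the value with its own low byte (UXTB) tells us
// which side it falls on: smaller means the value is negative, larger means
// it is above 255. Hypothetical helper:
static int ClampInt32ToUint8Reference(int value) {
  if (value < 0) return 0;      // Csel(..., wzr, ..., lt).
  if (value > 255) return 255;  // Csel(..., 255, ..., gt).
  return value;
}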
1971
1972
1973 void MacroAssembler::ClampInt32ToUint8(Register in_out) {
1974 ClampInt32ToUint8(in_out, in_out);
1975 }
1976
1977
1978 void MacroAssembler::ClampDoubleToUint8(Register output,
1979 DoubleRegister input,
1980 DoubleRegister dbl_scratch) {
1981 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
1982 // - Inputs lower than 0 (including -infinity) produce 0.
1983 // - Inputs higher than 255 (including +infinity) produce 255.
1984 // Also, it seems that PIXEL types use round-to-nearest rather than
1985 // round-towards-zero.
1986
1987 // Squash +infinity before the conversion, since Fcvtnu will normally
1988 // convert it to 0.
1989 Fmov(dbl_scratch, 255);
1990 Fmin(dbl_scratch, dbl_scratch, input);
1991
1992 // Convert double to unsigned integer. Values less than zero become zero.
1993 // Values greater than 255 have already been clamped to 255.
1994 Fcvtnu(output, dbl_scratch);
1995 }
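
// Illustration only (not part of this patch): a host-side reference for the
// double clamp above, assuming the default round-to-nearest FP environment
// to match Fcvtnu. Assumes <cmath>; the helper name is hypothetical.
#include <cmath>
static int ClampDoubleToUint8Reference(double value) {
  if (!(value > 0.0)) return 0;    // Negative values, -infinity and NaN.
  if (value > 255.0) return 255;   // Including +infinity (Fmin above).
  return static_cast<int>(std::nearbyint(value));  // Round to nearest.
}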
1996
1997
1998 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
1999 Register src,
2000 unsigned count,
2001 Register scratch1,
2002 Register scratch2,
2003 Register scratch3) {
2004 // Untag src and dst into scratch registers.
2005 // Copy src->dst in a tight loop.
2006 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, Tmp0(), Tmp1()));
2007 ASSERT(count >= 2);
2008
2009 const Register& remaining = scratch3;
2010 Mov(remaining, count / 2);
2011
2012 // Only use the Assembler, so we can use Tmp0() and Tmp1().
2013 InstructionAccurateScope scope(this);
2014
2015 const Register& dst_untagged = scratch1;
2016 const Register& src_untagged = scratch2;
2017 sub(dst_untagged, dst, kHeapObjectTag);
2018 sub(src_untagged, src, kHeapObjectTag);
2019
2020 // Copy fields in pairs.
2021 Label loop;
2022 bind(&loop);
2023 ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2,
2024 PostIndex));
2025 stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2,
2026 PostIndex));
2027 sub(remaining, remaining, 1);
2028 cbnz(remaining, &loop);
2029
2030 // Handle the leftovers.
2031 if (count & 1) {
2032 ldr(Tmp0(), MemOperand(src_untagged));
2033 str(Tmp0(), MemOperand(dst_untagged));
2034 }
2035 }
2036
2037
2038 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
2039 Register src,
2040 unsigned count,
2041 Register scratch1,
2042 Register scratch2) {
2043 // Untag src and dst into scratch registers.
2044 // Copy src->dst in an unrolled loop.
2045 ASSERT(!AreAliased(dst, src, scratch1, scratch2, Tmp0(), Tmp1()));
2046
2047 // Only use the Assembler, so we can use Tmp0() and Tmp1().
2048 InstructionAccurateScope scope(this);
2049
2050 const Register& dst_untagged = scratch1;
2051 const Register& src_untagged = scratch2;
2052 sub(dst_untagged, dst, kHeapObjectTag);
2053 sub(src_untagged, src, kHeapObjectTag);
2054
2055 // Copy fields in pairs.
2056 for (unsigned i = 0; i < count / 2; i++) {
2057 ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2,
2058 PostIndex));
2059 stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2,
2060 PostIndex));
2061 }
2062
2063 // Handle the leftovers.
2064 if (count & 1) {
2065 ldr(Tmp0(), MemOperand(src_untagged));
2066 str(Tmp0(), MemOperand(dst_untagged));
2067 }
2068 }
2069
2070
2071 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
2072 Register src,
2073 unsigned count,
2074 Register scratch1) {
2075 // Untag src and dst into scratch registers.
2076 // Copy src->dst in an unrolled loop.
2077 ASSERT(!AreAliased(dst, src, scratch1, Tmp0(), Tmp1()));
2078
2079 // Only use the Assembler, so we can use Tmp0() and Tmp1().
2080 InstructionAccurateScope scope(this);
2081
2082 const Register& dst_untagged = scratch1;
2083 const Register& src_untagged = Tmp1();
2084 sub(dst_untagged, dst, kHeapObjectTag);
2085 sub(src_untagged, src, kHeapObjectTag);
2086
2087 // Copy fields one by one.
2088 for (unsigned i = 0; i < count; i++) {
2089 ldr(Tmp0(), MemOperand(src_untagged, kXRegSizeInBytes, PostIndex));
2090 str(Tmp0(), MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex));
2091 }
2092 }
2093
2094
2095 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
2096 unsigned count) {
2097 // One of two methods is used:
2098 //
2099 // For high 'count' values where many scratch registers are available:
2100 // Untag src and dst into scratch registers.
2101 // Copy src->dst in a tight loop.
2102 //
2103 // For low 'count' values or where few scratch registers are available:
2104 // Untag src and dst into scratch registers.
2105 // Copy src->dst in an unrolled loop.
2106 //
2107 // In both cases, fields are copied in pairs if possible, and left-overs are
2108 // handled separately.
2109 ASSERT(!temps.IncludesAliasOf(dst));
2110 ASSERT(!temps.IncludesAliasOf(src));
2111 ASSERT(!temps.IncludesAliasOf(Tmp0()));
2112 ASSERT(!temps.IncludesAliasOf(Tmp1()));
2113 ASSERT(!temps.IncludesAliasOf(xzr));
2114 ASSERT(!AreAliased(dst, src, Tmp0(), Tmp1()));
2115
2116 if (emit_debug_code()) {
2117 Cmp(dst, src);
2118 Check(ne, kTheSourceAndDestinationAreTheSame);
2119 }
2120
2121 // The value of 'count' at which a loop will be generated (if there are
2122 // enough scratch registers).
2123 static const unsigned kLoopThreshold = 8;
2124
2125 ASSERT(!temps.IsEmpty());
2126 Register scratch1 = Register(temps.PopLowestIndex());
2127 Register scratch2 = Register(temps.PopLowestIndex());
2128 Register scratch3 = Register(temps.PopLowestIndex());
2129
2130 if (scratch3.IsValid() && (count >= kLoopThreshold)) {
2131 CopyFieldsLoopPairsHelper(dst, src, count, scratch1, scratch2, scratch3);
2132 } else if (scratch2.IsValid()) {
2133 CopyFieldsUnrolledPairsHelper(dst, src, count, scratch1, scratch2);
2134 } else if (scratch1.IsValid()) {
2135 CopyFieldsUnrolledHelper(dst, src, count, scratch1);
2136 } else {
2137 UNREACHABLE();
2138 }
2139 }
2140
2141
2142 void MacroAssembler::CopyBytes(Register dst,
2143 Register src,
2144 Register length,
2145 Register scratch,
2146 CopyHint hint) {
2147 ASSERT(!AreAliased(src, dst, length, scratch));
2148
2149 // TODO(all): Implement a faster copy function, and use hint to determine
2150 // which algorithm to use for copies.
2151 if (emit_debug_code()) {
2152 // Check copy length.
2153 Cmp(length, 0);
2154 Assert(ge, kUnexpectedNegativeValue);
2155
2156 // Check src and dst buffers don't overlap.
2157 Add(scratch, src, length); // Calculate end of src buffer.
2158 Cmp(scratch, dst);
2159 Add(scratch, dst, length); // Calculate end of dst buffer.
2160 Ccmp(scratch, src, ZFlag, gt);
2161 Assert(le, kCopyBuffersOverlap);
2162 }
2163
2164 Label loop, done;
2165 Cbz(length, &done);
2166
2167 Bind(&loop);
2168 Sub(length, length, 1);
2169 Ldrb(scratch, MemOperand(src, 1, PostIndex));
2170 Strb(scratch, MemOperand(dst, 1, PostIndex));
2171 Cbnz(length, &loop);
2172 Bind(&done);
2173 }
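
// Illustration only (not part of this patch): the debug code above asserts
// that [src, src + length) and [dst, dst + length) do not overlap; the
// Cmp/Ccmp pair folds the two comparisons into a single condition. Host-side
// sketch of the predicate (hypothetical helper, assumes <cstdint>):
#include <cstdint>
static bool BuffersAreDisjoint(uint64_t src, uint64_t dst, uint64_t length) {
  // No overlap iff one buffer ends at or before the other begins.
  return (src + length <= dst) || (dst + length <= src);
}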
2174
2175
2176 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
2177 Register end_offset,
2178 Register filler) {
2179 Label loop, entry;
2180 B(&entry);
2181 Bind(&loop);
2182 // TODO(all): consider using stp here.
2183 Str(filler, MemOperand(start_offset, kPointerSize, PostIndex));
2184 Bind(&entry);
2185 Cmp(start_offset, end_offset);
2186 B(lt, &loop);
2187 }
2188
2189
2190 void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
2191 Register first,
2192 Register second,
2193 Register scratch1,
2194 Register scratch2,
2195 Label* failure,
2196 SmiCheckType smi_check) {
2197
2198 if (smi_check == DO_SMI_CHECK) {
2199 JumpIfEitherSmi(first, second, failure);
2200 } else if (emit_debug_code()) {
2201 ASSERT(smi_check == DONT_DO_SMI_CHECK);
2202 Label not_smi;
2203 JumpIfEitherSmi(first, second, NULL, &not_smi);
2204
2205 // At least one input is a smi, but the flags indicated a smi check wasn't
2206 // needed.
2207 Abort(kUnexpectedSmi);
2208
2209 Bind(&not_smi);
2210 }
2211
2212 // Test that both first and second are sequential ASCII strings.
2213 Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2214 Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2215 Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2216 Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2217
2218 JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
2219 scratch2,
2220 scratch1,
2221 scratch2,
2222 failure);
2223 }
2224
2225
2226 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
2227 Register first,
2228 Register second,
2229 Register scratch1,
2230 Register scratch2,
2231 Label* failure) {
2232 ASSERT(!AreAliased(scratch1, second));
2233 ASSERT(!AreAliased(scratch1, scratch2));
2234 static const int kFlatAsciiStringMask =
2235 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2236 static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2237 And(scratch1, first, kFlatAsciiStringMask);
2238 And(scratch2, second, kFlatAsciiStringMask);
2239 Cmp(scratch1, kFlatAsciiStringTag);
2240 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
2241 B(ne, failure);
2242 }
2243
2244
2245 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
2246 Register scratch,
2247 Label* failure) {
2248 const int kFlatAsciiStringMask =
2249 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2250 const int kFlatAsciiStringTag =
2251 kStringTag | kOneByteStringTag | kSeqStringTag;
2252 And(scratch, type, kFlatAsciiStringMask);
2253 Cmp(scratch, kFlatAsciiStringTag);
2254 B(ne, failure);
2255 }
2256
2257
2258 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2259 Register first,
2260 Register second,
2261 Register scratch1,
2262 Register scratch2,
2263 Label* failure) {
2264 ASSERT(!AreAliased(first, second, scratch1, scratch2));
2265 const int kFlatAsciiStringMask =
2266 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2267 const int kFlatAsciiStringTag =
2268 kStringTag | kOneByteStringTag | kSeqStringTag;
2269 And(scratch1, first, kFlatAsciiStringMask);
2270 And(scratch2, second, kFlatAsciiStringMask);
2271 Cmp(scratch1, kFlatAsciiStringTag);
2272 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
2273 B(ne, failure);
2274 }
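
// Illustration only (not part of this patch): the three helpers above all
// test the same predicate on an instance type byte, using the constants
// referenced above. Host-side sketch (hypothetical helper):
static bool IsSequentialAsciiInstanceType(int type) {
  const int kFlatAsciiStringMask =
      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int kFlatAsciiStringTag =
      kStringTag | kOneByteStringTag | kSeqStringTag;
  return (type & kFlatAsciiStringMask) == kFlatAsciiStringTag;
}
// The Ccmp in the two-register variants chains this check for both inputs so
// that a single branch handles "either one is not a sequential ASCII string".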
2275
2276
2277 void MacroAssembler::JumpIfNotUniqueName(Register type,
2278 Label* not_unique_name) {
2279 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2280 // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
2281 // continue
2282 // } else {
2283 // goto not_unique_name
2284 // }
2285 Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
2286 Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
2287 B(ne, not_unique_name);
2288 }
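
// Illustration only (not part of this patch): a host-side sketch of the
// predicate that the Tst/Ccmp pair above encodes, using the constants
// referenced above (the helper name is hypothetical):
static bool IsUniqueName(int type) {
  bool is_internalized_string =
      (type & (kIsNotStringMask | kIsNotInternalizedMask)) == 0;
  return is_internalized_string || (type == SYMBOL_TYPE);
}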
2289
2290
2291 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2292 const ParameterCount& actual,
2293 Handle<Code> code_constant,
2294 Register code_reg,
2295 Label* done,
2296 InvokeFlag flag,
2297 bool* definitely_mismatches,
2298 const CallWrapper& call_wrapper) {
2299 bool definitely_matches = false;
2300 *definitely_mismatches = false;
2301 Label regular_invoke;
2302
2303 // Check whether the expected and actual arguments count match. If not,
2304 // setup registers according to contract with ArgumentsAdaptorTrampoline:
2305 // x0: actual arguments count.
2306 // x1: function (passed through to callee).
2307 // x2: expected arguments count.
2308
2309 // The code below is simplified because the calling code has already set up
2310 // the actual and expected registers according to this contract when the
2311 // values are passed in registers.
2312 ASSERT(actual.is_immediate() || actual.reg().is(x0));
2313 ASSERT(expected.is_immediate() || expected.reg().is(x2));
2314 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
2315
2316 if (expected.is_immediate()) {
2317 ASSERT(actual.is_immediate());
2318 if (expected.immediate() == actual.immediate()) {
2319 definitely_matches = true;
2320
2321 } else {
2322 Mov(x0, actual.immediate());
2323 if (expected.immediate() ==
2324 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2325 // Don't worry about adapting arguments for builtins that
2326 // don't want that done. Skip the adaptation code by making it
2327 // look like we have a match between the expected and actual
2328 // number of arguments.
2329 definitely_matches = true;
2330 } else {
2331 *definitely_mismatches = true;
2332 // Set up x2 for the argument adaptor.
2333 Mov(x2, expected.immediate());
2334 }
2335 }
2336
2337 } else { // expected is a register.
2338 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2339 : Operand(actual.reg());
2340 // If actual == expected perform a regular invocation.
2341 Cmp(expected.reg(), actual_op);
2342 B(eq, &regular_invoke);
2343 // Otherwise set up x0 for the argument adaptor.
2344 Mov(x0, actual_op);
2345 }
2346
2347 // If the argument counts may mismatch, generate a call to the argument
2348 // adaptor.
2349 if (!definitely_matches) {
2350 if (!code_constant.is_null()) {
2351 Mov(x3, Operand(code_constant));
2352 Add(x3, x3, Code::kHeaderSize - kHeapObjectTag);
2353 }
2354
2355 Handle<Code> adaptor =
2356 isolate()->builtins()->ArgumentsAdaptorTrampoline();
2357 if (flag == CALL_FUNCTION) {
2358 call_wrapper.BeforeCall(CallSize(adaptor));
2359 Call(adaptor);
2360 call_wrapper.AfterCall();
2361 if (!*definitely_mismatches) {
2362 // When the counts definitely mismatch, MAsm::InvokeCode emits no further
2363 // code and falling through is fine; otherwise skip the regular call.
2364 B(done);
2365 }
2366 } else {
2367 Jump(adaptor, RelocInfo::CODE_TARGET);
2368 }
2369 }
2370 Bind(&regular_invoke);
2371 }
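
// Illustration only (not part of this patch): when both counts are known
// immediates, the prologue above reduces to the decision below (hypothetical
// helper; the sentinel is the same one checked above):
static bool NeedsArgumentsAdaptor(int expected, int actual) {
  // Builtins marked "don't adapt arguments" are always invoked directly.
  if (expected == SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
    return false;
  }
  return expected != actual;
}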
2372
2373
2374 void MacroAssembler::InvokeCode(Register code,
2375 const ParameterCount& expected,
2376 const ParameterCount& actual,
2377 InvokeFlag flag,
2378 const CallWrapper& call_wrapper) {
2379 // You can't call a function without a valid frame.
2380 ASSERT(flag == JUMP_FUNCTION || has_frame());
2381
2382 Label done;
2383
2384 bool definitely_mismatches = false;
2385 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
2386 &definitely_mismatches, call_wrapper);
2387
2388 // If we are certain that actual != expected, then we know InvokePrologue will
2389 // have handled the call through the argument adaptor mechanism.
2390 // The called function expects the call kind in x5.
2391 if (!definitely_mismatches) {
2392 if (flag == CALL_FUNCTION) {
2393 call_wrapper.BeforeCall(CallSize(code));
2394 Call(code);
2395 call_wrapper.AfterCall();
2396 } else {
2397 ASSERT(flag == JUMP_FUNCTION);
2398 Jump(code);
2399 }
2400 }
2401
2402 // Continue here if InvokePrologue does handle the invocation due to
2403 // mismatched parameter counts.
2404 Bind(&done);
2405 }
2406
2407
2408 void MacroAssembler::InvokeFunction(Register function,
2409 const ParameterCount& actual,
2410 InvokeFlag flag,
2411 const CallWrapper& call_wrapper) {
2412 // You can't call a function without a valid frame.
2413 ASSERT(flag == JUMP_FUNCTION || has_frame());
2414
2415 // Contract with called JS functions requires that function is passed in x1.
2416 // (See FullCodeGenerator::Generate().)
2417 ASSERT(function.is(x1));
2418
2419 Register expected_reg = x2;
2420 Register code_reg = x3;
2421
2422 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2423 // The number of arguments is stored as an int32_t, and -1 is a marker
2424 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
2425 // extension to correctly handle it.
2426 Ldr(expected_reg, FieldMemOperand(function,
2427 JSFunction::kSharedFunctionInfoOffset));
2428 Ldrsw(expected_reg,
2429 FieldMemOperand(expected_reg,
2430 SharedFunctionInfo::kFormalParameterCountOffset));
2431 Ldr(code_reg,
2432 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2433
2434 ParameterCount expected(expected_reg);
2435 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
2436 }
2437
2438
2439 void MacroAssembler::InvokeFunction(Register function,
2440 const ParameterCount& expected,
2441 const ParameterCount& actual,
2442 InvokeFlag flag,
2443 const CallWrapper& call_wrapper) {
2444 // You can't call a function without a valid frame.
2445 ASSERT(flag == JUMP_FUNCTION || has_frame());
2446
2447 // Contract with called JS functions requires that function is passed in x1.
2448 // (See FullCodeGenerator::Generate().)
2449 ASSERT(function.Is(x1));
2450
2451 Register code_reg = x3;
2452
2453 // Set up the context.
2454 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2455
2456 // We call indirectly through the code field in the function to
2457 // allow recompilation to take effect without changing any of the
2458 // call sites.
2459 Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2460 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
2461 }
2462
2463
2464 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2465 const ParameterCount& expected,
2466 const ParameterCount& actual,
2467 InvokeFlag flag,
2468 const CallWrapper& call_wrapper) {
2469 // Contract with called JS functions requires that function is passed in x1.
2470 // (See FullCodeGenerator::Generate().)
2471 __ LoadObject(x1, function);
2472 InvokeFunction(x1, expected, actual, flag, call_wrapper);
2473 }
2474
2475
2476 void MacroAssembler::ECMA262ToInt32(Register result,
2477 DoubleRegister input,
2478 Register scratch1,
2479 Register scratch2,
2480 ECMA262ToInt32Result format) {
2481 ASSERT(!AreAliased(result, scratch1, scratch2));
2482 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
2483 STATIC_ASSERT(kSmiTag == 0);
2484 STATIC_ASSERT(kSmiValueSize == 32);
2485
2486 Label done, tag, manual_conversion;
2487
2488 // 1. Try to convert with a FPU convert instruction. It's trivial to compute
2489 // the modulo operation on an integer register so we convert to a 64-bit
2490 // integer, then find the 32-bit result from that.
2491 //
2492 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
2493 // when the double is out of range. NaNs and infinities will be converted to 0
2494 // (as ECMA-262 requires).
2495 Fcvtzs(result, input);
2496
2497 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not
2498 // representable using a double, so if the result is one of those then we know
2499 // that saturation occurred, and we need to manually handle the conversion.
2500 //
2501 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
2502 // 1 will cause signed overflow.
2503 Cmp(result, 1);
2504 Ccmp(result, -1, VFlag, vc);
2505 B(vc, &tag);
2506
2507 // 2. Manually convert the input to an int32.
2508 Fmov(result, input);
2509
2510 // Extract the exponent.
2511 Register exponent = scratch1;
2512 Ubfx(exponent, result, HeapNumber::kMantissaBits, HeapNumber::kExponentBits);
2513
2514 // If the exponent is >= 84 (kMantissaBits + 32), the result is always 0 since
2515 // the mantissa gets shifted completely out of the int32_t result.
2516 Cmp(exponent, HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 32);
2517 CzeroX(result, ge);
2518 B(ge, &done);
2519
2520 // The Fcvtzs sequence handles all cases except where the conversion causes
2521 // signed overflow in the int64_t target. Since we've already handled
2522 // exponents >= 84, we can guarantee that 63 <= exponent < 84.
2523
2524 if (emit_debug_code()) {
2525 Cmp(exponent, HeapNumber::kExponentBias + 63);
2526 // Exponents less than this should have been handled by the Fcvt case.
2527 Check(ge, kUnexpectedValue);
2528 }
2529
2530 // Isolate the mantissa bits, and set the implicit '1'.
2531 Register mantissa = scratch2;
2532 Ubfx(mantissa, result, 0, HeapNumber::kMantissaBits);
2533 Orr(mantissa, mantissa, 1UL << HeapNumber::kMantissaBits);
2534
2535 // Negate the mantissa if necessary.
2536 Tst(result, kXSignMask);
2537 Cneg(mantissa, mantissa, ne);
2538
2539 // Shift the mantissa bits into the correct place. We know that we have to
2540 // shift left here, because exponent >= 63 >= kMantissaBits.
2541 Sub(exponent, exponent,
2542 HeapNumber::kExponentBias + HeapNumber::kMantissaBits);
2543 Lsl(result, mantissa, exponent);
2544
2545 Bind(&tag);
2546 switch (format) {
2547 case INT32_IN_W:
2548 // There is nothing to do; the upper 32 bits are undefined.
2549 if (emit_debug_code()) {
2550 __ Mov(scratch1, 0x55555555);
2551 __ Bfi(result, scratch1, 32, 32);
2552 }
2553 break;
2554 case INT32_IN_X:
2555 Sxtw(result, result);
2556 break;
2557 case SMI:
2558 SmiTag(result);
2559 break;
2560 }
2561
2562 Bind(&done);
2563 }
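
// Illustration only (not part of this patch): a portable reference for the
// truncation implemented above. ECMA-262 ToInt32 truncates towards zero,
// reduces modulo 2^32, and reinterprets the result as a signed 32-bit value;
// NaN and the infinities map to 0. Hypothetical helper, assumes <cmath> and
// <cstdint> and two's-complement wrapping on the final cast:
#include <cmath>
#include <cstdint>
static int32_t ReferenceECMA262ToInt32(double value) {
  if (!std::isfinite(value)) return 0;
  double truncated = std::trunc(value);                 // Round towards zero.
  double modulo = std::fmod(truncated, 4294967296.0);   // Reduce modulo 2^32.
  if (modulo < 0) modulo += 4294967296.0;
  uint32_t bits = static_cast<uint32_t>(modulo);
  return static_cast<int32_t>(bits);                    // Wrap into int32 range.
}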
2564
2565
2566 void MacroAssembler::HeapNumberECMA262ToInt32(Register result,
2567 Register heap_number,
2568 Register scratch1,
2569 Register scratch2,
2570 DoubleRegister double_scratch,
2571 ECMA262ToInt32Result format) {
2572 if (emit_debug_code()) {
2573 // Verify we indeed have a HeapNumber.
2574 Label ok;
2575 JumpIfHeapNumber(heap_number, &ok);
2576 Abort(kExpectedHeapNumber);
2577 Bind(&ok);
2578 }
2579
2580 Ldr(double_scratch, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
2581 ECMA262ToInt32(result, double_scratch, scratch1, scratch2, format);
2582 }
2583
2584
2585 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
2586 if (frame_mode == BUILD_STUB_FRAME) {
2587 ASSERT(StackPointer().Is(jssp));
2588 // TODO(jbramley): Does x1 contain a JSFunction here, or does it already
2589 // have the special STUB smi?
2590 __ Mov(Tmp0(), Operand(Smi::FromInt(StackFrame::STUB)));
2591 // Compiled stubs don't age, and so they don't need the predictable code
2592 // ageing sequence.
2593 __ Push(lr, fp, cp, Tmp0());
2594 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
2595 } else {
2596 if (isolate()->IsCodePreAgingActive()) {
2597 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
2598 __ EmitCodeAgeSequence(stub);
2599 } else {
2600 __ EmitFrameSetupForCodeAgePatching();
2601 }
2602 }
2603 }
2604
2605
2606 void MacroAssembler::EnterFrame(StackFrame::Type type) {
2607 ASSERT(jssp.Is(StackPointer()));
2608 Push(lr, fp, cp);
2609 Mov(Tmp1(), Operand(Smi::FromInt(type)));
2610 Mov(Tmp0(), Operand(CodeObject()));
2611 Push(Tmp1(), Tmp0());
2612 // jssp[4] : lr
2613 // jssp[3] : fp
2614 // jssp[2] : cp
2615 // jssp[1] : type
2616 // jssp[0] : code object
2617
2618 // Adjust FP to point to saved FP.
2619 add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
2620 }
2621
2622
2623 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
2624 ASSERT(jssp.Is(StackPointer()));
2625 // Drop the execution stack down to the frame pointer and restore
2626 // the caller frame pointer and return address.
2627 Mov(jssp, fp);
2628 AssertStackConsistency();
2629 Pop(fp, lr);
2630 }
2631
2632
2633 void MacroAssembler::ExitFramePreserveFPRegs() {
2634 PushCPURegList(kCallerSavedFP);
2635 }
2636
2637
2638 void MacroAssembler::ExitFrameRestoreFPRegs() {
2639 // Read the registers from the stack without popping them. The stack pointer
2640 // will be reset as part of the unwinding process.
2641 CPURegList saved_fp_regs = kCallerSavedFP;
2642 ASSERT(saved_fp_regs.Count() % 2 == 0);
2643
2644 int offset = ExitFrameConstants::kLastExitFrameField;
2645 while (!saved_fp_regs.IsEmpty()) {
2646 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
2647 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
2648 offset -= 2 * kDRegSizeInBytes;
2649 Ldp(dst1, dst0, MemOperand(fp, offset));
2650 }
2651 }
2652
2653
2654 // TODO(jbramley): Check that we're handling the frame pointer correctly.
2655 void MacroAssembler::EnterExitFrame(bool save_doubles,
2656 const Register& scratch,
2657 int extra_space) {
2658 ASSERT(jssp.Is(StackPointer()));
2659
2660 // Set up the new stack frame.
2661 Mov(scratch, Operand(CodeObject()));
2662 Push(lr, fp);
2663 Mov(fp, StackPointer());
2664 Push(xzr, scratch);
2665 // fp[8]: CallerPC (lr)
2666 // fp -> fp[0]: CallerFP (old fp)
2667 // fp[-8]: Space reserved for SPOffset.
2668 // jssp -> fp[-16]: CodeObject()
2669 STATIC_ASSERT((2 * kPointerSize) ==
2670 ExitFrameConstants::kCallerSPDisplacement);
2671 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
2672 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
2673 STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset);
2674 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset);
2675
2676 // Save the frame pointer and context pointer in the top frame.
2677 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2678 isolate())));
2679 Str(fp, MemOperand(scratch));
2680 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2681 isolate())));
2682 Str(cp, MemOperand(scratch));
2683
2684 STATIC_ASSERT((-2 * kPointerSize) ==
2685 ExitFrameConstants::kLastExitFrameField);
2686 if (save_doubles) {
2687 ExitFramePreserveFPRegs();
2688 }
2689
2690 // Reserve space for the return address and for user requested memory.
2691 // We do this before aligning to make sure that we end up correctly
2692 // aligned with the minimum of wasted space.
2693 Claim(extra_space + 1, kXRegSizeInBytes);
2694 // fp[8]: CallerPC (lr)
2695 // fp -> fp[0]: CallerFP (old fp)
2696 // fp[-8]: Space reserved for SPOffset.
2697 // fp[-16]: CodeObject()
2698 // jssp[-16 - fp_size]: Saved doubles (if save_doubles is true).
2699 // jssp[8]: Extra space reserved for caller (if extra_space != 0).
2700 // jssp -> jssp[0]: Space reserved for the return address.
2701
2702 // Align and synchronize the system stack pointer with jssp.
2703 AlignAndSetCSPForFrame();
2704 ASSERT(csp.Is(StackPointer()));
2705
2706 // fp[8]: CallerPC (lr)
2707 // fp -> fp[0]: CallerFP (old fp)
2708 // fp[-8]: Space reserved for SPOffset.
2709 // fp[-16]: CodeObject()
2710 // csp[...]: Saved doubles, if saved_doubles is true.
2711 // csp[8]: Memory reserved for the caller if extra_space != 0.
2712 // Alignment padding, if necessary.
2713 // csp -> csp[0]: Space reserved for the return address.
2714
2715 // ExitFrame::GetStateForFramePointer expects to find the return address at
2716 // the memory address immediately below the pointer stored in SPOffset.
2717 // It is not safe to derive much else from SPOffset, because the size of the
2718 // padding can vary.
2719 Add(scratch, csp, kXRegSizeInBytes);
2720 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
2721 }
2722
2723
2724 // Leave the current exit frame.
2725 void MacroAssembler::LeaveExitFrame(bool restore_doubles,
2726 const Register& scratch,
2727 bool restore_context) {
2728 ASSERT(csp.Is(StackPointer()));
2729
2730 if (restore_doubles) {
2731 ExitFrameRestoreFPRegs();
2732 }
2733
2734 // Restore the context pointer from the top frame.
2735 if (restore_context) {
2736 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2737 isolate())));
2738 Ldr(cp, MemOperand(scratch));
2739 }
2740
2741 if (emit_debug_code()) {
2742 // Also emit debug code to clear the cp in the top frame.
2743 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2744 isolate())));
2745 Str(xzr, MemOperand(scratch));
2746 }
2747 // Clear the frame pointer from the top frame.
2748 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2749 isolate())));
2750 Str(xzr, MemOperand(scratch));
2751
2752 // Pop the exit frame.
2753 // fp[8]: CallerPC (lr)
2754 // fp -> fp[0]: CallerFP (old fp)
2755 // fp[...]: The rest of the frame.
2756 Mov(jssp, fp);
2757 SetStackPointer(jssp);
2758 AssertStackConsistency();
2759 Pop(fp, lr);
2760 }
2761
2762
2763 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2764 Register scratch1, Register scratch2) {
2765 if (FLAG_native_code_counters && counter->Enabled()) {
2766 Mov(scratch1, value);
2767 Mov(scratch2, Operand(ExternalReference(counter)));
2768 Str(scratch1, MemOperand(scratch2));
2769 }
2770 }
2771
2772
2773 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2774 Register scratch1, Register scratch2) {
2775 ASSERT(value != 0);
2776 if (FLAG_native_code_counters && counter->Enabled()) {
2777 Mov(scratch2, Operand(ExternalReference(counter)));
2778 Ldr(scratch1, MemOperand(scratch2));
2779 Add(scratch1, scratch1, value);
2780 Str(scratch1, MemOperand(scratch2));
2781 }
2782 }
2783
2784
2785 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2786 Register scratch1, Register scratch2) {
2787 IncrementCounter(counter, -value, scratch1, scratch2);
2788 }
2789
2790
2791 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2792 if (context_chain_length > 0) {
2793 // Move up the chain of contexts to the context containing the slot.
2794 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2795 for (int i = 1; i < context_chain_length; i++) {
2796 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2797 }
2798 } else {
2799 // Slot is in the current function context. Move it into the
2800 // destination register in case we store into it (the write barrier
2801 // cannot be allowed to destroy the context in cp).
2802 Mov(dst, cp);
2803 }
2804 }
2805
2806
2807 #ifdef ENABLE_DEBUGGER_SUPPORT
2808 void MacroAssembler::DebugBreak() {
2809 Mov(x0, 0);
2810 Mov(x1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
2811 CEntryStub ces(1);
2812 ASSERT(AllowThisStubCall(&ces));
2813 Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
2814 }
2815 #endif
2816
2817
2818 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2819 int handler_index) {
2820 ASSERT(jssp.Is(StackPointer()));
2821 // Adjust this code if the asserts don't hold.
2822 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2823 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2824 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2825 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2826 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2827 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2828
2829 // For the JSEntry handler, we must preserve the live registers x0-x4.
2830 // (See JSEntryStub::GenerateBody().)
2831
2832 unsigned state =
2833 StackHandler::IndexField::encode(handler_index) |
2834 StackHandler::KindField::encode(kind);
2835
2836 // Set up the code object and the state for pushing.
2837 Mov(x10, Operand(CodeObject()));
2838 Mov(x11, state);
2839
2840 // Push the frame pointer, context, state, and code object.
2841 if (kind == StackHandler::JS_ENTRY) {
2842 ASSERT(Smi::FromInt(0) == 0);
2843 Push(xzr, xzr, x11, x10);
2844 } else {
2845 Push(fp, cp, x11, x10);
2846 }
2847
2848 // Link the current handler as the next handler.
2849 Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2850 Ldr(x10, MemOperand(x11));
2851 Push(x10);
2852 // Set this new handler as the current one.
2853 Str(jssp, MemOperand(x11));
2854 }
2855
2856
2857 void MacroAssembler::PopTryHandler() {
2858 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2859 Pop(x10);
2860 Mov(x11, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2861 Drop(StackHandlerConstants::kSize - kXRegSizeInBytes, kByteSizeInBytes);
2862 Str(x10, MemOperand(x11));
2863 }
2864
2865
2866 void MacroAssembler::Allocate(int object_size,
2867 Register result,
2868 Register scratch1,
2869 Register scratch2,
2870 Label* gc_required,
2871 AllocationFlags flags) {
2872 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
2873 if (!FLAG_inline_new) {
2874 if (emit_debug_code()) {
2875 // Trash the registers to simulate an allocation failure.
2876 // We apply salt to the original zap value to easily spot the values.
2877 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
2878 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
2879 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
2880 }
2881 B(gc_required);
2882 return;
2883 }
2884
2885 ASSERT(!AreAliased(result, scratch1, scratch2, Tmp0(), Tmp1()));
2886 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits() &&
2887 Tmp0().Is64Bits() && Tmp1().Is64Bits());
2888
2889 // Make object size into bytes.
2890 if ((flags & SIZE_IN_WORDS) != 0) {
2891 object_size *= kPointerSize;
2892 }
2893 ASSERT(0 == (object_size & kObjectAlignmentMask));
2894
2895 // Check relative positions of allocation top and limit addresses.
2896 // The values must be adjacent in memory to allow the use of LDP.
2897 ExternalReference heap_allocation_top =
2898 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2899 ExternalReference heap_allocation_limit =
2900 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2901 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
2902 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
2903 ASSERT((limit - top) == kPointerSize);
2904
2905 // Set up allocation top address and object size registers.
2906 Register top_address = scratch1;
2907 Register allocation_limit = scratch2;
2908 Mov(top_address, Operand(heap_allocation_top));
2909
2910 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2911 // Load allocation top into result and the allocation limit.
2912 Ldp(result, allocation_limit, MemOperand(top_address));
2913 } else {
2914 if (emit_debug_code()) {
2915 // Assert that result actually contains top on entry.
2916 Ldr(Tmp0(), MemOperand(top_address));
2917 Cmp(result, Tmp0());
2918 Check(eq, kUnexpectedAllocationTop);
2919 }
2920 // Load the allocation limit. 'result' already contains the allocation top.
2921 Ldr(allocation_limit, MemOperand(top_address, limit - top));
2922 }
2923
2924 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
2925 // the same alignment on A64.
2926 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
2927
2928 // Calculate new top and bail out if new space is exhausted.
2929 Adds(Tmp1(), result, object_size);
2930 B(vs, gc_required);
2931 Cmp(Tmp1(), allocation_limit);
2932 B(hi, gc_required);
2933 Str(Tmp1(), MemOperand(top_address));
2934
2935 // Tag the object if requested.
2936 if ((flags & TAG_OBJECT) != 0) {
2937 Orr(result, result, kHeapObjectTag);
2938 }
2939 }
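
// Illustration only (not part of this patch): the fast path above is a bump
// allocation over the [top, limit) range kept by the heap. Host-side sketch
// mirroring the Adds/B(vs)/Cmp/B(hi) sequence (hypothetical helper, assumes
// <cstdint>):
#include <cstdint>
static bool TryBumpAllocate(uint64_t* top, uint64_t limit, uint64_t size,
                            uint64_t* result) {
  uint64_t new_top = *top + size;
  // Bail out to the GC if the addition wraps or the new top passes the limit.
  if (new_top < *top || new_top > limit) return false;
  *result = *top;   // The new object starts at the old top.
  *top = new_top;   // Publish the new top.
  return true;
}
// On success the generated code additionally ORs in kHeapObjectTag when
// TAG_OBJECT is requested.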
2940
2941
2942 void MacroAssembler::Allocate(Register object_size,
2943 Register result,
2944 Register scratch1,
2945 Register scratch2,
2946 Label* gc_required,
2947 AllocationFlags flags) {
2948 if (!FLAG_inline_new) {
2949 if (emit_debug_code()) {
2950 // Trash the registers to simulate an allocation failure.
2951 // We apply salt to the original zap value to easily spot the values.
2952 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
2953 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
2954 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
2955 }
2956 B(gc_required);
2957 return;
2958 }
2959
2960 ASSERT(!AreAliased(object_size, result, scratch1, scratch2, Tmp0(), Tmp1()));
2961 ASSERT(object_size.Is64Bits() && result.Is64Bits() && scratch1.Is64Bits() &&
2962 scratch2.Is64Bits() && Tmp0().Is64Bits() && Tmp1().Is64Bits());
2963
2964 // Check relative positions of allocation top and limit addresses.
2965 // The values must be adjacent in memory to allow the use of LDP.
2966 ExternalReference heap_allocation_top =
2967 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2968 ExternalReference heap_allocation_limit =
2969 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2970 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
2971 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
2972 ASSERT((limit - top) == kPointerSize);
2973
2974 // Set up allocation top address and object size registers.
2975 Register top_address = scratch1;
2976 Register allocation_limit = scratch2;
2977 Mov(top_address, Operand(heap_allocation_top));
2978
2979 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2980 // Load allocation top into result and the allocation limit.
2981 Ldp(result, allocation_limit, MemOperand(top_address));
2982 } else {
2983 if (emit_debug_code()) {
2984 // Assert that result actually contains top on entry.
2985 Ldr(Tmp0(), MemOperand(top_address));
2986 Cmp(result, Tmp0());
2987 Check(eq, kUnexpectedAllocationTop);
2988 }
2989 // Load the allocation limit. 'result' already contains the allocation top.
2990 Ldr(allocation_limit, MemOperand(top_address, limit - top));
2991 }
2992
2993 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
2994 // the same alignment on A64.
2995 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
2996
2997 // Calculate new top and bail out if new space is exhausted
2998 if ((flags & SIZE_IN_WORDS) != 0) {
2999 Adds(Tmp1(), result, Operand(object_size, LSL, kPointerSizeLog2));
3000 } else {
3001 Adds(Tmp1(), result, object_size);
3002 }
3003
3004 if (emit_debug_code()) {
3005 Tst(Tmp1(), kObjectAlignmentMask);
3006 Check(eq, kUnalignedAllocationInNewSpace);
3007 }
3008
3009 B(vs, gc_required);
3010 Cmp(Tmp1(), allocation_limit);
3011 B(hi, gc_required);
3012 Str(Tmp1(), MemOperand(top_address));
3013
3014 // Tag the object if requested.
3015 if ((flags & TAG_OBJECT) != 0) {
3016 Orr(result, result, kHeapObjectTag);
3017 }
3018 }
3019
3020
3021 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3022 Register scratch) {
3023 ExternalReference new_space_allocation_top =
3024 ExternalReference::new_space_allocation_top_address(isolate());
3025
3026 // Make sure the object has no tag before resetting top.
3027 Bic(object, object, kHeapObjectTagMask);
3028 #ifdef DEBUG
3029 // Check that the object being un-allocated is below the current top.
3030 Mov(scratch, Operand(new_space_allocation_top));
3031 Ldr(scratch, MemOperand(scratch));
3032 Cmp(object, scratch);
3033 Check(lt, kUndoAllocationOfNonAllocatedMemory);
3034 #endif
3035 // Write the address of the object to un-allocate as the current top.
3036 Mov(scratch, Operand(new_space_allocation_top));
3037 Str(object, MemOperand(scratch));
3038 }
3039
3040
3041 void MacroAssembler::AllocateTwoByteString(Register result,
3042 Register length,
3043 Register scratch1,
3044 Register scratch2,
3045 Register scratch3,
3046 Label* gc_required) {
3047 ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
3048 // Calculate the number of bytes needed for the characters in the string while
3049 // observing object alignment.
3050 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3051 Add(scratch1, length, length); // Length in bytes, not chars.
3052 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3053 Bic(scratch1, scratch1, kObjectAlignmentMask);
3054
3055 // Allocate two-byte string in new space.
3056 Allocate(scratch1,
3057 result,
3058 scratch2,
3059 scratch3,
3060 gc_required,
3061 TAG_OBJECT);
3062
3063 // Set the map, length and hash field.
3064 InitializeNewString(result,
3065 length,
3066 Heap::kStringMapRootIndex,
3067 scratch1,
3068 scratch2);
3069 }
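
// Illustration only (not part of this patch): a host-side sketch of the size
// computation above, using the constants referenced there (the helper name
// is hypothetical):
static int SeqTwoByteStringAllocationSize(int length) {
  // Two bytes per character plus the header, rounded up to the object
  // alignment (the Add/Add/Bic sequence above).
  int size = 2 * length + SeqTwoByteString::kHeaderSize;
  return static_cast<int>((size + kObjectAlignmentMask) &
                          ~kObjectAlignmentMask);
}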
3070
3071
3072 void MacroAssembler::AllocateAsciiString(Register result,
3073 Register length,
3074 Register scratch1,
3075 Register scratch2,
3076 Register scratch3,
3077 Label* gc_required) {
3078 ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
3079 // Calculate the number of bytes needed for the characters in the string while
3080 // observing object alignment.
3081 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3082 STATIC_ASSERT(kCharSize == 1);
3083 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3084 Bic(scratch1, scratch1, kObjectAlignmentMask);
3085
3086 // Allocate ASCII string in new space.
3087 Allocate(scratch1,
3088 result,
3089 scratch2,
3090 scratch3,
3091 gc_required,
3092 TAG_OBJECT);
3093
3094 // Set the map, length and hash field.
3095 InitializeNewString(result,
3096 length,
3097 Heap::kAsciiStringMapRootIndex,
3098 scratch1,
3099 scratch2);
3100 }
3101
3102
3103 void MacroAssembler::AllocateTwoByteConsString(Register result,
3104 Register length,
3105 Register scratch1,
3106 Register scratch2,
3107 Label* gc_required) {
3108 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
3109 TAG_OBJECT);
3110
3111 InitializeNewString(result,
3112 length,
3113 Heap::kConsStringMapRootIndex,
3114 scratch1,
3115 scratch2);
3116 }
3117
3118
3119 void MacroAssembler::AllocateAsciiConsString(Register result,
3120 Register length,
3121 Register scratch1,
3122 Register scratch2,
3123 Label* gc_required) {
3124 Label allocate_new_space, install_map;
3125 AllocationFlags flags = TAG_OBJECT;
3126
3127 ExternalReference high_promotion_mode = ExternalReference::
3128 new_space_high_promotion_mode_active_address(isolate());
3129 Mov(scratch1, Operand(high_promotion_mode));
3130 Ldr(scratch1, MemOperand(scratch1));
3131 Cbz(scratch1, &allocate_new_space);
3132
3133 Allocate(ConsString::kSize,
3134 result,
3135 scratch1,
3136 scratch2,
3137 gc_required,
3138 static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
3139
3140 B(&install_map);
3141
3142 Bind(&allocate_new_space);
3143 Allocate(ConsString::kSize,
3144 result,
3145 scratch1,
3146 scratch2,
3147 gc_required,
3148 flags);
3149
3150 Bind(&install_map);
3151
3152 InitializeNewString(result,
3153 length,
3154 Heap::kConsAsciiStringMapRootIndex,
3155 scratch1,
3156 scratch2);
3157 }
3158
3159
3160 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3161 Register length,
3162 Register scratch1,
3163 Register scratch2,
3164 Label* gc_required) {
3165 ASSERT(!AreAliased(result, length, scratch1, scratch2));
3166 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3167 TAG_OBJECT);
3168
3169 InitializeNewString(result,
3170 length,
3171 Heap::kSlicedStringMapRootIndex,
3172 scratch1,
3173 scratch2);
3174 }
3175
3176
3177 void MacroAssembler::AllocateAsciiSlicedString(Register result,
3178 Register length,
3179 Register scratch1,
3180 Register scratch2,
3181 Label* gc_required) {
3182 ASSERT(!AreAliased(result, length, scratch1, scratch2));
3183 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3184 TAG_OBJECT);
3185
3186 InitializeNewString(result,
3187 length,
3188 Heap::kSlicedAsciiStringMapRootIndex,
3189 scratch1,
3190 scratch2);
3191 }
3192
3193
3194 // Allocates a heap number or jumps to the gc_required label if the young
3195 // space is full and a scavenge is needed.
3196 void MacroAssembler::AllocateHeapNumber(Register result,
3197 Label* gc_required,
3198 Register scratch1,
3199 Register scratch2,
3200 Register heap_number_map) {
3201 // Allocate an object in the heap for the heap number and tag it as a heap
3202 // object.
3203 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3204 TAG_OBJECT);
3205
3206 // Store heap number map in the allocated object.
3207 if (heap_number_map.Is(NoReg)) {
3208 heap_number_map = scratch1;
3209 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3210 }
3211 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3212 Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3213 }
3214
3215
3216 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3217 DoubleRegister value,
3218 Label* gc_required,
3219 Register scratch1,
3220 Register scratch2,
3221 Register heap_number_map) {
3222 // TODO(all): Check if it would be more efficient to use STP to store both
3223 // the map and the value.
3224 AllocateHeapNumber(result, gc_required, scratch1, scratch2, heap_number_map);
3225 Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3226 }
3227
3228
3229 void MacroAssembler::JumpIfObjectType(Register object,
3230 Register map,
3231 Register type_reg,
3232 InstanceType type,
3233 Label* if_cond_pass,
3234 Condition cond) {
3235 CompareObjectType(object, map, type_reg, type);
3236 B(cond, if_cond_pass);
3237 }
3238
3239
3240 void MacroAssembler::JumpIfNotObjectType(Register object,
3241 Register map,
3242 Register type_reg,
3243 InstanceType type,
3244 Label* if_not_object) {
3245 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
3246 }
3247
3248
3249 // Sets condition flags based on comparison, and returns type in type_reg.
3250 void MacroAssembler::CompareObjectType(Register object,
3251 Register map,
3252 Register type_reg,
3253 InstanceType type) {
3254 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3255 CompareInstanceType(map, type_reg, type);
3256 }
3257
3258
3259 // Sets condition flags based on comparison, and returns type in type_reg.
3260 void MacroAssembler::CompareInstanceType(Register map,
3261 Register type_reg,
3262 InstanceType type) {
3263 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3264 Cmp(type_reg, type);
3265 }
3266
3267
3268 void MacroAssembler::CompareMap(Register obj,
3269 Register scratch,
3270 Handle<Map> map,
3271 Label* early_success) {
3272 // TODO(jbramley): The early_success label isn't used. Remove it.
3273 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3274 CompareMap(scratch, map, early_success);
3275 }
3276
3277
3278 void MacroAssembler::CompareMap(Register obj_map,
3279 Handle<Map> map,
3280 Label* early_success) {
3281 // TODO(jbramley): The early_success label isn't used. Remove it.
3282 Cmp(obj_map, Operand(map));
3283 }
3284
3285
3286 void MacroAssembler::CheckMap(Register obj,
3287 Register scratch,
3288 Handle<Map> map,
3289 Label* fail,
3290 SmiCheckType smi_check_type) {
3291 if (smi_check_type == DO_SMI_CHECK) {
3292 JumpIfSmi(obj, fail);
3293 }
3294
3295 Label success;
3296 CompareMap(obj, scratch, map, &success);
3297 B(ne, fail);
3298 Bind(&success);
3299 }
3300
3301
3302 void MacroAssembler::CheckMap(Register obj,
3303 Register scratch,
3304 Heap::RootListIndex index,
3305 Label* fail,
3306 SmiCheckType smi_check_type) {
3307 if (smi_check_type == DO_SMI_CHECK) {
3308 JumpIfSmi(obj, fail);
3309 }
3310 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3311 JumpIfNotRoot(scratch, index, fail);
3312 }
3313
3314
3315 void MacroAssembler::CheckMap(Register obj_map,
3316 Handle<Map> map,
3317 Label* fail,
3318 SmiCheckType smi_check_type) {
3319 if (smi_check_type == DO_SMI_CHECK) {
3320 JumpIfSmi(obj_map, fail);
3321 }
3322 Label success;
3323 CompareMap(obj_map, map, &success);
3324 B(ne, fail);
3325 Bind(&success);
3326 }
3327
3328
3329 void MacroAssembler::DispatchMap(Register obj,
3330 Register scratch,
3331 Handle<Map> map,
3332 Handle<Code> success,
3333 SmiCheckType smi_check_type) {
3334 Label fail;
3335 if (smi_check_type == DO_SMI_CHECK) {
3336 JumpIfSmi(obj, &fail);
3337 }
3338 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3339 Cmp(scratch, Operand(map));
3340 B(ne, &fail);
3341 Jump(success, RelocInfo::CODE_TARGET);
3342 Bind(&fail);
3343 }
3344
3345
3346 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
3347 Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset));
3348 Ldrb(Tmp0(), FieldMemOperand(Tmp0(), Map::kBitFieldOffset));
3349 Tst(Tmp0(), mask);
3350 }
3351
3352
3353 void MacroAssembler::LoadElementsKind(Register result, Register object) {
3354 // Load map.
3355 __ Ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
3356 // Load the map's "bit field 2".
3357 __ Ldrb(result, FieldMemOperand(result, Map::kBitField2Offset));
3358 // Retrieve elements_kind from bit field 2.
3359 __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
3360 }
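
// A standalone C++ sketch (not part of this file) of the bit-field extraction
// that the Ubfx above performs: take 'width' bits of "bit field 2" starting at
// 'shift'. The shift and width are parameters here, standing in for
// Map::kElementsKindShift and Map::kElementsKindBitCount.
unsigned ExtractElementsKindSketch(unsigned bit_field2,
                                   unsigned shift,
                                   unsigned width) {
  return (bit_field2 >> shift) & ((1u << width) - 1u);
}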
3361
3362
3363 void MacroAssembler::TryGetFunctionPrototype(Register function,
3364 Register result,
3365 Register scratch,
3366 Label* miss,
3367 BoundFunctionAction action) {
3368 ASSERT(!AreAliased(function, result, scratch));
3369
3370 // Check that the receiver isn't a smi.
3371 JumpIfSmi(function, miss);
3372
3373 // Check that the function really is a function. Load map into result reg.
3374 JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
3375
3376 if (action == kMissOnBoundFunction) {
3377 Register scratch_w = scratch.W();
3378 Ldr(scratch,
3379 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3380 // On 64-bit platforms, the compiler hints field is not a smi. See the
3381 // definition of kCompilerHintsOffset in src/objects.h.
3382 Ldr(scratch_w,
3383 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3384 Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
3385 }
3386
3387 // Make sure that the function has an instance prototype.
3388 Label non_instance;
3389 Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3390 Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
3391
3392 // Get the prototype or initial map from the function.
3393 Ldr(result,
3394 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3395
3396 // If the prototype or initial map is the hole, don't return it and simply
3397 // miss the cache instead. This will allow us to allocate a prototype object
3398 // on-demand in the runtime system.
3399 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss);
3400
3401 // If the function does not have an initial map, we're done.
3402 Label done;
3403 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done);
3404
3405 // Get the prototype from the initial map.
3406 Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
3407 B(&done);
3408
3409 // Non-instance prototype: fetch prototype from constructor field in initial
3410 // map.
3411 Bind(&non_instance);
3412 Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
3413
3414 // All done.
3415 Bind(&done);
3416 }
3417
3418
3419 void MacroAssembler::CompareRoot(const Register& obj,
3420 Heap::RootListIndex index) {
3421 ASSERT(!AreAliased(obj, Tmp0()));
3422 LoadRoot(Tmp0(), index);
3423 Cmp(obj, Tmp0());
3424 }
3425
3426
3427 void MacroAssembler::JumpIfRoot(const Register& obj,
3428 Heap::RootListIndex index,
3429 Label* if_equal) {
3430 CompareRoot(obj, index);
3431 B(eq, if_equal);
3432 }
3433
3434
3435 void MacroAssembler::JumpIfNotRoot(const Register& obj,
3436 Heap::RootListIndex index,
3437 Label* if_not_equal) {
3438 CompareRoot(obj, index);
3439 B(ne, if_not_equal);
3440 }
3441
3442
3443 void MacroAssembler::CompareAndSplit(const Register& lhs,
3444 const Operand& rhs,
3445 Condition cond,
3446 Label* if_true,
3447 Label* if_false,
3448 Label* fall_through) {
3449 if ((if_true == if_false) && (if_false == fall_through)) {
3450 // Fall through.
3451 } else if (if_true == if_false) {
3452 B(if_true);
3453 } else if (if_false == fall_through) {
3454 CompareAndBranch(lhs, rhs, cond, if_true);
3455 } else if (if_true == fall_through) {
3456 CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false);
3457 } else {
3458 CompareAndBranch(lhs, rhs, cond, if_true);
3459 B(if_false);
3460 }
3461 }
3462
3463
3464 void MacroAssembler::TestAndSplit(const Register& reg,
3465 uint64_t bit_pattern,
3466 Label* if_all_clear,
3467 Label* if_any_set,
3468 Label* fall_through) {
3469 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
3470 // Fall through.
3471 } else if (if_all_clear == if_any_set) {
3472 B(if_all_clear);
3473 } else if (if_all_clear == fall_through) {
3474 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3475 } else if (if_any_set == fall_through) {
3476 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
3477 } else {
3478 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3479 B(if_all_clear);
3480 }
3481 }
3482
3483
3484 void MacroAssembler::CheckFastElements(Register map,
3485 Register scratch,
3486 Label* fail) {
3487 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3488 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3489 STATIC_ASSERT(FAST_ELEMENTS == 2);
3490 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3491 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3492 Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue);
3493 B(hi, fail);
3494 }
3495
3496
3497 void MacroAssembler::CheckFastObjectElements(Register map,
3498 Register scratch,
3499 Label* fail) {
3500 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3501 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3502 STATIC_ASSERT(FAST_ELEMENTS == 2);
3503 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
3504 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3505 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
3506 // If the first comparison gave 'ls' (a smi-only kind), force 'hi'; else compare.
3507 Ccmp(scratch,
3508 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi);
3509 B(hi, fail);
3510 }
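
// CheckFastObjectElements branches to 'fail' unless the predicate below holds:
// the Cmp/Ccmp pair forces the flags to 'hi' when the first comparison gave
// 'ls' (a smi-only kind), and otherwise compares against the fast-holey limit.
// A standalone sketch of the equivalent check, with the two limits passed in
// as stand-ins for the Map::kMaximumBitField2* constants:
bool IsFastObjectBitField2Sketch(unsigned bit_field2,
                                 unsigned max_smi_value,
                                 unsigned max_object_value) {
  return (bit_field2 > max_smi_value) && (bit_field2 <= max_object_value);
}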
3511
3512
3513 void MacroAssembler::CheckFastSmiElements(Register map,
3514 Register scratch,
3515 Label* fail) {
3516 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3517 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
3518 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset));
3519 Cmp(scratch, Map::kMaximumBitField2FastHoleySmiElementValue);
3520 B(hi, fail);
3521 }
3522
3523
3524 // Note: The ARM version of this clobbers elements_reg, but this version does
3525 // not. Some uses of this in A64 assume that elements_reg will be preserved.
3526 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3527 Register key_reg,
3528 Register elements_reg,
3529 Register scratch1,
3530 FPRegister fpscratch1,
3531 FPRegister fpscratch2,
3532 Label* fail,
3533 int elements_offset) {
3534 ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
3535 Label store_num;
3536
3537 // Speculatively convert the smi to a double - all smis can be exactly
3538 // represented as a double.
3539 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
3540
3541 // If value_reg is a smi, we're done.
3542 JumpIfSmi(value_reg, &store_num);
3543
3544 // Ensure that the object is a heap number.
3545 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
3546 fail, DONT_DO_SMI_CHECK);
3547
3548 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
3549 Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
3550
3551 // Check for NaN by comparing the number to itself: NaN comparison will
3552 // report unordered, indicated by the overflow flag being set.
3553 Fcmp(fpscratch1, fpscratch1);
3554 Fcsel(fpscratch1, fpscratch2, fpscratch1, vs);
3555
3556 // Store the result.
3557 Bind(&store_num);
3558 Add(scratch1, elements_reg,
3559 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2));
3560 Str(fpscratch1,
3561 FieldMemOperand(scratch1,
3562 FixedDoubleArray::kHeaderSize - elements_offset));
3563 }
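
// The Fcmp/Fcsel pair above canonicalizes NaNs before they are stored in a
// FixedDoubleArray: comparing a NaN with itself reports "unordered" (vs), and
// the Fcsel then selects the canonical NaN. A standalone sketch, where
// 'canonical_nan' stands in for
// FixedDoubleArray::canonical_not_the_hole_nan_as_double():
double CanonicalizeForDoubleArraySketch(double value, double canonical_nan) {
  // (value != value) is true exactly when value is a NaN, mirroring the
  // unordered result of Fcmp(value, value).
  return (value != value) ? canonical_nan : value;
}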
3564
3565
3566 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3567 return has_frame_ || !stub->SometimesSetsUpAFrame();
3568 }
3569
3570
3571 void MacroAssembler::IndexFromHash(Register hash, Register index) {
3572 // If the hash field contains an array index, pick it out. The assert checks
3573 // that the constants for the maximum number of digits for an array index
3574 // cached in the hash field and the number of bits reserved for it do not
3575 // conflict.
3576 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
3577 (1 << String::kArrayIndexValueBits));
3578 // We want the smi-tagged index in 'index'. The cached array index is stored
3579 // above the low kHashShift bits of the hash field.
3580 STATIC_ASSERT(kSmiTag == 0);
3581 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
3582 SmiTag(index, hash);
3583 }
3584
3585
3586 void MacroAssembler::EmitSeqStringSetCharCheck(
3587 Register string,
3588 Register index,
3589 SeqStringSetCharCheckIndexType index_type,
3590 Register scratch,
3591 uint32_t encoding_mask) {
3592 ASSERT(!AreAliased(string, index, scratch));
3593
3594 if (index_type == kIndexIsSmi) {
3595 AssertSmi(index);
3596 }
3597
3598 // Check that string is an object.
3599 AssertNotSmi(string, kNonObject);
3600
3601 // Check that string has an appropriate map.
3602 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
3603 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3604
3605 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
3606 Cmp(scratch, encoding_mask);
3607 Check(eq, kUnexpectedStringType);
3608
3609 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
3610 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
3611 Check(lt, kIndexIsTooLarge);
3612
3613 ASSERT_EQ(0, Smi::FromInt(0));
3614 Cmp(index, 0);
3615 Check(ge, kIndexIsNegative);
3616 }
3617
3618
3619 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
3620 Register scratch,
3621 Label* miss) {
3622 // TODO(jbramley): Sort out the uses of Tmp0() and Tmp1() in this function.
3623 // The ARM version takes two scratch registers, and that should be enough for
3624 // all of the checks.
3625
3626 Label same_contexts;
3627
3628 ASSERT(!AreAliased(holder_reg, scratch));
3629
3630 // Load current lexical context from the stack frame.
3631 Ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
3632 // In debug mode, make sure the lexical context is set.
3633 #ifdef DEBUG
3634 Cmp(scratch, 0);
3635 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
3636 #endif
3637
3638 // Load the native context of the current context.
3639 int offset =
3640 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
3641 Ldr(scratch, FieldMemOperand(scratch, offset));
3642 Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
3643
3644 // Check the context is a native context.
3645 if (emit_debug_code()) {
3646 // Read the first word and compare to the native_context_map.
3647 Register temp = Tmp1();
3648 Ldr(temp, FieldMemOperand(scratch, HeapObject::kMapOffset));
3649 CompareRoot(temp, Heap::kNativeContextMapRootIndex);
3650 Check(eq, kExpectedNativeContext);
3651 }
3652
3653 // Check if both contexts are the same.
3654 ldr(Tmp0(), FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
3655 cmp(scratch, Tmp0());
3656 b(&same_contexts, eq);
3657
3658 // Check the context is a native context.
3659 if (emit_debug_code()) {
3660 // Move Tmp0() into a different register, as CompareRoot will use it.
3661 Register temp = Tmp1();
3662 mov(temp, Tmp0());
3663 CompareRoot(temp, Heap::kNullValueRootIndex);
3664 Check(ne, kExpectedNonNullContext);
3665
3666 Ldr(temp, FieldMemOperand(temp, HeapObject::kMapOffset));
3667 CompareRoot(temp, Heap::kNativeContextMapRootIndex);
3668 Check(eq, kExpectedNativeContext);
3669
3670 // Consider Tmp0() to have been clobbered by the MacroAssembler (CompareRoot
3671 // uses it), and reload it with its value.
3672 ldr(Tmp0(), FieldMemOperand(holder_reg,
3673 JSGlobalProxy::kNativeContextOffset));
3674 }
3675
3676 // Check that the security token in the calling global object is
3677 // compatible with the security token in the receiving global
3678 // object.
3679 int token_offset = Context::kHeaderSize +
3680 Context::SECURITY_TOKEN_INDEX * kPointerSize;
3681
3682 ldr(scratch, FieldMemOperand(scratch, token_offset));
3683 ldr(Tmp0(), FieldMemOperand(Tmp0(), token_offset));
3684 cmp(scratch, Tmp0());
3685 b(miss, ne);
3686
3687 bind(&same_contexts);
3688 }
3689
3690
3691 // Compute the hash code from the untagged key. This must be kept in sync with
3692 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
3693 // code-stub-hydrogen.cc
3694 void MacroAssembler::GetNumberHash(Register key, Register scratch) {
3695 ASSERT(!AreAliased(key, scratch));
3696
3697 // Xor original key with a seed.
3698 LoadRoot(scratch, Heap::kHashSeedRootIndex);
3699 Eor(key, key, Operand::UntagSmi(scratch));
3700
3701 // The algorithm uses 32-bit integer values.
3702 key = key.W();
3703 scratch = scratch.W();
3704
3705 // Compute the hash code from the untagged key. This must be kept in sync
3706 // with ComputeIntegerHash in utils.h.
3707 //
3708 // hash = ~hash + (hash << 15);
3709 Mvn(scratch, key);
3710 Add(key, scratch, Operand(key, LSL, 15));
3711 // hash = hash ^ (hash >> 12);
3712 Eor(key, key, Operand(key, LSR, 12));
3713 // hash = hash + (hash << 2);
3714 Add(key, key, Operand(key, LSL, 2));
3715 // hash = hash ^ (hash >> 4);
3716 Eor(key, key, Operand(key, LSR, 4));
3717 // hash = hash * 2057;
3718 Mov(scratch, Operand(key, LSL, 11));
3719 Add(key, key, Operand(key, LSL, 3));
3720 Add(key, key, scratch);
3721 // hash = hash ^ (hash >> 16);
3722 Eor(key, key, Operand(key, LSR, 16));
3723 }
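
// A standalone C++ restatement of the shift/add sequence above, provided only
// to make the generated arithmetic easier to follow; the authoritative version
// is ComputeIntegerHash in utils.h.
#include <cstdint>

uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;    // Eor with the untagged hash seed.
  hash = ~hash + (hash << 15);   // Mvn + Add.
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;            // Emitted as hash + (hash << 3) + (hash << 11).
  hash = hash ^ (hash >> 16);
  return hash;
}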
3724
3725
3726 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
3727 Register elements,
3728 Register key,
3729 Register result,
3730 Register scratch0,
3731 Register scratch1,
3732 Register scratch2,
3733 Register scratch3) {
3734 ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
3735
3736 Label done;
3737
3738 SmiUntag(scratch0, key);
3739 GetNumberHash(scratch0, scratch1);
3740
3741 // Compute the capacity mask.
3742 Ldrsw(scratch1,
3743 UntagSmiFieldMemOperand(elements,
3744 SeededNumberDictionary::kCapacityOffset));
3745 Sub(scratch1, scratch1, 1);
3746
3747 // Generate an unrolled loop that performs a few probes before giving up.
3748 for (int i = 0; i < kNumberDictionaryProbes; i++) {
3749 // Compute the masked index: (hash + i + i * i) & mask.
3750 if (i > 0) {
3751 Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
3752 } else {
3753 Mov(scratch2, scratch0);
3754 }
3755 And(scratch2, scratch2, scratch1);
3756
3757 // Scale the index by multiplying by the element size.
3758 ASSERT(SeededNumberDictionary::kEntrySize == 3);
3759 Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
3760
3761 // Check if the key is identical to the name.
3762 Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
3763 Ldr(scratch3,
3764 FieldMemOperand(scratch2,
3765 SeededNumberDictionary::kElementsStartOffset));
3766 Cmp(key, scratch3);
3767 if (i != (kNumberDictionaryProbes - 1)) {
3768 B(eq, &done);
3769 } else {
3770 B(ne, miss);
3771 }
3772 }
3773
3774 Bind(&done);
3775 // Check that the value is a normal property.
3776 const int kDetailsOffset =
3777 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
3778 Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
3779 TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
3780
3781 // Get the value at the masked, scaled index and return.
3782 const int kValueOffset =
3783 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
3784 Ldr(result, FieldMemOperand(scratch2, kValueOffset));
3785 }
3786
3787
3788 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
3789 Register address,
3790 Register scratch,
3791 SaveFPRegsMode fp_mode,
3792 RememberedSetFinalAction and_then) {
3793 ASSERT(!AreAliased(object, address, scratch));
3794 Label done, store_buffer_overflow;
3795 if (emit_debug_code()) {
3796 Label ok;
3797 JumpIfNotInNewSpace(object, &ok);
3798 Abort(kRememberedSetPointerInNewSpace);
3799 bind(&ok);
3800 }
3801 // Load store buffer top.
3802 Mov(Tmp0(), Operand(ExternalReference::store_buffer_top(isolate())));
3803 Ldr(scratch, MemOperand(Tmp0()));
3804 // Store pointer to buffer and increment buffer top.
3805 Str(address, MemOperand(scratch, kPointerSize, PostIndex));
3806 // Write back new top of buffer.
3807 Str(scratch, MemOperand(Tmp0()));
3808 // Check for the end of the store buffer, and call the overflow stub if it
3809 // has been reached.
3810 ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
3811 (1 << (14 + kPointerSizeLog2)));
3812 if (and_then == kFallThroughAtEnd) {
3813 Tbz(scratch, (14 + kPointerSizeLog2), &done);
3814 } else {
3815 ASSERT(and_then == kReturnAtEnd);
3816 Tbnz(scratch, (14 + kPointerSizeLog2), &store_buffer_overflow);
3817 Ret();
3818 }
3819
3820 Bind(&store_buffer_overflow);
3821 Push(lr);
3822 StoreBufferOverflowStub store_buffer_overflow_stub =
3823 StoreBufferOverflowStub(fp_mode);
3824 CallStub(&store_buffer_overflow_stub);
3825 Pop(lr);
3826
3827 Bind(&done);
3828 if (and_then == kReturnAtEnd) {
3829 Ret();
3830 }
3831 }
3832
3833
3834 void MacroAssembler::PopSafepointRegisters() {
3835 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
3836 PopXRegList(kSafepointSavedRegisters);
3837 Drop(num_unsaved);
3838 }
3839
3840
3841 void MacroAssembler::PushSafepointRegisters() {
3842 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
3843 // adjust the stack for unsaved registers.
3844 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
3845 ASSERT(num_unsaved >= 0);
3846 Claim(num_unsaved);
3847 PushXRegList(kSafepointSavedRegisters);
3848 }
3849
3850
3851 void MacroAssembler::PushSafepointFPRegisters() {
3852 PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize,
3853 FPRegister::kAllocatableFPRegisters));
3854 }
3855
3856
3857 void MacroAssembler::PopSafepointFPRegisters() {
3858 PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSize,
3859 FPRegister::kAllocatableFPRegisters));
3860 }
3861
3862
3863 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
3864 // Make sure the safepoint registers list is what we expect.
3865 ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
3866
3867 // Safepoint registers are stored contiguously on the stack, but not all the
3868 // registers are saved. The following registers are excluded:
3869 // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
3870 // the macro assembler.
3871 // - x28 (jssp) because the JS stack pointer doesn't need to be included in
3872 // safepoint registers.
3873 // - x31 (csp) because the system stack pointer doesn't need to be included
3874 // in safepoint registers.
3875 //
3876 // This function implements the mapping of register code to index into the
3877 // safepoint register slots.
3878 if ((reg_code >= 0) && (reg_code <= 15)) {
3879 return reg_code;
3880 } else if ((reg_code >= 18) && (reg_code <= 27)) {
3881 // Skip ip0 and ip1.
3882 return reg_code - 2;
3883 } else if ((reg_code == 29) || (reg_code == 30)) {
3884 // Also skip jssp.
3885 return reg_code - 3;
3886 } else {
3887 // This register has no safepoint register slot.
3888 UNREACHABLE();
3889 return -1;
3890 }
3891 }
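
// For reference, the same mapping restated as plain C++, with a few worked
// examples: x0 -> 0, x15 -> 15, x18 -> 16, x27 -> 25, x29 (fp) -> 26,
// x30 (lr) -> 27; x16, x17, x28 and x31 have no slot.
int SafepointRegisterStackIndexSketch(int reg_code) {
  if ((reg_code >= 0) && (reg_code <= 15)) return reg_code;
  if ((reg_code >= 18) && (reg_code <= 27)) return reg_code - 2;  // Skip ip0, ip1.
  if ((reg_code == 29) || (reg_code == 30)) return reg_code - 3;  // Also skip jssp.
  return -1;  // No safepoint slot.
}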
3892
3893
3894 void MacroAssembler::CheckPageFlagSet(const Register& object,
3895 const Register& scratch,
3896 int mask,
3897 Label* if_any_set) {
3898 And(scratch, object, ~Page::kPageAlignmentMask);
3899 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3900 TestAndBranchIfAnySet(scratch, mask, if_any_set);
3901 }
3902
3903
3904 void MacroAssembler::CheckPageFlagClear(const Register& object,
3905 const Register& scratch,
3906 int mask,
3907 Label* if_all_clear) {
3908 And(scratch, object, ~Page::kPageAlignmentMask);
3909 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3910 TestAndBranchIfAllClear(scratch, mask, if_all_clear);
3911 }
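
// Both helpers above perform the same two steps: mask the (possibly tagged)
// object address down to its page-aligned MemoryChunk and test bits in the
// flags word stored in the chunk header. A standalone sketch, with the
// alignment mask and flags offset passed in as stand-ins for
// Page::kPageAlignmentMask and MemoryChunk::kFlagsOffset:
#include <cstdint>

bool PageFlagsAnySetSketch(uintptr_t object,
                           uintptr_t page_alignment_mask,
                           uintptr_t flags_offset,
                           uintptr_t mask) {
  uintptr_t chunk = object & ~page_alignment_mask;                        // And.
  uintptr_t flags = *reinterpret_cast<uintptr_t*>(chunk + flags_offset);  // Ldr.
  return (flags & mask) != 0;                                             // Tst.
}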
3912
3913
3914 void MacroAssembler::RecordWriteField(
3915 Register object,
3916 int offset,
3917 Register value,
3918 Register scratch,
3919 LinkRegisterStatus lr_status,
3920 SaveFPRegsMode save_fp,
3921 RememberedSetAction remembered_set_action,
3922 SmiCheck smi_check) {
3923 // First, check if a write barrier is even needed. The tests below
3924 // catch stores of Smis.
3925 Label done;
3926
3927 // Skip the barrier if writing a smi.
3928 if (smi_check == INLINE_SMI_CHECK) {
3929 JumpIfSmi(value, &done);
3930 }
3931
3932 // Although the object register is tagged, the offset is relative to the start
3933 // of the object, so offset must be a multiple of kPointerSize.
3934 ASSERT(IsAligned(offset, kPointerSize));
3935
3936 Add(scratch, object, offset - kHeapObjectTag);
3937 if (emit_debug_code()) {
3938 Label ok;
3939 Tst(scratch, (1 << kPointerSizeLog2) - 1);
3940 B(eq, &ok);
3941 Abort(kUnalignedCellInWriteBarrier);
3942 Bind(&ok);
3943 }
3944
3945 RecordWrite(object,
3946 scratch,
3947 value,
3948 lr_status,
3949 save_fp,
3950 remembered_set_action,
3951 OMIT_SMI_CHECK);
3952
3953 Bind(&done);
3954
3955 // Clobber clobbered input registers when running with the debug-code flag
3956 // turned on to provoke errors.
3957 if (emit_debug_code()) {
3958 Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
3959 Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
3960 }
3961 }
3962
3963
3964 // Will clobber: object, address, value, Tmp0(), Tmp1().
3965 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
3966 //
3967 // The register 'object' contains a heap object pointer. The heap object tag is
3968 // shifted away.
3969 void MacroAssembler::RecordWrite(Register object,
3970 Register address,
3971 Register value,
3972 LinkRegisterStatus lr_status,
3973 SaveFPRegsMode fp_mode,
3974 RememberedSetAction remembered_set_action,
3975 SmiCheck smi_check) {
3976 ASM_LOCATION("MacroAssembler::RecordWrite");
3977 ASSERT(!AreAliased(object, value));
3978
3979 if (emit_debug_code()) {
3980 Ldr(Tmp0(), MemOperand(address));
3981 Cmp(Tmp0(), value);
3982 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
3983 }
3984
3985 // Count number of write barriers in generated code.
3986 isolate()->counters()->write_barriers_static()->Increment();
3987 // TODO(mstarzinger): Dynamic counter missing.
3988
3989 // First, check if a write barrier is even needed. The tests below
3990 // catch stores of smis and stores into the young generation.
3991 Label done;
3992
3993 if (smi_check == INLINE_SMI_CHECK) {
3994 ASSERT_EQ(0, kSmiTag);
3995 JumpIfSmi(value, &done);
3996 }
3997
3998 CheckPageFlagClear(value,
3999 value, // Used as scratch.
4000 MemoryChunk::kPointersToHereAreInterestingMask,
4001 &done);
4002 CheckPageFlagClear(object,
4003 value, // Used as scratch.
4004 MemoryChunk::kPointersFromHereAreInterestingMask,
4005 &done);
4006
4007 // Record the actual write.
4008 if (lr_status == kLRHasNotBeenSaved) {
4009 Push(lr);
4010 }
4011 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
4012 CallStub(&stub);
4013 if (lr_status == kLRHasNotBeenSaved) {
4014 Pop(lr);
4015 }
4016
4017 Bind(&done);
4018
4019 // Clobber clobbered registers when running with the debug-code flag
4020 // turned on to provoke errors.
4021 if (emit_debug_code()) {
4022 Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
4023 Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
4024 }
4025 }
4026
4027
4028 void MacroAssembler::AssertHasValidColor(const Register& reg) {
4029 if (emit_debug_code()) {
4030 // The bit sequence is backward. The first character in the string
4031 // represents the least significant bit.
4032 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4033
4034 Label color_is_valid;
4035 Tbnz(reg, 0, &color_is_valid);
4036 Tbz(reg, 1, &color_is_valid);
4037 Abort(kUnexpectedColorFound);
4038 Bind(&color_is_valid);
4039 }
4040 }
4041
4042
4043 void MacroAssembler::GetMarkBits(Register addr_reg,
4044 Register bitmap_reg,
4045 Register shift_reg) {
4046 ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg, no_reg));
4047 // addr_reg is divided into fields:
4048 // |63 page base 20|19 high 8|7 shift 3|2 0|
4049 // 'high' gives the index of the cell holding color bits for the object.
4050 // 'shift' gives the offset in the cell for this object's color.
4051 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
4052 Ubfx(Tmp0(), addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
4053 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
4054 Add(bitmap_reg, bitmap_reg, Operand(Tmp0(), LSL, Bitmap::kBytesPerCellLog2));
4055 // bitmap_reg:
4056 // |63 page base 20|19 zeros 15|14 high 3|2 0|
4057 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
4058 }
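
// Standalone sketch of the address arithmetic above, using the field widths
// from the diagram (bits [2:0] ignored, [7:3] shift, [19:8] cell index,
// [63:20] page base). The concrete constants below are assumptions that match
// that diagram (8-byte pointers, 32 mark bits per 4-byte cell, 1MB pages);
// the MemoryChunk::kHeaderSize offset is applied later, at the load.
#include <cstdint>

void GetMarkBitsSketch(uintptr_t addr, uintptr_t* cell_addr, unsigned* shift) {
  const unsigned kPointerSizeLog2 = 3;
  const unsigned kBitsPerCellLog2 = 5;
  const unsigned kBytesPerCellLog2 = 2;
  const unsigned kPageSizeBits = 20;
  const unsigned kShiftBits = kPointerSizeLog2 + kBitsPerCellLog2;  // 8.

  uintptr_t page_base = addr & ~((uintptr_t(1) << kPageSizeBits) - 1);  // Bic.
  uintptr_t cell_index =
      (addr >> kShiftBits) &
      ((uintptr_t(1) << (kPageSizeBits - kShiftBits)) - 1);             // Ubfx.
  *cell_addr = page_base + (cell_index << kBytesPerCellLog2);           // Add, LSL.
  *shift = unsigned((addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1));
}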
4059
4060
4061 void MacroAssembler::HasColor(Register object,
4062 Register bitmap_scratch,
4063 Register shift_scratch,
4064 Label* has_color,
4065 int first_bit,
4066 int second_bit) {
4067 // See mark-compact.h for color definitions.
4068 ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));
4069
4070 GetMarkBits(object, bitmap_scratch, shift_scratch);
4071 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4072 // Shift the bitmap down to get the color of the object in bits [1:0].
4073 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
4074
4075 AssertHasValidColor(bitmap_scratch);
4076
4077 // These bit sequences are backwards. The first character in the string
4078 // represents the least significant bit.
4079 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4080 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4081 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
4082
4083 // Check for the color.
4084 if (first_bit == 0) {
4085 // Checking for white.
4086 ASSERT(second_bit == 0);
4087 // We only need to test the first bit.
4088 Tbz(bitmap_scratch, 0, has_color);
4089 } else {
4090 Label other_color;
4091 // Checking for grey or black.
4092 Tbz(bitmap_scratch, 0, &other_color);
4093 if (second_bit == 0) {
4094 Tbz(bitmap_scratch, 1, has_color);
4095 } else {
4096 Tbnz(bitmap_scratch, 1, has_color);
4097 }
4098 Bind(&other_color);
4099 }
4100
4101 // Fall through if it does not have the right color.
4102 }
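
// The two mark bits, read least-significant-bit first as in the asserts above,
// decode as: "00" = white, "10" = black, "11" = grey, "01" = impossible. A
// tiny standalone decoder for reference:
enum MarkColorSketch { kWhiteSketch, kBlackSketch, kGreySketch, kImpossibleSketch };

MarkColorSketch DecodeMarkColorSketch(unsigned two_bits) {
  bool bit0 = (two_bits & 1) != 0;
  bool bit1 = (two_bits & 2) != 0;
  if (!bit0) return bit1 ? kImpossibleSketch : kWhiteSketch;
  return bit1 ? kGreySketch : kBlackSketch;
}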
4103
4104
4105 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
4106 Register scratch,
4107 Label* if_deprecated) {
4108 if (map->CanBeDeprecated()) {
4109 Mov(scratch, Operand(map));
4110 Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset));
4111 TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
4112 }
4113 }
4114
4115
4116 void MacroAssembler::JumpIfBlack(Register object,
4117 Register scratch0,
4118 Register scratch1,
4119 Label* on_black) {
4120 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4121 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
4122 }
4123
4124
4125 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
4126 Register object,
4127 Register scratch0,
4128 Register scratch1,
4129 Label* found) {
4130 ASSERT(!AreAliased(object, scratch0, scratch1));
4131 Factory* factory = isolate()->factory();
4132 Register current = scratch0;
4133 Label loop_again;
4134
4135 // Scratch contains elements pointer.
4136 Mov(current, object);
4137
4138 // Loop based on the map going up the prototype chain.
4139 Bind(&loop_again);
4140 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4141 Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4142 Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
4143 CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
4144 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4145 CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
4146 }
4147
4148
4149 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
4150 Register result) {
4151 ASSERT(!result.Is(ldr_location));
4152 const uint32_t kLdrLitOffset_lsb = 5;
4153 const uint32_t kLdrLitOffset_width = 19;
4154 Ldr(result, MemOperand(ldr_location));
4155 if (emit_debug_code()) {
4156 And(result, result, LoadLiteralFMask);
4157 Cmp(result, LoadLiteralFixed);
4158 Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
4159 // The instruction was clobbered. Reload it.
4160 Ldr(result, MemOperand(ldr_location));
4161 }
4162 Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
4163 Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
4164 }
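
// Standalone sketch of the extraction above: an LDR (literal) instruction
// encodes a signed 19-bit word offset in bits [23:5], scaled by 4 bytes and
// taken relative to the instruction itself. This assumes the standard A64
// encoding, which is what the Sbfx/Add pair relies on.
#include <cstdint>

uintptr_t LdrLiteralTargetSketch(uintptr_t ldr_location, uint32_t instruction) {
  int32_t imm19 = (instruction >> 5) & 0x7FFFF;            // Bits [23:5] (Sbfx field).
  if (imm19 & (1 << 18)) imm19 -= (1 << 19);               // Sign extend.
  return ldr_location + static_cast<intptr_t>(imm19) * 4;  // Scale by the word size.
}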
4165
4166
4167 void MacroAssembler::EnsureNotWhite(
4168 Register value,
4169 Register bitmap_scratch,
4170 Register shift_scratch,
4171 Register load_scratch,
4172 Register length_scratch,
4173 Label* value_is_white_and_not_data) {
4174 ASSERT(!AreAliased(
4175 value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
4176
4177 // These bit sequences are backwards. The first character in the string
4178 // represents the least significant bit.
4179 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4180 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
4181 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
4182
4183 GetMarkBits(value, bitmap_scratch, shift_scratch);
4184 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4185 Lsr(load_scratch, load_scratch, shift_scratch);
4186
4187 AssertHasValidColor(load_scratch);
4188
4189 // If the value is black or grey we don't need to do anything.
4190 // Since both black and grey have a 1 in the first position and white does
4191 // not have a 1 there we only need to check one bit.
4192 Label done;
4193 Tbnz(load_scratch, 0, &done);
4194
4195 // Value is white. We check whether it is data that doesn't need scanning.
4196 Register map = load_scratch; // Holds map while checking type.
4197 Label is_data_object;
4198
4199 // Check for heap-number.
4200 Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
4201 Mov(length_scratch, HeapNumber::kSize);
4202 JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
4203
4204 // Check for strings.
4205 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4206 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4207 // If it's a string and it's not a cons string then it's an object containing
4208 // no GC pointers.
4209 Register instance_type = load_scratch;
4210 Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
4211 TestAndBranchIfAnySet(instance_type,
4212 kIsIndirectStringMask | kIsNotStringMask,
4213 value_is_white_and_not_data);
4214
4215 // It's a non-indirect (non-cons and non-slice) string.
4216 // If it's external, the length is just ExternalString::kSize.
4217 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
4218 // External strings are the only ones with the kExternalStringTag bit
4219 // set.
4220 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
4221 ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
4222 Mov(length_scratch, ExternalString::kSize);
4223 TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
4224
4225 // Sequential string, either ASCII or UC16.
4226 // The string length is loaded and untagged below; multiply it by the
4227 // character size (1 for ASCII, 2 for UC16) by shifting left by zero or one
4228 // according to the encoding bit.
4229 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
4230 Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
4231 String::kLengthOffset));
4232 Tst(instance_type, kStringEncodingMask);
4233 Cset(load_scratch, eq);
4234 Lsl(length_scratch, length_scratch, load_scratch);
4235 Add(length_scratch,
4236 length_scratch,
4237 SeqString::kHeaderSize + kObjectAlignmentMask);
4238 Bic(length_scratch, length_scratch, kObjectAlignmentMask);
4239
4240 Bind(&is_data_object);
4241 // Value is a data object, and it is white. Mark it black. Since we know
4242 // that the object is white we can make it black by flipping one bit.
4243 Register mask = shift_scratch;
4244 Mov(load_scratch, 1);
4245 Lsl(mask, load_scratch, shift_scratch);
4246
4247 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4248 Orr(load_scratch, load_scratch, mask);
4249 Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4250
4251 Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
4252 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
4253 Add(load_scratch, load_scratch, length_scratch);
4254 Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
4255
4256 Bind(&done);
4257 }
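
// For white data objects, the "length" computed above is the number of live
// bytes credited to the page: HeapNumber::kSize for heap numbers,
// ExternalString::kSize for external strings, and a header plus the character
// data rounded up to the object alignment for sequential strings. A standalone
// sketch, with the sizes and mask passed in as stand-ins for the V8 constants:
#include <cstdint>

uint32_t WhiteDataObjectSizeSketch(bool is_heap_number, bool is_external,
                                   bool is_two_byte, uint32_t string_length,
                                   uint32_t heap_number_size,
                                   uint32_t external_string_size,
                                   uint32_t seq_header_size,
                                   uint32_t alignment_mask) {
  if (is_heap_number) return heap_number_size;
  if (is_external) return external_string_size;
  uint32_t data_bytes = string_length << (is_two_byte ? 1 : 0);  // Lsl by Cset.
  return (seq_header_size + data_bytes + alignment_mask) & ~alignment_mask;
}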
4258
4259
4260 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
4261 if (emit_debug_code()) {
4262 Check(cond, reason);
4263 }
4264 }
4265
4266
4267
4268 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
4269 if (emit_debug_code()) {
4270 CheckRegisterIsClear(reg, reason);
4271 }
4272 }
4273
4274
4275 void MacroAssembler::AssertRegisterIsRoot(Register reg,
4276 Heap::RootListIndex index,
4277 BailoutReason reason) {
4278 // CompareRoot uses Tmp0().
4279 ASSERT(!reg.Is(Tmp0()));
4280 if (emit_debug_code()) {
4281 CompareRoot(reg, index);
4282 Check(eq, reason);
4283 }
4284 }
4285
4286
4287 void MacroAssembler::AssertFastElements(Register elements) {
4288 if (emit_debug_code()) {
4289 Register temp = Tmp1();
4290 Label ok;
4291 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
4292 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
4293 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
4294 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
4295 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4296 Bind(&ok);
4297 }
4298 }
4299
4300
4301 void MacroAssembler::AssertIsString(const Register& object) {
4302 if (emit_debug_code()) {
4303 Register temp = Tmp1();
4304 STATIC_ASSERT(kSmiTag == 0);
4305 Tst(object, Operand(kSmiTagMask));
4306 Check(ne, kOperandIsNotAString);
4307 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
4308 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
4309 Check(lo, kOperandIsNotAString);
4310 }
4311 }
4312
4313
4314 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
4315 Label ok;
4316 B(cond, &ok);
4317 Abort(reason);
4318 // Will not return here.
4319 Bind(&ok);
4320 }
4321
4322
4323 void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
4324 Label ok;
4325 Cbz(reg, &ok);
4326 Abort(reason);
4327 // Will not return here.
4328 Bind(&ok);
4329 }
4330
4331
4332 void MacroAssembler::Abort(BailoutReason reason) {
4333 #ifdef DEBUG
4334 RecordComment("Abort message: ");
4335 RecordComment(GetBailoutReason(reason));
4336
4337 if (FLAG_trap_on_abort) {
4338 Brk(0);
4339 return;
4340 }
4341 #endif
4342
4343 Label msg_address;
4344 Adr(x0, &msg_address);
4345
4346 if (use_real_aborts()) {
4347 // Split the message pointer into two SMIs to avoid the GC
4348 // trying to scan the string.
4349 STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
4350 SmiTag(x1, x0);
4351 Bic(x0, x0, kSmiShiftMask);
4352
4353 Push(x0, x1);
4354
4355 if (!has_frame_) {
4356 // We don't actually want to generate a pile of code for this, so just
4357 // claim there is a stack frame, without generating one.
4358 FrameScope scope(this, StackFrame::NONE);
4359 CallRuntime(Runtime::kAbort, 2);
4360 } else {
4361 CallRuntime(Runtime::kAbort, 2);
4362 }
4363 } else {
4364 // Call Printf directly, to report the error. The message is in x0, which is
4365 // the first argument to Printf.
4366 if (!csp.Is(StackPointer())) {
4367 Bic(csp, StackPointer(), 0xf);
4368 }
4369 CallPrintf();
4370
4371 // The CallPrintf will return, so this point is actually reachable in this
4372 // context. However:
4373 // - We're already executing an abort (which shouldn't be reachable in
4374 // valid code).
4375 // - We need a way to stop execution on both the simulator and real
4376 // hardware, and Unreachable() is the best option.
4377 Unreachable();
4378 }
4379
4380 // Emit the message string directly in the instruction stream.
4381 {
4382 BlockConstPoolScope scope(this);
4383 Bind(&msg_address);
4384 // TODO(jbramley): Since the reason is an enum, why do we still encode the
4385 // string (and a pointer to it) in the instruction stream?
4386 EmitStringData(GetBailoutReason(reason));
4387 }
4388 }
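
// Sketch of the pointer split performed above for real aborts, assuming
// kSmiShift == 32 (as the STATIC_ASSERT checks) and that kSmiShiftMask covers
// the low 32 bits: each half of the message address ends up in the upper word
// of a register, so both values look like smis and the GC will not try to scan
// them as heap pointers.
#include <cstdint>

void SplitPointerIntoSmisSketch(uint64_t ptr,
                                uint64_t* low_as_smi, uint64_t* high_as_smi) {
  *low_as_smi = ptr << 32;                     // SmiTag(x1, x0).
  *high_as_smi = ptr & ~uint64_t(0xFFFFFFFF);  // Bic(x0, x0, kSmiShiftMask).
  // The runtime can recover ptr as (*high_as_smi | (*low_as_smi >> 32)).
}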
4389
4390
4391 void MacroAssembler::LoadTransitionedArrayMapConditional(
4392 ElementsKind expected_kind,
4393 ElementsKind transitioned_kind,
4394 Register map_in_out,
4395 Register scratch,
4396 Label* no_map_match) {
4397 // Load the global or builtins object from the current context.
4398 Ldr(scratch, GlobalObjectMemOperand());
4399 Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4400
4401 // Check that the function's map is the same as the expected cached map.
4402 Ldr(scratch, ContextMemOperand(scratch, Context::JS_ARRAY_MAPS_INDEX));
4403 size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
4404 Ldr(Tmp0(), FieldMemOperand(scratch, offset));
4405 Cmp(map_in_out, Tmp0());
4406 B(ne, no_map_match);
4407
4408 // Use the transitioned cached map.
4409 offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
4410 Ldr(map_in_out, FieldMemOperand(scratch, offset));
4411 }
4412
4413
4414 void MacroAssembler::LoadInitialArrayMap(Register function_in,
4415 Register scratch,
4416 Register map_out,
4417 ArrayHasHoles holes) {
4418 ASSERT(!AreAliased(function_in, scratch, map_out));
4419 Label done;
4420 Ldr(map_out, FieldMemOperand(function_in,
4421 JSFunction::kPrototypeOrInitialMapOffset));
4422
4423 if (!FLAG_smi_only_arrays) {
4424 ElementsKind kind = (holes == kArrayCanHaveHoles) ? FAST_HOLEY_ELEMENTS
4425 : FAST_ELEMENTS;
4426 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, kind, map_out,
4427 scratch, &done);
4428 } else if (holes == kArrayCanHaveHoles) {
4429 LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
4430 FAST_HOLEY_SMI_ELEMENTS, map_out,
4431 scratch, &done);
4432 }
4433 Bind(&done);
4434 }
4435
4436
4437 void MacroAssembler::LoadArrayFunction(Register function) {
4438 // Load the global or builtins object from the current context.
4439 Ldr(function, GlobalObjectMemOperand());
4440 // Load the global context from the global or builtins object.
4441 Ldr(function,
4442 FieldMemOperand(function, GlobalObject::kGlobalContextOffset));
4443 // Load the array function from the native context.
4444 Ldr(function, ContextMemOperand(function, Context::ARRAY_FUNCTION_INDEX));
4445 }
4446
4447
4448 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4449 // Load the global or builtins object from the current context.
4450 Ldr(function, GlobalObjectMemOperand());
4451 // Load the native context from the global or builtins object.
4452 Ldr(function, FieldMemOperand(function,
4453 GlobalObject::kNativeContextOffset));
4454 // Load the function from the native context.
4455 Ldr(function, ContextMemOperand(function, index));
4456 }
4457
4458
4459 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4460 Register map,
4461 Register scratch) {
4462 // Load the initial map. The global functions all have initial maps.
4463 Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4464 if (emit_debug_code()) {
4465 Label ok, fail;
4466 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4467 B(&ok);
4468 Bind(&fail);
4469 Abort(kGlobalFunctionsMustHaveInitialMap);
4470 Bind(&ok);
4471 }
4472 }
4473
4474
4475 // This is the main Printf implementation. All other Printf variants call
4476 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
4477 void MacroAssembler::PrintfNoPreserve(const char * format,
4478 const CPURegister& arg0,
4479 const CPURegister& arg1,
4480 const CPURegister& arg2,
4481 const CPURegister& arg3) {
4482 // We cannot handle a caller-saved stack pointer. It doesn't make much sense
4483 // in most cases anyway, so this restriction shouldn't be too serious.
4484 ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
4485
4486 // We cannot print Tmp0() or Tmp1() as they're used internally by the macro
4487 // assembler. We cannot print the stack pointer because it is typically used
4488 // to preserve caller-saved registers (using other Printf variants which
4489 // depend on this helper).
4490 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg0));
4491 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg1));
4492 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg2));
4493 ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg3));
4494
4495 static const int kMaxArgCount = 4;
4496 // Assume that we have the maximum number of arguments until we know
4497 // otherwise.
4498 int arg_count = kMaxArgCount;
4499
4500 // The provided arguments.
4501 CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3};
4502
4503 // The PCS registers where the arguments need to end up.
4504 CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg};
4505
4506 // Promote FP arguments to doubles, and integer arguments to X registers.
4507 // Note that FP and integer arguments cannot be mixed, but we'll check
4508 // AreSameSizeAndType once we've processed these promotions.
4509 for (int i = 0; i < kMaxArgCount; i++) {
4510 if (args[i].IsRegister()) {
4511 // Note that we use x1 onwards, because x0 will hold the format string.
4512 pcs[i] = Register::XRegFromCode(i + 1);
4513 // For simplicity, we handle all integer arguments as X registers. An X
4514 // register argument takes the same space as a W register argument in the
4515 // PCS anyway. The only limitation is that we must explicitly clear the
4516 // top word for W register arguments as the callee will expect it to be
4517 // clear.
4518 if (!args[i].Is64Bits()) {
4519 const Register& as_x = args[i].X();
4520 And(as_x, as_x, 0x00000000ffffffff);
4521 args[i] = as_x;
4522 }
4523 } else if (args[i].IsFPRegister()) {
4524 pcs[i] = FPRegister::DRegFromCode(i);
4525 // C and C++ varargs functions (such as printf) implicitly promote float
4526 // arguments to doubles.
4527 if (!args[i].Is64Bits()) {
4528 FPRegister s(args[i]);
4529 const FPRegister& as_d = args[i].D();
4530 Fcvt(as_d, s);
4531 args[i] = as_d;
4532 }
4533 } else {
4534 // This is the first empty (NoCPUReg) argument, so use it to set the
4535 // argument count and bail out.
4536 arg_count = i;
4537 break;
4538 }
4539 }
4540 ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
4541 // Check that every remaining argument is NoCPUReg.
4542 for (int i = arg_count; i < kMaxArgCount; i++) {
4543 ASSERT(args[i].IsNone());
4544 }
4545 ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
4546 args[2], args[3],
4547 pcs[0], pcs[1],
4548 pcs[2], pcs[3]));
4549
4550 // Move the arguments into the appropriate PCS registers.
4551 //
4552 // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is
4553 // surprisingly complicated.
4554 //
4555 // * For even numbers of registers, we push the arguments and then pop them
4556 // into their final registers. This maintains 16-byte stack alignment in
4557 // case csp is the stack pointer, since we're only handling X or D
4558 // registers at this point.
4559 //
4560 // * For odd numbers of registers, we push and pop all but one register in
4561 // the same way, but the left-over register is moved directly, since we
4562 // can always safely move one register without clobbering any source.
4563 if (arg_count >= 4) {
4564 Push(args[3], args[2], args[1], args[0]);
4565 } else if (arg_count >= 2) {
4566 Push(args[1], args[0]);
4567 }
4568
4569 if ((arg_count % 2) != 0) {
4570 // Move the left-over register directly.
4571 const CPURegister& leftover_arg = args[arg_count - 1];
4572 const CPURegister& leftover_pcs = pcs[arg_count - 1];
4573 if (leftover_arg.IsRegister()) {
4574 Mov(Register(leftover_pcs), Register(leftover_arg));
4575 } else {
4576 Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));
4577 }
4578 }
4579
4580 if (arg_count >= 4) {
4581 Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
4582 } else if (arg_count >= 2) {
4583 Pop(pcs[0], pcs[1]);
4584 }
4585
4586 // Load the format string into x0, as per the procedure-call standard.
4587 //
4588 // To make the code as portable as possible, the format string is encoded
4589 // directly in the instruction stream. It might be cleaner to encode it in a
4590 // literal pool, but since Printf is usually used for debugging, it is
4591 // beneficial for it to be minimally dependent on other features.
4592 Label format_address;
4593 Adr(x0, &format_address);
4594
4595 // Emit the format string directly in the instruction stream.
4596 { BlockConstPoolScope scope(this);
4597 Label after_data;
4598 B(&after_data);
4599 Bind(&format_address);
4600 EmitStringData(format);
4601 Unreachable();
4602 Bind(&after_data);
4603 }
4604
4605 // We don't pass any arguments on the stack, but we still need to align the C
4606 // stack pointer to a 16-byte boundary for PCS compliance.
4607 if (!csp.Is(StackPointer())) {
4608 Bic(csp, StackPointer(), 0xf);
4609 }
4610
4611 CallPrintf(pcs[0].type());
4612 }
4613
4614
4615 void MacroAssembler::CallPrintf(CPURegister::RegisterType type) {
4616 // A call to printf needs special handling for the simulator, since the system
4617 // printf function will use a different instruction set and the procedure-call
4618 // standard will not be compatible.
4619 #ifdef USE_SIMULATOR
4620 { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
4621 hlt(kImmExceptionIsPrintf);
4622 dc32(type);
4623 }
4624 #else
4625 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
4626 #endif
4627 }
4628
4629
4630 void MacroAssembler::Printf(const char * format,
4631 const CPURegister& arg0,
4632 const CPURegister& arg1,
4633 const CPURegister& arg2,
4634 const CPURegister& arg3) {
4635 // Preserve all caller-saved registers as well as NZCV.
4636 // If csp is the stack pointer, PushCPURegList asserts that the size of each
4637 // list is a multiple of 16 bytes.
4638 PushCPURegList(kCallerSaved);
4639 PushCPURegList(kCallerSavedFP);
4640 // Use Tmp0() as a scratch register. It is not accepted by Printf so it will
4641 // never overlap an argument register.
4642 Mrs(Tmp0(), NZCV);
4643 Push(Tmp0(), xzr);
4644
4645 PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
4646
4647 Pop(xzr, Tmp0());
4648 Msr(NZCV, Tmp0());
4649 PopCPURegList(kCallerSavedFP);
4650 PopCPURegList(kCallerSaved);
4651 }
4652
4653
4654 void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
4655 // TODO(jbramley): Other architectures use the internal memcpy to copy the
4656 // sequence. If this is a performance bottleneck, we should consider caching
4657 // the sequence and copying it in the same way.
4658 InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
4659 ASSERT(jssp.Is(StackPointer()));
4660 EmitFrameSetupForCodeAgePatching(this);
4661 }
4662
4663
4664
4665 void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
4666 InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
4667 ASSERT(jssp.Is(StackPointer()));
4668 EmitCodeAgeSequence(this, stub);
4669 }
4670
4671
4672 #undef __
4673 #define __ assm->
4674
4675
4676 void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
4677 Label start;
4678 __ bind(&start);
4679
4680 // We can do this sequence using four instructions, but the code ageing
4681 // sequence that patches it needs five, so we use the extra space to try to
4682 // simplify some addressing modes and remove some dependencies (compared to
4683 // using two stp instructions with write-back).
4684 __ sub(jssp, jssp, 4 * kXRegSizeInBytes);
4685 __ sub(csp, csp, 4 * kXRegSizeInBytes);
4686 __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSizeInBytes));
4687 __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSizeInBytes));
4688 __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
4689
4690 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
4691 }
4692
4693
4694 void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
4695 Code * stub) {
4696 Label start;
4697 __ bind(&start);
4698 // When the stub is called, the sequence is replaced with the young sequence
4699 // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
4700 // stub jumps to &start, stored in x0. The young sequence does not call the
4701 // stub so there is no infinite loop here.
4702 //
4703 // A branch (br) is used rather than a call (blr) because this code replaces
4704 // the frame setup code that would normally preserve lr.
4705 __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
4706 __ adr(x0, &start);
4707 __ br(ip0);
4708 // IsCodeAgeSequence in codegen-a64.cc assumes that the code generated up
4709 // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
4710 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
4711 if (stub) {
4712 __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
4713 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
4714 }
4715 }
4716
4717
4718 bool MacroAssembler::IsYoungSequence(byte* sequence) {
4719 // Generate a young sequence to compare with.
4720 const int length = kCodeAgeSequenceSize / kInstructionSize;
4721 static bool initialized = false;
4722 static byte young[kCodeAgeSequenceSize];
4723 if (!initialized) {
4724 PatchingAssembler patcher(young, length);
4725 // The young sequence is the frame setup code for FUNCTION code types. It is
4726 // generated by FullCodeGenerator::Generate.
4727 MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
4728 initialized = true;
4729 }
4730
4731 bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0);
4732 ASSERT(is_young || IsCodeAgeSequence(sequence));
4733 return is_young;
4734 }
4735
4736
4737 #ifdef DEBUG
4738 bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {
4739 // The old sequence varies depending on the code age. However, the code up
4740 // until kCodeAgeStubEntryOffset does not change, so we can check that part to
4741 // get a reasonable level of verification.
4742 const int length = kCodeAgeStubEntryOffset / kInstructionSize;
4743 static bool initialized = false;
4744 static byte old[kCodeAgeStubEntryOffset];
4745 if (!initialized) {
4746 PatchingAssembler patcher(old, length);
4747 MacroAssembler::EmitCodeAgeSequence(&patcher, NULL);
4748 initialized = true;
4749 }
4750 return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0;
4751 }
4752 #endif
4753
4754
4755 #undef __
4756 #define __ masm->
4757
4758
4759 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
4760 const Label* smi_check) {
4761 Assembler::BlockConstPoolScope scope(masm);
4762 if (reg.IsValid()) {
4763 ASSERT(smi_check->is_bound());
4764 ASSERT(reg.Is64Bits());
4765
4766 // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
4767 // 'check' in the other bits. The possible offset is limited in that we
4768 // use BitField to pack the data, and the underlying data type is a
4769 // uint32_t.
4770 uint32_t delta = __ InstructionsGeneratedSince(smi_check);
4771 __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
4772 } else {
4773 ASSERT(!smi_check->is_bound());
4774
4775 // An offset of 0 indicates that there is no patch site.
4776 __ InlineData(0);
4777 }
4778 }
4779
4780
4781 InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
4782 : reg_(NoReg), smi_check_(NULL) {
4783 InstructionSequence* inline_data = InstructionSequence::At(info);
4784 ASSERT(inline_data->IsInlineData());
4785 if (inline_data->IsInlineData()) {
4786 uint64_t payload = inline_data->InlineData();
4787 // We use BitField to decode the payload, and BitField can only handle
4788 // 32-bit values.
4789 ASSERT(is_uint32(payload));
4790 if (payload != 0) {
4791 int reg_code = RegisterBits::decode(payload);
4792 reg_ = Register::XRegFromCode(reg_code);
4793 uint64_t smi_check_delta = DeltaBits::decode(payload);
4794 ASSERT(smi_check_delta != 0);
4795 smi_check_ = inline_data - (smi_check_delta * kInstructionSize);
4796 }
4797 }
4798 }
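
// Sketch of the payload layout that Emit() and the constructor above agree on:
// bits [4:0] hold the register code, the remaining bits hold the instruction
// delta back to the smi check, and a payload of 0 means "no patch site". The
// helpers below are illustrative stand-ins for the RegisterBits/DeltaBits
// BitField classes.
#include <cstdint>

uint32_t EncodeSmiCheckPayloadSketch(unsigned reg_code, uint32_t delta) {
  return (reg_code & 0x1F) | (delta << 5);
}

void DecodeSmiCheckPayloadSketch(uint32_t payload,
                                 unsigned* reg_code, uint32_t* delta) {
  *reg_code = payload & 0x1F;
  *delta = payload >> 5;
}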
4799
4800
4801 #undef __
4802
4803
4804 } } // namespace v8::internal
4805
4806 #endif // V8_TARGET_ARCH_A64