// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_A64

#include "bootstrapper.h"
#include "codegen.h"
#include "cpu-profiler.h"
#include "debug.h"
#include "isolate-inl.h"
#include "runtime.h"

namespace v8 {
namespace internal {

// Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
#define __


MacroAssembler::MacroAssembler(Isolate* arg_isolate,
                               byte* buffer,
                               unsigned buffer_size)
    : Assembler(arg_isolate, buffer, buffer_size),
      generating_stub_(false),
#if DEBUG
      allow_macro_instructions_(true),
#endif
      has_frame_(false),
      use_real_aborts_(true),
      sp_(jssp),
      tmp_list_(ip0, ip1),
      fptmp_list_(fp_scratch) {
  if (isolate() != NULL) {
    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                  isolate());
  }
}


void MacroAssembler::LogicalMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
                                  LogicalOp op) {
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation()) {
    Register temp = temps.AcquireX();
    LoadRelocated(temp, operand);
    Logical(rd, rn, temp, op);

  } else if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    unsigned reg_size = rd.SizeInBits();
    ASSERT(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
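    // Under this inversion, for example, Bic(w0, w1, imm) is performed as
    // And(w0, w1, ~imm), and Orn becomes Orr with the inverted immediate.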
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = ~immediate;
      if (rd.Is32Bits()) {
        immediate &= kWRegMask;
      }
    }

    // Special cases for all set or all clear immediates.
    if (immediate == 0) {
      switch (op) {
        case AND:
          Mov(rd, 0);
          return;
        case ORR:  // Fall through.
        case EOR:
          Mov(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          UNREACHABLE();
      }
    } else if ((rd.Is64Bits() && (immediate == -1L)) ||
               (rd.Is32Bits() && (immediate == 0xffffffffL))) {
      switch (op) {
        case AND:
          Mov(rd, rn);
          return;
        case ORR:
          Mov(rd, immediate);
          return;
        case EOR:
          Mvn(rd, rn);
          return;
        case ANDS:  // Fall through.
        case BICS:
          break;
        default:
          UNREACHABLE();
      }
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
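      // (Logical immediates are "bitmask immediates": a contiguous run of set
      // bits, rotated and replicated across the register. For example,
      // 0x00ff00ff00ff00ff is encodable, but 0x1234 is not.)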
      LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // Immediate can't be encoded: synthesize using move immediate.
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, immediate);
      if (rd.Is(csp)) {
        // If rd is the stack pointer we cannot use it as the destination
        // register so we use the temp register as an intermediate again.
        Logical(temp, rn, temp, op);
        Mov(csp, temp);
      } else {
        Logical(rd, rn, temp, op);
      }
    }

  } else if (operand.IsExtendedRegister()) {
    ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports shift <= 4. We want to support exactly the
    // same modes here.
    ASSERT(operand.shift_amount() <= 4);
    ASSERT(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    Logical(rd, rn, temp, op);

  } else {
    // The operand can be encoded in the instruction.
    ASSERT(operand.IsShiftedRegister());
    Logical(rd, rn, operand, op);
  }
}


void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
  ASSERT(allow_macro_instructions_);
  ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
  ASSERT(!rd.IsZero());

  // TODO(all) extend to support more immediates.
  //
  // Immediates on AArch64 can be produced using an initial value, and zero to
  // three move-keep operations.
  //
  // Initial values can be generated with:
  //  1. 64-bit move zero (movz).
  //  2. 32-bit move inverted (movn).
  //  3. 64-bit move inverted.
  //  4. 32-bit orr immediate.
  //  5. 64-bit orr immediate.
  // Move-keep may then be used to modify each of the 16-bit half-words.
  //
  // The code below supports all five initial value generators, and
  // applying move-keep operations to move-zero and move-inverted initial
  // values.

  unsigned reg_size = rd.SizeInBits();
  unsigned n, imm_s, imm_r;
  if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
    // Immediate can be represented in a move zero instruction. Movz can't
    // write to the stack pointer.
    movz(rd, imm);
  } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
    // Immediate can be represented in a move inverted instruction. Movn can't
    // write to the stack pointer.
    movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
    // Immediate can be represented in a logical orr instruction.
    LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
  } else {
    // Generic immediate case. Imm will be represented by
    //   [imm3, imm2, imm1, imm0], where each imm is 16 bits.
    // A move-zero or move-inverted is generated for the first non-zero or
    // non-0xffff immX, and a move-keep for subsequent non-zero immX.
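    // For example, 0x0001000200030004 takes one movz and three movks, while
    // 0xfffff00dffffffff reduces to a single movn: every other halfword is
    // 0xffff, so only one halfword needs to be materialized.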

    uint64_t ignored_halfword = 0;
    bool invert_move = false;
    // If the number of 0xffff halfwords is greater than the number of 0x0000
    // halfwords, it's more efficient to use move-inverted.
    if (CountClearHalfWords(~imm, reg_size) >
        CountClearHalfWords(imm, reg_size)) {
      ignored_halfword = 0xffffL;
      invert_move = true;
    }

    // Mov instructions can't move immediate values into the stack pointer, so
    // set up a temporary register, if needed.
    UseScratchRegisterScope temps(this);
    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;

    // Iterate through the halfwords. Use movn/movz for the first non-ignored
    // halfword, and movk for subsequent halfwords.
    ASSERT((reg_size % 16) == 0);
    bool first_mov_done = false;
    for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
      uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
      if (imm16 != ignored_halfword) {
        if (!first_mov_done) {
          if (invert_move) {
            movn(temp, (~imm16) & 0xffffL, 16 * i);
          } else {
            movz(temp, imm16, 16 * i);
          }
          first_mov_done = true;
        } else {
          // Construct a wider constant.
          movk(temp, imm16, 16 * i);
        }
      }
    }
    ASSERT(first_mov_done);

    // Move the temporary if the original destination register was the stack
    // pointer.
    if (rd.IsSP()) {
      mov(rd, temp);
    }
  }
}


void MacroAssembler::Mov(const Register& rd,
                         const Operand& operand,
                         DiscardMoveMode discard_mode) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());

  // Provide a swap register for instructions that need to write into the
  // system stack pointer (and can't do this inherently).
  UseScratchRegisterScope temps(this);
  Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;

  if (operand.NeedsRelocation()) {
    LoadRelocated(dst, operand);

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(dst, operand.immediate());

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Emit a shift instruction if moving a shifted register. This operation
    // could also be achieved using an orr instruction (like orn used by Mvn),
    // but using a shift instruction makes the disassembly clearer.
    EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());

  } else if (operand.IsExtendedRegister()) {
    // Emit an extend instruction if moving an extended register. This handles
    // extend with post-shift operations, too.
    EmitExtendShift(dst, operand.reg(), operand.extend(),
                    operand.shift_amount());

  } else {
    // Otherwise, emit a register move only if the registers are distinct, or
    // if they are not X registers.
    //
    // Note that mov(w0, w0) is not a no-op because it clears the top word of
    // x0. A flag is provided (kDiscardForSameWReg) if a move between the same
    // W registers is not required to clear the top word of the X register. In
    // this case, the instruction is discarded.
    //
    // If csp is an operand, add #0 is emitted, otherwise, orr #0.
    if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
                                  (discard_mode == kDontDiscardForSameWReg))) {
      Assembler::mov(rd, operand.reg());
    }
    // This case can handle writes into the system stack pointer directly.
    dst = rd;
  }

  // Copy the result to the system stack pointer.
  if (!dst.Is(rd)) {
    ASSERT(rd.IsSP());
    Assembler::mov(rd, dst);
  }
}


void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
  ASSERT(allow_macro_instructions_);

  if (operand.NeedsRelocation()) {
    LoadRelocated(rd, operand);
    mvn(rd, rd);

  } else if (operand.IsImmediate()) {
    // Call the macro assembler for generic immediates.
    Mov(rd, ~operand.immediate());

  } else if (operand.IsExtendedRegister()) {
    // Emit two instructions for the extend case. This differs from Mov, as
    // the extend and invert can't be achieved in one instruction.
    EmitExtendShift(rd, operand.reg(), operand.extend(),
                    operand.shift_amount());
    mvn(rd, rd);

  } else {
    mvn(rd, operand);
  }
}

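// Count the 16-bit halfwords of imm that are zero; for example,
// CountClearHalfWords(0x0000f0000000ffff, 64) is 2.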
unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  ASSERT((reg_size % 8) == 0);
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
      count++;
    }
    imm >>= 16;
  }
  return count;
}


// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
  ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
  return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}


// The movn instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}


void MacroAssembler::ConditionalCompareMacro(const Register& rn,
                                             const Operand& operand,
                                             StatusFlags nzcv,
                                             Condition cond,
                                             ConditionalCompareOp op) {
  ASSERT((cond != al) && (cond != nv));
  if (operand.NeedsRelocation()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    LoadRelocated(temp, operand);
    ConditionalCompareMacro(rn, temp, nzcv, cond, op);

  } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
             (operand.IsImmediate() &&
              IsImmConditionalCompare(operand.immediate()))) {
    // The immediate can be encoded in the instruction, or the operand is an
    // unshifted register: call the assembler.
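    // (ccmp/ccmn can only encode a 5-bit unsigned immediate, 0 to 31.)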
    ConditionalCompare(rn, operand, nzcv, cond, op);

  } else {
    // The operand isn't directly supported by the instruction: perform the
    // operation on a temporary register.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    ConditionalCompare(rn, temp, nzcv, cond, op);
  }
}


void MacroAssembler::Csel(const Register& rd,
                          const Register& rn,
                          const Operand& operand,
                          Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT(!rd.IsZero());
  ASSERT((cond != al) && (cond != nv));
  if (operand.IsImmediate()) {
    // Immediate argument. Handle special cases of 0, 1 and -1 using zero
    // register.
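    // For example, Csel(x0, x1, 1, eq) is emitted as csinc(x0, x1, xzr, eq):
    // when the condition fails, csinc yields xzr + 1 == 1. Similarly csinv
    // yields ~xzr == -1, so no scratch register is needed for these values.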
    int64_t imm = operand.immediate();
    Register zr = AppropriateZeroRegFor(rn);
    if (imm == 0) {
      csel(rd, rn, zr, cond);
    } else if (imm == 1) {
      csinc(rd, rn, zr, cond);
    } else if (imm == -1) {
      csinv(rd, rn, zr, cond);
    } else {
      UseScratchRegisterScope temps(this);
      Register temp = temps.AcquireSameSizeAs(rn);
      Mov(temp, operand.immediate());
      csel(rd, rn, temp, cond);
    }
  } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
    // Unshifted register argument.
    csel(rd, rn, operand.reg(), cond);
  } else {
    // All other arguments.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    csel(rd, rn, temp, cond);
  }
}


void MacroAssembler::AddSubMacro(const Register& rd,
                                 const Register& rn,
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 AddSubOp op) {
  if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
      !operand.NeedsRelocation() && (S == LeaveFlags)) {
    // The instruction would be a nop. Avoid generating useless code.
    return;
  }

  if (operand.NeedsRelocation()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    LoadRelocated(temp, operand);
    AddSubMacro(rd, rn, temp, S, op);
  } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
             (rn.IsZero() && !operand.IsShiftedRegister()) ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
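    // These cases can't be encoded directly: the immediate is out of range,
    // rn is the zero register (register code 31 means csp in these forms), or
    // the shift is ROR, which add/sub shifted-register does not support.
    // Materialize the operand in a scratch register first.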
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSub(rd, rn, temp, S, op);
  } else {
    AddSub(rd, rn, operand, S, op);
  }
}


void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
                                          const Register& rn,
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          AddSubWithCarryOp op) {
  ASSERT(rd.SizeInBits() == rn.SizeInBits());
  UseScratchRegisterScope temps(this);

  if (operand.NeedsRelocation()) {
    Register temp = temps.AcquireX();
    LoadRelocated(temp, operand);
    AddSubWithCarryMacro(rd, rn, temp, S, op);

  } else if (operand.IsImmediate() ||
             (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
    // Add/sub with carry (immediate or ROR shifted register.)
    Register temp = temps.AcquireSameSizeAs(rn);
    Mov(temp, operand);
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
    // Add/sub with carry (shifted register).
    ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
    ASSERT(operand.shift() != ROR);
    ASSERT(is_uintn(operand.shift_amount(),
                    rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
                                                       : kWRegSizeInBitsLog2));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else if (operand.IsExtendedRegister()) {
    // Add/sub with carry (extended register).
    ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
    // Add/sub extended supports a shift <= 4. We want to support exactly the
    // same modes.
    ASSERT(operand.shift_amount() <= 4);
    ASSERT(operand.reg().Is64Bits() ||
           ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
    Register temp = temps.AcquireSameSizeAs(rn);
    EmitExtendShift(temp, operand.reg(), operand.extend(),
                    operand.shift_amount());
    AddSubWithCarry(rd, rn, temp, S, op);

  } else {
    // The addressing mode is directly supported by the instruction.
    AddSubWithCarry(rd, rn, operand, S, op);
  }
}


void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
                                    const MemOperand& addr,
                                    LoadStoreOp op) {
  int64_t offset = addr.offset();
  LSDataSize size = CalcLSDataSize(op);

  // Check if an immediate offset fits in the immediate field of the
  // appropriate instruction. If not, emit two instructions to perform
  // the operation.
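  // (A scaled offset is a 12-bit unsigned immediate, scaled by the access
  // size; an unscaled offset is a 9-bit signed immediate, -256 to 255.)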
  if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
      !IsImmLSUnscaled(offset)) {
    // Immediate offset that can't be encoded using unsigned or unscaled
    // addressing modes.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireSameSizeAs(addr.base());
    Mov(temp, addr.offset());
    LoadStore(rt, MemOperand(addr.base(), temp), op);
  } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
    // Post-index beyond unscaled addressing range.
    LoadStore(rt, MemOperand(addr.base()), op);
    add(addr.base(), addr.base(), offset);
  } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
    // Pre-index beyond unscaled addressing range.
    add(addr.base(), addr.base(), offset);
    LoadStore(rt, MemOperand(addr.base()), op);
  } else {
    // Encodable in one load/store instruction.
    LoadStore(rt, addr, op);
  }
}


void MacroAssembler::Load(const Register& rt,
                          const MemOperand& addr,
                          Representation r) {
  ASSERT(!r.IsDouble());

  if (r.IsInteger8()) {
    Ldrsb(rt, addr);
  } else if (r.IsUInteger8()) {
    Ldrb(rt, addr);
  } else if (r.IsInteger16()) {
    Ldrsh(rt, addr);
  } else if (r.IsUInteger16()) {
    Ldrh(rt, addr);
  } else if (r.IsInteger32()) {
    Ldr(rt.W(), addr);
  } else {
    ASSERT(rt.Is64Bits());
    Ldr(rt, addr);
  }
}


void MacroAssembler::Store(const Register& rt,
                           const MemOperand& addr,
                           Representation r) {
  ASSERT(!r.IsDouble());

  if (r.IsInteger8() || r.IsUInteger8()) {
    Strb(rt, addr);
  } else if (r.IsInteger16() || r.IsUInteger16()) {
    Strh(rt, addr);
  } else if (r.IsInteger32()) {
    Str(rt.W(), addr);
  } else {
    ASSERT(rt.Is64Bits());
    Str(rt, addr);
  }
}


bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
    Label* label, ImmBranchType b_type) {
  bool need_longer_range = false;
  // There are two situations in which we care about the offset being out of
  // range:
  //   - The label is bound but too far away.
  //   - The label is not bound but linked, and the previous branch
  //     instruction in the chain is too far away.
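  // (The encodable ranges are +/-32KB for test branches (tbz/tbnz), +/-1MB
  // for compare and conditional branches, and +/-128MB for unconditional
  // branches.)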
  if (label->is_bound() || label->is_linked()) {
    need_longer_range =
        !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
  }
  if (!need_longer_range && !label->is_bound()) {
    int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
    unresolved_branches_.insert(
        std::pair<int, FarBranchInfo>(max_reachable_pc,
                                      FarBranchInfo(pc_offset(), label)));
    // Also maintain the next pool check.
    next_veneer_pool_check_ =
        Min(next_veneer_pool_check_,
            max_reachable_pc - kVeneerDistanceCheckMargin);
  }
  return need_longer_range;
}


void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
  ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
         (bit == -1 || type >= kBranchTypeFirstUsingBit));
  if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
    B(static_cast<Condition>(type), label);
  } else {
    switch (type) {
      case always:        B(label);              break;
      case never:         break;
      case reg_zero:      Cbz(reg, label);       break;
      case reg_not_zero:  Cbnz(reg, label);      break;
      case reg_bit_clear: Tbz(reg, bit, label);  break;
      case reg_bit_set:   Tbnz(reg, bit, label); break;
      default:
        UNREACHABLE();
    }
  }
}


void MacroAssembler::B(Label* label, Condition cond) {
  ASSERT(allow_macro_instructions_);
  ASSERT((cond != al) && (cond != nv));

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);

  if (need_extra_instructions) {
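    // The label is out of range for b.cond, so invert the condition and
    // branch over an unconditional b, which has a far greater range. The
    // test- and compare-branch macros below use the same trick.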
    b(&done, InvertCondition(cond));
    B(label);
  } else {
    b(label, cond);
  }
  bind(&done);
}


void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
  ASSERT(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbz(rt, bit_pos, &done);
    B(label);
  } else {
    tbnz(rt, bit_pos, label);
  }
  bind(&done);
}


void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
  ASSERT(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);

  if (need_extra_instructions) {
    tbnz(rt, bit_pos, &done);
    B(label);
  } else {
    tbz(rt, bit_pos, label);
  }
  bind(&done);
}


void MacroAssembler::Cbnz(const Register& rt, Label* label) {
  ASSERT(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbz(rt, &done);
    B(label);
  } else {
    cbnz(rt, label);
  }
  bind(&done);
}


void MacroAssembler::Cbz(const Register& rt, Label* label) {
  ASSERT(allow_macro_instructions_);

  Label done;
  bool need_extra_instructions =
      NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);

  if (need_extra_instructions) {
    cbnz(rt, &done);
    B(label);
  } else {
    cbz(rt, label);
  }
  bind(&done);
}


// Pseudo-instructions.


void MacroAssembler::Abs(const Register& rd, const Register& rm,
                         Label* is_not_representable,
                         Label* is_representable) {
  ASSERT(allow_macro_instructions_);
  ASSERT(AreSameSizeAndType(rd, rm));

  Cmp(rm, 1);
  Cneg(rd, rm, lt);

  // If the comparison sets the v flag, the input was the smallest value
  // representable by rm, and the mathematical result of abs(rm) is not
  // representable using two's complement.
  if ((is_not_representable != NULL) && (is_representable != NULL)) {
    B(is_not_representable, vs);
    B(is_representable);
  } else if (is_not_representable != NULL) {
    B(is_not_representable, vs);
  } else if (is_representable != NULL) {
    B(is_representable, vc);
  }
}


// Abstracted stack operations.


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3) {
  ASSERT(AreSameSizeAndType(src0, src1, src2, src3));

  int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
  int size = src0.SizeInBytes();

  PrepareForPush(count, size);
  PushHelper(count, size, src0, src1, src2, src3);
}


void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                          const CPURegister& src2, const CPURegister& src3,
                          const CPURegister& src4, const CPURegister& src5,
                          const CPURegister& src6, const CPURegister& src7) {
  ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));

  int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
  int size = src0.SizeInBytes();

  PrepareForPush(count, size);
  PushHelper(4, size, src0, src1, src2, src3);
  PushHelper(count - 4, size, src4, src5, src6, src7);
}


void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
                         const CPURegister& dst2, const CPURegister& dst3) {
  // It is not valid to pop into the same register more than once in one
  // instruction, not even into the zero register.
  ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
  ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  ASSERT(dst0.IsValid());

  int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
  int size = dst0.SizeInBytes();

  PrepareForPop(count, size);
  PopHelper(count, size, dst0, dst1, dst2, dst3);

  if (!csp.Is(StackPointer()) && emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    Mov(csp, StackPointer());
  }
}


void MacroAssembler::PushPopQueue::PushQueued() {
  if (queued_.empty()) return;

  masm_->PrepareForPush(size_);

  int count = queued_.size();
  int index = 0;
  while (index < count) {
    // PushHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
                      batch[0], batch[1], batch[2], batch[3]);
  }

  queued_.clear();
}


void MacroAssembler::PushPopQueue::PopQueued() {
  if (queued_.empty()) return;

  masm_->PrepareForPop(size_);

  int count = queued_.size();
  int index = 0;
  while (index < count) {
    // PopHelper can only handle registers with the same size and type, and it
    // can handle only four at a time. Batch them up accordingly.
    CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
    int batch_index = 0;
    do {
      batch[batch_index++] = queued_[index++];
    } while ((batch_index < 4) && (index < count) &&
             batch[0].IsSameSizeAndType(queued_[index]));

    masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
                     batch[0], batch[1], batch[2], batch[3]);
  }

  queued_.clear();
}


void MacroAssembler::PushCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PrepareForPush(registers.Count(), size);
  // Push up to four registers at a time because if the current stack pointer
  // is csp and reg_size is 32, registers must be pushed in blocks of four in
  // order to maintain the 16-byte alignment for csp.
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& src0 = registers.PopHighestIndex();
    const CPURegister& src1 = registers.PopHighestIndex();
    const CPURegister& src2 = registers.PopHighestIndex();
    const CPURegister& src3 = registers.PopHighestIndex();
    int count = count_before - registers.Count();
    PushHelper(count, size, src0, src1, src2, src3);
  }
}


void MacroAssembler::PopCPURegList(CPURegList registers) {
  int size = registers.RegisterSizeInBytes();

  PrepareForPop(registers.Count(), size);
  // Pop up to four registers at a time because if the current stack pointer
  // is csp and reg_size is 32, registers must be pushed in blocks of four in
  // order to maintain the 16-byte alignment for csp.
  while (!registers.IsEmpty()) {
    int count_before = registers.Count();
    const CPURegister& dst0 = registers.PopLowestIndex();
    const CPURegister& dst1 = registers.PopLowestIndex();
    const CPURegister& dst2 = registers.PopLowestIndex();
    const CPURegister& dst3 = registers.PopLowestIndex();
    int count = count_before - registers.Count();
    PopHelper(count, size, dst0, dst1, dst2, dst3);
  }

  if (!csp.Is(StackPointer()) && emit_debug_code()) {
    // It is safe to leave csp where it is when unwinding the JavaScript stack,
    // but if we keep it matching StackPointer, the simulator can detect memory
    // accesses in the now-free part of the stack.
    Mov(csp, StackPointer());
  }
}


void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
  int size = src.SizeInBytes();

  PrepareForPush(count, size);

  if (FLAG_optimize_for_size && count > 8) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();

    Label loop;
    __ Mov(temp, count / 2);
    __ Bind(&loop);
    PushHelper(2, size, src, src, NoReg, NoReg);
    __ Subs(temp, temp, 1);
    __ B(ne, &loop);

    count %= 2;
  }

  // Push up to four registers at a time if possible because if the current
  // stack pointer is csp and the register size is 32, registers must be
  // pushed in blocks of four in order to maintain the 16-byte alignment for
  // csp.
  while (count >= 4) {
    PushHelper(4, size, src, src, src, src);
    count -= 4;
  }
  if (count >= 2) {
    PushHelper(2, size, src, src, NoReg, NoReg);
    count -= 2;
  }
  if (count == 1) {
    PushHelper(1, size, src, NoReg, NoReg, NoReg);
    count -= 1;
  }
  ASSERT(count == 0);
}


void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
  PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));

  UseScratchRegisterScope temps(this);
  Register temp = temps.AcquireSameSizeAs(count);

  if (FLAG_optimize_for_size) {
    Label loop, done;

    Subs(temp, count, 1);
    B(mi, &done);

    // Push all registers individually, to save code size.
    Bind(&loop);
    Subs(temp, temp, 1);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
    B(pl, &loop);

    Bind(&done);
  } else {
    Label loop, leftover2, leftover1, done;

    Subs(temp, count, 4);
    B(mi, &leftover2);

    // Push groups of four first.
    Bind(&loop);
    Subs(temp, temp, 4);
    PushHelper(4, src.SizeInBytes(), src, src, src, src);
    B(pl, &loop);

    // Push groups of two.
    Bind(&leftover2);
    Tbz(count, 1, &leftover1);
    PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);

    // Push the last one (if required).
    Bind(&leftover1);
    Tbz(count, 0, &done);
    PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);

    Bind(&done);
  }
}


void MacroAssembler::PushHelper(int count, int size,
                                const CPURegister& src0,
                                const CPURegister& src1,
                                const CPURegister& src2,
                                const CPURegister& src3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
  ASSERT(size == src0.SizeInBytes());

  // When pushing multiple registers, the store order is chosen such that
  // Push(a, b) is equivalent to Push(a) followed by Push(b).
  switch (count) {
    case 1:
      ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
      str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
      break;
    case 2:
      ASSERT(src2.IsNone() && src3.IsNone());
      stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
      break;
    case 3:
      ASSERT(src3.IsNone());
      stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
      str(src0, MemOperand(StackPointer(), 2 * size));
      break;
    case 4:
      // Skip over 4 * size, then fill in the gap. This allows four W registers
      // to be pushed using csp, whilst maintaining 16-byte alignment for csp
      // at all times.
      stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
      stp(src1, src0, MemOperand(StackPointer(), 2 * size));
      break;
    default:
      UNREACHABLE();
  }
}


void MacroAssembler::PopHelper(int count, int size,
                               const CPURegister& dst0,
                               const CPURegister& dst1,
                               const CPURegister& dst2,
                               const CPURegister& dst3) {
  // Ensure that we don't unintentionally modify scratch or debug registers.
  InstructionAccurateScope scope(this);

  ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
  ASSERT(size == dst0.SizeInBytes());

  // When popping multiple registers, the load order is chosen such that
  // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
  switch (count) {
    case 1:
      ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
      ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
      break;
    case 2:
      ASSERT(dst2.IsNone() && dst3.IsNone());
      ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
      break;
    case 3:
      ASSERT(dst3.IsNone());
      ldr(dst2, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
      break;
    case 4:
      // Load the higher addresses first, then load the lower addresses and
      // skip the whole block in the second instruction. This allows four W
      // registers to be popped using csp, whilst maintaining 16-byte alignment
      // for csp at all times.
      ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
      ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
      break;
    default:
      UNREACHABLE();
  }
}


void MacroAssembler::PrepareForPush(Operand total_size) {
  // TODO(jbramley): This assertion generates too much code in some debug
  // tests.
  // AssertStackConsistency();
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      ASSERT((total_size.immediate() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  } else {
    // Even if the current stack pointer is not the system stack pointer (csp),
    // the system stack pointer will still be modified in order to comply with
    // ABI rules about accessing memory below the system stack pointer.
    BumpSystemStackPointer(total_size);
  }
}


void MacroAssembler::PrepareForPop(Operand total_size) {
  AssertStackConsistency();
  if (csp.Is(StackPointer())) {
    // If the current stack pointer is csp, then it must be aligned to 16 bytes
    // on entry and the total size of the specified registers must also be a
    // multiple of 16 bytes.
    if (total_size.IsImmediate()) {
      ASSERT((total_size.immediate() % 16) == 0);
    }

    // Don't check access size for non-immediate sizes. It's difficult to do
    // well, and it will be caught by hardware (or the simulator) anyway.
  }
}


void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
  if (offset.IsImmediate()) {
    ASSERT(offset.immediate() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Str(src, MemOperand(StackPointer(), offset));
}


void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
  if (offset.IsImmediate()) {
    ASSERT(offset.immediate() >= 0);
  } else if (emit_debug_code()) {
    Cmp(xzr, offset);
    Check(le, kStackAccessBelowStackPointer);
  }

  Ldr(dst, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PokePair(const CPURegister& src1,
                              const CPURegister& src2,
                              int offset) {
  ASSERT(AreSameSizeAndType(src1, src2));
  ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
  Stp(src1, src2, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PeekPair(const CPURegister& dst1,
                              const CPURegister& dst2,
                              int offset) {
  ASSERT(AreSameSizeAndType(dst1, dst2));
  ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
  Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
}


void MacroAssembler::PushCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  ASSERT(csp.Is(StackPointer()));

  MemOperand tos(csp, -2 * kXRegSize, PreIndex);

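  // The pairs below cover the registers that AAPCS64 requires a callee to
  // preserve: d8-d15 and x19-x29, plus the link register (x30).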
  stp(d14, d15, tos);
  stp(d12, d13, tos);
  stp(d10, d11, tos);
  stp(d8, d9, tos);

  stp(x29, x30, tos);
  stp(x27, x28, tos);  // x28 = jssp
  stp(x25, x26, tos);
  stp(x23, x24, tos);
  stp(x21, x22, tos);
  stp(x19, x20, tos);
}


void MacroAssembler::PopCalleeSavedRegisters() {
  // Ensure that the macro-assembler doesn't use any scratch registers.
  InstructionAccurateScope scope(this);

  // This method must not be called unless the current stack pointer is the
  // system stack pointer (csp).
  ASSERT(csp.Is(StackPointer()));

  MemOperand tos(csp, 2 * kXRegSize, PostIndex);

  ldp(x19, x20, tos);
  ldp(x21, x22, tos);
  ldp(x23, x24, tos);
  ldp(x25, x26, tos);
  ldp(x27, x28, tos);  // x28 = jssp
  ldp(x29, x30, tos);

  ldp(d8, d9, tos);
  ldp(d10, d11, tos);
  ldp(d12, d13, tos);
  ldp(d14, d15, tos);
}


void MacroAssembler::AssertStackConsistency() {
  if (emit_debug_code()) {
    if (csp.Is(StackPointer())) {
      // We can't check the alignment of csp without using a scratch register
      // (or clobbering the flags), but the processor (or simulator) will abort
      // if it is not properly aligned during a load.
      ldr(xzr, MemOperand(csp, 0));
    } else if (FLAG_enable_slow_asserts) {
      Label ok;
      // Check that csp <= StackPointer(), preserving all registers and NZCV.
      sub(StackPointer(), csp, StackPointer());
      cbz(StackPointer(), &ok);              // Ok if csp == StackPointer().
      tbnz(StackPointer(), kXSignBit, &ok);  // Ok if csp < StackPointer().

      Abort(kTheCurrentStackPointerIsBelowCsp);

      bind(&ok);
      // Restore StackPointer().
      sub(StackPointer(), csp, StackPointer());
    }
  }
}


void MacroAssembler::LoadRoot(Register destination,
                              Heap::RootListIndex index) {
  // TODO(jbramley): Most root values are constants, and can be synthesized
  // without a load. Refer to the ARM back end for details.
  Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
}


void MacroAssembler::StoreRoot(Register source,
                               Heap::RootListIndex index) {
  Str(source, MemOperand(root, index << kPointerSizeLog2));
}


void MacroAssembler::LoadTrueFalseRoots(Register true_root,
                                        Register false_root) {
  STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
  Ldp(true_root, false_root,
      MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
}


void MacroAssembler::LoadHeapObject(Register result,
                                    Handle<HeapObject> object) {
  AllowDeferredHandleDereference using_raw_address;
  if (isolate()->heap()->InNewSpace(*object)) {
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    Mov(result, Operand(cell));
    Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
  } else {
    Mov(result, Operand(object));
  }
}


void MacroAssembler::LoadInstanceDescriptors(Register map,
                                             Register descriptors) {
  Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
}


void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
  Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}


void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Map::EnumLengthBits::kMask);
}


void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
  Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
  And(dst, dst, Smi::FromInt(Map::EnumLengthBits::kMask));
}


void MacroAssembler::CheckEnumCache(Register object,
                                    Register null_value,
                                    Register scratch0,
                                    Register scratch1,
                                    Register scratch2,
                                    Register scratch3,
                                    Label* call_runtime) {
  ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
                     scratch3));

  Register empty_fixed_array_value = scratch0;
  Register current_object = scratch1;

  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
  Label next, start;

  Mov(current_object, object);

  // Check if the enum length field is properly initialized, indicating that
  // there is an enum cache.
  Register map = scratch2;
  Register enum_length = scratch3;
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  EnumLengthUntagged(enum_length, map);
  Cmp(enum_length, kInvalidEnumCacheSentinel);
  B(eq, call_runtime);

  B(&start);

  Bind(&next);
  Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));

  // For all objects but the receiver, check that the cache is empty.
  EnumLengthUntagged(enum_length, map);
  Cbnz(enum_length, call_runtime);

  Bind(&start);

  // Check that there are no elements. Register current_object contains the
  // current JS object we've reached through the prototype chain.
  Label no_elements;
  Ldr(current_object, FieldMemOperand(current_object,
                                      JSObject::kElementsOffset));
  Cmp(current_object, empty_fixed_array_value);
  B(eq, &no_elements);

  // Second chance, the object may be using the empty slow element dictionary.
  CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
  B(ne, call_runtime);

  Bind(&no_elements);
  Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
  Cmp(current_object, null_value);
  B(ne, &next);
}


void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Label* no_memento_found) {
  ExternalReference new_space_start =
      ExternalReference::new_space_start(isolate());
  ExternalReference new_space_allocation_top =
      ExternalReference::new_space_allocation_top_address(isolate());

  Add(scratch1, receiver,
      JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag);
  Cmp(scratch1, new_space_start);
  B(lt, no_memento_found);

  Mov(scratch2, new_space_allocation_top);
  Ldr(scratch2, MemOperand(scratch2));
  Cmp(scratch1, scratch2);
  B(gt, no_memento_found);

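  // Load the candidate memento's map and compare it against the allocation
  // memento map, leaving the flags set for the caller: eq means a memento
  // was found.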
1337 Ldr(scratch1, MemOperand(scratch1, -AllocationMemento::kSize)); | |
1338 Cmp(scratch1, | |
1339 Operand(isolate()->factory()->allocation_memento_map())); | |
1340 } | |
1341 | |
1342 | |
1343 void MacroAssembler::JumpToHandlerEntry(Register exception, | |
1344 Register object, | |
1345 Register state, | |
1346 Register scratch1, | |
1347 Register scratch2) { | |
1348 // Handler expects argument in x0. | |
1349 ASSERT(exception.Is(x0)); | |
1350 | |
1351 // Compute the handler entry address and jump to it. The handler table is | |
1352 // a fixed array of (smi-tagged) code offsets. | |
1353 Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset)); | |
1354 Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag); | |
1355 STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2); | |
1356 Lsr(scratch2, state, StackHandler::kKindWidth); | |
1357 Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2)); | |
1358 Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag); | |
1359 Add(scratch1, scratch1, Operand::UntagSmi(scratch2)); | |
1360 Br(scratch1); | |
1361 } | |
1362 | |
1363 | |
1364 void MacroAssembler::InNewSpace(Register object, | |
1365 Condition cond, | |
1366 Label* branch) { | |
1367 ASSERT(cond == eq || cond == ne); | |
1368 UseScratchRegisterScope temps(this); | |
1369 Register temp = temps.AcquireX(); | |
1370 And(temp, object, ExternalReference::new_space_mask(isolate())); | |
1371 Cmp(temp, ExternalReference::new_space_start(isolate())); | |
1372 B(cond, branch); | |
1373 } | |
1374 | |
1375 | |
1376 void MacroAssembler::Throw(Register value, | |
1377 Register scratch1, | |
1378 Register scratch2, | |
1379 Register scratch3, | |
1380 Register scratch4) { | |
1381 // Adjust this code if not the case. | |
1382 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | |
1383 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | |
1384 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | |
1385 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | |
1386 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | |
1387 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | |
1388 | |
1389 // The handler expects the exception in x0. | |
1390 ASSERT(value.Is(x0)); | |
1391 | |
1392 // Drop the stack pointer to the top of the top handler. | |
1393 ASSERT(jssp.Is(StackPointer())); | |
1394 Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress, | |
1395 isolate()))); | |
1396 Ldr(jssp, MemOperand(scratch1)); | |
1397 // Restore the next handler. | |
1398 Pop(scratch2); | |
1399 Str(scratch2, MemOperand(scratch1)); | |
1400 | |
1401 // Get the code object and state. Restore the context and frame pointer. | |
1402 Register object = scratch1; | |
1403 Register state = scratch2; | |
1404 Pop(object, state, cp, fp); | |
1405 | |
1406 // If the handler is a JS frame, restore the context to the frame. | |
1407 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp | |
1408 // or cp. | |
1409 Label not_js_frame; | |
1410 Cbz(cp, ¬_js_frame); | |
1411 Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | |
1412 Bind(¬_js_frame); | |
1413 | |
1414 JumpToHandlerEntry(value, object, state, scratch3, scratch4); | |
1415 } | |
1416 | |
1417 | |
1418 void MacroAssembler::ThrowUncatchable(Register value, | |
1419 Register scratch1, | |
1420 Register scratch2, | |
1421 Register scratch3, | |
1422 Register scratch4) { | |
1423 // Adjust this code if not the case. | |
1424 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | |
1425 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); | |
1426 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | |
1427 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | |
1428 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | |
1429 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | |
1430 | |
1431 // The handler expects the exception in x0. | |
1432 ASSERT(value.Is(x0)); | |
1433 | |
1434 // Drop the stack pointer to the top of the top stack handler. | |
1435 ASSERT(jssp.Is(StackPointer())); | |
1436 Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress, | |
1437 isolate()))); | |
1438 Ldr(jssp, MemOperand(scratch1)); | |
1439 | |
1440 // Unwind the handlers until the ENTRY handler is found. | |
1441 Label fetch_next, check_kind; | |
1442 B(&check_kind); | |
1443 Bind(&fetch_next); | |
1444 Peek(jssp, StackHandlerConstants::kNextOffset); | |
1445 | |
1446 Bind(&check_kind); | |
1447 STATIC_ASSERT(StackHandler::JS_ENTRY == 0); | |
1448 Peek(scratch2, StackHandlerConstants::kStateOffset); | |
1449 TestAndBranchIfAnySet(scratch2, StackHandler::KindField::kMask, &fetch_next); | |
1450 | |
1451 // Set the top handler address to next handler past the top ENTRY handler. | |
1452 Pop(scratch2); | |
1453 Str(scratch2, MemOperand(scratch1)); | |
1454 | |
1455 // Get the code object and state. Clear the context and frame pointer (0 was | |
1456 // saved in the handler). | |
1457 Register object = scratch1; | |
1458 Register state = scratch2; | |
1459 Pop(object, state, cp, fp); | |
1460 | |
1461 JumpToHandlerEntry(value, object, state, scratch3, scratch4); | |
1462 } | |
1463 | |
1464 | |
1465 void MacroAssembler::Throw(BailoutReason reason) { | |
1466 Label throw_start; | |
1467 Bind(&throw_start); | |
1468 #ifdef DEBUG | |
1469 const char* msg = GetBailoutReason(reason); | |
1470 RecordComment("Throw message: "); | |
1471 RecordComment((msg != NULL) ? msg : "UNKNOWN"); | |
1472 #endif | |
1473 | |
1474 Mov(x0, Smi::FromInt(reason)); | |
1475 Push(x0); | |
1476 | |
1477 // Disable stub call restrictions to always allow calls to throw. | |
1478 if (!has_frame_) { | |
1479 // We don't actually want to generate a pile of code for this, so just | |
1480 // claim there is a stack frame, without generating one. | |
1481 FrameScope scope(this, StackFrame::NONE); | |
1482 CallRuntime(Runtime::kThrowMessage, 1); | |
1483 } else { | |
1484 CallRuntime(Runtime::kThrowMessage, 1); | |
1485 } | |
1486 // ThrowMessage should not return here. | |
1487 Unreachable(); | |
1488 } | |
1489 | |
1490 | |
1491 void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) { | |
1492 Label ok; | |
1493 B(InvertCondition(cc), &ok); | |
1494 Throw(reason); | |
1495 Bind(&ok); | |
1496 } | |
1497 | |
1498 | |
1499 void MacroAssembler::ThrowIfSmi(const Register& value, BailoutReason reason) { | |
1500 Label ok; | |
1501 JumpIfNotSmi(value, &ok); | |
1502 Throw(reason); | |
1503 Bind(&ok); | |
1504 } | |
1505 | |
1506 | |
1507 void MacroAssembler::SmiAbs(const Register& smi, Label* slow) { | |
1508 ASSERT(smi.Is64Bits()); | |
1509 Abs(smi, smi, slow); | |
1510 } | |
1511 | |
1512 | |
1513 void MacroAssembler::AssertSmi(Register object, BailoutReason reason) { | |
1514 if (emit_debug_code()) { | |
1515 STATIC_ASSERT(kSmiTag == 0); | |
1516 Tst(object, kSmiTagMask); | |
1517 Check(eq, reason); | |
1518 } | |
1519 } | |
1520 | |
1521 | |
1522 void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) { | |
1523 if (emit_debug_code()) { | |
1524 STATIC_ASSERT(kSmiTag == 0); | |
1525 Tst(object, kSmiTagMask); | |
1526 Check(ne, reason); | |
1527 } | |
1528 } | |
1529 | |
1530 | |
1531 void MacroAssembler::AssertName(Register object) { | |
1532 if (emit_debug_code()) { | |
1533 AssertNotSmi(object, kOperandIsASmiAndNotAName); | |
1534 | |
1535 UseScratchRegisterScope temps(this); | |
1536 Register temp = temps.AcquireX(); | |
1537 | |
1538 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | |
1539 CompareInstanceType(temp, temp, LAST_NAME_TYPE); | |
1540 Check(ls, kOperandIsNotAName); | |
1541 } | |
1542 } | |
1543 | |
1544 | |
1545 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, | |
1546 Register scratch) { | |
1547 if (emit_debug_code()) { | |
1548 Label done_checking; | |
1549 AssertNotSmi(object); | |
1550 JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking); | |
1551 Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | |
1552 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex); | |
1553 Assert(eq, kExpectedUndefinedOrCell); | |
1554 Bind(&done_checking); | |
1555 } | |
1556 } | |
1557 | |
1558 | |
1559 void MacroAssembler::AssertString(Register object) { | |
1560 if (emit_debug_code()) { | |
1561 UseScratchRegisterScope temps(this); | |
1562 Register temp = temps.AcquireX(); | |
1563 STATIC_ASSERT(kSmiTag == 0); | |
1564 Tst(object, kSmiTagMask); | |
1565 Check(ne, kOperandIsASmiAndNotAString); | |
1566 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | |
1567 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); | |
1568 Check(lo, kOperandIsNotAString); | |
1569 } | |
1570 } | |
1571 | |
1572 | |
1573 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { | |
1574 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. | |
1575 Call(stub->GetCode(isolate()), RelocInfo::CODE_TARGET, ast_id); | |
1576 } | |
1577 | |
1578 | |
1579 void MacroAssembler::TailCallStub(CodeStub* stub) { | |
1580 Jump(stub->GetCode(isolate()), RelocInfo::CODE_TARGET); | |
1581 } | |
1582 | |
1583 | |
1584 void MacroAssembler::CallRuntime(const Runtime::Function* f, | |
1585 int num_arguments, | |
1586 SaveFPRegsMode save_doubles) { | |
1587 // All arguments must be on the stack before this function is called. | |
1588 // x0 holds the return value after the call. | |
1589 | |
1590 // Check that the number of arguments matches what the function expects. | |
1591 // If f->nargs is -1, the function can accept a variable number of arguments. | |
1592 if (f->nargs >= 0 && f->nargs != num_arguments) { | |
1593 // Illegal operation: drop the stack arguments and return undefined. | |
1594 if (num_arguments > 0) { | |
1595 Drop(num_arguments); | |
1596 } | |
1597 LoadRoot(x0, Heap::kUndefinedValueRootIndex); | |
1598 return; | |
1599 } | |
1600 | |
1601 // Place the necessary arguments. | |
1602 Mov(x0, num_arguments); | |
1603 Mov(x1, ExternalReference(f, isolate())); | |
1604 | |
1605 CEntryStub stub(1, save_doubles); | |
1606 CallStub(&stub); | |
1607 } | |
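
// Usage sketch for CallRuntime (illustrative; the runtime id and argument
// registers here are hypothetical):
//   Push(x2, x3);                            // arguments go on the stack
//   CallRuntime(Runtime::kSomeFunction, 2);
//   // x0 now holds the result.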
1608 | |
1609 | |
1610 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { | |
1611 return ref0.address() - ref1.address(); | |
1612 } | |
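
// Computing field offsets with AddressOffset lets CallApiFunctionAndReturn
// address the handle scope's next, limit and level fields from one base
// register (handle_scope_base below) instead of materializing three addresses.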
1613 | |
1614 | |
1615 void MacroAssembler::CallApiFunctionAndReturn( | |
1616 Register function_address, | |
1617 ExternalReference thunk_ref, | |
1618 int stack_space, | |
1619 int spill_offset, | |
1620 MemOperand return_value_operand, | |
1621 MemOperand* context_restore_operand) { | |
1622 ASM_LOCATION("CallApiFunctionAndReturn"); | |
1623 ExternalReference next_address = | |
1624 ExternalReference::handle_scope_next_address(isolate()); | |
1625 const int kNextOffset = 0; | |
1626 const int kLimitOffset = AddressOffset( | |
1627 ExternalReference::handle_scope_limit_address(isolate()), | |
1628 next_address); | |
1629 const int kLevelOffset = AddressOffset( | |
1630 ExternalReference::handle_scope_level_address(isolate()), | |
1631 next_address); | |
1632 | |
1633 ASSERT(function_address.is(x1) || function_address.is(x2)); | |
1634 | |
1635 Label profiler_disabled; | |
1636 Label end_profiler_check; | |
1637 bool* is_profiling_flag = isolate()->cpu_profiler()->is_profiling_address(); | |
1638 STATIC_ASSERT(sizeof(*is_profiling_flag) == 1); | |
1639 Mov(x10, reinterpret_cast<uintptr_t>(is_profiling_flag)); | |
1640 Ldrb(w10, MemOperand(x10)); | |
1641 Cbz(w10, &profiler_disabled); | |
1642 Mov(x3, thunk_ref); | |
1643 B(&end_profiler_check); | |
1644 | |
1645 Bind(&profiler_disabled); | |
1646 Mov(x3, function_address); | |
1647 Bind(&end_profiler_check); | |
1648 | |
1649 // Save the callee-save registers we are going to use. | |
1650 // TODO(all): Is this necessary? ARM doesn't do it. | |
1651 STATIC_ASSERT(kCallApiFunctionSpillSpace == 4); | |
1652 Poke(x19, (spill_offset + 0) * kXRegSize); | |
1653 Poke(x20, (spill_offset + 1) * kXRegSize); | |
1654 Poke(x21, (spill_offset + 2) * kXRegSize); | |
1655 Poke(x22, (spill_offset + 3) * kXRegSize); | |
1656 | |
1657 // Allocate HandleScope in callee-save registers. | |
1658 // We will need to restore the HandleScope after the call to the API function, | |
1659 // so allocate it in callee-save registers; they will be preserved by C code. | |
1660 Register handle_scope_base = x22; | |
1661 Register next_address_reg = x19; | |
1662 Register limit_reg = x20; | |
1663 Register level_reg = w21; | |
1664 | |
1665 Mov(handle_scope_base, next_address); | |
1666 Ldr(next_address_reg, MemOperand(handle_scope_base, kNextOffset)); | |
1667 Ldr(limit_reg, MemOperand(handle_scope_base, kLimitOffset)); | |
1668 Ldr(level_reg, MemOperand(handle_scope_base, kLevelOffset)); | |
1669 Add(level_reg, level_reg, 1); | |
1670 Str(level_reg, MemOperand(handle_scope_base, kLevelOffset)); | |
1671 | |
1672 if (FLAG_log_timer_events) { | |
1673 FrameScope frame(this, StackFrame::MANUAL); | |
1674 PushSafepointRegisters(); | |
1675 Mov(x0, ExternalReference::isolate_address(isolate())); | |
1676 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); | |
1677 PopSafepointRegisters(); | |
1678 } | |
1679 | |
1680 // Native call returns to the DirectCEntry stub which redirects to the | |
1681 // return address pushed on stack (could have moved after GC). | |
1682 // DirectCEntry stub itself is generated early and never moves. | |
1683 DirectCEntryStub stub; | |
1684 stub.GenerateCall(this, x3); | |
1685 | |
1686 if (FLAG_log_timer_events) { | |
1687 FrameScope frame(this, StackFrame::MANUAL); | |
1688 PushSafepointRegisters(); | |
1689 Mov(x0, ExternalReference::isolate_address(isolate())); | |
1690 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1); | |
1691 PopSafepointRegisters(); | |
1692 } | |
1693 | |
1694 Label promote_scheduled_exception; | |
1695 Label exception_handled; | |
1696 Label delete_allocated_handles; | |
1697 Label leave_exit_frame; | |
1698 Label return_value_loaded; | |
1699 | |
1700 // Load value from ReturnValue. | |
1701 Ldr(x0, return_value_operand); | |
1702 Bind(&return_value_loaded); | |
1703 // No more valid handles (the result handle was the last one). Restore | |
1704 // previous handle scope. | |
1705 Str(next_address_reg, MemOperand(handle_scope_base, kNextOffset)); | |
1706 if (emit_debug_code()) { | |
1707 Ldr(w1, MemOperand(handle_scope_base, kLevelOffset)); | |
1708 Cmp(w1, level_reg); | |
1709 Check(eq, kUnexpectedLevelAfterReturnFromApiCall); | |
1710 } | |
1711 Sub(level_reg, level_reg, 1); | |
1712 Str(level_reg, MemOperand(handle_scope_base, kLevelOffset)); | |
1713 Ldr(x1, MemOperand(handle_scope_base, kLimitOffset)); | |
1714 Cmp(limit_reg, x1); | |
1715 B(ne, &delete_allocated_handles); | |
1716 | |
1717 Bind(&leave_exit_frame); | |
1718 // Restore callee-saved registers. | |
1719 Peek(x19, (spill_offset + 0) * kXRegSize); | |
1720 Peek(x20, (spill_offset + 1) * kXRegSize); | |
1721 Peek(x21, (spill_offset + 2) * kXRegSize); | |
1722 Peek(x22, (spill_offset + 3) * kXRegSize); | |
1723 | |
1724 // Check if the function scheduled an exception. | |
1725 Mov(x5, ExternalReference::scheduled_exception_address(isolate())); | |
1726 Ldr(x5, MemOperand(x5)); | |
1727 JumpIfNotRoot(x5, Heap::kTheHoleValueRootIndex, &promote_scheduled_exception); | |
1728 Bind(&exception_handled); | |
1729 | |
1730 bool restore_context = context_restore_operand != NULL; | |
1731 if (restore_context) { | |
1732 Ldr(cp, *context_restore_operand); | |
1733 } | |
1734 | |
1735 LeaveExitFrame(false, x1, !restore_context); | |
1736 Drop(stack_space); | |
1737 Ret(); | |
1738 | |
1739 Bind(&promote_scheduled_exception); | |
1740 { | |
1741 FrameScope frame(this, StackFrame::INTERNAL); | |
1742 CallExternalReference( | |
1743 ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0); | |
1744 } | |
1745 B(&exception_handled); | |
1746 | |
1747 // HandleScope limit has changed. Delete allocated extensions. | |
1748 Bind(&delete_allocated_handles); | |
1749 Str(limit_reg, MemOperand(handle_scope_base, kLimitOffset)); | |
1750 // Save the return value in a callee-save register. | |
1751 Register saved_result = x19; | |
1752 Mov(saved_result, x0); | |
1753 Mov(x0, ExternalReference::isolate_address(isolate())); | |
1754 CallCFunction( | |
1755 ExternalReference::delete_handle_scope_extensions(isolate()), 1); | |
1756 Mov(x0, saved_result); | |
1757 B(&leave_exit_frame); | |
1758 } | |
1759 | |
1760 | |
1761 void MacroAssembler::CallExternalReference(const ExternalReference& ext, | |
1762 int num_arguments) { | |
1763 Mov(x0, num_arguments); | |
1764 Mov(x1, ext); | |
1765 | |
1766 CEntryStub stub(1); | |
1767 CallStub(&stub); | |
1768 } | |
1769 | |
1770 | |
1771 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { | |
1772 Mov(x1, builtin); | |
1773 CEntryStub stub(1); | |
1774 Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET); | |
1775 } | |
1776 | |
1777 | |
1778 void MacroAssembler::GetBuiltinFunction(Register target, | |
1779 Builtins::JavaScript id) { | |
1780 // Load the builtins object into target register. | |
1781 Ldr(target, GlobalObjectMemOperand()); | |
1782 Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); | |
1783 // Load the JavaScript builtin function from the builtins object. | |
1784 Ldr(target, FieldMemOperand(target, | |
1785 JSBuiltinsObject::OffsetOfFunctionWithId(id))); | |
1786 } | |
1787 | |
1788 | |
1789 void MacroAssembler::GetBuiltinEntry(Register target, | |
1790 Register function, | |
1791 Builtins::JavaScript id) { | |
1792 ASSERT(!AreAliased(target, function)); | |
1793 GetBuiltinFunction(function, id); | |
1794 // Load the code entry point from the builtins object. | |
1795 Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); | |
1796 } | |
1797 | |
1798 | |
1799 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, | |
1800 InvokeFlag flag, | |
1801 const CallWrapper& call_wrapper) { | |
1802 ASM_LOCATION("MacroAssembler::InvokeBuiltin"); | |
1803 // You can't call a builtin without a valid frame. | |
1804 ASSERT(flag == JUMP_FUNCTION || has_frame()); | |
1805 | |
1806 // Get the builtin entry in x2 and setup the function object in x1. | |
1807 GetBuiltinEntry(x2, x1, id); | |
1808 if (flag == CALL_FUNCTION) { | |
1809 call_wrapper.BeforeCall(CallSize(x2)); | |
1810 Call(x2); | |
1811 call_wrapper.AfterCall(); | |
1812 } else { | |
1813 ASSERT(flag == JUMP_FUNCTION); | |
1814 Jump(x2); | |
1815 } | |
1816 } | |
1817 | |
1818 | |
1819 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, | |
1820 int num_arguments, | |
1821 int result_size) { | |
1822 // TODO(1236192): Most runtime routines don't need the number of | |
1823 // arguments passed in because it is constant. At some point we | |
1824 // should remove this need and make the runtime routine entry code | |
1825 // smarter. | |
1826 Mov(x0, num_arguments); | |
1827 JumpToExternalReference(ext); | |
1828 } | |
1829 | |
1830 | |
1831 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, | |
1832 int num_arguments, | |
1833 int result_size) { | |
1834 TailCallExternalReference(ExternalReference(fid, isolate()), | |
1835 num_arguments, | |
1836 result_size); | |
1837 } | |
1838 | |
1839 | |
1840 void MacroAssembler::InitializeNewString(Register string, | |
1841 Register length, | |
1842 Heap::RootListIndex map_index, | |
1843 Register scratch1, | |
1844 Register scratch2) { | |
1845 ASSERT(!AreAliased(string, length, scratch1, scratch2)); | |
1846 LoadRoot(scratch2, map_index); | |
1847 SmiTag(scratch1, length); | |
1848 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); | |
1849 | |
1850 Mov(scratch2, String::kEmptyHashField); | |
1851 Str(scratch1, FieldMemOperand(string, String::kLengthOffset)); | |
1852 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset)); | |
1853 } | |
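
// After InitializeNewString, the new string's header looks like this (sketch):
//   [HeapObject::kMapOffset]     : map loaded from map_index
//   [String::kLengthOffset]      : length, stored as a smi
//   [String::kHashFieldOffset]   : String::kEmptyHashField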
1854 | |
1855 | |
1856 int MacroAssembler::ActivationFrameAlignment() { | |
1857 #if V8_HOST_ARCH_A64 | |
1858 // Running on the real platform. Use the alignment as mandated by the local | |
1859 // environment. | |
1860 // Note: This will break if we ever start generating snapshots on one ARM | |
1861 // platform for another ARM platform with a different alignment. | |
1862 return OS::ActivationFrameAlignment(); | |
1863 #else // V8_HOST_ARCH_A64 | |
1864 // If we are using the simulator then we should always align to the expected | |
1865 // alignment. As the simulator is used to generate snapshots we do not know | |
1866 // if the target platform will need alignment, so this is controlled from a | |
1867 // flag. | |
1868 return FLAG_sim_stack_alignment; | |
1869 #endif // V8_HOST_ARCH_A64 | |
1870 } | |
1871 | |
1872 | |
1873 void MacroAssembler::CallCFunction(ExternalReference function, | |
1874 int num_of_reg_args) { | |
1875 CallCFunction(function, num_of_reg_args, 0); | |
1876 } | |
1877 | |
1878 | |
1879 void MacroAssembler::CallCFunction(ExternalReference function, | |
1880 int num_of_reg_args, | |
1881 int num_of_double_args) { | |
1882 UseScratchRegisterScope temps(this); | |
1883 Register temp = temps.AcquireX(); | |
1884 Mov(temp, function); | |
1885 CallCFunction(temp, num_of_reg_args, num_of_double_args); | |
1886 } | |
1887 | |
1888 | |
1889 void MacroAssembler::CallCFunction(Register function, | |
1890 int num_of_reg_args, | |
1891 int num_of_double_args) { | |
1892 ASSERT(has_frame()); | |
1893 // We can pass 8 integer arguments in registers. If we need to pass more than | |
1894 // that, we'll need to implement support for passing them on the stack. | |
1895 ASSERT(num_of_reg_args <= 8); | |
1896 | |
1897 // If we're passing doubles, we're limited to the following prototypes | |
1898 // (defined by ExternalReference::Type): | |
1899 // BUILTIN_COMPARE_CALL: int f(double, double) | |
1900 // BUILTIN_FP_FP_CALL: double f(double, double) | |
1901 // BUILTIN_FP_CALL: double f(double) | |
1902 // BUILTIN_FP_INT_CALL: double f(double, int) | |
1903 if (num_of_double_args > 0) { | |
1904 ASSERT(num_of_reg_args <= 1); | |
1905 ASSERT((num_of_double_args + num_of_reg_args) <= 2); | |
1906 } | |
1907 | |
1909 // If the stack pointer is not csp, we need to derive an aligned csp from the | |
1910 // current stack pointer. | |
1911 const Register old_stack_pointer = StackPointer(); | |
1912 if (!csp.Is(old_stack_pointer)) { | |
1913 AssertStackConsistency(); | |
1914 | |
1915 int sp_alignment = ActivationFrameAlignment(); | |
1916 // The ABI mandates at least 16-byte alignment. | |
1917 ASSERT(sp_alignment >= 16); | |
1918 ASSERT(IsPowerOf2(sp_alignment)); | |
1919 | |
1920 // The current stack pointer is a callee saved register, and is preserved | |
1921 // across the call. | |
1922 ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer)); | |
1923 | |
1924 // Align and synchronize the system stack pointer with jssp. | |
1925 Bic(csp, old_stack_pointer, sp_alignment - 1); | |
1926 SetStackPointer(csp); | |
1927 } | |
1928 | |
1929 // Call directly. The function called cannot cause a GC, or allow preemption, | |
1930 // so the return address in the link register stays correct. | |
1931 Call(function); | |
1932 | |
1933 if (!csp.Is(old_stack_pointer)) { | |
1934 if (emit_debug_code()) { | |
1935 // Because the stack pointer must be aligned on a 16-byte boundary, the | |
1936 // aligned csp can be up to 12 bytes below the jssp. This is the case | |
1937 // where we only pushed one W register on top of an aligned jssp. | |
1938 UseScratchRegisterScope temps(this); | |
1939 Register temp = temps.AcquireX(); | |
1940 ASSERT(ActivationFrameAlignment() == 16); | |
1941 Sub(temp, csp, old_stack_pointer); | |
1942 // We want temp <= 0 && temp >= -12. | |
1943 Cmp(temp, 0); | |
1944 Ccmp(temp, -12, NFlag, le); | |
1945 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall); | |
1946 } | |
1947 SetStackPointer(old_stack_pointer); | |
1948 } | |
1949 } | |
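
// Usage sketch (illustrative, assuming a double f(double, double) helper such
// as ExternalReference::power_double_double_function): the double arguments
// are passed in d0 and d1, with no integer register arguments.
//   Fmov(d0, 2.0);
//   Fmov(d1, 10.0);
//   CallCFunction(
//       ExternalReference::power_double_double_function(isolate()), 0, 2);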
1950 | |
1951 | |
1952 void MacroAssembler::Jump(Register target) { | |
1953 Br(target); | |
1954 } | |
1955 | |
1956 | |
1957 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) { | |
1958 UseScratchRegisterScope temps(this); | |
1959 Register temp = temps.AcquireX(); | |
1960 Mov(temp, Operand(target, rmode)); | |
1961 Br(temp); | |
1962 } | |
1963 | |
1964 | |
1965 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) { | |
1966 ASSERT(!RelocInfo::IsCodeTarget(rmode)); | |
1967 Jump(reinterpret_cast<intptr_t>(target), rmode); | |
1968 } | |
1969 | |
1970 | |
1971 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) { | |
1972 ASSERT(RelocInfo::IsCodeTarget(rmode)); | |
1973 AllowDeferredHandleDereference embedding_raw_address; | |
1974 Jump(reinterpret_cast<intptr_t>(code.location()), rmode); | |
1975 } | |
1976 | |
1977 | |
1978 void MacroAssembler::Call(Register target) { | |
1979 BlockPoolsScope scope(this); | |
1980 #ifdef DEBUG | |
1981 Label start_call; | |
1982 Bind(&start_call); | |
1983 #endif | |
1984 | |
1985 Blr(target); | |
1986 | |
1987 #ifdef DEBUG | |
1988 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target)); | |
1989 #endif | |
1990 } | |
1991 | |
1992 | |
1993 void MacroAssembler::Call(Label* target) { | |
1994 BlockPoolsScope scope(this); | |
1995 #ifdef DEBUG | |
1996 Label start_call; | |
1997 Bind(&start_call); | |
1998 #endif | |
1999 | |
2000 Bl(target); | |
2001 | |
2002 #ifdef DEBUG | |
2003 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target)); | |
2004 #endif | |
2005 } | |
2006 | |
2007 | |
2008 // MacroAssembler::CallSize is sensitive to changes in this function, as it | |
2009 // needs to know how many instructions are used to branch to the target. | |
2010 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) { | |
2011 BlockPoolsScope scope(this); | |
2012 #ifdef DEBUG | |
2013 Label start_call; | |
2014 Bind(&start_call); | |
2015 #endif | |
2016 // Statement positions are expected to be recorded when the target | |
2017 // address is loaded. | |
2018 positions_recorder()->WriteRecordedPositions(); | |
2019 | |
2020 // Addresses always have 64 bits, so we shouldn't encounter NONE32. | |
2021 ASSERT(rmode != RelocInfo::NONE32); | |
2022 | |
2023 UseScratchRegisterScope temps(this); | |
2024 Register temp = temps.AcquireX(); | |
2025 | |
2026 if (rmode == RelocInfo::NONE64) { | |
2027 uint64_t imm = reinterpret_cast<uint64_t>(target); | |
2028 movz(temp, (imm >> 0) & 0xffff, 0); | |
2029 movk(temp, (imm >> 16) & 0xffff, 16); | |
2030 movk(temp, (imm >> 32) & 0xffff, 32); | |
2031 movk(temp, (imm >> 48) & 0xffff, 48); | |
2032 } else { | |
2033 LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode)); | |
2034 } | |
2035 Blr(temp); | |
2036 #ifdef DEBUG | |
2037 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode)); | |
2038 #endif | |
2039 } | |
2040 | |
2041 | |
2042 void MacroAssembler::Call(Handle<Code> code, | |
2043 RelocInfo::Mode rmode, | |
2044 TypeFeedbackId ast_id) { | |
2045 #ifdef DEBUG | |
2046 Label start_call; | |
2047 Bind(&start_call); | |
2048 #endif | |
2049 | |
2050 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) { | |
2051 SetRecordedAstId(ast_id); | |
2052 rmode = RelocInfo::CODE_TARGET_WITH_ID; | |
2053 } | |
2054 | |
2055 AllowDeferredHandleDereference embedding_raw_address; | |
2056 Call(reinterpret_cast<Address>(code.location()), rmode); | |
2057 | |
2058 #ifdef DEBUG | |
2059 // Check the size of the code generated. | |
2060 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id)); | |
2061 #endif | |
2062 } | |
2063 | |
2064 | |
2065 int MacroAssembler::CallSize(Register target) { | |
2066 USE(target); | |
2067 return kInstructionSize; | |
2068 } | |
2069 | |
2070 | |
2071 int MacroAssembler::CallSize(Label* target) { | |
2072 USE(target); | |
2073 return kInstructionSize; | |
2074 } | |
2075 | |
2076 | |
2077 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) { | |
2078 USE(target); | |
2079 | |
2080 // Addresses always have 64 bits, so we shouldn't encounter NONE32. | |
2081 ASSERT(rmode != RelocInfo::NONE32); | |
2082 | |
2083 if (rmode == RelocInfo::NONE64) { | |
2084 return kCallSizeWithoutRelocation; | |
2085 } else { | |
2086 return kCallSizeWithRelocation; | |
2087 } | |
2088 } | |
2089 | |
2090 | |
2091 int MacroAssembler::CallSize(Handle<Code> code, | |
2092 RelocInfo::Mode rmode, | |
2093 TypeFeedbackId ast_id) { | |
2094 USE(code); | |
2095 USE(ast_id); | |
2096 | |
2097 // Addresses always have 64 bits, so we shouldn't encounter NONE32. | |
2098 ASSERT(rmode != RelocInfo::NONE32); | |
2099 | |
2100 if (rmode == RelocInfo::NONE64) { | |
2101 return kCallSizeWithoutRelocation; | |
2102 } else { | |
2103 return kCallSizeWithRelocation; | |
2104 } | |
2105 } | |
2106 | |
2107 | |
2111 void MacroAssembler::JumpForHeapNumber(Register object, | |
2112 Register heap_number_map, | |
2113 Label* on_heap_number, | |
2114 Label* on_not_heap_number) { | |
2115 ASSERT(on_heap_number || on_not_heap_number); | |
2116 AssertNotSmi(object); | |
2117 | |
2118 UseScratchRegisterScope temps(this); | |
2119 Register temp = temps.AcquireX(); | |
2120 | |
2121 // Load the HeapNumber map if it is not passed. | |
2122 if (heap_number_map.Is(NoReg)) { | |
2123 heap_number_map = temps.AcquireX(); | |
2124 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
2125 } else { | |
2126 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
2127 } | |
2128 | |
2129 ASSERT(!AreAliased(temp, heap_number_map)); | |
2130 | |
2131 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | |
2132 Cmp(temp, heap_number_map); | |
2133 | |
2134 if (on_heap_number) { | |
2135 B(eq, on_heap_number); | |
2136 } | |
2137 if (on_not_heap_number) { | |
2138 B(ne, on_not_heap_number); | |
2139 } | |
2140 } | |
2141 | |
2142 | |
2143 void MacroAssembler::JumpIfHeapNumber(Register object, | |
2144 Label* on_heap_number, | |
2145 Register heap_number_map) { | |
2146 JumpForHeapNumber(object, | |
2147 heap_number_map, | |
2148 on_heap_number, | |
2149 NULL); | |
2150 } | |
2151 | |
2152 | |
2153 void MacroAssembler::JumpIfNotHeapNumber(Register object, | |
2154 Label* on_not_heap_number, | |
2155 Register heap_number_map) { | |
2156 JumpForHeapNumber(object, | |
2157 heap_number_map, | |
2158 NULL, | |
2159 on_not_heap_number); | |
2160 } | |
2161 | |
2162 | |
2163 void MacroAssembler::LookupNumberStringCache(Register object, | |
2164 Register result, | |
2165 Register scratch1, | |
2166 Register scratch2, | |
2167 Register scratch3, | |
2168 Label* not_found) { | |
2169 ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3)); | |
2170 | |
2171 // Use of registers. Register result is used as a temporary. | |
2172 Register number_string_cache = result; | |
2173 Register mask = scratch3; | |
2174 | |
2175 // Load the number string cache. | |
2176 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); | |
2177 | |
2178 // Make the hash mask from the length of the number string cache. It | |
2179 // contains two elements (number and string) for each cache entry. | |
2180 Ldrsw(mask, UntagSmiFieldMemOperand(number_string_cache, | |
2181 FixedArray::kLengthOffset)); | |
2182 Asr(mask, mask, 1); // Divide length by two. | |
2183 Sub(mask, mask, 1); // Make mask. | |
2184 | |
2185 // Calculate the entry in the number string cache. The hash value in the | |
2186 // number string cache for smis is just the smi value, and the hash for | |
2187 // doubles is the xor of the upper and lower words. See | |
2188 // Heap::GetNumberStringCache. | |
2189 Label is_smi; | |
2190 Label load_result_from_cache; | |
2191 | |
2192 JumpIfSmi(object, &is_smi); | |
2193 CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found, | |
2194 DONT_DO_SMI_CHECK); | |
2195 | |
2196 STATIC_ASSERT(kDoubleSize == (kWRegSize * 2)); | |
2197 Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag); | |
2198 Ldp(scratch1.W(), scratch2.W(), MemOperand(scratch1)); | |
2199 Eor(scratch1, scratch1, scratch2); | |
2200 And(scratch1, scratch1, mask); | |
2201 | |
2202 // Calculate address of entry in string cache: each entry consists of two | |
2203 // pointer sized fields. | |
2204 Add(scratch1, number_string_cache, | |
2205 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); | |
2206 | |
2207 Register probe = mask; | |
2208 Ldr(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | |
2209 JumpIfSmi(probe, not_found); | |
2210 Ldr(d0, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
2211 Ldr(d1, FieldMemOperand(probe, HeapNumber::kValueOffset)); | |
2212 Fcmp(d0, d1); | |
2213 B(ne, not_found); | |
2214 B(&load_result_from_cache); | |
2215 | |
2216 Bind(&is_smi); | |
2217 Register scratch = scratch1; | |
2218 And(scratch, mask, Operand::UntagSmi(object)); | |
2219 // Calculate address of entry in string cache: each entry consists | |
2220 // of two pointer sized fields. | |
2221 Add(scratch, number_string_cache, | |
2222 Operand(scratch, LSL, kPointerSizeLog2 + 1)); | |
2223 | |
2224 // Check if the entry is the smi we are looking for. | |
2225 Ldr(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); | |
2226 Cmp(object, probe); | |
2227 B(ne, not_found); | |
2228 | |
2229 // Get the result from the cache. | |
2230 Bind(&load_result_from_cache); | |
2231 Ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); | |
2232 IncrementCounter(isolate()->counters()->number_to_string_native(), 1, | |
2233 scratch1, scratch2); | |
2234 } | |
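
// Cache layout sketch: the number string cache is a FixedArray holding
// (number, string) pairs back to back, so entry i starts at
// FixedArray::kHeaderSize + i * 2 * kPointerSize. That is why the cached
// string is loaded from kHeaderSize + kPointerSize above.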
2235 | |
2236 | |
2237 void MacroAssembler::TryConvertDoubleToInt(Register as_int, | |
2238 FPRegister value, | |
2239 FPRegister scratch_d, | |
2240 Label* on_successful_conversion, | |
2241 Label* on_failed_conversion) { | |
2242 // Convert to an int and back again, then compare with the original value. | |
2243 Fcvtzs(as_int, value); | |
2244 Scvtf(scratch_d, as_int); | |
2245 Fcmp(value, scratch_d); | |
2246 | |
2247 if (on_successful_conversion) { | |
2248 B(on_successful_conversion, eq); | |
2249 } | |
2250 if (on_failed_conversion) { | |
2251 B(on_failed_conversion, ne); | |
2252 } | |
2253 } | |
2254 | |
2255 | |
2256 void MacroAssembler::TestForMinusZero(DoubleRegister input) { | |
2257 UseScratchRegisterScope temps(this); | |
2258 Register temp = temps.AcquireX(); | |
2259 // Floating point -0.0 is kMinInt64 as an integer, so subtracting 1 (cmp) will | |
2260 // cause signed overflow and set the V flag. | |
2261 Fmov(temp, input); | |
2262 Cmp(temp, 1); | |
2263 } | |
2264 | |
2265 | |
2266 void MacroAssembler::JumpIfMinusZero(DoubleRegister input, | |
2267 Label* on_negative_zero) { | |
2268 TestForMinusZero(input); | |
2269 B(vs, on_negative_zero); | |
2270 } | |
2271 | |
2272 | |
2273 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) { | |
2274 // Clamp the value to [0..255]. | |
2275 Cmp(input.W(), Operand(input.W(), UXTB)); | |
2276 // If input < input & 0xff, it must be < 0, so saturate to 0. | |
2277 Csel(output.W(), wzr, input.W(), lt); | |
2278 // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255. | |
2279 Csel(output.W(), output.W(), 255, le); | |
2280 } | |
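
// Worked examples (values are illustrative):
//   input == -5  : -5 - 0xfb is negative, so 'lt' selects 0; 'le' then keeps
//                  that 0.
//   input == 300 : 300 - 0x2c is positive, so both conditions fail and the
//                  second Csel selects 255.
//   input == 42  : the compared values are equal, so 42 passes through.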
2281 | |
2282 | |
2283 void MacroAssembler::ClampInt32ToUint8(Register in_out) { | |
2284 ClampInt32ToUint8(in_out, in_out); | |
2285 } | |
2286 | |
2287 | |
2288 void MacroAssembler::ClampDoubleToUint8(Register output, | |
2289 DoubleRegister input, | |
2290 DoubleRegister dbl_scratch) { | |
2291 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types: | |
2292 // - Inputs lower than 0 (including -infinity) produce 0. | |
2293 // - Inputs higher than 255 (including +infinity) produce 255. | |
2294 // Also, it seems that PIXEL types use round-to-nearest rather than | |
2295 // round-towards-zero. | |
2296 | |
2297 // Squash +infinity before the conversion, since Fcvtnu would otherwise | |
2298 // saturate it to the largest representable integer rather than 255. | |
2299 Fmov(dbl_scratch, 255); | |
2300 Fmin(dbl_scratch, dbl_scratch, input); | |
2301 | |
2302 // Convert double to unsigned integer. Values less than zero become zero. | |
2303 // Values greater than 255 have already been clamped to 255. | |
2304 Fcvtnu(output, dbl_scratch); | |
2305 } | |
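
// Illustrative values: 300.7 is clamped to 255.0 by the Fmin; -3.5 converts
// to 0 because Fcvtnu saturates negative inputs to zero; 2.5 rounds to 2
// under Fcvtnu's round-to-nearest (ties-to-even) mode.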
2306 | |
2307 | |
2308 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst, | |
2309 Register src, | |
2310 unsigned count, | |
2311 Register scratch1, | |
2312 Register scratch2, | |
2313 Register scratch3, | |
2314 Register scratch4, | |
2315 Register scratch5) { | |
2316 // Untag src and dst into scratch registers. | |
2317 // Copy src->dst in a tight loop. | |
2318 ASSERT(!AreAliased(dst, src, | |
2319 scratch1, scratch2, scratch3, scratch4, scratch5)); | |
2320 ASSERT(count >= 2); | |
2321 | |
2322 const Register& remaining = scratch3; | |
2323 Mov(remaining, count / 2); | |
2324 | |
2325 const Register& dst_untagged = scratch1; | |
2326 const Register& src_untagged = scratch2; | |
2327 Sub(dst_untagged, dst, kHeapObjectTag); | |
2328 Sub(src_untagged, src, kHeapObjectTag); | |
2329 | |
2330 // Copy fields in pairs. | |
2331 Label loop; | |
2332 Bind(&loop); | |
2333 Ldp(scratch4, scratch5, | |
2334 MemOperand(src_untagged, kXRegSize * 2, PostIndex)); | |
2335 Stp(scratch4, scratch5, | |
2336 MemOperand(dst_untagged, kXRegSize * 2, PostIndex)); | |
2337 Sub(remaining, remaining, 1); | |
2338 Cbnz(remaining, &loop); | |
2339 | |
2340 // Handle the leftovers. | |
2341 if (count & 1) { | |
2342 Ldr(scratch4, MemOperand(src_untagged)); | |
2343 Str(scratch4, MemOperand(dst_untagged)); | |
2344 } | |
2345 } | |
2346 | |
2347 | |
2348 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst, | |
2349 Register src, | |
2350 unsigned count, | |
2351 Register scratch1, | |
2352 Register scratch2, | |
2353 Register scratch3, | |
2354 Register scratch4) { | |
2355 // Untag src and dst into scratch registers. | |
2356 // Copy src->dst in an unrolled loop. | |
2357 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4)); | |
2358 | |
2359 const Register& dst_untagged = scratch1; | |
2360 const Register& src_untagged = scratch2; | |
2361 Sub(dst_untagged, dst, kHeapObjectTag); | |
2362 Sub(src_untagged, src, kHeapObjectTag); | |
2363 | |
2364 // Copy fields in pairs. | |
2365 for (unsigned i = 0; i < count / 2; i++) { | |
2366 Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex)); | |
2367 Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex)); | |
2368 } | |
2369 | |
2370 // Handle the leftovers. | |
2371 if (count & 1) { | |
2372 Ldr(scratch3, MemOperand(src_untagged)); | |
2373 Str(scratch3, MemOperand(dst_untagged)); | |
2374 } | |
2375 } | |
2376 | |
2377 | |
2378 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst, | |
2379 Register src, | |
2380 unsigned count, | |
2381 Register scratch1, | |
2382 Register scratch2, | |
2383 Register scratch3) { | |
2384 // Untag src and dst into scratch registers. | |
2385 // Copy src->dst in an unrolled loop. | |
2386 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3)); | |
2387 | |
2388 const Register& dst_untagged = scratch1; | |
2389 const Register& src_untagged = scratch2; | |
2390 Sub(dst_untagged, dst, kHeapObjectTag); | |
2391 Sub(src_untagged, src, kHeapObjectTag); | |
2392 | |
2393 // Copy fields one by one. | |
2394 for (unsigned i = 0; i < count; i++) { | |
2395 Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex)); | |
2396 Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex)); | |
2397 } | |
2398 } | |
2399 | |
2400 | |
2401 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps, | |
2402 unsigned count) { | |
2403 // One of two methods is used: | |
2404 // | |
2405 // For high 'count' values where many scratch registers are available: | |
2406 // Untag src and dst into scratch registers. | |
2407 // Copy src->dst in a tight loop. | |
2408 // | |
2409 // For low 'count' values or where few scratch registers are available: | |
2410 // Untag src and dst into scratch registers. | |
2411 // Copy src->dst in an unrolled loop. | |
2412 // | |
2413 // In both cases, fields are copied in pairs if possible, and left-overs are | |
2414 // handled separately. | |
2415 ASSERT(!AreAliased(dst, src)); | |
2416 ASSERT(!temps.IncludesAliasOf(dst)); | |
2417 ASSERT(!temps.IncludesAliasOf(src)); | |
2418 ASSERT(!temps.IncludesAliasOf(xzr)); | |
2419 | |
2420 if (emit_debug_code()) { | |
2421 Cmp(dst, src); | |
2422 Check(ne, kTheSourceAndDestinationAreTheSame); | |
2423 } | |
2424 | |
2425 // The value of 'count' at which a loop will be generated (if there are | |
2426 // enough scratch registers). | |
2427 static const unsigned kLoopThreshold = 8; | |
2428 | |
2429 UseScratchRegisterScope masm_temps(this); | |
2430 if ((temps.Count() >= 3) && (count >= kLoopThreshold)) { | |
2431 CopyFieldsLoopPairsHelper(dst, src, count, | |
2432 Register(temps.PopLowestIndex()), | |
2433 Register(temps.PopLowestIndex()), | |
2434 Register(temps.PopLowestIndex()), | |
2435 masm_temps.AcquireX(), | |
2436 masm_temps.AcquireX()); | |
2437 } else if (temps.Count() >= 2) { | |
2438 CopyFieldsUnrolledPairsHelper(dst, src, count, | |
2439 Register(temps.PopLowestIndex()), | |
2440 Register(temps.PopLowestIndex()), | |
2441 masm_temps.AcquireX(), | |
2442 masm_temps.AcquireX()); | |
2443 } else if (temps.Count() == 1) { | |
2444 CopyFieldsUnrolledHelper(dst, src, count, | |
2445 Register(temps.PopLowestIndex()), | |
2446 masm_temps.AcquireX(), | |
2447 masm_temps.AcquireX()); | |
2448 } else { | |
2449 UNREACHABLE(); | |
2450 } | |
2451 } | |
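
// Usage sketch (registers and field count are illustrative):
//   CPURegList temps(x6, x7, x8);
//   CopyFields(x1, x2, temps, 5);   // copy five fields from x2 to x1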
2452 | |
2453 | |
2454 void MacroAssembler::CopyBytes(Register dst, | |
2455 Register src, | |
2456 Register length, | |
2457 Register scratch, | |
2458 CopyHint hint) { | |
2459 UseScratchRegisterScope temps(this); | |
2460 Register tmp1 = temps.AcquireX(); | |
2461 Register tmp2 = temps.AcquireX(); | |
2462 ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2)); | |
2463 ASSERT(!AreAliased(src, dst, csp)); | |
2464 | |
2465 if (emit_debug_code()) { | |
2466 // Check copy length. | |
2467 Cmp(length, 0); | |
2468 Assert(ge, kUnexpectedNegativeValue); | |
2469 | |
2470 // Check src and dst buffers don't overlap. | |
2471 Add(scratch, src, length); // Calculate end of src buffer. | |
2472 Cmp(scratch, dst); | |
2473 Add(scratch, dst, length); // Calculate end of dst buffer. | |
2474 Ccmp(scratch, src, ZFlag, gt); | |
2475 Assert(le, kCopyBuffersOverlap); | |
2476 } | |
2477 | |
2478 Label short_copy, short_loop, bulk_loop, done; | |
2479 | |
2480 if ((hint == kCopyLong || hint == kCopyUnknown) && !FLAG_optimize_for_size) { | |
2481 Register bulk_length = scratch; | |
2482 int pair_size = 2 * kXRegSize; | |
2483 int pair_mask = pair_size - 1; | |
2484 | |
2485 Bic(bulk_length, length, pair_mask); | |
2486 Cbz(bulk_length, &short_copy); | |
2487 Bind(&bulk_loop); | |
2488 Sub(bulk_length, bulk_length, pair_size); | |
2489 Ldp(tmp1, tmp2, MemOperand(src, pair_size, PostIndex)); | |
2490 Stp(tmp1, tmp2, MemOperand(dst, pair_size, PostIndex)); | |
2491 Cbnz(bulk_length, &bulk_loop); | |
2492 | |
2493 And(length, length, pair_mask); | |
2494 } | |
2495 | |
2496 Bind(&short_copy); | |
2497 Cbz(length, &done); | |
2498 Bind(&short_loop); | |
2499 Sub(length, length, 1); | |
2500 Ldrb(tmp1, MemOperand(src, 1, PostIndex)); | |
2501 Strb(tmp1, MemOperand(dst, 1, PostIndex)); | |
2502 Cbnz(length, &short_loop); | |
2503 | |
2505 Bind(&done); | |
2506 } | |
2507 | |
2508 | |
2509 void MacroAssembler::FillFields(Register dst, | |
2510 Register field_count, | |
2511 Register filler) { | |
2512 ASSERT(!dst.Is(csp)); | |
2513 UseScratchRegisterScope temps(this); | |
2514 Register field_ptr = temps.AcquireX(); | |
2515 Register counter = temps.AcquireX(); | |
2516 Label done; | |
2517 | |
2518 // Decrement count. If the result < zero, count was zero, and there's nothing | |
2519 // to do. If count was one, flags are set to fail the gt condition at the end | |
2520 // of the pairs loop. | |
2521 Subs(counter, field_count, 1); | |
2522 B(lt, &done); | |
2523 | |
2524 // There's at least one field to fill, so do this unconditionally. | |
2525 Str(filler, MemOperand(dst, kPointerSize, PostIndex)); | |
2526 | |
2527 // If the bottom bit of counter is set, there are an even number of fields to | |
2528 // fill, so pull the start pointer back by one field, allowing the pairs loop | |
2529 // to overwrite the field that was stored above. | |
2530 And(field_ptr, counter, 1); | |
2531 Sub(field_ptr, dst, Operand(field_ptr, LSL, kPointerSizeLog2)); | |
2532 | |
2533 // Store filler to memory in pairs. | |
2534 Label entry, loop; | |
2535 B(&entry); | |
2536 Bind(&loop); | |
2537 Stp(filler, filler, MemOperand(field_ptr, 2 * kPointerSize, PostIndex)); | |
2538 Subs(counter, counter, 2); | |
2539 Bind(&entry); | |
2540 B(gt, &loop); | |
2541 | |
2542 Bind(&done); | |
2543 } | |
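
// Worked example (illustrative): field_count == 4.
//   counter = 3 and one filler is stored unconditionally. The bottom bit of
//   counter is set, so field_ptr is pulled back by one field and the Stp loop
//   then writes fields 0-1 and 2-3, overwriting the single store; all four
//   fields end up filled.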
2544 | |
2545 | |
2546 void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings( | |
2547 Register first, | |
2548 Register second, | |
2549 Register scratch1, | |
2550 Register scratch2, | |
2551 Label* failure, | |
2552 SmiCheckType smi_check) { | |
2554 if (smi_check == DO_SMI_CHECK) { | |
2555 JumpIfEitherSmi(first, second, failure); | |
2556 } else if (emit_debug_code()) { | |
2557 ASSERT(smi_check == DONT_DO_SMI_CHECK); | |
2558 Label not_smi; | |
2559 JumpIfEitherSmi(first, second, NULL, ¬_smi); | |
2560 | |
2561 // At least one input is a smi, but the flags indicated a smi check wasn't | |
2562 // needed. | |
2563 Abort(kUnexpectedSmi); | |
2564 | |
2565 Bind(¬_smi); | |
2566 } | |
2567 | |
2568 // Test that both first and second are sequential ASCII strings. | |
2569 Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); | |
2570 Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); | |
2571 Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); | |
2572 Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); | |
2573 | |
2574 JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1, | |
2575 scratch2, | |
2576 scratch1, | |
2577 scratch2, | |
2578 failure); | |
2579 } | |
2580 | |
2581 | |
2582 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii( | |
2583 Register first, | |
2584 Register second, | |
2585 Register scratch1, | |
2586 Register scratch2, | |
2587 Label* failure) { | |
2588 ASSERT(!AreAliased(scratch1, second)); | |
2589 ASSERT(!AreAliased(scratch1, scratch2)); | |
2590 static const int kFlatAsciiStringMask = | |
2591 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | |
2592 static const int kFlatAsciiStringTag = ASCII_STRING_TYPE; | |
2593 And(scratch1, first, kFlatAsciiStringMask); | |
2594 And(scratch2, second, kFlatAsciiStringMask); | |
2595 Cmp(scratch1, kFlatAsciiStringTag); | |
2596 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq); | |
2597 B(ne, failure); | |
2598 } | |
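
// Note on the Cmp/Ccmp pair above: if the first comparison sets 'eq', the
// Ccmp performs the second comparison; otherwise it forces the flags to
// NoFlag, which also fails 'eq'. A single B(ne) therefore rejects a mismatch
// in either input.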
2599 | |
2600 | |
2601 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type, | |
2602 Register scratch, | |
2603 Label* failure) { | |
2604 const int kFlatAsciiStringMask = | |
2605 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | |
2606 const int kFlatAsciiStringTag = | |
2607 kStringTag | kOneByteStringTag | kSeqStringTag; | |
2608 And(scratch, type, kFlatAsciiStringMask); | |
2609 Cmp(scratch, kFlatAsciiStringTag); | |
2610 B(ne, failure); | |
2611 } | |
2612 | |
2613 | |
2614 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( | |
2615 Register first, | |
2616 Register second, | |
2617 Register scratch1, | |
2618 Register scratch2, | |
2619 Label* failure) { | |
2620 ASSERT(!AreAliased(first, second, scratch1, scratch2)); | |
2621 const int kFlatAsciiStringMask = | |
2622 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; | |
2623 const int kFlatAsciiStringTag = | |
2624 kStringTag | kOneByteStringTag | kSeqStringTag; | |
2625 And(scratch1, first, kFlatAsciiStringMask); | |
2626 And(scratch2, second, kFlatAsciiStringMask); | |
2627 Cmp(scratch1, kFlatAsciiStringTag); | |
2628 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq); | |
2629 B(ne, failure); | |
2630 } | |
2631 | |
2632 | |
2633 void MacroAssembler::JumpIfNotUniqueName(Register type, | |
2634 Label* not_unique_name) { | |
2635 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); | |
2636 // if ((type is string && type is internalized) || type == SYMBOL_TYPE) { | |
2637 // continue | |
2638 // } else { | |
2639 // goto not_unique_name | |
2640 // } | |
2641 Tst(type, kIsNotStringMask | kIsNotInternalizedMask); | |
2642 Ccmp(type, SYMBOL_TYPE, ZFlag, ne); | |
2643 B(ne, not_unique_name); | |
2644 } | |
2645 | |
2646 | |
2647 void MacroAssembler::InvokePrologue(const ParameterCount& expected, | |
2648 const ParameterCount& actual, | |
2649 Handle<Code> code_constant, | |
2650 Register code_reg, | |
2651 Label* done, | |
2652 InvokeFlag flag, | |
2653 bool* definitely_mismatches, | |
2654 const CallWrapper& call_wrapper) { | |
2655 bool definitely_matches = false; | |
2656 *definitely_mismatches = false; | |
2657 Label regular_invoke; | |
2658 | |
2659 // Check whether the expected and actual argument counts match. If not, | |
2660 // set up registers according to the contract with ArgumentsAdaptorTrampoline: | |
2661 // x0: actual arguments count. | |
2662 // x1: function (passed through to callee). | |
2663 // x2: expected arguments count. | |
2664 | |
2665 // The code below is made a lot easier because the calling code already sets | |
2666 // up actual and expected registers according to the contract if values are | |
2667 // passed in registers. | |
2668 ASSERT(actual.is_immediate() || actual.reg().is(x0)); | |
2669 ASSERT(expected.is_immediate() || expected.reg().is(x2)); | |
2670 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3)); | |
2671 | |
2672 if (expected.is_immediate()) { | |
2673 ASSERT(actual.is_immediate()); | |
2674 if (expected.immediate() == actual.immediate()) { | |
2675 definitely_matches = true; | |
2676 | |
2677 } else { | |
2678 Mov(x0, actual.immediate()); | |
2679 if (expected.immediate() == | |
2680 SharedFunctionInfo::kDontAdaptArgumentsSentinel) { | |
2681 // Don't worry about adapting arguments for builtins that | |
2682 // don't want that done. Skip adaptation code by making it look | |
2683 // like we have a match between expected and actual number of | |
2684 // arguments. | |
2685 definitely_matches = true; | |
2686 } else { | |
2687 *definitely_mismatches = true; | |
2688 // Set up x2 for the argument adaptor. | |
2689 Mov(x2, expected.immediate()); | |
2690 } | |
2691 } | |
2692 | |
2693 } else { // expected is a register. | |
2694 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate()) | |
2695 : Operand(actual.reg()); | |
2696 // If actual == expected perform a regular invocation. | |
2697 Cmp(expected.reg(), actual_op); | |
2698 B(eq, ®ular_invoke); | |
2699 // Otherwise set up x0 for the argument adaptor. | |
2700 Mov(x0, actual_op); | |
2701 } | |
2702 | |
2703 // If the argument counts may mismatch, generate a call to the argument | |
2704 // adaptor. | |
2705 if (!definitely_matches) { | |
2706 if (!code_constant.is_null()) { | |
2707 Mov(x3, Operand(code_constant)); | |
2708 Add(x3, x3, Code::kHeaderSize - kHeapObjectTag); | |
2709 } | |
2710 | |
2711 Handle<Code> adaptor = | |
2712 isolate()->builtins()->ArgumentsAdaptorTrampoline(); | |
2713 if (flag == CALL_FUNCTION) { | |
2714 call_wrapper.BeforeCall(CallSize(adaptor)); | |
2715 Call(adaptor); | |
2716 call_wrapper.AfterCall(); | |
2717 if (!*definitely_mismatches) { | |
2718 // If the arg counts don't match, no extra code is emitted by | |
2719 // MacroAssembler::InvokeCode and we can just fall through. | |
2720 B(done); | |
2721 } | |
2722 } else { | |
2723 Jump(adaptor, RelocInfo::CODE_TARGET); | |
2724 } | |
2725 } | |
2726 Bind(®ular_invoke); | |
2727 } | |
2728 | |
2729 | |
2730 void MacroAssembler::InvokeCode(Register code, | |
2731 const ParameterCount& expected, | |
2732 const ParameterCount& actual, | |
2733 InvokeFlag flag, | |
2734 const CallWrapper& call_wrapper) { | |
2735 // You can't call a function without a valid frame. | |
2736 ASSERT(flag == JUMP_FUNCTION || has_frame()); | |
2737 | |
2738 Label done; | |
2739 | |
2740 bool definitely_mismatches = false; | |
2741 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, | |
2742 &definitely_mismatches, call_wrapper); | |
2743 | |
2744 // If we are certain that actual != expected, then we know InvokePrologue will | |
2745 // have handled the call through the argument adaptor mechanism. | |
2746 // The called function expects the call kind in x5. | |
2747 if (!definitely_mismatches) { | |
2748 if (flag == CALL_FUNCTION) { | |
2749 call_wrapper.BeforeCall(CallSize(code)); | |
2750 Call(code); | |
2751 call_wrapper.AfterCall(); | |
2752 } else { | |
2753 ASSERT(flag == JUMP_FUNCTION); | |
2754 Jump(code); | |
2755 } | |
2756 } | |
2757 | |
2758 // Continue here if InvokePrologue does handle the invocation due to | |
2759 // mismatched parameter counts. | |
2760 Bind(&done); | |
2761 } | |
2762 | |
2763 | |
2764 void MacroAssembler::InvokeFunction(Register function, | |
2765 const ParameterCount& actual, | |
2766 InvokeFlag flag, | |
2767 const CallWrapper& call_wrapper) { | |
2768 // You can't call a function without a valid frame. | |
2769 ASSERT(flag == JUMP_FUNCTION || has_frame()); | |
2770 | |
2771 // Contract with called JS functions requires that function is passed in x1. | |
2772 // (See FullCodeGenerator::Generate().) | |
2773 ASSERT(function.is(x1)); | |
2774 | |
2775 Register expected_reg = x2; | |
2776 Register code_reg = x3; | |
2777 | |
2778 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset)); | |
2779 // The number of arguments is stored as an int32_t, and -1 is a marker | |
2780 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign | |
2781 // extension to correctly handle it. | |
2782 Ldr(expected_reg, FieldMemOperand(function, | |
2783 JSFunction::kSharedFunctionInfoOffset)); | |
2784 Ldrsw(expected_reg, | |
2785 FieldMemOperand(expected_reg, | |
2786 SharedFunctionInfo::kFormalParameterCountOffset)); | |
2787 Ldr(code_reg, | |
2788 FieldMemOperand(function, JSFunction::kCodeEntryOffset)); | |
2789 | |
2790 ParameterCount expected(expected_reg); | |
2791 InvokeCode(code_reg, expected, actual, flag, call_wrapper); | |
2792 } | |
2793 | |
2794 | |
2795 void MacroAssembler::InvokeFunction(Register function, | |
2796 const ParameterCount& expected, | |
2797 const ParameterCount& actual, | |
2798 InvokeFlag flag, | |
2799 const CallWrapper& call_wrapper) { | |
2800 // You can't call a function without a valid frame. | |
2801 ASSERT(flag == JUMP_FUNCTION || has_frame()); | |
2802 | |
2803 // Contract with called JS functions requires that function is passed in x1. | |
2804 // (See FullCodeGenerator::Generate().) | |
2805 ASSERT(function.Is(x1)); | |
2806 | |
2807 Register code_reg = x3; | |
2808 | |
2809 // Set up the context. | |
2810 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset)); | |
2811 | |
2812 // We call indirectly through the code field in the function to | |
2813 // allow recompilation to take effect without changing any of the | |
2814 // call sites. | |
2815 Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); | |
2816 InvokeCode(code_reg, expected, actual, flag, call_wrapper); | |
2817 } | |
2818 | |
2819 | |
2820 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, | |
2821 const ParameterCount& expected, | |
2822 const ParameterCount& actual, | |
2823 InvokeFlag flag, | |
2824 const CallWrapper& call_wrapper) { | |
2825 // Contract with called JS functions requires that function is passed in x1. | |
2826 // (See FullCodeGenerator::Generate().) | |
2827 __ LoadObject(x1, function); | |
2828 InvokeFunction(x1, expected, actual, flag, call_wrapper); | |
2829 } | |
2830 | |
2831 | |
2832 void MacroAssembler::TryConvertDoubleToInt64(Register result, | |
2833 DoubleRegister double_input, | |
2834 Label* done) { | |
2835 // Try to convert with an FPU convert instruction. It's trivial to compute | |
2836 // the modulo operation on an integer register so we convert to a 64-bit | |
2837 // integer. | |
2838 // | |
2839 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) | |
2840 // when the double is out of range. NaNs will be converted to 0 (as ECMA-262 | |
2841 // requires); infinities saturate like other out-of-range values. | |
2842 Fcvtzs(result.X(), double_input); | |
2843 | |
2844 // The values INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff) are not | |
2845 // representable using a double, so if the result is one of those then we know | |
2846 // that saturation occurred, and we need to manually handle the conversion. | |
2847 // | |
2848 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting | |
2849 // 1 will cause signed overflow. | |
2850 Cmp(result.X(), 1); | |
2851 Ccmp(result.X(), -1, VFlag, vc); | |
2852 | |
2853 B(vc, done); | |
2854 } | |
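
// Worked example of the saturation check above (illustrative):
//   result == INT64_MAX: the Cmp does not overflow, so the Ccmp computes
//     INT64_MAX + 1, which overflows and sets V.
//   result == INT64_MIN: the Cmp computes INT64_MIN - 1, which overflows, so
//     the Ccmp forces the flags to VFlag instead.
//   Any other result leaves V clear, so the B(vc, done) is taken.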
2855 | |
2856 | |
2857 void MacroAssembler::TruncateDoubleToI(Register result, | |
2858 DoubleRegister double_input) { | |
2859 Label done; | |
2860 ASSERT(jssp.Is(StackPointer())); | |
2861 | |
2862 // Try to convert the double to an int64. If successful, the bottom 32 bits | |
2863 // contain our truncated int32 result. | |
2864 TryConvertDoubleToInt64(result, double_input, &done); | |
2865 | |
2866 // If we fell through, the inline version didn't succeed; call the stub instead. | |
2867 Push(lr); | |
2868 Push(double_input); // Put input on stack. | |
2869 | |
2870 DoubleToIStub stub(jssp, | |
2871 result, | |
2872 0, | |
2873 true, // is_truncating | |
2874 true); // skip_fastpath | |
2875 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber | |
2876 | |
2877 Drop(1, kDoubleSize); // Drop the double input on the stack. | |
2878 Pop(lr); | |
2879 | |
2880 Bind(&done); | |
2881 } | |
2882 | |
2883 | |
2884 void MacroAssembler::TruncateHeapNumberToI(Register result, | |
2885 Register object) { | |
2886 Label done; | |
2887 ASSERT(!result.is(object)); | |
2888 ASSERT(jssp.Is(StackPointer())); | |
2889 | |
2890 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); | |
2891 | |
2892 // Try to convert the double to an int64. If successful, the bottom 32 bits | |
2893 // contain our truncated int32 result. | |
2894 TryConvertDoubleToInt64(result, fp_scratch, &done); | |
2895 | |
2896 // If we fell through, the inline version didn't succeed; call the stub instead. | |
2897 Push(lr); | |
2898 DoubleToIStub stub(object, | |
2899 result, | |
2900 HeapNumber::kValueOffset - kHeapObjectTag, | |
2901 true, // is_truncating | |
2902 true); // skip_fastpath | |
2903 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber | |
2904 Pop(lr); | |
2905 | |
2906 Bind(&done); | |
2907 } | |
2908 | |
2909 | |
2910 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) { | |
2911 if (frame_mode == BUILD_STUB_FRAME) { | |
2912 ASSERT(StackPointer().Is(jssp)); | |
2913 UseScratchRegisterScope temps(this); | |
2914 Register temp = temps.AcquireX(); | |
2915 __ Mov(temp, Smi::FromInt(StackFrame::STUB)); | |
2916 // Compiled stubs don't age, and so they don't need the predictable code | |
2917 // ageing sequence. | |
2918 __ Push(lr, fp, cp, temp); | |
2919 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); | |
2920 } else { | |
2921 if (isolate()->IsCodePreAgingActive()) { | |
2922 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); | |
2923 __ EmitCodeAgeSequence(stub); | |
2924 } else { | |
2925 __ EmitFrameSetupForCodeAgePatching(); | |
2926 } | |
2927 } | |
2928 } | |
2929 | |
2930 | |
2931 void MacroAssembler::EnterFrame(StackFrame::Type type) { | |
2932 ASSERT(jssp.Is(StackPointer())); | |
2933 UseScratchRegisterScope temps(this); | |
2934 Register type_reg = temps.AcquireX(); | |
2935 Register code_reg = temps.AcquireX(); | |
2936 | |
2937 Push(lr, fp, cp); | |
2938 Mov(type_reg, Smi::FromInt(type)); | |
2939 Mov(code_reg, Operand(CodeObject())); | |
2940 Push(type_reg, code_reg); | |
2941 // jssp[4] : lr | |
2942 // jssp[3] : fp | |
2943 // jssp[2] : cp | |
2944 // jssp[1] : type | |
2945 // jssp[0] : code object | |
2946 | |
2947 // Adjust FP to point to saved FP. | |
2948 Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); | |
2949 } | |
2950 | |
2951 | |
2952 void MacroAssembler::LeaveFrame(StackFrame::Type type) { | |
2953 ASSERT(jssp.Is(StackPointer())); | |
2954 // Drop the execution stack down to the frame pointer and restore | |
2955 // the caller frame pointer and return address. | |
2956 Mov(jssp, fp); | |
2957 AssertStackConsistency(); | |
2958 Pop(fp, lr); | |
2959 } | |
2960 | |
2961 | |
2962 void MacroAssembler::ExitFramePreserveFPRegs() { | |
2963 PushCPURegList(kCallerSavedFP); | |
2964 } | |
2965 | |
2966 | |
2967 void MacroAssembler::ExitFrameRestoreFPRegs() { | |
2968 // Read the registers from the stack without popping them. The stack pointer | |
2969 // will be reset as part of the unwinding process. | |
2970 CPURegList saved_fp_regs = kCallerSavedFP; | |
2971 ASSERT(saved_fp_regs.Count() % 2 == 0); | |
2972 | |
2973 int offset = ExitFrameConstants::kLastExitFrameField; | |
2974 while (!saved_fp_regs.IsEmpty()) { | |
2975 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex(); | |
2976 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex(); | |
2977 offset -= 2 * kDRegSize; | |
2978 Ldp(dst1, dst0, MemOperand(fp, offset)); | |
2979 } | |
2980 } | |
2981 | |
2982 | |
2983 void MacroAssembler::EnterExitFrame(bool save_doubles, | |
2984 const Register& scratch, | |
2985 int extra_space) { | |
2986 ASSERT(jssp.Is(StackPointer())); | |
2987 | |
2988 // Set up the new stack frame. | |
2989 Mov(scratch, Operand(CodeObject())); | |
2990 Push(lr, fp); | |
2991 Mov(fp, StackPointer()); | |
2992 Push(xzr, scratch); | |
2993 // fp[8]: CallerPC (lr) | |
2994 // fp -> fp[0]: CallerFP (old fp) | |
2995 // fp[-8]: Space reserved for SPOffset. | |
2996 // jssp -> fp[-16]: CodeObject() | |
2997 STATIC_ASSERT((2 * kPointerSize) == | |
2998 ExitFrameConstants::kCallerSPDisplacement); | |
2999 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset); | |
3000 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset); | |
3001 STATIC_ASSERT((-1 * kPointerSize) == ExitFrameConstants::kSPOffset); | |
3002 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kCodeOffset); | |
3003 | |
3004 // Save the frame pointer and context pointer in the top frame. | |
3005 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress, | |
3006 isolate()))); | |
3007 Str(fp, MemOperand(scratch)); | |
3008 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, | |
3009 isolate()))); | |
3010 Str(cp, MemOperand(scratch)); | |
3011 | |
3012 STATIC_ASSERT((-2 * kPointerSize) == | |
3013 ExitFrameConstants::kLastExitFrameField); | |
3014 if (save_doubles) { | |
3015 ExitFramePreserveFPRegs(); | |
3016 } | |
3017 | |
3018 // Reserve space for the return address and for user requested memory. | |
3019 // We do this before aligning to make sure that we end up correctly | |
3020 // aligned with the minimum of wasted space. | |
3021 Claim(extra_space + 1, kXRegSize); | |
3022 // fp[8]: CallerPC (lr) | |
3023 // fp -> fp[0]: CallerFP (old fp) | |
3024 // fp[-8]: Space reserved for SPOffset. | |
3025 // fp[-16]: CodeObject() | |
3026 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true). | |
3027 // jssp[8]: Extra space reserved for caller (if extra_space != 0). | |
3028 // jssp -> jssp[0]: Space reserved for the return address. | |
3029 | |
3030 // Align and synchronize the system stack pointer with jssp. | |
3031 AlignAndSetCSPForFrame(); | |
3032 ASSERT(csp.Is(StackPointer())); | |
3033 | |
3034 // fp[8]: CallerPC (lr) | |
3035 // fp -> fp[0]: CallerFP (old fp) | |
3036 // fp[-8]: Space reserved for SPOffset. | |
3037 // fp[-16]: CodeObject() | |
3038 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true). | |
3039 // csp[8]: Memory reserved for the caller if extra_space != 0. | |
3040 // Alignment padding, if necessary. | |
3041 // csp -> csp[0]: Space reserved for the return address. | |
3042 | |
3043 // ExitFrame::GetStateForFramePointer expects to find the return address at | |
3044 // the memory address immediately below the pointer stored in SPOffset. | |
3045 // It is not safe to derive much else from SPOffset, because the size of the | |
3046 // padding can vary. | |
3047 Add(scratch, csp, kXRegSize); | |
3048 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); | |
3049 } | |
3050 | |
3051 | |
3052 // Leave the current exit frame. | |
3053 void MacroAssembler::LeaveExitFrame(bool restore_doubles, | |
3054 const Register& scratch, | |
3055 bool restore_context) { | |
3056 ASSERT(csp.Is(StackPointer())); | |
3057 | |
3058 if (restore_doubles) { | |
3059 ExitFrameRestoreFPRegs(); | |
3060 } | |
3061 | |
3062 // Restore the context pointer from the top frame. | |
3063 if (restore_context) { | |
3064 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, | |
3065 isolate()))); | |
3066 Ldr(cp, MemOperand(scratch)); | |
3067 } | |
3068 | |
3069 if (emit_debug_code()) { | |
3070 // Also emit debug code to clear the cp in the top frame. | |
3071 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, | |
3072 isolate()))); | |
3073 Str(xzr, MemOperand(scratch)); | |
3074 } | |
3075 // Clear the frame pointer from the top frame. | |
3076 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress, | |
3077 isolate()))); | |
3078 Str(xzr, MemOperand(scratch)); | |
3079 | |
3080 // Pop the exit frame. | |
3081 // fp[8]: CallerPC (lr) | |
3082 // fp -> fp[0]: CallerFP (old fp) | |
3083 // fp[...]: The rest of the frame. | |
3084 Mov(jssp, fp); | |
3085 SetStackPointer(jssp); | |
3086 AssertStackConsistency(); | |
3087 Pop(fp, lr); | |
3088 } | |
3089 | |
3090 | |
3091 void MacroAssembler::SetCounter(StatsCounter* counter, int value, | |
3092 Register scratch1, Register scratch2) { | |
3093 if (FLAG_native_code_counters && counter->Enabled()) { | |
3094 Mov(scratch1, value); | |
3095 Mov(scratch2, ExternalReference(counter)); | |
3096 Str(scratch1, MemOperand(scratch2)); | |
3097 } | |
3098 } | |
3099 | |
3100 | |
3101 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, | |
3102 Register scratch1, Register scratch2) { | |
3103 ASSERT(value != 0); | |
3104 if (FLAG_native_code_counters && counter->Enabled()) { | |
3105 Mov(scratch2, ExternalReference(counter)); | |
3106 Ldr(scratch1, MemOperand(scratch2)); | |
3107 Add(scratch1, scratch1, value); | |
3108 Str(scratch1, MemOperand(scratch2)); | |
3109 } | |
3110 } | |
3111 | |
3112 | |
3113 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, | |
3114 Register scratch1, Register scratch2) { | |
3115 IncrementCounter(counter, -value, scratch1, scratch2); | |
3116 } | |
3117 | |
3118 | |
3119 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { | |
3120 if (context_chain_length > 0) { | |
3121 // Move up the chain of contexts to the context containing the slot. | |
3122 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); | |
3123 for (int i = 1; i < context_chain_length; i++) { | |
3124 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); | |
3125 } | |
3126 } else { | |
3127 // Slot is in the current function context. Move it into the | |
3128 // destination register in case we store into it (the write barrier | |
3129 // cannot be allowed to destroy the context in cp). | |
3130 Mov(dst, cp); | |
3131 } | |
3132 } | |
3133 | |
3134 | |
3135 #ifdef ENABLE_DEBUGGER_SUPPORT | |
3136 void MacroAssembler::DebugBreak() { | |
3137 Mov(x0, 0); | |
3138 Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate())); | |
3139 CEntryStub ces(1); | |
3140 ASSERT(AllowThisStubCall(&ces)); | |
3141 Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK); | |
3142 } | |
3143 #endif | |
3144 | |
3145 | |
3146 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, | |
3147 int handler_index) { | |
3148 ASSERT(jssp.Is(StackPointer())); | |
3149 // Adjust this code if the asserts don't hold. | |
3150 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); | |
3151 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); | |
3152 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); | |
3153 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); | |
3154 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); | |
3155 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); | |
3156 | |
3157 // For the JSEntry handler, we must preserve the live registers x0-x4. | |
3158 // (See JSEntryStub::GenerateBody().) | |
3159 | |
3160 unsigned state = | |
3161 StackHandler::IndexField::encode(handler_index) | | |
3162 StackHandler::KindField::encode(kind); | |
3163 | |
3164 // Set up the code object and the state for pushing. | |
3165 Mov(x10, Operand(CodeObject())); | |
3166 Mov(x11, state); | |
3167 | |
3168 // Push the frame pointer, context, state, and code object. | |
3169 if (kind == StackHandler::JS_ENTRY) { | |
3170 ASSERT(Smi::FromInt(0) == 0); | |
3171 Push(xzr, xzr, x11, x10); | |
3172 } else { | |
3173 Push(fp, cp, x11, x10); | |
3174 } | |
3175 | |
3176 // Link the current handler as the next handler. | |
3177 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate())); | |
3178 Ldr(x10, MemOperand(x11)); | |
3179 Push(x10); | |
3180 // Set this new handler as the current one. | |
3181 Str(jssp, MemOperand(x11)); | |
3182 } | |
3183 | |
3184 | |
3185 void MacroAssembler::PopTryHandler() { | |
3186 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | |
3187 Pop(x10); | |
3188 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate())); | |
3189 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes); | |
3190 Str(x10, MemOperand(x11)); | |
3191 } | |
3192 | |
3193 | |
3194 void MacroAssembler::Allocate(int object_size, | |
3195 Register result, | |
3196 Register scratch1, | |
3197 Register scratch2, | |
3198 Label* gc_required, | |
3199 AllocationFlags flags) { | |
3200 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); | |
3201 if (!FLAG_inline_new) { | |
3202 if (emit_debug_code()) { | |
3203 // Trash the registers to simulate an allocation failure. | |
3204 // The zap value is salted so that the trashed values are easy to spot. | |
3205 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); | |
3206 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); | |
3207 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); | |
3208 } | |
3209 B(gc_required); | |
3210 return; | |
3211 } | |
3212 | |
3213 UseScratchRegisterScope temps(this); | |
3214 Register scratch3 = temps.AcquireX(); | |
3215 | |
3216 ASSERT(!AreAliased(result, scratch1, scratch2, scratch3)); | |
3217 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits()); | |
3218 | |
3219 // Make object size into bytes. | |
3220 if ((flags & SIZE_IN_WORDS) != 0) { | |
3221 object_size *= kPointerSize; | |
3222 } | |
3223 ASSERT(0 == (object_size & kObjectAlignmentMask)); | |
3224 | |
3225 // Check relative positions of allocation top and limit addresses. | |
3226 // The values must be adjacent in memory to allow the use of LDP. | |
3227 ExternalReference heap_allocation_top = | |
3228 AllocationUtils::GetAllocationTopReference(isolate(), flags); | |
3229 ExternalReference heap_allocation_limit = | |
3230 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | |
3231 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); | |
3232 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); | |
3233 ASSERT((limit - top) == kPointerSize); | |
3234 | |
3235 // Set up allocation top address and object size registers. | |
3236 Register top_address = scratch1; | |
3237 Register allocation_limit = scratch2; | |
3238 Mov(top_address, Operand(heap_allocation_top)); | |
3239 | |
3240 if ((flags & RESULT_CONTAINS_TOP) == 0) { | |
3241 // Load allocation top into result and the allocation limit. | |
3242 Ldp(result, allocation_limit, MemOperand(top_address)); | |
3243 } else { | |
3244 if (emit_debug_code()) { | |
3245 // Assert that result actually contains top on entry. | |
3246 Ldr(scratch3, MemOperand(top_address)); | |
3247 Cmp(result, scratch3); | |
3248 Check(eq, kUnexpectedAllocationTop); | |
3249 } | |
3250 // Load the allocation limit. 'result' already contains the allocation top. | |
3251 Ldr(allocation_limit, MemOperand(top_address, limit - top)); | |
3252 } | |
3253 | |
3254 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | |
3255 // the same alignment on A64. | |
3256 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | |
3257 | |
3258 // Calculate new top and bail out if new space is exhausted. | |
3259 Adds(scratch3, result, object_size); | |
3260 B(vs, gc_required); | |
3261 Cmp(scratch3, allocation_limit); | |
3262 B(hi, gc_required); | |
3263 Str(scratch3, MemOperand(top_address)); | |
3264 | |
3265 // Tag the object if requested. | |
3266 if ((flags & TAG_OBJECT) != 0) { | |
3267 Orr(result, result, kHeapObjectTag); | |
3268 } | |
3269 } | |
3270 | |
3271 | |
3272 void MacroAssembler::Allocate(Register object_size, | |
3273 Register result, | |
3274 Register scratch1, | |
3275 Register scratch2, | |
3276 Label* gc_required, | |
3277 AllocationFlags flags) { | |
3278 if (!FLAG_inline_new) { | |
3279 if (emit_debug_code()) { | |
3280 // Trash the registers to simulate an allocation failure. | |
3281 // The zap value is salted so that the trashed values are easy to spot. | |
3282 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); | |
3283 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); | |
3284 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); | |
3285 } | |
3286 B(gc_required); | |
3287 return; | |
3288 } | |
3289 | |
3290 UseScratchRegisterScope temps(this); | |
3291 Register scratch3 = temps.AcquireX(); | |
3292 | |
3293 ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3)); | |
3294 ASSERT(object_size.Is64Bits() && result.Is64Bits() && | |
3295 scratch1.Is64Bits() && scratch2.Is64Bits()); | |
3296 | |
3297 // Check relative positions of allocation top and limit addresses. | |
3298 // The values must be adjacent in memory to allow the use of LDP. | |
3299 ExternalReference heap_allocation_top = | |
3300 AllocationUtils::GetAllocationTopReference(isolate(), flags); | |
3301 ExternalReference heap_allocation_limit = | |
3302 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | |
3303 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); | |
3304 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); | |
3305 ASSERT((limit - top) == kPointerSize); | |
3306 | |
3307 // Set up allocation top address and object size registers. | |
3308 Register top_address = scratch1; | |
3309 Register allocation_limit = scratch2; | |
3310 Mov(top_address, heap_allocation_top); | |
3311 | |
3312 if ((flags & RESULT_CONTAINS_TOP) == 0) { | |
3313 // Load allocation top into result and the allocation limit. | |
3314 Ldp(result, allocation_limit, MemOperand(top_address)); | |
3315 } else { | |
3316 if (emit_debug_code()) { | |
3317 // Assert that result actually contains top on entry. | |
3318 Ldr(scratch3, MemOperand(top_address)); | |
3319 Cmp(result, scratch3); | |
3320 Check(eq, kUnexpectedAllocationTop); | |
3321 } | |
3322 // Load the allocation limit. 'result' already contains the allocation top. | |
3323 Ldr(allocation_limit, MemOperand(top_address, limit - top)); | |
3324 } | |
3325 | |
3326 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | |
3327 // the same alignment on A64. | |
3328 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | |
3329 | |
3330 // Calculate new top and bail out if new space is exhausted | |
3331 if ((flags & SIZE_IN_WORDS) != 0) { | |
3332 Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2)); | |
3333 } else { | |
3334 Adds(scratch3, result, object_size); | |
3335 } | |
3336 | |
3337 if (emit_debug_code()) { | |
3338 Tst(scratch3, kObjectAlignmentMask); | |
3339 Check(eq, kUnalignedAllocationInNewSpace); | |
3340 } | |
3341 | |
3342 B(vs, gc_required); | |
3343 Cmp(scratch3, allocation_limit); | |
3344 B(hi, gc_required); | |
3345 Str(scratch3, MemOperand(top_address)); | |
3346 | |
3347 // Tag the object if requested. | |
3348 if ((flags & TAG_OBJECT) != 0) { | |
3349 Orr(result, result, kHeapObjectTag); | |
3350 } | |
3351 } | |
3352 | |
3353 | |
3354 void MacroAssembler::UndoAllocationInNewSpace(Register object, | |
3355 Register scratch) { | |
3356 ExternalReference new_space_allocation_top = | |
3357 ExternalReference::new_space_allocation_top_address(isolate()); | |
3358 | |
3359 // Make sure the object has no tag before resetting top. | |
3360 Bic(object, object, kHeapObjectTagMask); | |
3361 #ifdef DEBUG | |
3362 // Check that the object being un-allocated is below the current top. | |
3363 Mov(scratch, new_space_allocation_top); | |
3364 Ldr(scratch, MemOperand(scratch)); | |
3365 Cmp(object, scratch); | |
3366 Check(lt, kUndoAllocationOfNonAllocatedMemory); | |
3367 #endif | |
3368 // Write the address of the object to un-allocate as the current top. | |
3369 Mov(scratch, new_space_allocation_top); | |
3370 Str(object, MemOperand(scratch)); | |
3371 } | |
3372 | |
3373 | |
3374 void MacroAssembler::AllocateTwoByteString(Register result, | |
3375 Register length, | |
3376 Register scratch1, | |
3377 Register scratch2, | |
3378 Register scratch3, | |
3379 Label* gc_required) { | |
3380 ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3)); | |
3381 // Calculate the number of bytes needed for the characters in the string while | |
3382 // observing object alignment. | |
3383 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | |
3384 Add(scratch1, length, length); // Length in bytes, not chars. | |
3385 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize); | |
3386 Bic(scratch1, scratch1, kObjectAlignmentMask); | |
3387 | |
3388 // Allocate two-byte string in new space. | |
3389 Allocate(scratch1, | |
3390 result, | |
3391 scratch2, | |
3392 scratch3, | |
3393 gc_required, | |
3394 TAG_OBJECT); | |
3395 | |
3396 // Set the map, length and hash field. | |
3397 InitializeNewString(result, | |
3398 length, | |
3399 Heap::kStringMapRootIndex, | |
3400 scratch1, | |
3401 scratch2); | |
3402 } | |
3403 | |
3404 | |
3405 void MacroAssembler::AllocateAsciiString(Register result, | |
3406 Register length, | |
3407 Register scratch1, | |
3408 Register scratch2, | |
3409 Register scratch3, | |
3410 Label* gc_required) { | |
3411 ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3)); | |
3412 // Calculate the number of bytes needed for the characters in the string while | |
3413 // observing object alignment. | |
3414 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | |
3415 STATIC_ASSERT(kCharSize == 1); | |
3416 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize); | |
3417 Bic(scratch1, scratch1, kObjectAlignmentMask); | |
3418 | |
3419 // Allocate ASCII string in new space. | |
3420 Allocate(scratch1, | |
3421 result, | |
3422 scratch2, | |
3423 scratch3, | |
3424 gc_required, | |
3425 TAG_OBJECT); | |
3426 | |
3427 // Set the map, length and hash field. | |
3428 InitializeNewString(result, | |
3429 length, | |
3430 Heap::kAsciiStringMapRootIndex, | |
3431 scratch1, | |
3432 scratch2); | |
3433 } | |
3434 | |
3435 | |
3436 void MacroAssembler::AllocateTwoByteConsString(Register result, | |
3437 Register length, | |
3438 Register scratch1, | |
3439 Register scratch2, | |
3440 Label* gc_required) { | |
3441 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required, | |
3442 TAG_OBJECT); | |
3443 | |
3444 InitializeNewString(result, | |
3445 length, | |
3446 Heap::kConsStringMapRootIndex, | |
3447 scratch1, | |
3448 scratch2); | |
3449 } | |
3450 | |
3451 | |
3452 void MacroAssembler::AllocateAsciiConsString(Register result, | |
3453 Register length, | |
3454 Register scratch1, | |
3455 Register scratch2, | |
3456 Label* gc_required) { | |
3457 Label allocate_new_space, install_map; | |
3458 AllocationFlags flags = TAG_OBJECT; | |
3459 | |
3460 ExternalReference high_promotion_mode = ExternalReference:: | |
3461 new_space_high_promotion_mode_active_address(isolate()); | |
3462 Mov(scratch1, high_promotion_mode); | |
3463 Ldr(scratch1, MemOperand(scratch1)); | |
3464 Cbz(scratch1, &allocate_new_space); | |
3465 | |
3466 Allocate(ConsString::kSize, | |
3467 result, | |
3468 scratch1, | |
3469 scratch2, | |
3470 gc_required, | |
3471 static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE)); | |
3472 | |
3473 B(&install_map); | |
3474 | |
3475 Bind(&allocate_new_space); | |
3476 Allocate(ConsString::kSize, | |
3477 result, | |
3478 scratch1, | |
3479 scratch2, | |
3480 gc_required, | |
3481 flags); | |
3482 | |
3483 Bind(&install_map); | |
3484 | |
3485 InitializeNewString(result, | |
3486 length, | |
3487 Heap::kConsAsciiStringMapRootIndex, | |
3488 scratch1, | |
3489 scratch2); | |
3490 } | |
3491 | |
3492 | |
3493 void MacroAssembler::AllocateTwoByteSlicedString(Register result, | |
3494 Register length, | |
3495 Register scratch1, | |
3496 Register scratch2, | |
3497 Label* gc_required) { | |
3498 ASSERT(!AreAliased(result, length, scratch1, scratch2)); | |
3499 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, | |
3500 TAG_OBJECT); | |
3501 | |
3502 InitializeNewString(result, | |
3503 length, | |
3504 Heap::kSlicedStringMapRootIndex, | |
3505 scratch1, | |
3506 scratch2); | |
3507 } | |
3508 | |
3509 | |
3510 void MacroAssembler::AllocateAsciiSlicedString(Register result, | |
3511 Register length, | |
3512 Register scratch1, | |
3513 Register scratch2, | |
3514 Label* gc_required) { | |
3515 ASSERT(!AreAliased(result, length, scratch1, scratch2)); | |
3516 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, | |
3517 TAG_OBJECT); | |
3518 | |
3519 InitializeNewString(result, | |
3520 length, | |
3521 Heap::kSlicedAsciiStringMapRootIndex, | |
3522 scratch1, | |
3523 scratch2); | |
3524 } | |
3525 | |
3526 | |
3527 // Allocates a heap number or jumps to the gc_required label if the young | |
3528 // space is full and a scavenge is needed. | |
3529 void MacroAssembler::AllocateHeapNumber(Register result, | |
3530 Label* gc_required, | |
3531 Register scratch1, | |
3532 Register scratch2, | |
3533 Register heap_number_map) { | |
3534 // Allocate an object in the heap for the heap number and tag it as a heap | |
3535 // object. | |
3536 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, | |
3537 TAG_OBJECT); | |
3538 | |
3539 // Store heap number map in the allocated object. | |
3540 if (heap_number_map.Is(NoReg)) { | |
3541 heap_number_map = scratch1; | |
3542 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
3543 } | |
3544 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
3545 Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); | |
3546 } | |
3547 | |
3548 | |
3549 void MacroAssembler::AllocateHeapNumberWithValue(Register result, | |
3550 DoubleRegister value, | |
3551 Label* gc_required, | |
3552 Register scratch1, | |
3553 Register scratch2, | |
3554 Register heap_number_map) { | |
3555 // TODO(all): Check if it would be more efficient to use STP to store both | |
3556 // the map and the value. | |
3557 AllocateHeapNumber(result, gc_required, scratch1, scratch2, heap_number_map); | |
3558 Str(value, FieldMemOperand(result, HeapNumber::kValueOffset)); | |
3559 } | |
3560 | |
3561 | |
3562 void MacroAssembler::JumpIfObjectType(Register object, | |
3563 Register map, | |
3564 Register type_reg, | |
3565 InstanceType type, | |
3566 Label* if_cond_pass, | |
3567 Condition cond) { | |
3568 CompareObjectType(object, map, type_reg, type); | |
3569 B(cond, if_cond_pass); | |
3570 } | |
3571 | |
3572 | |
3573 void MacroAssembler::JumpIfNotObjectType(Register object, | |
3574 Register map, | |
3575 Register type_reg, | |
3576 InstanceType type, | |
3577 Label* if_not_object) { | |
3578 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne); | |
3579 } | |
3580 | |
3581 | |
3582 // Sets condition flags based on comparison, and returns type in type_reg. | |
3583 void MacroAssembler::CompareObjectType(Register object, | |
3584 Register map, | |
3585 Register type_reg, | |
3586 InstanceType type) { | |
3587 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); | |
3588 CompareInstanceType(map, type_reg, type); | |
3589 } | |
3590 | |
3591 | |
3592 // Sets condition flags based on comparison, and returns type in type_reg. | |
3593 void MacroAssembler::CompareInstanceType(Register map, | |
3594 Register type_reg, | |
3595 InstanceType type) { | |
3596 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); | |
3597 Cmp(type_reg, type); | |
3598 } | |
3599 | |
3600 | |
3601 void MacroAssembler::CompareMap(Register obj, | |
3602 Register scratch, | |
3603 Handle<Map> map) { | |
3604 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | |
3605 CompareMap(scratch, map); | |
3606 } | |
3607 | |
3608 | |
3609 void MacroAssembler::CompareMap(Register obj_map, | |
3610 Handle<Map> map) { | |
3611 Cmp(obj_map, Operand(map)); | |
3612 } | |
3613 | |
3614 | |
3615 void MacroAssembler::CheckMap(Register obj, | |
3616 Register scratch, | |
3617 Handle<Map> map, | |
3618 Label* fail, | |
3619 SmiCheckType smi_check_type) { | |
3620 if (smi_check_type == DO_SMI_CHECK) { | |
3621 JumpIfSmi(obj, fail); | |
3622 } | |
3623 | |
3624 CompareMap(obj, scratch, map); | |
3625 B(ne, fail); | |
3626 } | |
3627 | |
3628 | |
3629 void MacroAssembler::CheckMap(Register obj, | |
3630 Register scratch, | |
3631 Heap::RootListIndex index, | |
3632 Label* fail, | |
3633 SmiCheckType smi_check_type) { | |
3634 if (smi_check_type == DO_SMI_CHECK) { | |
3635 JumpIfSmi(obj, fail); | |
3636 } | |
3637 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | |
3638 JumpIfNotRoot(scratch, index, fail); | |
3639 } | |
3640 | |
3641 | |
3642 void MacroAssembler::CheckMap(Register obj_map, | |
3643 Handle<Map> map, | |
3644 Label* fail, | |
3645 SmiCheckType smi_check_type) { | |
3646 if (smi_check_type == DO_SMI_CHECK) { | |
3647 JumpIfSmi(obj_map, fail); | |
3648 } | |
3649 | |
3650 CompareMap(obj_map, map); | |
3651 B(ne, fail); | |
3652 } | |
3653 | |
3654 | |
3655 void MacroAssembler::DispatchMap(Register obj, | |
3656 Register scratch, | |
3657 Handle<Map> map, | |
3658 Handle<Code> success, | |
3659 SmiCheckType smi_check_type) { | |
3660 Label fail; | |
3661 if (smi_check_type == DO_SMI_CHECK) { | |
3662 JumpIfSmi(obj, &fail); | |
3663 } | |
3664 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | |
3665 Cmp(scratch, Operand(map)); | |
3666 B(ne, &fail); | |
3667 Jump(success, RelocInfo::CODE_TARGET); | |
3668 Bind(&fail); | |
3669 } | |
3670 | |
3671 | |
3672 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) { | |
3673 UseScratchRegisterScope temps(this); | |
3674 Register temp = temps.AcquireX(); | |
3675 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | |
3676 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); | |
3677 Tst(temp, mask); | |
3678 } | |
3679 | |
3680 | |
3681 void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) { | |
3682 // Load the map's "bit field 2". | |
3683 Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset)); | |
3684 // Retrieve elements_kind from bit field 2. | |
3685 Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount); | |
3686 } | |
3687 | |
3688 | |
3689 void MacroAssembler::TryGetFunctionPrototype(Register function, | |
3690 Register result, | |
3691 Register scratch, | |
3692 Label* miss, | |
3693 BoundFunctionAction action) { | |
3694 ASSERT(!AreAliased(function, result, scratch)); | |
3695 | |
3696 // Check that the receiver isn't a smi. | |
3697 JumpIfSmi(function, miss); | |
3698 | |
3699 // Check that the function really is a function. Load map into result reg. | |
3700 JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss); | |
3701 | |
3702 if (action == kMissOnBoundFunction) { | |
3703 Register scratch_w = scratch.W(); | |
3704 Ldr(scratch, | |
3705 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); | |
3706 // On 64-bit platforms, compiler hints field is not a smi. See definition of | |
3707 // kCompilerHintsOffset in src/objects.h. | |
3708 Ldr(scratch_w, | |
3709 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); | |
3710 Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss); | |
3711 } | |
3712 | |
3713 // Make sure that the function has an instance prototype. | |
3714 Label non_instance; | |
3715 Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); | |
3716 Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance); | |
3717 | |
3718 // Get the prototype or initial map from the function. | |
3719 Ldr(result, | |
3720 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | |
3721 | |
3722 // If the prototype or initial map is the hole, don't return it and simply | |
3723 // miss the cache instead. This will allow us to allocate a prototype object | |
3724 // on-demand in the runtime system. | |
3725 JumpIfRoot(result, Heap::kTheHoleValueRootIndex, miss); | |
3726 | |
3727 // If the function does not have an initial map, we're done. | |
3728 Label done; | |
3729 JumpIfNotObjectType(result, scratch, scratch, MAP_TYPE, &done); | |
3730 | |
3731 // Get the prototype from the initial map. | |
3732 Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | |
3733 B(&done); | |
3734 | |
3735 // Non-instance prototype: fetch prototype from constructor field in initial | |
3736 // map. | |
3737 Bind(&non_instance); | |
3738 Ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); | |
3739 | |
3740 // All done. | |
3741 Bind(&done); | |
3742 } | |
3743 | |
3744 | |
3745 void MacroAssembler::CompareRoot(const Register& obj, | |
3746 Heap::RootListIndex index) { | |
3747 UseScratchRegisterScope temps(this); | |
3748 Register temp = temps.AcquireX(); | |
3749 ASSERT(!AreAliased(obj, temp)); | |
3750 LoadRoot(temp, index); | |
3751 Cmp(obj, temp); | |
3752 } | |
3753 | |
3754 | |
3755 void MacroAssembler::JumpIfRoot(const Register& obj, | |
3756 Heap::RootListIndex index, | |
3757 Label* if_equal) { | |
3758 CompareRoot(obj, index); | |
3759 B(eq, if_equal); | |
3760 } | |
3761 | |
3762 | |
3763 void MacroAssembler::JumpIfNotRoot(const Register& obj, | |
3764 Heap::RootListIndex index, | |
3765 Label* if_not_equal) { | |
3766 CompareRoot(obj, index); | |
3767 B(ne, if_not_equal); | |
3768 } | |
3769 | |
3770 | |
3771 void MacroAssembler::CompareAndSplit(const Register& lhs, | |
3772 const Operand& rhs, | |
3773 Condition cond, | |
3774 Label* if_true, | |
3775 Label* if_false, | |
3776 Label* fall_through) { | |
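// For example, CompareAndSplit(x0, 1, eq, &a, &b, &b) emits a single | |
// compare-and-branch to 'a' and simply falls through for 'b', because 'b' | |
// is also the fall-through label. (Labels here are purely illustrative.) | |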
3777 if ((if_true == if_false) && (if_false == fall_through)) { | |
3778 // Fall through. | |
3779 } else if (if_true == if_false) { | |
3780 B(if_true); | |
3781 } else if (if_false == fall_through) { | |
3782 CompareAndBranch(lhs, rhs, cond, if_true); | |
3783 } else if (if_true == fall_through) { | |
3784 CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false); | |
3785 } else { | |
3786 CompareAndBranch(lhs, rhs, cond, if_true); | |
3787 B(if_false); | |
3788 } | |
3789 } | |
3790 | |
3791 | |
3792 void MacroAssembler::TestAndSplit(const Register& reg, | |
3793 uint64_t bit_pattern, | |
3794 Label* if_all_clear, | |
3795 Label* if_any_set, | |
3796 Label* fall_through) { | |
3797 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) { | |
3798 // Fall through. | |
3799 } else if (if_all_clear == if_any_set) { | |
3800 B(if_all_clear); | |
3801 } else if (if_all_clear == fall_through) { | |
3802 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set); | |
3803 } else if (if_any_set == fall_through) { | |
3804 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear); | |
3805 } else { | |
3806 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set); | |
3807 B(if_all_clear); | |
3808 } | |
3809 } | |
3810 | |
3811 | |
3812 void MacroAssembler::CheckFastElements(Register map, | |
3813 Register scratch, | |
3814 Label* fail) { | |
3815 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | |
3816 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | |
3817 STATIC_ASSERT(FAST_ELEMENTS == 2); | |
3818 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | |
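// The asserts above guarantee that the fast kinds are the smallest | |
// ElementsKind values, so a single unsigned comparison against the holey | |
// maximum covers all four of them. | |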
3819 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | |
3820 Cmp(scratch, Map::kMaximumBitField2FastHoleyElementValue); | |
3821 B(hi, fail); | |
3822 } | |
3823 | |
3824 | |
3825 void MacroAssembler::CheckFastObjectElements(Register map, | |
3826 Register scratch, | |
3827 Label* fail) { | |
3828 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | |
3829 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | |
3830 STATIC_ASSERT(FAST_ELEMENTS == 2); | |
3831 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | |
3832 Ldrb(scratch, FieldMemOperand(map, Map::kBitField2Offset)); | |
3833 Cmp(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue)); | |
3834 // If the first compare gave ls (a smi kind), force hi (fail); else compare. | |
3835 Ccmp(scratch, | |
3836 Operand(Map::kMaximumBitField2FastHoleyElementValue), CFlag, hi); | |
3837 B(hi, fail); | |
3838 } | |
3839 | |
3840 | |
3841 // Note: The ARM version of this clobbers elements_reg, but this version does | |
3842 // not. Some uses of this in A64 assume that elements_reg will be preserved. | |
3843 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, | |
3844 Register key_reg, | |
3845 Register elements_reg, | |
3846 Register scratch1, | |
3847 FPRegister fpscratch1, | |
3848 FPRegister fpscratch2, | |
3849 Label* fail, | |
3850 int elements_offset) { | |
3851 ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); | |
3852 Label store_num; | |
3853 | |
3854 // Speculatively convert the smi to a double - all smis can be exactly | |
3855 // represented as a double. | |
3856 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag); | |
3857 | |
3858 // If value_reg is a smi, we're done. | |
3859 JumpIfSmi(value_reg, &store_num); | |
3860 | |
3861 // Ensure that the object is a heap number. | |
3862 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), | |
3863 fail, DONT_DO_SMI_CHECK); | |
3864 | |
3865 Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); | |
3866 Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double()); | |
3867 | |
3868 // Check for NaN by comparing the number to itself: NaN comparison will | |
3869 // report unordered, indicated by the overflow flag being set. | |
3870 Fcmp(fpscratch1, fpscratch1); | |
3871 Fcsel(fpscratch1, fpscratch2, fpscratch1, vs); | |
3872 | |
3873 // Store the result. | |
3874 Bind(&store_num); | |
3875 Add(scratch1, elements_reg, | |
3876 Operand::UntagSmiAndScale(key_reg, kDoubleSizeLog2)); | |
3877 Str(fpscratch1, | |
3878 FieldMemOperand(scratch1, | |
3879 FixedDoubleArray::kHeaderSize - elements_offset)); | |
3880 } | |
3881 | |
3882 | |
3883 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { | |
3884 return has_frame_ || !stub->SometimesSetsUpAFrame(); | |
3885 } | |
3886 | |
3887 | |
3888 void MacroAssembler::IndexFromHash(Register hash, Register index) { | |
3889 // If the hash field contains an array index, pick it out. The assert checks | |
3890 // that the constants for the maximum number of digits for an array index | |
3891 // cached in the hash field and the number of bits reserved for it do not | |
3892 // conflict. | |
3893 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < | |
3894 (1 << String::kArrayIndexValueBits)); | |
3895 // Pick the array index out of the hash field and smi-tag it into index. | |
3896 // The index occupies the kArrayIndexValueBits bits above kHashShift. | |
3897 STATIC_ASSERT(kSmiTag == 0); | |
3898 Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits); | |
3899 SmiTag(index, hash); | |
3900 } | |
3901 | |
3902 | |
3903 void MacroAssembler::EmitSeqStringSetCharCheck( | |
3904 Register string, | |
3905 Register index, | |
3906 SeqStringSetCharCheckIndexType index_type, | |
3907 Register scratch, | |
3908 uint32_t encoding_mask) { | |
3909 ASSERT(!AreAliased(string, index, scratch)); | |
3910 | |
3911 if (index_type == kIndexIsSmi) { | |
3912 AssertSmi(index); | |
3913 } | |
3914 | |
3915 // Check that string is an object. | |
3916 AssertNotSmi(string, kNonObject); | |
3917 | |
3918 // Check that string has an appropriate map. | |
3919 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); | |
3920 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | |
3921 | |
3922 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask); | |
3923 Cmp(scratch, encoding_mask); | |
3924 Check(eq, kUnexpectedStringType); | |
3925 | |
3926 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset)); | |
3927 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); | |
3928 Check(lt, kIndexIsTooLarge); | |
3929 | |
3930 ASSERT_EQ(0, Smi::FromInt(0)); | |
3931 Cmp(index, 0); | |
3932 Check(ge, kIndexIsNegative); | |
3933 } | |
3934 | |
3935 | |
3936 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | |
3937 Register scratch1, | |
3938 Register scratch2, | |
3939 Label* miss) { | |
3940 ASSERT(!AreAliased(holder_reg, scratch1, scratch2)); | |
3941 Label same_contexts; | |
3942 | |
3943 // Load current lexical context from the stack frame. | |
3944 Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset)); | |
3945 // In debug mode, make sure the lexical context is set. | |
3946 #ifdef DEBUG | |
3947 Cmp(scratch1, 0); | |
3948 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); | |
3949 #endif | |
3950 | |
3951 // Load the native context of the current context. | |
3952 int offset = | |
3953 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; | |
3954 Ldr(scratch1, FieldMemOperand(scratch1, offset)); | |
3955 Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset)); | |
3956 | |
3957 // Check the context is a native context. | |
3958 if (emit_debug_code()) { | |
3959 // Read the first word and compare to the native_context_map. | |
3960 Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset)); | |
3961 CompareRoot(scratch2, Heap::kNativeContextMapRootIndex); | |
3962 Check(eq, kExpectedNativeContext); | |
3963 } | |
3964 | |
3965 // Check if both contexts are the same. | |
3966 Ldr(scratch2, FieldMemOperand(holder_reg, | |
3967 JSGlobalProxy::kNativeContextOffset)); | |
3968 Cmp(scratch1, scratch2); | |
3969 B(&same_contexts, eq); | |
3970 | |
3971 // Check the context is a native context. | |
3972 if (emit_debug_code()) { | |
3973 // We're short on scratch registers here, so use holder_reg as a scratch. | |
3974 Push(holder_reg); | |
3975 Register scratch3 = holder_reg; | |
3976 | |
3977 CompareRoot(scratch2, Heap::kNullValueRootIndex); | |
3978 Check(ne, kExpectedNonNullContext); | |
3979 | |
3980 Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset)); | |
3981 CompareRoot(scratch3, Heap::kNativeContextMapRootIndex); | |
3982 Check(eq, kExpectedNativeContext); | |
3983 Pop(holder_reg); | |
3984 } | |
3985 | |
3986 // Check that the security token in the calling global object is | |
3987 // compatible with the security token in the receiving global | |
3988 // object. | |
3989 int token_offset = Context::kHeaderSize + | |
3990 Context::SECURITY_TOKEN_INDEX * kPointerSize; | |
3991 | |
3992 Ldr(scratch1, FieldMemOperand(scratch1, token_offset)); | |
3993 Ldr(scratch2, FieldMemOperand(scratch2, token_offset)); | |
3994 Cmp(scratch1, scratch2); | |
3995 B(miss, ne); | |
3996 | |
3997 Bind(&same_contexts); | |
3998 } | |
3999 | |
4000 | |
4001 // Compute the hash code from the untagged key. This must be kept in sync with | |
4002 // ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in | |
4003 // code-stub-hydrogen.cc. | |
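// For reference, the sequence below computes, on 32-bit values: | |
//   hash = ~hash + (hash << 15); | |
//   hash = hash ^ (hash >> 12); | |
//   hash = hash + (hash << 2); | |
//   hash = hash ^ (hash >> 4); | |
//   hash = hash * 2057;  // i.e. hash + (hash << 3) + (hash << 11). | |
//   hash = hash ^ (hash >> 16); | |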
4004 void MacroAssembler::GetNumberHash(Register key, Register scratch) { | |
4005 ASSERT(!AreAliased(key, scratch)); | |
4006 | |
4007 // Xor original key with a seed. | |
4008 LoadRoot(scratch, Heap::kHashSeedRootIndex); | |
4009 Eor(key, key, Operand::UntagSmi(scratch)); | |
4010 | |
4011 // The algorithm uses 32-bit integer values. | |
4012 key = key.W(); | |
4013 scratch = scratch.W(); | |
4014 | |
4015 // Compute the hash code from the untagged key. This must be kept in sync | |
4016 // with ComputeIntegerHash in utils.h. | |
4017 // | |
4018 // hash = ~hash + (hash << 15); | |
4019 Mvn(scratch, key); | |
4020 Add(key, scratch, Operand(key, LSL, 15)); | |
4021 // hash = hash ^ (hash >> 12); | |
4022 Eor(key, key, Operand(key, LSR, 12)); | |
4023 // hash = hash + (hash << 2); | |
4024 Add(key, key, Operand(key, LSL, 2)); | |
4025 // hash = hash ^ (hash >> 4); | |
4026 Eor(key, key, Operand(key, LSR, 4)); | |
4027 // hash = hash * 2057; | |
4028 Mov(scratch, Operand(key, LSL, 11)); | |
4029 Add(key, key, Operand(key, LSL, 3)); | |
4030 Add(key, key, scratch); | |
4031 // hash = hash ^ (hash >> 16); | |
4032 Eor(key, key, Operand(key, LSR, 16)); | |
4033 } | |
4034 | |
4035 | |
4036 void MacroAssembler::LoadFromNumberDictionary(Label* miss, | |
4037 Register elements, | |
4038 Register key, | |
4039 Register result, | |
4040 Register scratch0, | |
4041 Register scratch1, | |
4042 Register scratch2, | |
4043 Register scratch3) { | |
4044 ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3)); | |
4045 | |
4046 Label done; | |
4047 | |
4048 SmiUntag(scratch0, key); | |
4049 GetNumberHash(scratch0, scratch1); | |
4050 | |
4051 // Compute the capacity mask. | |
4052 Ldrsw(scratch1, | |
4053 UntagSmiFieldMemOperand(elements, | |
4054 SeededNumberDictionary::kCapacityOffset)); | |
4055 Sub(scratch1, scratch1, 1); | |
4056 | |
4057 // Generate an unrolled loop that performs a few probes before giving up. | |
4058 for (int i = 0; i < kNumberDictionaryProbes; i++) { | |
4059 // Compute the masked index: (hash + i + i * i) & mask. | |
4060 if (i > 0) { | |
4061 Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i)); | |
4062 } else { | |
4063 Mov(scratch2, scratch0); | |
4064 } | |
4065 And(scratch2, scratch2, scratch1); | |
4066 | |
4067 // Scale the index by multiplying by the element size. | |
4068 ASSERT(SeededNumberDictionary::kEntrySize == 3); | |
4069 Add(scratch2, scratch2, Operand(scratch2, LSL, 1)); | |
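// scratch2 = scratch2 * 3, computed as scratch2 + (scratch2 << 1). The | |
// remaining factor of kPointerSize is applied in the address computation | |
// below. | |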
4070 | |
4071 // Check if the key is identical to the name. | |
4072 Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2)); | |
4073 Ldr(scratch3, | |
4074 FieldMemOperand(scratch2, | |
4075 SeededNumberDictionary::kElementsStartOffset)); | |
4076 Cmp(key, scratch3); | |
4077 if (i != (kNumberDictionaryProbes - 1)) { | |
4078 B(eq, &done); | |
4079 } else { | |
4080 B(ne, miss); | |
4081 } | |
4082 } | |
4083 | |
4084 Bind(&done); | |
4085 // Check that the value is a normal property. | |
4086 const int kDetailsOffset = | |
4087 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; | |
4088 Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset)); | |
4089 TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss); | |
4090 | |
4091 // Get the value at the masked, scaled index and return. | |
4092 const int kValueOffset = | |
4093 SeededNumberDictionary::kElementsStartOffset + kPointerSize; | |
4094 Ldr(result, FieldMemOperand(scratch2, kValueOffset)); | |
4095 } | |
4096 | |
4097 | |
4098 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. | |
4099 Register address, | |
4100 Register scratch1, | |
4101 SaveFPRegsMode fp_mode, | |
4102 RememberedSetFinalAction and_then) { | |
4103 ASSERT(!AreAliased(object, address, scratch1)); | |
4104 Label done, store_buffer_overflow; | |
4105 if (emit_debug_code()) { | |
4106 Label ok; | |
4107 JumpIfNotInNewSpace(object, &ok); | |
4108 Abort(kRememberedSetPointerInNewSpace); | |
4109 bind(&ok); | |
4110 } | |
4111 UseScratchRegisterScope temps(this); | |
4112 Register scratch2 = temps.AcquireX(); | |
4113 | |
4114 // Load store buffer top. | |
4115 Mov(scratch2, ExternalReference::store_buffer_top(isolate())); | |
4116 Ldr(scratch1, MemOperand(scratch2)); | |
4117 // Store pointer to buffer and increment buffer top. | |
4118 Str(address, MemOperand(scratch1, kPointerSize, PostIndex)); | |
4119 // Write back new top of buffer. | |
4120 Str(scratch1, MemOperand(scratch2)); | |
4121 // Check whether the new top has reached the end of the buffer, and call | |
4122 // the overflow stub if it has. | |
4123 ASSERT(StoreBuffer::kStoreBufferOverflowBit == | |
4124 (1 << (14 + kPointerSizeLog2))); | |
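// With 64-bit pointers the overflow bit is bit 17 of the new top address, | |
// so testing that single bit of scratch1 is enough to detect a full buffer. | |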
4125 if (and_then == kFallThroughAtEnd) { | |
4126 Tbz(scratch1, (14 + kPointerSizeLog2), &done); | |
4127 } else { | |
4128 ASSERT(and_then == kReturnAtEnd); | |
4129 Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow); | |
4130 Ret(); | |
4131 } | |
4132 | |
4133 Bind(&store_buffer_overflow); | |
4134 Push(lr); | |
4135 StoreBufferOverflowStub store_buffer_overflow_stub = | |
4136 StoreBufferOverflowStub(fp_mode); | |
4137 CallStub(&store_buffer_overflow_stub); | |
4138 Pop(lr); | |
4139 | |
4140 Bind(&done); | |
4141 if (and_then == kReturnAtEnd) { | |
4142 Ret(); | |
4143 } | |
4144 } | |
4145 | |
4146 | |
4147 void MacroAssembler::PopSafepointRegisters() { | |
4148 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | |
4149 PopXRegList(kSafepointSavedRegisters); | |
4150 Drop(num_unsaved); | |
4151 } | |
4152 | |
4153 | |
4154 void MacroAssembler::PushSafepointRegisters() { | |
4155 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so | |
4156 // adjust the stack for unsaved registers. | |
4157 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | |
4158 ASSERT(num_unsaved >= 0); | |
4159 Claim(num_unsaved); | |
4160 PushXRegList(kSafepointSavedRegisters); | |
4161 } | |
4162 | |
4163 | |
4164 void MacroAssembler::PushSafepointFPRegisters() { | |
4165 PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, | |
4166 FPRegister::kAllocatableFPRegisters)); | |
4167 } | |
4168 | |
4169 | |
4170 void MacroAssembler::PopSafepointFPRegisters() { | |
4171 PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, | |
4172 FPRegister::kAllocatableFPRegisters)); | |
4173 } | |
4174 | |
4175 | |
4176 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | |
4177 // Make sure the safepoint registers list is what we expect. | |
4178 ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff); | |
4179 | |
4180 // Safepoint registers are stored contiguously on the stack, but not all the | |
4181 // registers are saved. The following registers are excluded: | |
4182 // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of | |
4183 // the macro assembler. | |
4184 // - x28 (jssp) because the JS stack pointer doesn't need to be included in | |
4185 // safepoint registers. | |
4186 // - x31 (csp) because the system stack pointer doesn't need to be included | |
4187 // in safepoint registers. | |
4188 // | |
4189 // This function implements the mapping of register code to index into the | |
4190 // safepoint register slots. | |
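// For example, x15 maps to slot 15, x18 maps to slot 16 (skipping ip0 and | |
// ip1), and x29/x30 (fp/lr) map to slots 26 and 27 (also skipping jssp). | |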
4191 if ((reg_code >= 0) && (reg_code <= 15)) { | |
4192 return reg_code; | |
4193 } else if ((reg_code >= 18) && (reg_code <= 27)) { | |
4194 // Skip ip0 and ip1. | |
4195 return reg_code - 2; | |
4196 } else if ((reg_code == 29) || (reg_code == 30)) { | |
4197 // Also skip jssp. | |
4198 return reg_code - 3; | |
4199 } else { | |
4200 // This register has no safepoint register slot. | |
4201 UNREACHABLE(); | |
4202 return -1; | |
4203 } | |
4204 } | |
4205 | |
4206 | |
4207 void MacroAssembler::CheckPageFlagSet(const Register& object, | |
4208 const Register& scratch, | |
4209 int mask, | |
4210 Label* if_any_set) { | |
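// Clearing the low kPageAlignmentMask bits of the object's address yields | |
// the start of its page, where the MemoryChunk flags word is stored. | |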
4211 And(scratch, object, ~Page::kPageAlignmentMask); | |
4212 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); | |
4213 TestAndBranchIfAnySet(scratch, mask, if_any_set); | |
4214 } | |
4215 | |
4216 | |
4217 void MacroAssembler::CheckPageFlagClear(const Register& object, | |
4218 const Register& scratch, | |
4219 int mask, | |
4220 Label* if_all_clear) { | |
4221 And(scratch, object, ~Page::kPageAlignmentMask); | |
4222 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); | |
4223 TestAndBranchIfAllClear(scratch, mask, if_all_clear); | |
4224 } | |
4225 | |
4226 | |
4227 void MacroAssembler::RecordWriteField( | |
4228 Register object, | |
4229 int offset, | |
4230 Register value, | |
4231 Register scratch, | |
4232 LinkRegisterStatus lr_status, | |
4233 SaveFPRegsMode save_fp, | |
4234 RememberedSetAction remembered_set_action, | |
4235 SmiCheck smi_check) { | |
4236 // First, check if a write barrier is even needed. The tests below | |
4237 // catch stores of Smis. | |
4238 Label done; | |
4239 | |
4240 // Skip the barrier if writing a smi. | |
4241 if (smi_check == INLINE_SMI_CHECK) { | |
4242 JumpIfSmi(value, &done); | |
4243 } | |
4244 | |
4245 // Although the object register is tagged, the offset is relative to the start | |
4246 // of the object, so offset must be a multiple of kPointerSize. | |
4247 ASSERT(IsAligned(offset, kPointerSize)); | |
4248 | |
4249 Add(scratch, object, offset - kHeapObjectTag); | |
4250 if (emit_debug_code()) { | |
4251 Label ok; | |
4252 Tst(scratch, (1 << kPointerSizeLog2) - 1); | |
4253 B(eq, &ok); | |
4254 Abort(kUnalignedCellInWriteBarrier); | |
4255 Bind(&ok); | |
4256 } | |
4257 | |
4258 RecordWrite(object, | |
4259 scratch, | |
4260 value, | |
4261 lr_status, | |
4262 save_fp, | |
4263 remembered_set_action, | |
4264 OMIT_SMI_CHECK); | |
4265 | |
4266 Bind(&done); | |
4267 | |
4268 // Clobber clobbered input registers when running with the debug-code flag | |
4269 // turned on to provoke errors. | |
4270 if (emit_debug_code()) { | |
4271 Mov(value, Operand(BitCast<int64_t>(kZapValue + 4))); | |
4272 Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8))); | |
4273 } | |
4274 } | |
4275 | |
4276 | |
4277 // Will clobber: object, address, value. | |
4278 // If lr_status is kLRHasBeenSaved, lr will also be clobbered. | |
4279 // | |
4280 // The register 'object' contains a heap object pointer. The heap object tag is | |
4281 // shifted away. | |
4282 void MacroAssembler::RecordWrite(Register object, | |
4283 Register address, | |
4284 Register value, | |
4285 LinkRegisterStatus lr_status, | |
4286 SaveFPRegsMode fp_mode, | |
4287 RememberedSetAction remembered_set_action, | |
4288 SmiCheck smi_check) { | |
4289 ASM_LOCATION("MacroAssembler::RecordWrite"); | |
4290 ASSERT(!AreAliased(object, value)); | |
4291 | |
4292 if (emit_debug_code()) { | |
4293 UseScratchRegisterScope temps(this); | |
4294 Register temp = temps.AcquireX(); | |
4295 | |
4296 Ldr(temp, MemOperand(address)); | |
4297 Cmp(temp, value); | |
4298 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | |
4299 } | |
4300 | |
4301 // Count number of write barriers in generated code. | |
4302 isolate()->counters()->write_barriers_static()->Increment(); | |
4303 // TODO(mstarzinger): Dynamic counter missing. | |
4304 | |
4305 // First, check if a write barrier is even needed. The tests below | |
4306 // catch stores of smis and stores into the young generation. | |
4307 Label done; | |
4308 | |
4309 if (smi_check == INLINE_SMI_CHECK) { | |
4310 ASSERT_EQ(0, kSmiTag); | |
4311 JumpIfSmi(value, &done); | |
4312 } | |
4313 | |
4314 CheckPageFlagClear(value, | |
4315 value, // Used as scratch. | |
4316 MemoryChunk::kPointersToHereAreInterestingMask, | |
4317 &done); | |
4318 CheckPageFlagClear(object, | |
4319 value, // Used as scratch. | |
4320 MemoryChunk::kPointersFromHereAreInterestingMask, | |
4321 &done); | |
4322 | |
4323 // Record the actual write. | |
4324 if (lr_status == kLRHasNotBeenSaved) { | |
4325 Push(lr); | |
4326 } | |
4327 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); | |
4328 CallStub(&stub); | |
4329 if (lr_status == kLRHasNotBeenSaved) { | |
4330 Pop(lr); | |
4331 } | |
4332 | |
4333 Bind(&done); | |
4334 | |
4335 // Clobber clobbered registers when running with the debug-code flag | |
4336 // turned on to provoke errors. | |
4337 if (emit_debug_code()) { | |
4338 Mov(address, Operand(BitCast<int64_t>(kZapValue + 12))); | |
4339 Mov(value, Operand(BitCast<int64_t>(kZapValue + 16))); | |
4340 } | |
4341 } | |
4342 | |
4343 | |
4344 void MacroAssembler::AssertHasValidColor(const Register& reg) { | |
4345 if (emit_debug_code()) { | |
4346 // The bit sequence is backward. The first character in the string | |
4347 // represents the least significant bit. | |
4348 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); | |
4349 | |
4350 Label color_is_valid; | |
4351 Tbnz(reg, 0, &color_is_valid); | |
4352 Tbz(reg, 1, &color_is_valid); | |
4353 Abort(kUnexpectedColorFound); | |
4354 Bind(&color_is_valid); | |
4355 } | |
4356 } | |
4357 | |
4358 | |
4359 void MacroAssembler::GetMarkBits(Register addr_reg, | |
4360 Register bitmap_reg, | |
4361 Register shift_reg) { | |
4362 ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg)); | |
4363 ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits()); | |
4364 // addr_reg is divided into fields: | |
4365 // |63 page base 20|19 high 8|7 shift 3|2 0| | |
4366 // 'high' gives the index of the cell holding color bits for the object. | |
4367 // 'shift' gives the offset in the cell for this object's color. | |
4368 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; | |
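// With 64-bit pointers and 32-bit bitmap cells this is 3 + 5 = 8, which is | |
// where the 'high' field starts in the diagram above. | |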
4369 UseScratchRegisterScope temps(this); | |
4370 Register temp = temps.AcquireX(); | |
4371 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits); | |
4372 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask); | |
4373 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2)); | |
4374 // bitmap_reg: | |
4375 // |63 page base 20|19 zeros 15|14 high 3|2 0| | |
4376 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); | |
4377 } | |
4378 | |
4379 | |
4380 void MacroAssembler::HasColor(Register object, | |
4381 Register bitmap_scratch, | |
4382 Register shift_scratch, | |
4383 Label* has_color, | |
4384 int first_bit, | |
4385 int second_bit) { | |
4386 // See mark-compact.h for color definitions. | |
4387 ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch)); | |
4388 | |
4389 GetMarkBits(object, bitmap_scratch, shift_scratch); | |
4390 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | |
4391 // Shift the bitmap down to get the color of the object in bits [1:0]. | |
4392 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch); | |
4393 | |
4394 AssertHasValidColor(bitmap_scratch); | |
4395 | |
4396 // These bit sequences are backwards. The first character in the string | |
4397 // represents the least significant bit. | |
4398 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); | |
4399 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | |
4400 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); | |
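// For example, JumpIfBlack passes (first_bit, second_bit) == (1, 0): bit 0 | |
// must be set and bit 1 clear to match the "10" black pattern. | |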
4401 | |
4402 // Check for the color. | |
4403 if (first_bit == 0) { | |
4404 // Checking for white. | |
4405 ASSERT(second_bit == 0); | |
4406 // We only need to test the first bit. | |
4407 Tbz(bitmap_scratch, 0, has_color); | |
4408 } else { | |
4409 Label other_color; | |
4410 // Checking for grey or black. | |
4411 Tbz(bitmap_scratch, 0, &other_color); | |
4412 if (second_bit == 0) { | |
4413 Tbz(bitmap_scratch, 1, has_color); | |
4414 } else { | |
4415 Tbnz(bitmap_scratch, 1, has_color); | |
4416 } | |
4417 Bind(&other_color); | |
4418 } | |
4419 | |
4420 // Fall through if it does not have the right color. | |
4421 } | |
4422 | |
4423 | |
4424 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, | |
4425 Register scratch, | |
4426 Label* if_deprecated) { | |
4427 if (map->CanBeDeprecated()) { | |
4428 Mov(scratch, Operand(map)); | |
4429 Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset)); | |
4430 TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated); | |
4431 } | |
4432 } | |
4433 | |
4434 | |
4435 void MacroAssembler::JumpIfBlack(Register object, | |
4436 Register scratch0, | |
4437 Register scratch1, | |
4438 Label* on_black) { | |
4439 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | |
4440 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. | |
4441 } | |
4442 | |
4443 | |
4444 void MacroAssembler::JumpIfDictionaryInPrototypeChain( | |
4445 Register object, | |
4446 Register scratch0, | |
4447 Register scratch1, | |
4448 Label* found) { | |
4449 ASSERT(!AreAliased(object, scratch0, scratch1)); | |
4450 Factory* factory = isolate()->factory(); | |
4451 Register current = scratch0; | |
4452 Label loop_again; | |
4453 | |
4454 // Walk the prototype chain, starting at the object itself. | |
4455 Mov(current, object); | |
4456 | |
4457 // Loop based on the map going up the prototype chain. | |
4458 Bind(&loop_again); | |
4459 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset)); | |
4460 Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset)); | |
4461 Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount); | |
4462 CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found); | |
4463 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset)); | |
4464 CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again); | |
4465 } | |
4466 | |
4467 | |
4468 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, | |
4469 Register result) { | |
4470 ASSERT(!result.Is(ldr_location)); | |
4471 const uint32_t kLdrLitOffset_lsb = 5; | |
4472 const uint32_t kLdrLitOffset_width = 19; | |
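// An LDR (literal) instruction encodes a signed 19-bit word offset in bits | |
// [23:5] of the instruction, relative to the instruction's own address and | |
// scaled by the 4-byte instruction size; the Sbfx/Add below decode it. | |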
  Ldr(result, MemOperand(ldr_location));
  if (emit_debug_code()) {
    And(result, result, LoadLiteralFMask);
    Cmp(result, LoadLiteralFixed);
    Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
    // The instruction was clobbered. Reload it.
    Ldr(result, MemOperand(ldr_location));
  }
  Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
  Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
}


void MacroAssembler::EnsureNotWhite(
    Register value,
    Register bitmap_scratch,
    Register shift_scratch,
    Register load_scratch,
    Register length_scratch,
    Label* value_is_white_and_not_data) {
  ASSERT(!AreAliased(
      value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));

  // These bit sequences are backwards. The first character in the string
  // represents the least significant bit.
  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);

  GetMarkBits(value, bitmap_scratch, shift_scratch);
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Lsr(load_scratch, load_scratch, shift_scratch);

  AssertHasValidColor(load_scratch);

  // If the value is black or grey we don't need to do anything.
  // Since both black and grey have a 1 in the first position and white does
  // not have a 1 there we only need to check one bit.
  Label done;
  Tbnz(load_scratch, 0, &done);

  // Value is white. We check whether it is data that doesn't need scanning.
  Register map = load_scratch;  // Holds map while checking type.
  Label is_data_object;

  // Check for heap-number.
  Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
  Mov(length_scratch, HeapNumber::kSize);
  JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);

  // Check for strings.
  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
  // If it's a string and it's not a cons string then it's an object containing
  // no GC pointers.
  Register instance_type = load_scratch;
  Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
  TestAndBranchIfAnySet(instance_type,
                        kIsIndirectStringMask | kIsNotStringMask,
                        value_is_white_and_not_data);

  // It's a non-indirect (non-cons and non-slice) string.
  // If it's external, the length is just ExternalString::kSize.
  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
  // External strings are the only ones with the kExternalStringTag bit
  // set.
  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
  Mov(length_scratch, ExternalString::kSize);
  TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);

  // Sequential string, either ASCII or UC16.
  // Load the untagged length, then scale it by the character size: the
  // string encoding bit is set for ASCII strings (shift of zero) and clear
  // for UC16 strings, where Cset produces a shift of one, doubling the
  // length.
  ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
  Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
                                                String::kLengthOffset));
  Tst(instance_type, kStringEncodingMask);
  Cset(load_scratch, eq);
  Lsl(length_scratch, length_scratch, load_scratch);
  Add(length_scratch,
      length_scratch,
      SeqString::kHeaderSize + kObjectAlignmentMask);
  Bic(length_scratch, length_scratch, kObjectAlignmentMask);
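  // Illustrative example (not from the original source): a UC16 string of
  // length 3 yields 3 << 1 == 6 payload bytes; adding the header and rounding
  // up to the object alignment gives the size credited to live bytes below.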

  Bind(&is_data_object);
  // Value is a data object, and it is white. Mark it black. Since we know
  // that the object is white we can make it black by flipping one bit.
  Register mask = shift_scratch;
  Mov(load_scratch, 1);
  Lsl(mask, load_scratch, shift_scratch);

  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
  Orr(load_scratch, load_scratch, mask);
  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));

  Bic(bitmap_scratch, bitmap_scratch, Page::kPageAlignmentMask);
  Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
  Add(load_scratch, load_scratch, length_scratch);
  Str(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));

  Bind(&done);
}


void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
  if (emit_debug_code()) {
    Check(cond, reason);
  }
}


void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
  if (emit_debug_code()) {
    CheckRegisterIsClear(reg, reason);
  }
}


void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                          Heap::RootListIndex index,
                                          BailoutReason reason) {
  if (emit_debug_code()) {
    CompareRoot(reg, index);
    Check(eq, reason);
  }
}


void MacroAssembler::AssertFastElements(Register elements) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Label ok;
    Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
    JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
    JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
    Abort(kJSObjectWithFastElementsMapHasSlowElements);
    Bind(&ok);
  }
}


void MacroAssembler::AssertIsString(const Register& object) {
  if (emit_debug_code()) {
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    STATIC_ASSERT(kSmiTag == 0);
    Tst(object, kSmiTagMask);
    Check(ne, kOperandIsNotAString);
    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
    CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
    Check(lo, kOperandIsNotAString);
  }
}


void MacroAssembler::Check(Condition cond, BailoutReason reason) {
  Label ok;
  B(cond, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}


void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
  Label ok;
  Cbz(reg, &ok);
  Abort(reason);
  // Will not return here.
  Bind(&ok);
}


void MacroAssembler::Abort(BailoutReason reason) {
#ifdef DEBUG
  RecordComment("Abort message: ");
  RecordComment(GetBailoutReason(reason));

  if (FLAG_trap_on_abort) {
    Brk(0);
    return;
  }
#endif

  // Abort is used in some contexts where csp is the stack pointer. In order to
  // simplify the CallRuntime code, make sure that jssp is the stack pointer.
  // There is no risk of register corruption here because Abort doesn't return.
  Register old_stack_pointer = StackPointer();
  SetStackPointer(jssp);
  Mov(jssp, old_stack_pointer);

  // We need some scratch registers for the MacroAssembler, so make sure we
  // have some. This is safe here because Abort never returns.
  RegList old_tmp_list = TmpList()->list();
  TmpList()->Combine(ip0);
  TmpList()->Combine(ip1);

  if (use_real_aborts()) {
    // Avoid infinite recursion; Push contains some assertions that use Abort.
    NoUseRealAbortsScope no_real_aborts(this);

    Mov(x0, Smi::FromInt(reason));
    Push(x0);

    if (!has_frame_) {
      // We don't actually want to generate a pile of code for this, so just
      // claim there is a stack frame, without generating one.
      FrameScope scope(this, StackFrame::NONE);
      CallRuntime(Runtime::kAbort, 1);
    } else {
      CallRuntime(Runtime::kAbort, 1);
    }
  } else {
    // Load the string to pass to Printf.
    Label msg_address;
    Adr(x0, &msg_address);

    // Call Printf directly to report the error.
    CallPrintf();

    // We need a way to stop execution on both the simulator and real hardware,
    // and Unreachable() is the best option.
    Unreachable();

    // Emit the message string directly in the instruction stream.
    {
      BlockPoolsScope scope(this);
      Bind(&msg_address);
      EmitStringData(GetBailoutReason(reason));
    }
  }

  SetStackPointer(old_stack_pointer);
  TmpList()->set_list(old_tmp_list);
}


void MacroAssembler::LoadTransitionedArrayMapConditional(
    ElementsKind expected_kind,
    ElementsKind transitioned_kind,
    Register map_in_out,
    Register scratch1,
    Register scratch2,
    Label* no_map_match) {
  // Load the global or builtins object from the current context.
  Ldr(scratch1, GlobalObjectMemOperand());
  Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));

  // Check that the function's map is the same as the expected cached map.
  Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
  size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(scratch2, FieldMemOperand(scratch1, offset));
  Cmp(map_in_out, scratch2);
  B(ne, no_map_match);

  // Use the transitioned cached map.
  offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
  Ldr(map_in_out, FieldMemOperand(scratch1, offset));
}


void MacroAssembler::LoadGlobalFunction(int index, Register function) {
  // Load the global or builtins object from the current context.
  Ldr(function, GlobalObjectMemOperand());
  // Load the native context from the global or builtins object.
  Ldr(function, FieldMemOperand(function,
                                GlobalObject::kNativeContextOffset));
  // Load the function from the native context.
  Ldr(function, ContextMemOperand(function, index));
}


void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
                                                  Register map,
                                                  Register scratch) {
  // Load the initial map. The global functions all have initial maps.
  Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
  if (emit_debug_code()) {
    Label ok, fail;
    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
    B(&ok);
    Bind(&fail);
    Abort(kGlobalFunctionsMustHaveInitialMap);
    Bind(&ok);
  }
}


// This is the main Printf implementation. All other Printf variants call
// PrintfNoPreserve after saving the caller-saved registers they are required
// to preserve.
void MacroAssembler::PrintfNoPreserve(const char* format,
                                      const CPURegister& arg0,
                                      const CPURegister& arg1,
                                      const CPURegister& arg2,
                                      const CPURegister& arg3) {
  // We cannot handle a caller-saved stack pointer. It doesn't make much sense
  // in most cases anyway, so this restriction shouldn't be too serious.
  ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));

  // Make sure that the macro assembler doesn't try to use any of our arguments
  // as scratch registers.
  ASSERT(!TmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
  ASSERT(!FPTmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));

  // We cannot print the stack pointer because it is typically used to preserve
  // caller-saved registers (using other Printf variants which depend on this
  // helper).
  ASSERT(!AreAliased(arg0, StackPointer()));
  ASSERT(!AreAliased(arg1, StackPointer()));
  ASSERT(!AreAliased(arg2, StackPointer()));
  ASSERT(!AreAliased(arg3, StackPointer()));

  static const int kMaxArgCount = 4;
  // Assume that we have the maximum number of arguments until we know
  // otherwise.
  int arg_count = kMaxArgCount;

  // The provided arguments.
  CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3};

  // The PCS registers where the arguments need to end up.
  CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg};

  // Promote FP arguments to doubles, and integer arguments to X registers.
  // Note that FP and integer arguments cannot be mixed, but we'll check
  // AreSameSizeAndType once we've processed these promotions.
  for (int i = 0; i < kMaxArgCount; i++) {
    if (args[i].IsRegister()) {
      // Note that we use x1 onwards, because x0 will hold the format string.
      pcs[i] = Register::XRegFromCode(i + 1);
      // For simplicity, we handle all integer arguments as X registers. An X
      // register argument takes the same space as a W register argument in the
      // PCS anyway. The only limitation is that we must explicitly clear the
      // top word for W register arguments as the callee will expect it to be
      // clear.
      if (!args[i].Is64Bits()) {
        const Register& as_x = args[i].X();
        And(as_x, as_x, 0x00000000ffffffff);
        args[i] = as_x;
      }
    } else if (args[i].IsFPRegister()) {
      pcs[i] = FPRegister::DRegFromCode(i);
      // C and C++ varargs functions (such as printf) implicitly promote float
      // arguments to doubles.
      if (!args[i].Is64Bits()) {
        FPRegister s(args[i]);
        const FPRegister& as_d = args[i].D();
        Fcvt(as_d, s);
        args[i] = as_d;
      }
    } else {
      // This is the first empty (NoCPUReg) argument, so use it to set the
      // argument count and bail out.
      arg_count = i;
      break;
    }
  }
  ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
  // Check that every remaining argument is NoCPUReg.
  for (int i = arg_count; i < kMaxArgCount; i++) {
    ASSERT(args[i].IsNone());
  }
  ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
                                                args[2], args[3],
                                                pcs[0], pcs[1],
                                                pcs[2], pcs[3]));

  // Move the arguments into the appropriate PCS registers.
  //
  // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is
  // surprisingly complicated.
  //
  //  * For even numbers of registers, we push the arguments and then pop them
  //    into their final registers. This maintains 16-byte stack alignment in
  //    case csp is the stack pointer, since we're only handling X or D
  //    registers at this point.
  //
  //  * For odd numbers of registers, we push and pop all but one register in
  //    the same way, but the left-over register is moved directly, since we
  //    can always safely move one register without clobbering any source.
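  //
  // For example (illustrative), with three X-register arguments the code
  // below reduces to:
  //   Push(args[1], args[0]);  // The even part goes via the stack.
  //   Mov(x3, args[2]);        // The leftover argument is moved directly.
  //   Pop(x1, x2);             // Popped straight into their PCS registers.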
  if (arg_count >= 4) {
    Push(args[3], args[2], args[1], args[0]);
  } else if (arg_count >= 2) {
    Push(args[1], args[0]);
  }

  if ((arg_count % 2) != 0) {
    // Move the left-over register directly.
    const CPURegister& leftover_arg = args[arg_count - 1];
    const CPURegister& leftover_pcs = pcs[arg_count - 1];
    if (leftover_arg.IsRegister()) {
      Mov(Register(leftover_pcs), Register(leftover_arg));
    } else {
      Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));
    }
  }

  if (arg_count >= 4) {
    Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
  } else if (arg_count >= 2) {
    Pop(pcs[0], pcs[1]);
  }

  // Load the format string into x0, as per the procedure-call standard.
  //
  // To make the code as portable as possible, the format string is encoded
  // directly in the instruction stream. It might be cleaner to encode it in a
  // literal pool, but since Printf is usually used for debugging, it is
  // beneficial for it to be minimally dependent on other features.
  Label format_address;
  Adr(x0, &format_address);

  // Emit the format string directly in the instruction stream.
  { BlockPoolsScope scope(this);
    Label after_data;
    B(&after_data);
    Bind(&format_address);
    EmitStringData(format);
    Unreachable();
    Bind(&after_data);
  }

  // We don't pass any arguments on the stack, but we still need to align the C
  // stack pointer to a 16-byte boundary for PCS compliance.
  if (!csp.Is(StackPointer())) {
    Bic(csp, StackPointer(), 0xf);
  }

  CallPrintf(pcs[0].type());
}


void MacroAssembler::CallPrintf(CPURegister::RegisterType type) {
  // A call to printf needs special handling for the simulator, since the
  // system printf function will use a different instruction set and the
  // procedure-call standard will not be compatible.
#ifdef USE_SIMULATOR
  { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
    hlt(kImmExceptionIsPrintf);
    dc32(type);
  }
#else
  Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
#endif
}


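// Example usage (illustrative, not from the original source): a debugging
// call such as
//   masm->Printf("x0 holds 0x%" PRIx64 "\n", x0);
// prints the live value of x0. Because Printf saves and restores all
// caller-saved registers as well as NZCV, it can be inserted at almost any
// point in generated code without disturbing it.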
void MacroAssembler::Printf(const char* format,
                            const CPURegister& arg0,
                            const CPURegister& arg1,
                            const CPURegister& arg2,
                            const CPURegister& arg3) {
  // Printf is expected to preserve all registers, so make sure that none are
  // available as scratch registers until we've preserved them.
  RegList old_tmp_list = TmpList()->list();
  RegList old_fp_tmp_list = FPTmpList()->list();
  TmpList()->set_list(0);
  FPTmpList()->set_list(0);

  // Preserve all caller-saved registers as well as NZCV.
  // If csp is the stack pointer, PushCPURegList asserts that the size of each
  // list is a multiple of 16 bytes.
  PushCPURegList(kCallerSaved);
  PushCPURegList(kCallerSavedFP);

  // We can use caller-saved registers as scratch values (except for argN).
  CPURegList tmp_list = kCallerSaved;
  CPURegList fp_tmp_list = kCallerSavedFP;
  tmp_list.Remove(arg0, arg1, arg2, arg3);
  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
  TmpList()->set_list(tmp_list.list());
  FPTmpList()->set_list(fp_tmp_list.list());

  // Preserve NZCV.
  { UseScratchRegisterScope temps(this);
    Register tmp = temps.AcquireX();
    Mrs(tmp, NZCV);
    Push(tmp, xzr);
  }

  PrintfNoPreserve(format, arg0, arg1, arg2, arg3);

  { UseScratchRegisterScope temps(this);
    Register tmp = temps.AcquireX();
    Pop(xzr, tmp);
    Msr(NZCV, tmp);
  }

  PopCPURegList(kCallerSavedFP);
  PopCPURegList(kCallerSaved);

  TmpList()->set_list(old_tmp_list);
  FPTmpList()->set_list(old_fp_tmp_list);
}


void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
  // TODO(jbramley): Other architectures use the internal memcpy to copy the
  // sequence. If this is a performance bottleneck, we should consider caching
  // the sequence and copying it in the same way.
  InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
  ASSERT(jssp.Is(StackPointer()));
  EmitFrameSetupForCodeAgePatching(this);
}


void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
  InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
  ASSERT(jssp.Is(StackPointer()));
  EmitCodeAgeSequence(this, stub);
}


#undef __
#define __ assm->


void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler* assm) {
  Label start;
  __ bind(&start);

  // We can do this sequence using four instructions, but the code ageing
  // sequence that patches it needs five, so we use the extra space to try to
  // simplify some addressing modes and remove some dependencies (compared to
  // using two stp instructions with write-back).
  __ sub(jssp, jssp, 4 * kXRegSize);
  __ sub(csp, csp, 4 * kXRegSize);
  __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
  __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
  __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);

  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
}


void MacroAssembler::EmitCodeAgeSequence(Assembler* assm,
                                         Code* stub) {
  Label start;
  __ bind(&start);
  // When the stub is called, the sequence is replaced with the young sequence
  // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
  // stub jumps to &start, stored in x0. The young sequence does not call the
  // stub so there is no infinite loop here.
  //
  // A branch (br) is used rather than a call (blr) because this code replaces
  // the frame setup code that would normally preserve lr.
  __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
  __ adr(x0, &start);
  __ br(ip0);
  // IsCodeAgeSequence in codegen-a64.cc assumes that the code generated up
  // until now (kCodeAgeStubEntryOffset) is the same for all code age
  // sequences.
  __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
  if (stub) {
    __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
    __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
  }
}


bool MacroAssembler::IsYoungSequence(byte* sequence) {
  // Generate a young sequence to compare with.
  const int length = kCodeAgeSequenceSize / kInstructionSize;
  static bool initialized = false;
  static byte young[kCodeAgeSequenceSize];
  if (!initialized) {
    PatchingAssembler patcher(young, length);
    // The young sequence is the frame setup code for FUNCTION code types. It
    // is generated by FullCodeGenerator::Generate.
    MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
    initialized = true;
  }

  bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0);
  ASSERT(is_young || IsCodeAgeSequence(sequence));
  return is_young;
}


#ifdef DEBUG
bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {
  // The old sequence varies depending on the code age. However, the code up
  // until kCodeAgeStubEntryOffset does not change, so we can check that part
  // to get a reasonable level of verification.
  const int length = kCodeAgeStubEntryOffset / kInstructionSize;
  static bool initialized = false;
  static byte old[kCodeAgeStubEntryOffset];
  if (!initialized) {
    PatchingAssembler patcher(old, length);
    MacroAssembler::EmitCodeAgeSequence(&patcher, NULL);
    initialized = true;
  }
  return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0;
}
#endif


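// Signed truncating division by a constant, using the multiply-and-shift
// ("magic number") technique. Worked example (illustrative, not from the
// original source): for divisor 7 the multiplier/shift pair is 0x92492493
// (-1840700269 as a signed 32-bit value) and 2. For dividend 21:
//   Smull/Asr:  (21 * -1840700269) >> 32               == -9
//   Add:        -9 + 21  (multiplier < 0, divisor > 0) == 12
//   Asr:        12 >> 2                                == 3
//   Final Add:  3 + (21 >>> 31)                        == 3 == 21 / 7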
void MacroAssembler::TruncatingDiv(Register result,
                                   Register dividend,
                                   int32_t divisor) {
  ASSERT(!AreAliased(result, dividend));
  ASSERT(result.Is32Bits() && dividend.Is32Bits());
  MultiplierAndShift ms(divisor);
  Mov(result, ms.multiplier());
  Smull(result.X(), dividend, result);
  Asr(result.X(), result.X(), 32);
  if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
  if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
  if (ms.shift() > 0) Asr(result, result, ms.shift());
  Add(result, result, Operand(dividend, LSR, 31));
}


#undef __


UseScratchRegisterScope::~UseScratchRegisterScope() {
  available_->set_list(old_available_);
  availablefp_->set_list(old_availablefp_);
}


Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
  int code = AcquireNextAvailable(available_).code();
  return Register::Create(code, reg.SizeInBits());
}


FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
  int code = AcquireNextAvailable(availablefp_).code();
  return FPRegister::Create(code, reg.SizeInBits());
}


CPURegister UseScratchRegisterScope::AcquireNextAvailable(
    CPURegList* available) {
  CHECK(!available->IsEmpty());
  CPURegister result = available->PopLowestIndex();
  ASSERT(!AreAliased(result, xzr, csp));
  return result;
}


#define __ masm->


void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
                              const Label* smi_check) {
  Assembler::BlockPoolsScope scope(masm);
  if (reg.IsValid()) {
    ASSERT(smi_check->is_bound());
    ASSERT(reg.Is64Bits());

    // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
    // 'check' in the other bits. The possible offset is limited in that we
    // use BitField to pack the data, and the underlying data type is a
    // uint32_t.
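    // For example (illustrative): with the register code in the low five
    // bits, a smi check on x2 emitted five instructions earlier would be
    // encoded as (5 << 5) | 2 == 0xa2.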
    uint32_t delta = __ InstructionsGeneratedSince(smi_check);
    __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
  } else {
    ASSERT(!smi_check->is_bound());

    // An offset of 0 indicates that there is no patch site.
    __ InlineData(0);
  }
}


InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
    : reg_(NoReg), smi_check_(NULL) {
  InstructionSequence* inline_data = InstructionSequence::At(info);
  ASSERT(inline_data->IsInlineData());
  if (inline_data->IsInlineData()) {
    uint64_t payload = inline_data->InlineData();
    // We use BitField to decode the payload, and BitField can only handle
    // 32-bit values.
    ASSERT(is_uint32(payload));
    if (payload != 0) {
      int reg_code = RegisterBits::decode(payload);
      reg_ = Register::XRegFromCode(reg_code);
      uint64_t smi_check_delta = DeltaBits::decode(payload);
      ASSERT(smi_check_delta != 0);
      smi_check_ = inline_data->preceding(smi_check_delta);
    }
  }
}


#undef __


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64