Chromium Code Reviews

Side by Side Diff: src/arm64/macro-assembler-arm64.cc

Issue 430503007: Rename ASSERT* to DCHECK*. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE and fixes | Created: 6 years, 4 months ago
OLD | NEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #if V8_TARGET_ARCH_ARM64 7 #if V8_TARGET_ARCH_ARM64
8 8
9 #include "src/bootstrapper.h" 9 #include "src/bootstrapper.h"
10 #include "src/codegen.h" 10 #include "src/codegen.h"
(...skipping 56 matching lines...)
67 67
68 // If the operation is NOT, invert the operation and immediate. 68 // If the operation is NOT, invert the operation and immediate.
69 if ((op & NOT) == NOT) { 69 if ((op & NOT) == NOT) {
70 op = static_cast<LogicalOp>(op & ~NOT); 70 op = static_cast<LogicalOp>(op & ~NOT);
71 immediate = ~immediate; 71 immediate = ~immediate;
72 } 72 }
73 73
74 // Ignore the top 32 bits of an immediate if we're moving to a W register. 74 // Ignore the top 32 bits of an immediate if we're moving to a W register.
75 if (rd.Is32Bits()) { 75 if (rd.Is32Bits()) {
76 // Check that the top 32 bits are consistent. 76 // Check that the top 32 bits are consistent.
77 ASSERT(((immediate >> kWRegSizeInBits) == 0) || 77 DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
78 ((immediate >> kWRegSizeInBits) == -1)); 78 ((immediate >> kWRegSizeInBits) == -1));
79 immediate &= kWRegMask; 79 immediate &= kWRegMask;
80 } 80 }
81 81
82 ASSERT(rd.Is64Bits() || is_uint32(immediate)); 82 DCHECK(rd.Is64Bits() || is_uint32(immediate));
83 83
84 // Special cases for all set or all clear immediates. 84 // Special cases for all set or all clear immediates.
85 if (immediate == 0) { 85 if (immediate == 0) {
86 switch (op) { 86 switch (op) {
87 case AND: 87 case AND:
88 Mov(rd, 0); 88 Mov(rd, 0);
89 return; 89 return;
90 case ORR: // Fall through. 90 case ORR: // Fall through.
91 case EOR: 91 case EOR:
92 Mov(rd, rn); 92 Mov(rd, rn);
(...skipping 37 matching lines...)
130 // register so we use the temp register as an intermediate again. 130 // register so we use the temp register as an intermediate again.
131 Logical(temp, rn, imm_operand, op); 131 Logical(temp, rn, imm_operand, op);
132 Mov(csp, temp); 132 Mov(csp, temp);
133 AssertStackConsistency(); 133 AssertStackConsistency();
134 } else { 134 } else {
135 Logical(rd, rn, imm_operand, op); 135 Logical(rd, rn, imm_operand, op);
136 } 136 }
137 } 137 }
138 138
139 } else if (operand.IsExtendedRegister()) { 139 } else if (operand.IsExtendedRegister()) {
140 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); 140 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
141 // Add/sub extended supports shift <= 4. We want to support exactly the 141 // Add/sub extended supports shift <= 4. We want to support exactly the
142 // same modes here. 142 // same modes here.
143 ASSERT(operand.shift_amount() <= 4); 143 DCHECK(operand.shift_amount() <= 4);
144 ASSERT(operand.reg().Is64Bits() || 144 DCHECK(operand.reg().Is64Bits() ||
145 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); 145 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
146 Register temp = temps.AcquireSameSizeAs(rn); 146 Register temp = temps.AcquireSameSizeAs(rn);
147 EmitExtendShift(temp, operand.reg(), operand.extend(), 147 EmitExtendShift(temp, operand.reg(), operand.extend(),
148 operand.shift_amount()); 148 operand.shift_amount());
149 Logical(rd, rn, temp, op); 149 Logical(rd, rn, temp, op);
150 150
151 } else { 151 } else {
152 // The operand can be encoded in the instruction. 152 // The operand can be encoded in the instruction.
153 ASSERT(operand.IsShiftedRegister()); 153 DCHECK(operand.IsShiftedRegister());
154 Logical(rd, rn, operand, op); 154 Logical(rd, rn, operand, op);
155 } 155 }
156 } 156 }
157 157
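
Note: the NOT handling at the top of LogicalMacro relies on each inverted logical op being the base op applied to the complemented immediate (BIC is AND-NOT, ORN is ORR-NOT, EON is EOR-NOT). A minimal standalone sketch of that identity, in plain C++ with illustrative names rather than V8's:

#include <cassert>
#include <cstdint>

// Base ops and their NOT variants, as bit operations on 64-bit values.
uint64_t And(uint64_t rn, uint64_t imm) { return rn & imm; }
uint64_t Bic(uint64_t rn, uint64_t imm) { return rn & ~imm; }  // AND-NOT
uint64_t Orr(uint64_t rn, uint64_t imm) { return rn | imm; }
uint64_t Orn(uint64_t rn, uint64_t imm) { return rn | ~imm; }  // ORR-NOT
uint64_t Eor(uint64_t rn, uint64_t imm) { return rn ^ imm; }
uint64_t Eon(uint64_t rn, uint64_t imm) { return rn ^ ~imm; }  // EOR-NOT

int main() {
  uint64_t rn = 0x1234567890abcdef, imm = 0x00ff00ff00ff00ff;
  // Clearing the NOT bit and inverting the immediate preserves the result.
  assert(Bic(rn, imm) == And(rn, ~imm));
  assert(Orn(rn, imm) == Orr(rn, ~imm));
  assert(Eon(rn, imm) == Eor(rn, ~imm));
  return 0;
}
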
158 158
159 void MacroAssembler::Mov(const Register& rd, uint64_t imm) { 159 void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
160 ASSERT(allow_macro_instructions_); 160 DCHECK(allow_macro_instructions_);
161 ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits()); 161 DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
162 ASSERT(!rd.IsZero()); 162 DCHECK(!rd.IsZero());
163 163
164 // TODO(all) extend to support more immediates. 164 // TODO(all) extend to support more immediates.
165 // 165 //
166 // Immediates on AArch64 can be produced using an initial value, and zero to 166 // Immediates on AArch64 can be produced using an initial value, and zero to
167 // three move-keep (movk) operations. 167 // three move-keep (movk) operations.
168 // 168 //
169 // Initial values can be generated with: 169 // Initial values can be generated with:
170 // 1. 64-bit move zero (movz). 170 // 1. 64-bit move zero (movz).
171 // 2. 32-bit move inverted (movn). 171 // 2. 32-bit move inverted (movn).
172 // 3. 64-bit move inverted. 172 // 3. 64-bit move inverted.
(...skipping 25 matching lines...)
198 invert_move = true; 198 invert_move = true;
199 } 199 }
200 200
201 // Mov instructions can't move immediate values into the stack pointer, so 201 // Mov instructions can't move immediate values into the stack pointer, so
202 // set up a temporary register, if needed. 202 // set up a temporary register, if needed.
203 UseScratchRegisterScope temps(this); 203 UseScratchRegisterScope temps(this);
204 Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd; 204 Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
205 205
206 // Iterate through the halfwords. Use movn/movz for the first non-ignored 206 // Iterate through the halfwords. Use movn/movz for the first non-ignored
207 // halfword, and movk for subsequent halfwords. 207 // halfword, and movk for subsequent halfwords.
208 ASSERT((reg_size % 16) == 0); 208 DCHECK((reg_size % 16) == 0);
209 bool first_mov_done = false; 209 bool first_mov_done = false;
210 for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) { 210 for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
211 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL; 211 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
212 if (imm16 != ignored_halfword) { 212 if (imm16 != ignored_halfword) {
213 if (!first_mov_done) { 213 if (!first_mov_done) {
214 if (invert_move) { 214 if (invert_move) {
215 movn(temp, (~imm16) & 0xffffL, 16 * i); 215 movn(temp, (~imm16) & 0xffffL, 16 * i);
216 } else { 216 } else {
217 movz(temp, imm16, 16 * i); 217 movz(temp, imm16, 16 * i);
218 } 218 }
219 first_mov_done = true; 219 first_mov_done = true;
220 } else { 220 } else {
221 // Construct a wider constant. 221 // Construct a wider constant.
222 movk(temp, imm16, 16 * i); 222 movk(temp, imm16, 16 * i);
223 } 223 }
224 } 224 }
225 } 225 }
226 ASSERT(first_mov_done); 226 DCHECK(first_mov_done);
227 227
228 // Move the temporary if the original destination register was the stack 228 // Move the temporary if the original destination register was the stack
229 // pointer. 229 // pointer.
230 if (rd.IsSP()) { 230 if (rd.IsSP()) {
231 mov(rd, temp); 231 mov(rd, temp);
232 AssertStackConsistency(); 232 AssertStackConsistency();
233 } 233 }
234 } 234 }
235 } 235 }
236 236
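
Note: the loop above materializes a 64-bit constant one 16-bit halfword at a time. A standalone restatement of that strategy (illustrative C++, not V8's API; prints an assembly-like trace):

#include <cstdint>
#include <cstdio>

// Pick movn when 0xffff halfwords dominate, movz otherwise; emit the first
// non-ignored halfword with movz/movn, then movk for each later halfword.
void DescribeMovSequence(uint64_t imm) {
  int clear = 0, set = 0;
  for (int i = 0; i < 4; i++) {
    uint16_t h = static_cast<uint16_t>(imm >> (16 * i));
    clear += (h == 0x0000);
    set += (h == 0xffff);
  }
  bool invert = set > clear;  // movn starts from all-ones
  uint16_t ignored = invert ? 0xffff : 0x0000;
  bool first = true;
  for (int i = 0; i < 4; i++) {
    uint16_t h = static_cast<uint16_t>(imm >> (16 * i));
    if (h == ignored) continue;
    if (first) {
      if (invert) {
        printf("movn x0, #0x%x, lsl #%d\n", static_cast<uint16_t>(~h), 16 * i);
      } else {
        printf("movz x0, #0x%x, lsl #%d\n", h, 16 * i);
      }
      first = false;
    } else {
      printf("movk x0, #0x%x, lsl #%d\n", h, 16 * i);
    }
  }
  if (first) printf(invert ? "movn x0, #0x0\n" : "movz x0, #0x0\n");
}

// DescribeMovSequence(0xffffffff12345678ull) prints:
//   movn x0, #0xa987, lsl #0
//   movk x0, #0x1234, lsl #16
int main() { DescribeMovSequence(0xffffffff12345678ull); return 0; }
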
237 237
238 void MacroAssembler::Mov(const Register& rd, 238 void MacroAssembler::Mov(const Register& rd,
239 const Operand& operand, 239 const Operand& operand,
240 DiscardMoveMode discard_mode) { 240 DiscardMoveMode discard_mode) {
241 ASSERT(allow_macro_instructions_); 241 DCHECK(allow_macro_instructions_);
242 ASSERT(!rd.IsZero()); 242 DCHECK(!rd.IsZero());
243 243
244 // Provide a swap register for instructions that need to write into the 244 // Provide a swap register for instructions that need to write into the
245 // system stack pointer (and can't do this inherently). 245 // system stack pointer (and can't do this inherently).
246 UseScratchRegisterScope temps(this); 246 UseScratchRegisterScope temps(this);
247 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd; 247 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
248 248
249 if (operand.NeedsRelocation(this)) { 249 if (operand.NeedsRelocation(this)) {
250 Ldr(dst, operand.immediate()); 250 Ldr(dst, operand.immediate());
251 251
252 } else if (operand.IsImmediate()) { 252 } else if (operand.IsImmediate()) {
(...skipping 25 matching lines...)
278 if (!rd.Is(operand.reg()) || (rd.Is32Bits() && 278 if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
279 (discard_mode == kDontDiscardForSameWReg))) { 279 (discard_mode == kDontDiscardForSameWReg))) {
280 Assembler::mov(rd, operand.reg()); 280 Assembler::mov(rd, operand.reg());
281 } 281 }
282 // This case can handle writes into the system stack pointer directly. 282 // This case can handle writes into the system stack pointer directly.
283 dst = rd; 283 dst = rd;
284 } 284 }
285 285
286 // Copy the result to the system stack pointer. 286 // Copy the result to the system stack pointer.
287 if (!dst.Is(rd)) { 287 if (!dst.Is(rd)) {
288 ASSERT(rd.IsSP()); 288 DCHECK(rd.IsSP());
289 Assembler::mov(rd, dst); 289 Assembler::mov(rd, dst);
290 } 290 }
291 } 291 }
292 292
293 293
294 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) { 294 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
295 ASSERT(allow_macro_instructions_); 295 DCHECK(allow_macro_instructions_);
296 296
297 if (operand.NeedsRelocation(this)) { 297 if (operand.NeedsRelocation(this)) {
298 Ldr(rd, operand.immediate()); 298 Ldr(rd, operand.immediate());
299 mvn(rd, rd); 299 mvn(rd, rd);
300 300
301 } else if (operand.IsImmediate()) { 301 } else if (operand.IsImmediate()) {
302 // Call the macro assembler for generic immediates. 302 // Call the macro assembler for generic immediates.
303 Mov(rd, ~operand.ImmediateValue()); 303 Mov(rd, ~operand.ImmediateValue());
304 304
305 } else if (operand.IsExtendedRegister()) { 305 } else if (operand.IsExtendedRegister()) {
306 // Emit two instructions for the extend case. This differs from Mov, as 306 // Emit two instructions for the extend case. This differs from Mov, as
307 // the extend and invert can't be achieved in one instruction. 307 // the extend and invert can't be achieved in one instruction.
308 EmitExtendShift(rd, operand.reg(), operand.extend(), 308 EmitExtendShift(rd, operand.reg(), operand.extend(),
309 operand.shift_amount()); 309 operand.shift_amount());
310 mvn(rd, rd); 310 mvn(rd, rd);
311 311
312 } else { 312 } else {
313 mvn(rd, operand); 313 mvn(rd, operand);
314 } 314 }
315 } 315 }
316 316
317 317
318 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) { 318 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
319 ASSERT((reg_size % 8) == 0); 319 DCHECK((reg_size % 8) == 0);
320 int count = 0; 320 int count = 0;
321 for (unsigned i = 0; i < (reg_size / 16); i++) { 321 for (unsigned i = 0; i < (reg_size / 16); i++) {
322 if ((imm & 0xffff) == 0) { 322 if ((imm & 0xffff) == 0) {
323 count++; 323 count++;
324 } 324 }
325 imm >>= 16; 325 imm >>= 16;
326 } 326 }
327 return count; 327 return count;
328 } 328 }
329 329
330 330
331 // The movz instruction can generate immediates containing an arbitrary 16-bit 331 // The movz instruction can generate immediates containing an arbitrary 16-bit
332 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000. 332 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
333 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) { 333 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
334 ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits)); 334 DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
335 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1); 335 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
336 } 336 }
337 337
338 338
339 // The movn instruction can generate immediates containing an arbitrary 16-bit 339 // The movn instruction can generate immediates containing an arbitrary 16-bit
340 // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff. 340 // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
341 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) { 341 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
342 return IsImmMovz(~imm, reg_size); 342 return IsImmMovz(~imm, reg_size);
343 } 343 }
344 344
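
Note: both predicates reduce to counting clear halfwords, so they can be checked in isolation. A self-contained restatement (illustrative, mirrors the functions above):

#include <cassert>
#include <cstdint>

// movz-able: at most one 16-bit halfword is non-zero.
// movn-able: the complement is movz-able.
unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  unsigned count = 0;
  for (unsigned i = 0; i < reg_size / 16; i++) {
    if ((imm & 0xffff) == 0) count++;
    imm >>= 16;
  }
  return count;
}

bool IsImmMovz(uint64_t imm, unsigned reg_size) {
  return CountClearHalfWords(imm, reg_size) >= (reg_size / 16) - 1;
}

bool IsImmMovn(uint64_t imm, unsigned reg_size) {
  return IsImmMovz(~imm, reg_size);
}

int main() {
  assert(IsImmMovz(0x0000123400000000ull, 64));   // single non-zero halfword
  assert(IsImmMovn(0xffff1234ffffffffull, 64));   // single non-0xffff halfword
  assert(!IsImmMovz(0x0000123400005678ull, 64));  // needs a movk as well
  return 0;
}
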
345 345
346 void MacroAssembler::ConditionalCompareMacro(const Register& rn, 346 void MacroAssembler::ConditionalCompareMacro(const Register& rn,
347 const Operand& operand, 347 const Operand& operand,
348 StatusFlags nzcv, 348 StatusFlags nzcv,
349 Condition cond, 349 Condition cond,
350 ConditionalCompareOp op) { 350 ConditionalCompareOp op) {
351 ASSERT((cond != al) && (cond != nv)); 351 DCHECK((cond != al) && (cond != nv));
352 if (operand.NeedsRelocation(this)) { 352 if (operand.NeedsRelocation(this)) {
353 UseScratchRegisterScope temps(this); 353 UseScratchRegisterScope temps(this);
354 Register temp = temps.AcquireX(); 354 Register temp = temps.AcquireX();
355 Ldr(temp, operand.immediate()); 355 Ldr(temp, operand.immediate());
356 ConditionalCompareMacro(rn, temp, nzcv, cond, op); 356 ConditionalCompareMacro(rn, temp, nzcv, cond, op);
357 357
358 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) || 358 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
359 (operand.IsImmediate() && 359 (operand.IsImmediate() &&
360 IsImmConditionalCompare(operand.ImmediateValue()))) { 360 IsImmConditionalCompare(operand.ImmediateValue()))) {
361 // The immediate can be encoded in the instruction, or the operand is an 361 // The immediate can be encoded in the instruction, or the operand is an
362 // unshifted register: call the assembler. 362 // unshifted register: call the assembler.
363 ConditionalCompare(rn, operand, nzcv, cond, op); 363 ConditionalCompare(rn, operand, nzcv, cond, op);
364 364
365 } else { 365 } else {
366 // The operand isn't directly supported by the instruction: perform the 366 // The operand isn't directly supported by the instruction: perform the
367 // operation on a temporary register. 367 // operation on a temporary register.
368 UseScratchRegisterScope temps(this); 368 UseScratchRegisterScope temps(this);
369 Register temp = temps.AcquireSameSizeAs(rn); 369 Register temp = temps.AcquireSameSizeAs(rn);
370 Mov(temp, operand); 370 Mov(temp, operand);
371 ConditionalCompare(rn, temp, nzcv, cond, op); 371 ConditionalCompare(rn, temp, nzcv, cond, op);
372 } 372 }
373 } 373 }
374 374
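
Note: the encodable-immediate test used above is narrow. As an assumption for illustration (not quoted from this file), ARM64 ccmp/ccmn carry only a 5-bit unsigned immediate, so anything outside 0..31 takes the scratch-register path:

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for IsImmConditionalCompare: true iff the value
// fits the ccmp/ccmn imm5 field (assumed 0..31).
bool FitsConditionalCompareImm(int64_t imm) { return imm >= 0 && imm < 32; }

int main() {
  assert(FitsConditionalCompareImm(31));
  assert(!FitsConditionalCompareImm(32));  // materialized into a temp first
  assert(!FitsConditionalCompareImm(-1));
  return 0;
}
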
375 375
376 void MacroAssembler::Csel(const Register& rd, 376 void MacroAssembler::Csel(const Register& rd,
377 const Register& rn, 377 const Register& rn,
378 const Operand& operand, 378 const Operand& operand,
379 Condition cond) { 379 Condition cond) {
380 ASSERT(allow_macro_instructions_); 380 DCHECK(allow_macro_instructions_);
381 ASSERT(!rd.IsZero()); 381 DCHECK(!rd.IsZero());
382 ASSERT((cond != al) && (cond != nv)); 382 DCHECK((cond != al) && (cond != nv));
383 if (operand.IsImmediate()) { 383 if (operand.IsImmediate()) {
384 // Immediate argument. Handle special cases of 0, 1 and -1 using zero 384 // Immediate argument. Handle special cases of 0, 1 and -1 using zero
385 // register. 385 // register.
386 int64_t imm = operand.ImmediateValue(); 386 int64_t imm = operand.ImmediateValue();
387 Register zr = AppropriateZeroRegFor(rn); 387 Register zr = AppropriateZeroRegFor(rn);
388 if (imm == 0) { 388 if (imm == 0) {
389 csel(rd, rn, zr, cond); 389 csel(rd, rn, zr, cond);
390 } else if (imm == 1) { 390 } else if (imm == 1) {
391 csinc(rd, rn, zr, cond); 391 csinc(rd, rn, zr, cond);
392 } else if (imm == -1) { 392 } else if (imm == -1) {
(...skipping 111 matching lines...)
504 AddSub(rd, rn, operand, S, op); 504 AddSub(rd, rn, operand, S, op);
505 } 505 }
506 } 506 }
507 507
508 508
509 void MacroAssembler::AddSubWithCarryMacro(const Register& rd, 509 void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
510 const Register& rn, 510 const Register& rn,
511 const Operand& operand, 511 const Operand& operand,
512 FlagsUpdate S, 512 FlagsUpdate S,
513 AddSubWithCarryOp op) { 513 AddSubWithCarryOp op) {
514 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 514 DCHECK(rd.SizeInBits() == rn.SizeInBits());
515 UseScratchRegisterScope temps(this); 515 UseScratchRegisterScope temps(this);
516 516
517 if (operand.NeedsRelocation(this)) { 517 if (operand.NeedsRelocation(this)) {
518 Register temp = temps.AcquireX(); 518 Register temp = temps.AcquireX();
519 Ldr(temp, operand.immediate()); 519 Ldr(temp, operand.immediate());
520 AddSubWithCarryMacro(rd, rn, temp, S, op); 520 AddSubWithCarryMacro(rd, rn, temp, S, op);
521 521
522 } else if (operand.IsImmediate() || 522 } else if (operand.IsImmediate() ||
523 (operand.IsShiftedRegister() && (operand.shift() == ROR))) { 523 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
524 // Add/sub with carry (immediate or ROR shifted register). 524 // Add/sub with carry (immediate or ROR shifted register).
525 Register temp = temps.AcquireSameSizeAs(rn); 525 Register temp = temps.AcquireSameSizeAs(rn);
526 Mov(temp, operand); 526 Mov(temp, operand);
527 AddSubWithCarry(rd, rn, temp, S, op); 527 AddSubWithCarry(rd, rn, temp, S, op);
528 528
529 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) { 529 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
530 // Add/sub with carry (shifted register). 530 // Add/sub with carry (shifted register).
531 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); 531 DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
532 ASSERT(operand.shift() != ROR); 532 DCHECK(operand.shift() != ROR);
533 ASSERT(is_uintn(operand.shift_amount(), 533 DCHECK(is_uintn(operand.shift_amount(),
534 rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2 534 rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
535 : kWRegSizeInBitsLog2)); 535 : kWRegSizeInBitsLog2));
536 Register temp = temps.AcquireSameSizeAs(rn); 536 Register temp = temps.AcquireSameSizeAs(rn);
537 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount()); 537 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
538 AddSubWithCarry(rd, rn, temp, S, op); 538 AddSubWithCarry(rd, rn, temp, S, op);
539 539
540 } else if (operand.IsExtendedRegister()) { 540 } else if (operand.IsExtendedRegister()) {
541 // Add/sub with carry (extended register). 541 // Add/sub with carry (extended register).
542 ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits()); 542 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
543 // Add/sub extended supports a shift <= 4. We want to support exactly the 543 // Add/sub extended supports a shift <= 4. We want to support exactly the
544 // same modes. 544 // same modes.
545 ASSERT(operand.shift_amount() <= 4); 545 DCHECK(operand.shift_amount() <= 4);
546 ASSERT(operand.reg().Is64Bits() || 546 DCHECK(operand.reg().Is64Bits() ||
547 ((operand.extend() != UXTX) && (operand.extend() != SXTX))); 547 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
548 Register temp = temps.AcquireSameSizeAs(rn); 548 Register temp = temps.AcquireSameSizeAs(rn);
549 EmitExtendShift(temp, operand.reg(), operand.extend(), 549 EmitExtendShift(temp, operand.reg(), operand.extend(),
550 operand.shift_amount()); 550 operand.shift_amount());
551 AddSubWithCarry(rd, rn, temp, S, op); 551 AddSubWithCarry(rd, rn, temp, S, op);
552 552
553 } else { 553 } else {
554 // The addressing mode is directly supported by the instruction. 554 // The addressing mode is directly supported by the instruction.
555 AddSubWithCarry(rd, rn, operand, S, op); 555 AddSubWithCarry(rd, rn, operand, S, op);
556 } 556 }
(...skipping 28 matching lines...)
585 } else { 585 } else {
586 // Encodable in one load/store instruction. 586 // Encodable in one load/store instruction.
587 LoadStore(rt, addr, op); 587 LoadStore(rt, addr, op);
588 } 588 }
589 } 589 }
590 590
591 591
592 void MacroAssembler::Load(const Register& rt, 592 void MacroAssembler::Load(const Register& rt,
593 const MemOperand& addr, 593 const MemOperand& addr,
594 Representation r) { 594 Representation r) {
595 ASSERT(!r.IsDouble()); 595 DCHECK(!r.IsDouble());
596 596
597 if (r.IsInteger8()) { 597 if (r.IsInteger8()) {
598 Ldrsb(rt, addr); 598 Ldrsb(rt, addr);
599 } else if (r.IsUInteger8()) { 599 } else if (r.IsUInteger8()) {
600 Ldrb(rt, addr); 600 Ldrb(rt, addr);
601 } else if (r.IsInteger16()) { 601 } else if (r.IsInteger16()) {
602 Ldrsh(rt, addr); 602 Ldrsh(rt, addr);
603 } else if (r.IsUInteger16()) { 603 } else if (r.IsUInteger16()) {
604 Ldrh(rt, addr); 604 Ldrh(rt, addr);
605 } else if (r.IsInteger32()) { 605 } else if (r.IsInteger32()) {
606 Ldr(rt.W(), addr); 606 Ldr(rt.W(), addr);
607 } else { 607 } else {
608 ASSERT(rt.Is64Bits()); 608 DCHECK(rt.Is64Bits());
609 Ldr(rt, addr); 609 Ldr(rt, addr);
610 } 610 }
611 } 611 }
612 612
613 613
614 void MacroAssembler::Store(const Register& rt, 614 void MacroAssembler::Store(const Register& rt,
615 const MemOperand& addr, 615 const MemOperand& addr,
616 Representation r) { 616 Representation r) {
617 ASSERT(!r.IsDouble()); 617 DCHECK(!r.IsDouble());
618 618
619 if (r.IsInteger8() || r.IsUInteger8()) { 619 if (r.IsInteger8() || r.IsUInteger8()) {
620 Strb(rt, addr); 620 Strb(rt, addr);
621 } else if (r.IsInteger16() || r.IsUInteger16()) { 621 } else if (r.IsInteger16() || r.IsUInteger16()) {
622 Strh(rt, addr); 622 Strh(rt, addr);
623 } else if (r.IsInteger32()) { 623 } else if (r.IsInteger32()) {
624 Str(rt.W(), addr); 624 Str(rt.W(), addr);
625 } else { 625 } else {
626 ASSERT(rt.Is64Bits()); 626 DCHECK(rt.Is64Bits());
627 if (r.IsHeapObject()) { 627 if (r.IsHeapObject()) {
628 AssertNotSmi(rt); 628 AssertNotSmi(rt);
629 } else if (r.IsSmi()) { 629 } else if (r.IsSmi()) {
630 AssertSmi(rt); 630 AssertSmi(rt);
631 } 631 }
632 Str(rt, addr); 632 Str(rt, addr);
633 } 633 }
634 } 634 }
635 635
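
Note: Load and Store pick an access width and signedness from the Representation. A rough map of that dispatch (illustrative C++; the enum and strings are mine, not V8's):

#include <cstdio>

enum class Rep { Int8, UInt8, Int16, UInt16, Int32, Tagged };

// Which load each representation selects in the code above.
const char* LoadFor(Rep r) {
  switch (r) {
    case Rep::Int8:   return "ldrsb";             // sign-extending byte
    case Rep::UInt8:  return "ldrb";              // zero-extending byte
    case Rep::Int16:  return "ldrsh";             // sign-extending halfword
    case Rep::UInt16: return "ldrh";              // zero-extending halfword
    case Rep::Int32:  return "ldr (W register)";  // 32-bit word
    case Rep::Tagged: return "ldr (X register)";  // full 64-bit value
  }
  return "";
}

int main() { printf("%s\n", LoadFor(Rep::Int16)); return 0; }
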
636 636
(...skipping 17 matching lines...)
654 // Also maintain the next pool check. 654 // Also maintain the next pool check.
655 next_veneer_pool_check_ = 655 next_veneer_pool_check_ =
656 Min(next_veneer_pool_check_, 656 Min(next_veneer_pool_check_,
657 max_reachable_pc - kVeneerDistanceCheckMargin); 657 max_reachable_pc - kVeneerDistanceCheckMargin);
658 } 658 }
659 return need_longer_range; 659 return need_longer_range;
660 } 660 }
661 661
662 662
663 void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) { 663 void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
664 ASSERT(allow_macro_instructions_); 664 DCHECK(allow_macro_instructions_);
665 ASSERT(!rd.IsZero()); 665 DCHECK(!rd.IsZero());
666 666
667 if (hint == kAdrNear) { 667 if (hint == kAdrNear) {
668 adr(rd, label); 668 adr(rd, label);
669 return; 669 return;
670 } 670 }
671 671
672 ASSERT(hint == kAdrFar); 672 DCHECK(hint == kAdrFar);
673 if (label->is_bound()) { 673 if (label->is_bound()) {
674 int label_offset = label->pos() - pc_offset(); 674 int label_offset = label->pos() - pc_offset();
675 if (Instruction::IsValidPCRelOffset(label_offset)) { 675 if (Instruction::IsValidPCRelOffset(label_offset)) {
676 adr(rd, label); 676 adr(rd, label);
677 } else { 677 } else {
678 ASSERT(label_offset <= 0); 678 DCHECK(label_offset <= 0);
679 int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1)); 679 int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
680 adr(rd, min_adr_offset); 680 adr(rd, min_adr_offset);
681 Add(rd, rd, label_offset - min_adr_offset); 681 Add(rd, rd, label_offset - min_adr_offset);
682 } 682 }
683 } else { 683 } else {
684 UseScratchRegisterScope temps(this); 684 UseScratchRegisterScope temps(this);
685 Register scratch = temps.AcquireX(); 685 Register scratch = temps.AcquireX();
686 686
687 InstructionAccurateScope scope( 687 InstructionAccurateScope scope(
688 this, PatchingAssembler::kAdrFarPatchableNInstrs); 688 this, PatchingAssembler::kAdrFarPatchableNInstrs);
689 adr(rd, label); 689 adr(rd, label);
690 for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) { 690 for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
691 nop(ADR_FAR_NOP); 691 nop(ADR_FAR_NOP);
692 } 692 }
693 movz(scratch, 0); 693 movz(scratch, 0);
694 } 694 }
695 } 695 }
696 696
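
Note: the bound-label branch of Adr splits an out-of-range offset into the most negative reachable adr plus an Add fix-up. A sketch of that arithmetic, assuming ImmPCRelRangeBitwidth is 21 (the adr immediate width, roughly +/-1MB):

#include <cassert>
#include <cstdint>

constexpr int kImmPCRelRangeBitwidth = 21;  // assumption, see note above

bool IsValidPCRelOffset(int64_t offset) {
  return offset >= -(1 << (kImmPCRelRangeBitwidth - 1)) &&
         offset < (1 << (kImmPCRelRangeBitwidth - 1));
}

int main() {
  int64_t label_offset = -(1 << 22);  // too far back for a single adr
  assert(!IsValidPCRelOffset(label_offset));
  int64_t min_adr_offset = -(1 << (kImmPCRelRangeBitwidth - 1));
  int64_t fixup = label_offset - min_adr_offset;   // what Add(rd, rd, ...) applies
  assert(min_adr_offset + fixup == label_offset);  // the two-step split is exact
  return 0;
}
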
697 697
698 void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) { 698 void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
699 ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) && 699 DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
700 (bit == -1 || type >= kBranchTypeFirstUsingBit)); 700 (bit == -1 || type >= kBranchTypeFirstUsingBit));
701 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) { 701 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
702 B(static_cast<Condition>(type), label); 702 B(static_cast<Condition>(type), label);
703 } else { 703 } else {
704 switch (type) { 704 switch (type) {
705 case always: B(label); break; 705 case always: B(label); break;
706 case never: break; 706 case never: break;
707 case reg_zero: Cbz(reg, label); break; 707 case reg_zero: Cbz(reg, label); break;
708 case reg_not_zero: Cbnz(reg, label); break; 708 case reg_not_zero: Cbnz(reg, label); break;
709 case reg_bit_clear: Tbz(reg, bit, label); break; 709 case reg_bit_clear: Tbz(reg, bit, label); break;
710 case reg_bit_set: Tbnz(reg, bit, label); break; 710 case reg_bit_set: Tbnz(reg, bit, label); break;
711 default: 711 default:
712 UNREACHABLE(); 712 UNREACHABLE();
713 } 713 }
714 } 714 }
715 } 715 }
716 716
717 717
718 void MacroAssembler::B(Label* label, Condition cond) { 718 void MacroAssembler::B(Label* label, Condition cond) {
719 ASSERT(allow_macro_instructions_); 719 DCHECK(allow_macro_instructions_);
720 ASSERT((cond != al) && (cond != nv)); 720 DCHECK((cond != al) && (cond != nv));
721 721
722 Label done; 722 Label done;
723 bool need_extra_instructions = 723 bool need_extra_instructions =
724 NeedExtraInstructionsOrRegisterBranch(label, CondBranchType); 724 NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
725 725
726 if (need_extra_instructions) { 726 if (need_extra_instructions) {
727 b(&done, NegateCondition(cond)); 727 b(&done, NegateCondition(cond));
728 B(label); 728 B(label);
729 } else { 729 } else {
730 b(label, cond); 730 b(label, cond);
731 } 731 }
732 bind(&done); 732 bind(&done);
733 } 733 }
734 734
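
Note: when the veneer check reports that the label may be out of range, the macro inverts the condition and hops over an unconditional branch, trading one extra instruction for much more reach. A sketch of the rewrite (assumed ranges: b.cond about +/-1MB, plain b about +/-128MB):

#include <cstdio>

// Prints the two shapes the code above can emit for "branch to label if eq".
void EmitCondBranch(bool label_may_be_far) {
  if (label_may_be_far) {
    printf("  b.ne  done     ; NegateCondition(cond), short hop\n");
    printf("  b     label    ; unconditional, long range\n");
    printf("done:\n");
  } else {
    printf("  b.eq  label    ; single instruction, in range\n");
  }
}

int main() { EmitCondBranch(true); return 0; }
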
735 735
736 void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) { 736 void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
737 ASSERT(allow_macro_instructions_); 737 DCHECK(allow_macro_instructions_);
738 738
739 Label done; 739 Label done;
740 bool need_extra_instructions = 740 bool need_extra_instructions =
741 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType); 741 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
742 742
743 if (need_extra_instructions) { 743 if (need_extra_instructions) {
744 tbz(rt, bit_pos, &done); 744 tbz(rt, bit_pos, &done);
745 B(label); 745 B(label);
746 } else { 746 } else {
747 tbnz(rt, bit_pos, label); 747 tbnz(rt, bit_pos, label);
748 } 748 }
749 bind(&done); 749 bind(&done);
750 } 750 }
751 751
752 752
753 void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) { 753 void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
754 ASSERT(allow_macro_instructions_); 754 DCHECK(allow_macro_instructions_);
755 755
756 Label done; 756 Label done;
757 bool need_extra_instructions = 757 bool need_extra_instructions =
758 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType); 758 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
759 759
760 if (need_extra_instructions) { 760 if (need_extra_instructions) {
761 tbnz(rt, bit_pos, &done); 761 tbnz(rt, bit_pos, &done);
762 B(label); 762 B(label);
763 } else { 763 } else {
764 tbz(rt, bit_pos, label); 764 tbz(rt, bit_pos, label);
765 } 765 }
766 bind(&done); 766 bind(&done);
767 } 767 }
768 768
769 769
770 void MacroAssembler::Cbnz(const Register& rt, Label* label) { 770 void MacroAssembler::Cbnz(const Register& rt, Label* label) {
771 ASSERT(allow_macro_instructions_); 771 DCHECK(allow_macro_instructions_);
772 772
773 Label done; 773 Label done;
774 bool need_extra_instructions = 774 bool need_extra_instructions =
775 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType); 775 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
776 776
777 if (need_extra_instructions) { 777 if (need_extra_instructions) {
778 cbz(rt, &done); 778 cbz(rt, &done);
779 B(label); 779 B(label);
780 } else { 780 } else {
781 cbnz(rt, label); 781 cbnz(rt, label);
782 } 782 }
783 bind(&done); 783 bind(&done);
784 } 784 }
785 785
786 786
787 void MacroAssembler::Cbz(const Register& rt, Label* label) { 787 void MacroAssembler::Cbz(const Register& rt, Label* label) {
788 ASSERT(allow_macro_instructions_); 788 DCHECK(allow_macro_instructions_);
789 789
790 Label done; 790 Label done;
791 bool need_extra_instructions = 791 bool need_extra_instructions =
792 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType); 792 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
793 793
794 if (need_extra_instructions) { 794 if (need_extra_instructions) {
795 cbnz(rt, &done); 795 cbnz(rt, &done);
796 B(label); 796 B(label);
797 } else { 797 } else {
798 cbz(rt, label); 798 cbz(rt, label);
799 } 799 }
800 bind(&done); 800 bind(&done);
801 } 801 }
802 802
803 803
804 // Pseudo-instructions. 804 // Pseudo-instructions.
805 805
806 806
807 void MacroAssembler::Abs(const Register& rd, const Register& rm, 807 void MacroAssembler::Abs(const Register& rd, const Register& rm,
808 Label* is_not_representable, 808 Label* is_not_representable,
809 Label* is_representable) { 809 Label* is_representable) {
810 ASSERT(allow_macro_instructions_); 810 DCHECK(allow_macro_instructions_);
811 ASSERT(AreSameSizeAndType(rd, rm)); 811 DCHECK(AreSameSizeAndType(rd, rm));
812 812
813 Cmp(rm, 1); 813 Cmp(rm, 1);
814 Cneg(rd, rm, lt); 814 Cneg(rd, rm, lt);
815 815
816 // If the comparison sets the v flag, the input was the smallest value 816 // If the comparison sets the v flag, the input was the smallest value
817 // representable by rm, and the mathematical result of abs(rm) is not 817 // representable by rm, and the mathematical result of abs(rm) is not
818 // representable using two's complement. 818 // representable using two's complement.
819 if ((is_not_representable != NULL) && (is_representable != NULL)) { 819 if ((is_not_representable != NULL) && (is_representable != NULL)) {
820 B(is_not_representable, vs); 820 B(is_not_representable, vs);
821 B(is_representable); 821 B(is_representable);
822 } else if (is_not_representable != NULL) { 822 } else if (is_not_representable != NULL) {
823 B(is_not_representable, vs); 823 B(is_not_representable, vs);
824 } else if (is_representable != NULL) { 824 } else if (is_representable != NULL) {
825 B(is_representable, vc); 825 B(is_representable, vc);
826 } 826 }
827 } 827 }
828 828
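
Note: Cmp(rm, 1) computes rm - 1, which overflows (sets V) exactly when rm is the most negative value, the one input whose absolute value is not representable. A standalone model of that edge case:

#include <cassert>
#include <cstdint>
#include <limits>

// abs(rm) is representable for every int64_t except INT64_MIN, whose
// negation wraps back to itself in two's complement.
bool AbsIsRepresentable(int64_t rm, int64_t* out) {
  if (rm == std::numeric_limits<int64_t>::min()) return false;  // V would be set
  *out = rm < 0 ? -rm : rm;
  return true;
}

int main() {
  int64_t r;
  assert(AbsIsRepresentable(-42, &r) && r == 42);
  assert(!AbsIsRepresentable(std::numeric_limits<int64_t>::min(), &r));
  return 0;
}
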
829 829
830 // Abstracted stack operations. 830 // Abstracted stack operations.
831 831
832 832
833 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, 833 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
834 const CPURegister& src2, const CPURegister& src3) { 834 const CPURegister& src2, const CPURegister& src3) {
835 ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); 835 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
836 836
837 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid(); 837 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
838 int size = src0.SizeInBytes(); 838 int size = src0.SizeInBytes();
839 839
840 PushPreamble(count, size); 840 PushPreamble(count, size);
841 PushHelper(count, size, src0, src1, src2, src3); 841 PushHelper(count, size, src0, src1, src2, src3);
842 } 842 }
843 843
844 844
845 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1, 845 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
846 const CPURegister& src2, const CPURegister& src3, 846 const CPURegister& src2, const CPURegister& src3,
847 const CPURegister& src4, const CPURegister& src5, 847 const CPURegister& src4, const CPURegister& src5,
848 const CPURegister& src6, const CPURegister& src7) { 848 const CPURegister& src6, const CPURegister& src7) {
849 ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7)); 849 DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
850 850
851 int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid(); 851 int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
852 int size = src0.SizeInBytes(); 852 int size = src0.SizeInBytes();
853 853
854 PushPreamble(count, size); 854 PushPreamble(count, size);
855 PushHelper(4, size, src0, src1, src2, src3); 855 PushHelper(4, size, src0, src1, src2, src3);
856 PushHelper(count - 4, size, src4, src5, src6, src7); 856 PushHelper(count - 4, size, src4, src5, src6, src7);
857 } 857 }
858 858
859 859
860 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1, 860 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
861 const CPURegister& dst2, const CPURegister& dst3) { 861 const CPURegister& dst2, const CPURegister& dst3) {
862 // It is not valid to pop into the same register more than once in one 862 // It is not valid to pop into the same register more than once in one
863 // instruction, not even into the zero register. 863 // instruction, not even into the zero register.
864 ASSERT(!AreAliased(dst0, dst1, dst2, dst3)); 864 DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
865 ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); 865 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
866 ASSERT(dst0.IsValid()); 866 DCHECK(dst0.IsValid());
867 867
868 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid(); 868 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
869 int size = dst0.SizeInBytes(); 869 int size = dst0.SizeInBytes();
870 870
871 PopHelper(count, size, dst0, dst1, dst2, dst3); 871 PopHelper(count, size, dst0, dst1, dst2, dst3);
872 PopPostamble(count, size); 872 PopPostamble(count, size);
873 } 873 }
874 874
875 875
876 void MacroAssembler::Push(const Register& src0, const FPRegister& src1) { 876 void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
(...skipping 124 matching lines...)
1001 count -= 4; 1001 count -= 4;
1002 } 1002 }
1003 if (count >= 2) { 1003 if (count >= 2) {
1004 PushHelper(2, size, src, src, NoReg, NoReg); 1004 PushHelper(2, size, src, src, NoReg, NoReg);
1005 count -= 2; 1005 count -= 2;
1006 } 1006 }
1007 if (count == 1) { 1007 if (count == 1) {
1008 PushHelper(1, size, src, NoReg, NoReg, NoReg); 1008 PushHelper(1, size, src, NoReg, NoReg, NoReg);
1009 count -= 1; 1009 count -= 1;
1010 } 1010 }
1011 ASSERT(count == 0); 1011 DCHECK(count == 0);
1012 } 1012 }
1013 1013
1014 1014
1015 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) { 1015 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
1016 PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes()))); 1016 PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
1017 1017
1018 UseScratchRegisterScope temps(this); 1018 UseScratchRegisterScope temps(this);
1019 Register temp = temps.AcquireSameSizeAs(count); 1019 Register temp = temps.AcquireSameSizeAs(count);
1020 1020
1021 if (FLAG_optimize_for_size) { 1021 if (FLAG_optimize_for_size) {
(...skipping 37 matching lines...)
1059 1059
1060 1060
1061 void MacroAssembler::PushHelper(int count, int size, 1061 void MacroAssembler::PushHelper(int count, int size,
1062 const CPURegister& src0, 1062 const CPURegister& src0,
1063 const CPURegister& src1, 1063 const CPURegister& src1,
1064 const CPURegister& src2, 1064 const CPURegister& src2,
1065 const CPURegister& src3) { 1065 const CPURegister& src3) {
1066 // Ensure that we don't unintentionally modify scratch or debug registers. 1066 // Ensure that we don't unintentionally modify scratch or debug registers.
1067 InstructionAccurateScope scope(this); 1067 InstructionAccurateScope scope(this);
1068 1068
1069 ASSERT(AreSameSizeAndType(src0, src1, src2, src3)); 1069 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
1070 ASSERT(size == src0.SizeInBytes()); 1070 DCHECK(size == src0.SizeInBytes());
1071 1071
1072 // When pushing multiple registers, the store order is chosen such that 1072 // When pushing multiple registers, the store order is chosen such that
1073 // Push(a, b) is equivalent to Push(a) followed by Push(b). 1073 // Push(a, b) is equivalent to Push(a) followed by Push(b).
1074 switch (count) { 1074 switch (count) {
1075 case 1: 1075 case 1:
1076 ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone()); 1076 DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
1077 str(src0, MemOperand(StackPointer(), -1 * size, PreIndex)); 1077 str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
1078 break; 1078 break;
1079 case 2: 1079 case 2:
1080 ASSERT(src2.IsNone() && src3.IsNone()); 1080 DCHECK(src2.IsNone() && src3.IsNone());
1081 stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex)); 1081 stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
1082 break; 1082 break;
1083 case 3: 1083 case 3:
1084 ASSERT(src3.IsNone()); 1084 DCHECK(src3.IsNone());
1085 stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex)); 1085 stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
1086 str(src0, MemOperand(StackPointer(), 2 * size)); 1086 str(src0, MemOperand(StackPointer(), 2 * size));
1087 break; 1087 break;
1088 case 4: 1088 case 4:
1089 // Skip over 4 * size, then fill in the gap. This allows four W registers 1089 // Skip over 4 * size, then fill in the gap. This allows four W registers
1090 // to be pushed using csp, whilst maintaining 16-byte alignment for csp 1090 // to be pushed using csp, whilst maintaining 16-byte alignment for csp
1091 // at all times. 1091 // at all times.
1092 stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex)); 1092 stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
1093 stp(src1, src0, MemOperand(StackPointer(), 2 * size)); 1093 stp(src1, src0, MemOperand(StackPointer(), 2 * size));
1094 break; 1094 break;
1095 default: 1095 default:
1096 UNREACHABLE(); 1096 UNREACHABLE();
1097 } 1097 }
1098 } 1098 }
1099 1099
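
Note: the switch above keeps one contract: on the descending stack, pushing registers together leaves the same layout as pushing them one at a time. A tiny model of that contract (illustrative C++, front of the vector is the top of the stack):

#include <cassert>
#include <vector>

struct ModelStack {
  std::vector<int> mem;
  void Push1(int a) { mem.insert(mem.begin(), a); }
  // Mirrors stp(src1, src0, [sp, #-2*size]!): src0 lands at the higher
  // address, src1 at the lower (top) slot.
  void Push2(int a, int b) { mem.insert(mem.begin(), {b, a}); }
};

int main() {
  ModelStack one_at_a_time, paired;
  one_at_a_time.Push1(1);
  one_at_a_time.Push1(2);
  paired.Push2(1, 2);
  assert(one_at_a_time.mem == paired.mem);  // both are {2, 1}
  return 0;
}
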
1100 1100
1101 void MacroAssembler::PopHelper(int count, int size, 1101 void MacroAssembler::PopHelper(int count, int size,
1102 const CPURegister& dst0, 1102 const CPURegister& dst0,
1103 const CPURegister& dst1, 1103 const CPURegister& dst1,
1104 const CPURegister& dst2, 1104 const CPURegister& dst2,
1105 const CPURegister& dst3) { 1105 const CPURegister& dst3) {
1106 // Ensure that we don't unintentionally modify scratch or debug registers. 1106 // Ensure that we don't unintentionally modify scratch or debug registers.
1107 InstructionAccurateScope scope(this); 1107 InstructionAccurateScope scope(this);
1108 1108
1109 ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3)); 1109 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1110 ASSERT(size == dst0.SizeInBytes()); 1110 DCHECK(size == dst0.SizeInBytes());
1111 1111
1112 // When popping multiple registers, the load order is chosen such that 1112 // When popping multiple registers, the load order is chosen such that
1113 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b). 1113 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
1114 switch (count) { 1114 switch (count) {
1115 case 1: 1115 case 1:
1116 ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone()); 1116 DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
1117 ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex)); 1117 ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
1118 break; 1118 break;
1119 case 2: 1119 case 2:
1120 ASSERT(dst2.IsNone() && dst3.IsNone()); 1120 DCHECK(dst2.IsNone() && dst3.IsNone());
1121 ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex)); 1121 ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
1122 break; 1122 break;
1123 case 3: 1123 case 3:
1124 ASSERT(dst3.IsNone()); 1124 DCHECK(dst3.IsNone());
1125 ldr(dst2, MemOperand(StackPointer(), 2 * size)); 1125 ldr(dst2, MemOperand(StackPointer(), 2 * size));
1126 ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex)); 1126 ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
1127 break; 1127 break;
1128 case 4: 1128 case 4:
1129 // Load the higher addresses first, then load the lower addresses and 1129 // Load the higher addresses first, then load the lower addresses and
1130 // skip the whole block in the second instruction. This allows four W 1130 // skip the whole block in the second instruction. This allows four W
1131 // registers to be popped using csp, whilst maintaining 16-byte alignment 1131 // registers to be popped using csp, whilst maintaining 16-byte alignment
1132 // for csp at all times. 1132 // for csp at all times.
1133 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size)); 1133 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
1134 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex)); 1134 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
1135 break; 1135 break;
1136 default: 1136 default:
1137 UNREACHABLE(); 1137 UNREACHABLE();
1138 } 1138 }
1139 } 1139 }
1140 1140
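
Note: PopHelper honours the mirror-image contract, and the count == 4 case reads the higher pair first so a single post-indexed ldp can retire the whole 4 * size block, keeping csp 16-byte aligned even for W registers. The pop side of the model above:

#include <cassert>
#include <vector>

struct ModelStack {
  std::vector<int> mem{2, 1};  // layout left by pushing 1 then 2
  int Pop1() { int v = mem.front(); mem.erase(mem.begin()); return v; }
  // Mirrors ldp(dst0, dst1, [sp], #2*size): dst0 reads the lower address.
  void Pop2(int* a, int* b) { *a = Pop1(); *b = Pop1(); }
};

int main() {
  ModelStack s;
  int a = 0, b = 0;
  s.Pop2(&a, &b);
  assert(a == 2 && b == 1);  // Pop(a, b) behaves as Pop(a) then Pop(b)
  return 0;
}
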
1141 1141
1142 void MacroAssembler::PushPreamble(Operand total_size) { 1142 void MacroAssembler::PushPreamble(Operand total_size) {
1143 if (csp.Is(StackPointer())) { 1143 if (csp.Is(StackPointer())) {
1144 // If the current stack pointer is csp, then it must be aligned to 16 bytes 1144 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1145 // on entry and the total size of the specified registers must also be a 1145 // on entry and the total size of the specified registers must also be a
1146 // multiple of 16 bytes. 1146 // multiple of 16 bytes.
1147 if (total_size.IsImmediate()) { 1147 if (total_size.IsImmediate()) {
1148 ASSERT((total_size.ImmediateValue() % 16) == 0); 1148 DCHECK((total_size.ImmediateValue() % 16) == 0);
1149 } 1149 }
1150 1150
1151 // Don't check access size for non-immediate sizes. It's difficult to do 1151 // Don't check access size for non-immediate sizes. It's difficult to do
1152 // well, and it will be caught by hardware (or the simulator) anyway. 1152 // well, and it will be caught by hardware (or the simulator) anyway.
1153 } else { 1153 } else {
1154 // Even if the current stack pointer is not the system stack pointer (csp), 1154 // Even if the current stack pointer is not the system stack pointer (csp),
1155 // the system stack pointer will still be modified in order to comply with 1155 // the system stack pointer will still be modified in order to comply with
1156 // ABI rules about accessing memory below the system stack pointer. 1156 // ABI rules about accessing memory below the system stack pointer.
1157 BumpSystemStackPointer(total_size); 1157 BumpSystemStackPointer(total_size);
1158 } 1158 }
1159 } 1159 }
1160 1160
1161 1161
1162 void MacroAssembler::PopPostamble(Operand total_size) { 1162 void MacroAssembler::PopPostamble(Operand total_size) {
1163 if (csp.Is(StackPointer())) { 1163 if (csp.Is(StackPointer())) {
1164 // If the current stack pointer is csp, then it must be aligned to 16 bytes 1164 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1165 // on entry and the total size of the specified registers must also be a 1165 // on entry and the total size of the specified registers must also be a
1166 // multiple of 16 bytes. 1166 // multiple of 16 bytes.
1167 if (total_size.IsImmediate()) { 1167 if (total_size.IsImmediate()) {
1168 ASSERT((total_size.ImmediateValue() % 16) == 0); 1168 DCHECK((total_size.ImmediateValue() % 16) == 0);
1169 } 1169 }
1170 1170
1171 // Don't check access size for non-immediate sizes. It's difficult to do 1171 // Don't check access size for non-immediate sizes. It's difficult to do
1172 // well, and it will be caught by hardware (or the simulator) anyway. 1172 // well, and it will be caught by hardware (or the simulator) anyway.
1173 } else if (emit_debug_code()) { 1173 } else if (emit_debug_code()) {
1174 // It is safe to leave csp where it is when unwinding the JavaScript stack, 1174 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1175 // but if we keep it matching StackPointer, the simulator can detect memory 1175 // but if we keep it matching StackPointer, the simulator can detect memory
1176 // accesses in the now-free part of the stack. 1176 // accesses in the now-free part of the stack.
1177 SyncSystemStackPointer(); 1177 SyncSystemStackPointer();
1178 } 1178 }
1179 } 1179 }
1180 1180
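
Note: both preamble and postamble enforce the same invariant: while csp is the stack pointer, every immediate adjustment must keep it 16-byte aligned. A one-line checker with the interesting cases:

#include <cassert>

// AAPCS64-style invariant assumed by the DCHECKs above.
bool IsValidCspAdjustment(int total_size) { return (total_size % 16) == 0; }

int main() {
  assert(IsValidCspAdjustment(16));   // two X registers, or four W registers
  assert(IsValidCspAdjustment(32));
  assert(!IsValidCspAdjustment(8));   // a lone X register needs padding
  return 0;
}
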
1181 1181
1182 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) { 1182 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
1183 if (offset.IsImmediate()) { 1183 if (offset.IsImmediate()) {
1184 ASSERT(offset.ImmediateValue() >= 0); 1184 DCHECK(offset.ImmediateValue() >= 0);
1185 } else if (emit_debug_code()) { 1185 } else if (emit_debug_code()) {
1186 Cmp(xzr, offset); 1186 Cmp(xzr, offset);
1187 Check(le, kStackAccessBelowStackPointer); 1187 Check(le, kStackAccessBelowStackPointer);
1188 } 1188 }
1189 1189
1190 Str(src, MemOperand(StackPointer(), offset)); 1190 Str(src, MemOperand(StackPointer(), offset));
1191 } 1191 }
1192 1192
1193 1193
1194 void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) { 1194 void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
1195 if (offset.IsImmediate()) { 1195 if (offset.IsImmediate()) {
1196 ASSERT(offset.ImmediateValue() >= 0); 1196 DCHECK(offset.ImmediateValue() >= 0);
1197 } else if (emit_debug_code()) { 1197 } else if (emit_debug_code()) {
1198 Cmp(xzr, offset); 1198 Cmp(xzr, offset);
1199 Check(le, kStackAccessBelowStackPointer); 1199 Check(le, kStackAccessBelowStackPointer);
1200 } 1200 }
1201 1201
1202 Ldr(dst, MemOperand(StackPointer(), offset)); 1202 Ldr(dst, MemOperand(StackPointer(), offset));
1203 } 1203 }
1204 1204
1205 1205
1206 void MacroAssembler::PokePair(const CPURegister& src1, 1206 void MacroAssembler::PokePair(const CPURegister& src1,
1207 const CPURegister& src2, 1207 const CPURegister& src2,
1208 int offset) { 1208 int offset) {
1209 ASSERT(AreSameSizeAndType(src1, src2)); 1209 DCHECK(AreSameSizeAndType(src1, src2));
1210 ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0)); 1210 DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
1211 Stp(src1, src2, MemOperand(StackPointer(), offset)); 1211 Stp(src1, src2, MemOperand(StackPointer(), offset));
1212 } 1212 }
1213 1213
1214 1214
1215 void MacroAssembler::PeekPair(const CPURegister& dst1, 1215 void MacroAssembler::PeekPair(const CPURegister& dst1,
1216 const CPURegister& dst2, 1216 const CPURegister& dst2,
1217 int offset) { 1217 int offset) {
1218 ASSERT(AreSameSizeAndType(dst1, dst2)); 1218 DCHECK(AreSameSizeAndType(dst1, dst2));
1219 ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0)); 1219 DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
1220 Ldp(dst1, dst2, MemOperand(StackPointer(), offset)); 1220 Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
1221 } 1221 }
1222 1222
1223 1223
1224 void MacroAssembler::PushCalleeSavedRegisters() { 1224 void MacroAssembler::PushCalleeSavedRegisters() {
1225 // Ensure that the macro-assembler doesn't use any scratch registers. 1225 // Ensure that the macro-assembler doesn't use any scratch registers.
1226 InstructionAccurateScope scope(this); 1226 InstructionAccurateScope scope(this);
1227 1227
1228 // This method must not be called unless the current stack pointer is the 1228 // This method must not be called unless the current stack pointer is the
1229 // system stack pointer (csp). 1229 // system stack pointer (csp).
1230 ASSERT(csp.Is(StackPointer())); 1230 DCHECK(csp.Is(StackPointer()));
1231 1231
1232 MemOperand tos(csp, -2 * kXRegSize, PreIndex); 1232 MemOperand tos(csp, -2 * kXRegSize, PreIndex);
1233 1233
1234 stp(d14, d15, tos); 1234 stp(d14, d15, tos);
1235 stp(d12, d13, tos); 1235 stp(d12, d13, tos);
1236 stp(d10, d11, tos); 1236 stp(d10, d11, tos);
1237 stp(d8, d9, tos); 1237 stp(d8, d9, tos);
1238 1238
1239 stp(x29, x30, tos); 1239 stp(x29, x30, tos);
1240 stp(x27, x28, tos); // x28 = jssp 1240 stp(x27, x28, tos); // x28 = jssp
1241 stp(x25, x26, tos); 1241 stp(x25, x26, tos);
1242 stp(x23, x24, tos); 1242 stp(x23, x24, tos);
1243 stp(x21, x22, tos); 1243 stp(x21, x22, tos);
1244 stp(x19, x20, tos); 1244 stp(x19, x20, tos);
1245 } 1245 }
1246 1246
1247 1247
1248 void MacroAssembler::PopCalleeSavedRegisters() { 1248 void MacroAssembler::PopCalleeSavedRegisters() {
1249 // Ensure that the macro-assembler doesn't use any scratch registers. 1249 // Ensure that the macro-assembler doesn't use any scratch registers.
1250 InstructionAccurateScope scope(this); 1250 InstructionAccurateScope scope(this);
1251 1251
1252 // This method must not be called unless the current stack pointer is the 1252 // This method must not be called unless the current stack pointer is the
1253 // system stack pointer (csp). 1253 // system stack pointer (csp).
1254 ASSERT(csp.Is(StackPointer())); 1254 DCHECK(csp.Is(StackPointer()));
1255 1255
1256 MemOperand tos(csp, 2 * kXRegSize, PostIndex); 1256 MemOperand tos(csp, 2 * kXRegSize, PostIndex);
1257 1257
1258 ldp(x19, x20, tos); 1258 ldp(x19, x20, tos);
1259 ldp(x21, x22, tos); 1259 ldp(x21, x22, tos);
1260 ldp(x23, x24, tos); 1260 ldp(x23, x24, tos);
1261 ldp(x25, x26, tos); 1261 ldp(x25, x26, tos);
1262 ldp(x27, x28, tos); // x28 = jssp 1262 ldp(x27, x28, tos); // x28 = jssp
1263 ldp(x29, x30, tos); 1263 ldp(x29, x30, tos);
1264 1264
(...skipping 153 matching lines...)
1418 } 1418 }
1419 1419
1420 1420
1421 void MacroAssembler::CheckEnumCache(Register object, 1421 void MacroAssembler::CheckEnumCache(Register object,
1422 Register null_value, 1422 Register null_value,
1423 Register scratch0, 1423 Register scratch0,
1424 Register scratch1, 1424 Register scratch1,
1425 Register scratch2, 1425 Register scratch2,
1426 Register scratch3, 1426 Register scratch3,
1427 Label* call_runtime) { 1427 Label* call_runtime) {
1428 ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2, 1428 DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
1429 scratch3)); 1429 scratch3));
1430 1430
1431 Register empty_fixed_array_value = scratch0; 1431 Register empty_fixed_array_value = scratch0;
1432 Register current_object = scratch1; 1432 Register current_object = scratch1;
1433 1433
1434 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); 1434 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
1435 Label next, start; 1435 Label next, start;
1436 1436
1437 Mov(current_object, object); 1437 Mov(current_object, object);
1438 1438
(...skipping 61 matching lines...)
1500 Operand(isolate()->factory()->allocation_memento_map())); 1500 Operand(isolate()->factory()->allocation_memento_map()));
1501 } 1501 }
1502 1502
1503 1503
1504 void MacroAssembler::JumpToHandlerEntry(Register exception, 1504 void MacroAssembler::JumpToHandlerEntry(Register exception,
1505 Register object, 1505 Register object,
1506 Register state, 1506 Register state,
1507 Register scratch1, 1507 Register scratch1,
1508 Register scratch2) { 1508 Register scratch2) {
1509 // Handler expects argument in x0. 1509 // Handler expects argument in x0.
1510 ASSERT(exception.Is(x0)); 1510 DCHECK(exception.Is(x0));
1511 1511
1512 // Compute the handler entry address and jump to it. The handler table is 1512 // Compute the handler entry address and jump to it. The handler table is
1513 // a fixed array of (smi-tagged) code offsets. 1513 // a fixed array of (smi-tagged) code offsets.
1514 Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset)); 1514 Ldr(scratch1, FieldMemOperand(object, Code::kHandlerTableOffset));
1515 Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag); 1515 Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
1516 STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2); 1516 STATIC_ASSERT(StackHandler::kKindWidth < kPointerSizeLog2);
1517 Lsr(scratch2, state, StackHandler::kKindWidth); 1517 Lsr(scratch2, state, StackHandler::kKindWidth);
1518 Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2)); 1518 Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
1519 Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag); 1519 Add(scratch1, object, Code::kHeaderSize - kHeapObjectTag);
1520 Add(scratch1, scratch1, Operand::UntagSmi(scratch2)); 1520 Add(scratch1, scratch1, Operand::UntagSmi(scratch2));
1521 Br(scratch1); 1521 Br(scratch1);
1522 } 1522 }
1523 1523
1524 1524
1525 void MacroAssembler::InNewSpace(Register object, 1525 void MacroAssembler::InNewSpace(Register object,
1526 Condition cond, 1526 Condition cond,
1527 Label* branch) { 1527 Label* branch) {
1528 ASSERT(cond == eq || cond == ne); 1528 DCHECK(cond == eq || cond == ne);
1529 UseScratchRegisterScope temps(this); 1529 UseScratchRegisterScope temps(this);
1530 Register temp = temps.AcquireX(); 1530 Register temp = temps.AcquireX();
1531 And(temp, object, ExternalReference::new_space_mask(isolate())); 1531 And(temp, object, ExternalReference::new_space_mask(isolate()));
1532 Cmp(temp, ExternalReference::new_space_start(isolate())); 1532 Cmp(temp, ExternalReference::new_space_start(isolate()));
1533 B(cond, branch); 1533 B(cond, branch);
1534 } 1534 }
1535 1535
1536 1536
1537 void MacroAssembler::Throw(Register value, 1537 void MacroAssembler::Throw(Register value,
1538 Register scratch1, 1538 Register scratch1,
1539 Register scratch2, 1539 Register scratch2,
1540 Register scratch3, 1540 Register scratch3,
1541 Register scratch4) { 1541 Register scratch4) {
1542 // Adjust this code if not the case. 1542 // Adjust this code if not the case.
1543 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); 1543 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1544 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); 1544 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1545 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); 1545 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1546 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); 1546 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1547 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); 1547 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1548 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); 1548 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1549 1549
1550 // The handler expects the exception in x0. 1550 // The handler expects the exception in x0.
1551 ASSERT(value.Is(x0)); 1551 DCHECK(value.Is(x0));
1552 1552
1553 // Drop the stack pointer to the top of the top handler. 1553 // Drop the stack pointer to the top of the top handler.
1554 ASSERT(jssp.Is(StackPointer())); 1554 DCHECK(jssp.Is(StackPointer()));
1555 Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress, 1555 Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
1556 isolate()))); 1556 isolate())));
1557 Ldr(jssp, MemOperand(scratch1)); 1557 Ldr(jssp, MemOperand(scratch1));
1558 // Restore the next handler. 1558 // Restore the next handler.
1559 Pop(scratch2); 1559 Pop(scratch2);
1560 Str(scratch2, MemOperand(scratch1)); 1560 Str(scratch2, MemOperand(scratch1));
1561 1561
1562 // Get the code object and state. Restore the context and frame pointer. 1562 // Get the code object and state. Restore the context and frame pointer.
1563 Register object = scratch1; 1563 Register object = scratch1;
1564 Register state = scratch2; 1564 Register state = scratch2;
(...skipping 18 matching lines...)
1583 Register scratch4) { 1583 Register scratch4) {
1584 // Adjust this code if not the case. 1584 // Adjust this code if not the case.
1585 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); 1585 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1586 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); 1586 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1587 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); 1587 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1588 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); 1588 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1589 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); 1589 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1590 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); 1590 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1591 1591
1592 // The handler expects the exception in x0. 1592 // The handler expects the exception in x0.
1593 ASSERT(value.Is(x0)); 1593 DCHECK(value.Is(x0));
1594 1594
1595 // Drop the stack pointer to the top of the top stack handler. 1595 // Drop the stack pointer to the top of the top stack handler.
1596 ASSERT(jssp.Is(StackPointer())); 1596 DCHECK(jssp.Is(StackPointer()));
1597 Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress, 1597 Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
1598 isolate()))); 1598 isolate())));
1599 Ldr(jssp, MemOperand(scratch1)); 1599 Ldr(jssp, MemOperand(scratch1));
1600 1600
1601 // Unwind the handlers until the ENTRY handler is found. 1601 // Unwind the handlers until the ENTRY handler is found.
1602 Label fetch_next, check_kind; 1602 Label fetch_next, check_kind;
1603 B(&check_kind); 1603 B(&check_kind);
1604 Bind(&fetch_next); 1604 Bind(&fetch_next);
1605 Peek(jssp, StackHandlerConstants::kNextOffset); 1605 Peek(jssp, StackHandlerConstants::kNextOffset);
1606 1606
(...skipping 10 matching lines...)
1617 // saved in the handler). 1617 // saved in the handler).
1618 Register object = scratch1; 1618 Register object = scratch1;
1619 Register state = scratch2; 1619 Register state = scratch2;
1620 Pop(object, state, cp, fp); 1620 Pop(object, state, cp, fp);
1621 1621
1622 JumpToHandlerEntry(value, object, state, scratch3, scratch4); 1622 JumpToHandlerEntry(value, object, state, scratch3, scratch4);
1623 } 1623 }
1624 1624
1625 1625
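A hedged C++ model of the unwind loop in ThrowUncatchable; the struct and the kind predicate are hypothetical stand-ins for the stack layout pinned down by the STATIC_ASSERTs above:

#include <cstddef>

struct StackHandler {
  StackHandler* next;  // kNextOffset == 0; code, state, cp, fp follow.
};

inline StackHandler* UnwindToEntryHandler(
    StackHandler* top, bool (*is_entry)(const StackHandler*)) {
  while (!is_entry(top)) top = top->next;  // Peek(jssp, kNextOffset) loop
  return top;
}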
1626 void MacroAssembler::SmiAbs(const Register& smi, Label* slow) { 1626 void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
1627 ASSERT(smi.Is64Bits()); 1627 DCHECK(smi.Is64Bits());
1628 Abs(smi, smi, slow); 1628 Abs(smi, smi, slow);
1629 } 1629 }
1630 1630
1631 1631
1632 void MacroAssembler::AssertSmi(Register object, BailoutReason reason) { 1632 void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
1633 if (emit_debug_code()) { 1633 if (emit_debug_code()) {
1634 STATIC_ASSERT(kSmiTag == 0); 1634 STATIC_ASSERT(kSmiTag == 0);
1635 Tst(object, kSmiTagMask); 1635 Tst(object, kSmiTagMask);
1636 Check(eq, reason); 1636 Check(eq, reason);
1637 } 1637 }
(...skipping 45 matching lines...)
1683 Tst(object, kSmiTagMask); 1683 Tst(object, kSmiTagMask);
1684 Check(ne, kOperandIsASmiAndNotAString); 1684 Check(ne, kOperandIsASmiAndNotAString);
1685 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); 1685 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1686 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE); 1686 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
1687 Check(lo, kOperandIsNotAString); 1687 Check(lo, kOperandIsNotAString);
1688 } 1688 }
1689 } 1689 }
1690 1690
1691 1691
1692 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { 1692 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
1693 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. 1693 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
1694 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); 1694 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
1695 } 1695 }
1696 1696
1697 1697
1698 void MacroAssembler::TailCallStub(CodeStub* stub) { 1698 void MacroAssembler::TailCallStub(CodeStub* stub) {
1699 Jump(stub->GetCode(), RelocInfo::CODE_TARGET); 1699 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
1700 } 1700 }
1701 1701
1702 1702
1703 void MacroAssembler::CallRuntime(const Runtime::Function* f, 1703 void MacroAssembler::CallRuntime(const Runtime::Function* f,
(...skipping 31 matching lines...)
1735 ExternalReference next_address = 1735 ExternalReference next_address =
1736 ExternalReference::handle_scope_next_address(isolate()); 1736 ExternalReference::handle_scope_next_address(isolate());
1737 const int kNextOffset = 0; 1737 const int kNextOffset = 0;
1738 const int kLimitOffset = AddressOffset( 1738 const int kLimitOffset = AddressOffset(
1739 ExternalReference::handle_scope_limit_address(isolate()), 1739 ExternalReference::handle_scope_limit_address(isolate()),
1740 next_address); 1740 next_address);
1741 const int kLevelOffset = AddressOffset( 1741 const int kLevelOffset = AddressOffset(
1742 ExternalReference::handle_scope_level_address(isolate()), 1742 ExternalReference::handle_scope_level_address(isolate()),
1743 next_address); 1743 next_address);
1744 1744
1745 ASSERT(function_address.is(x1) || function_address.is(x2)); 1745 DCHECK(function_address.is(x1) || function_address.is(x2));
1746 1746
1747 Label profiler_disabled; 1747 Label profiler_disabled;
1748 Label end_profiler_check; 1748 Label end_profiler_check;
1749 Mov(x10, ExternalReference::is_profiling_address(isolate())); 1749 Mov(x10, ExternalReference::is_profiling_address(isolate()));
1750 Ldrb(w10, MemOperand(x10)); 1750 Ldrb(w10, MemOperand(x10));
1751 Cbz(w10, &profiler_disabled); 1751 Cbz(w10, &profiler_disabled);
1752 Mov(x3, thunk_ref); 1752 Mov(x3, thunk_ref);
1753 B(&end_profiler_check); 1753 B(&end_profiler_check);
1754 1754
1755 Bind(&profiler_disabled); 1755 Bind(&profiler_disabled);
(...skipping 137 matching lines...)
1893 Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); 1893 Ldr(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
1894 // Load the JavaScript builtin function from the builtins object. 1894 // Load the JavaScript builtin function from the builtins object.
1895 Ldr(target, FieldMemOperand(target, 1895 Ldr(target, FieldMemOperand(target,
1896 JSBuiltinsObject::OffsetOfFunctionWithId(id))); 1896 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
1897 } 1897 }
1898 1898
1899 1899
1900 void MacroAssembler::GetBuiltinEntry(Register target, 1900 void MacroAssembler::GetBuiltinEntry(Register target,
1901 Register function, 1901 Register function,
1902 Builtins::JavaScript id) { 1902 Builtins::JavaScript id) {
1903 ASSERT(!AreAliased(target, function)); 1903 DCHECK(!AreAliased(target, function));
1904 GetBuiltinFunction(function, id); 1904 GetBuiltinFunction(function, id);
1905 // Load the code entry point from the builtins object. 1905 // Load the code entry point from the builtins object.
1906 Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); 1906 Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
1907 } 1907 }
1908 1908
1909 1909
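The dependent loads behind GetBuiltinFunction and GetBuiltinEntry, modelled with hypothetical structs that mirror the FieldMemOperand offsets used above:

#include <cstdint>

struct JSFunctionModel { uintptr_t code_entry; };              // kCodeEntryOffset
struct JSBuiltinsObjectModel { JSFunctionModel* fns[64]; };    // per-id slots
struct GlobalObjectModel { JSBuiltinsObjectModel* builtins; }; // kBuiltinsOffset

inline uintptr_t BuiltinEntry(const GlobalObjectModel* global, int id) {
  return global->builtins->fns[id]->code_entry;
}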
1910 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, 1910 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
1911 InvokeFlag flag, 1911 InvokeFlag flag,
1912 const CallWrapper& call_wrapper) { 1912 const CallWrapper& call_wrapper) {
1913 ASM_LOCATION("MacroAssembler::InvokeBuiltin"); 1913 ASM_LOCATION("MacroAssembler::InvokeBuiltin");
1914 // You can't call a builtin without a valid frame. 1914 // You can't call a builtin without a valid frame.
1915 ASSERT(flag == JUMP_FUNCTION || has_frame()); 1915 DCHECK(flag == JUMP_FUNCTION || has_frame());
1916 1916
1917 // Get the builtin entry in x2 and set up the function object in x1. 1917 // Get the builtin entry in x2 and set up the function object in x1.
1918 GetBuiltinEntry(x2, x1, id); 1918 GetBuiltinEntry(x2, x1, id);
1919 if (flag == CALL_FUNCTION) { 1919 if (flag == CALL_FUNCTION) {
1920 call_wrapper.BeforeCall(CallSize(x2)); 1920 call_wrapper.BeforeCall(CallSize(x2));
1921 Call(x2); 1921 Call(x2);
1922 call_wrapper.AfterCall(); 1922 call_wrapper.AfterCall();
1923 } else { 1923 } else {
1924 ASSERT(flag == JUMP_FUNCTION); 1924 DCHECK(flag == JUMP_FUNCTION);
1925 Jump(x2); 1925 Jump(x2);
1926 } 1926 }
1927 } 1927 }
1928 1928
1929 1929
1930 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext, 1930 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
1931 int num_arguments, 1931 int num_arguments,
1932 int result_size) { 1932 int result_size) {
1933 // TODO(1236192): Most runtime routines don't need the number of 1933 // TODO(1236192): Most runtime routines don't need the number of
1934 // arguments passed in because it is constant. At some point we 1934 // arguments passed in because it is constant. At some point we
(...skipping 11 matching lines...) Expand all
1946 num_arguments, 1946 num_arguments,
1947 result_size); 1947 result_size);
1948 } 1948 }
1949 1949
1950 1950
1951 void MacroAssembler::InitializeNewString(Register string, 1951 void MacroAssembler::InitializeNewString(Register string,
1952 Register length, 1952 Register length,
1953 Heap::RootListIndex map_index, 1953 Heap::RootListIndex map_index,
1954 Register scratch1, 1954 Register scratch1,
1955 Register scratch2) { 1955 Register scratch2) {
1956 ASSERT(!AreAliased(string, length, scratch1, scratch2)); 1956 DCHECK(!AreAliased(string, length, scratch1, scratch2));
1957 LoadRoot(scratch2, map_index); 1957 LoadRoot(scratch2, map_index);
1958 SmiTag(scratch1, length); 1958 SmiTag(scratch1, length);
1959 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); 1959 Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
1960 1960
1961 Mov(scratch2, String::kEmptyHashField); 1961 Mov(scratch2, String::kEmptyHashField);
1962 Str(scratch1, FieldMemOperand(string, String::kLengthOffset)); 1962 Str(scratch1, FieldMemOperand(string, String::kLengthOffset));
1963 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset)); 1963 Str(scratch2, FieldMemOperand(string, String::kHashFieldOffset));
1964 } 1964 }
1965 1965
1966 1966
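A sketch of the three stores in InitializeNewString; the field layout is symbolic, and the shift of 32 assumes arm64's 64-bit smi encoding:

#include <cstdint>

struct StringHeaderModel {
  uintptr_t map;        // HeapObject::kMapOffset
  int64_t length_smi;   // String::kLengthOffset
  uint32_t hash_field;  // String::kHashFieldOffset
};

inline void InitNewString(StringHeaderModel* s, int64_t length,
                          uintptr_t map, uint32_t empty_hash) {
  s->map = map;                  // Str(scratch2, kMapOffset)
  s->length_smi = length << 32;  // SmiTag(scratch1, length) + Str
  s->hash_field = empty_hash;    // Str(scratch2, kHashFieldOffset)
}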
(...skipping 26 matching lines...)
1993 UseScratchRegisterScope temps(this); 1993 UseScratchRegisterScope temps(this);
1994 Register temp = temps.AcquireX(); 1994 Register temp = temps.AcquireX();
1995 Mov(temp, function); 1995 Mov(temp, function);
1996 CallCFunction(temp, num_of_reg_args, num_of_double_args); 1996 CallCFunction(temp, num_of_reg_args, num_of_double_args);
1997 } 1997 }
1998 1998
1999 1999
2000 void MacroAssembler::CallCFunction(Register function, 2000 void MacroAssembler::CallCFunction(Register function,
2001 int num_of_reg_args, 2001 int num_of_reg_args,
2002 int num_of_double_args) { 2002 int num_of_double_args) {
2003 ASSERT(has_frame()); 2003 DCHECK(has_frame());
2004 // We can pass 8 integer arguments in registers. If we need to pass more than 2004 // We can pass 8 integer arguments in registers. If we need to pass more than
2005 // that, we'll need to implement support for passing them on the stack. 2005 // that, we'll need to implement support for passing them on the stack.
2006 ASSERT(num_of_reg_args <= 8); 2006 DCHECK(num_of_reg_args <= 8);
2007 2007
2008 // If we're passing doubles, we're limited to the following prototypes 2008 // If we're passing doubles, we're limited to the following prototypes
2009 // (defined by ExternalReference::Type): 2009 // (defined by ExternalReference::Type):
2010 // BUILTIN_COMPARE_CALL: int f(double, double) 2010 // BUILTIN_COMPARE_CALL: int f(double, double)
2011 // BUILTIN_FP_FP_CALL: double f(double, double) 2011 // BUILTIN_FP_FP_CALL: double f(double, double)
2012 // BUILTIN_FP_CALL: double f(double) 2012 // BUILTIN_FP_CALL: double f(double)
2013 // BUILTIN_FP_INT_CALL: double f(double, int) 2013 // BUILTIN_FP_INT_CALL: double f(double, int)
2014 if (num_of_double_args > 0) { 2014 if (num_of_double_args > 0) {
2015 ASSERT(num_of_reg_args <= 1); 2015 DCHECK(num_of_reg_args <= 1);
2016 ASSERT((num_of_double_args + num_of_reg_args) <= 2); 2016 DCHECK((num_of_double_args + num_of_reg_args) <= 2);
2017 } 2017 }
2018 2018
2019 2019
2020 // If the stack pointer is not csp, we need to derive an aligned csp from the 2020 // If the stack pointer is not csp, we need to derive an aligned csp from the
2021 // current stack pointer. 2021 // current stack pointer.
2022 const Register old_stack_pointer = StackPointer(); 2022 const Register old_stack_pointer = StackPointer();
2023 if (!csp.Is(old_stack_pointer)) { 2023 if (!csp.Is(old_stack_pointer)) {
2024 AssertStackConsistency(); 2024 AssertStackConsistency();
2025 2025
2026 int sp_alignment = ActivationFrameAlignment(); 2026 int sp_alignment = ActivationFrameAlignment();
2027 // The ABI mandates at least 16-byte alignment. 2027 // The ABI mandates at least 16-byte alignment.
2028 ASSERT(sp_alignment >= 16); 2028 DCHECK(sp_alignment >= 16);
2029 ASSERT(IsPowerOf2(sp_alignment)); 2029 DCHECK(IsPowerOf2(sp_alignment));
2030 2030
2031 // The current stack pointer is a callee saved register, and is preserved 2031 // The current stack pointer is a callee saved register, and is preserved
2032 // across the call. 2032 // across the call.
2033 ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer)); 2033 DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
2034 2034
2035 // Align and synchronize the system stack pointer with jssp. 2035 // Align and synchronize the system stack pointer with jssp.
2036 Bic(csp, old_stack_pointer, sp_alignment - 1); 2036 Bic(csp, old_stack_pointer, sp_alignment - 1);
2037 SetStackPointer(csp); 2037 SetStackPointer(csp);
2038 } 2038 }
2039 2039
2040 // Call directly. The function called cannot cause a GC, or allow preemption, 2040 // Call directly. The function called cannot cause a GC, or allow preemption,
2041 // so the return address in the link register stays correct. 2041 // so the return address in the link register stays correct.
2042 Call(function); 2042 Call(function);
2043 2043
2044 if (!csp.Is(old_stack_pointer)) { 2044 if (!csp.Is(old_stack_pointer)) {
2045 if (emit_debug_code()) { 2045 if (emit_debug_code()) {
2046 // Because the stack pointer must be aligned on a 16-byte boundary, the 2046 // Because the stack pointer must be aligned on a 16-byte boundary, the
2047 // aligned csp can be up to 12 bytes below the jssp. This is the case 2047 // aligned csp can be up to 12 bytes below the jssp. This is the case
2048 // where we only pushed one W register on top of an aligned jssp. 2048 // where we only pushed one W register on top of an aligned jssp.
2049 UseScratchRegisterScope temps(this); 2049 UseScratchRegisterScope temps(this);
2050 Register temp = temps.AcquireX(); 2050 Register temp = temps.AcquireX();
2051 ASSERT(ActivationFrameAlignment() == 16); 2051 DCHECK(ActivationFrameAlignment() == 16);
2052 Sub(temp, csp, old_stack_pointer); 2052 Sub(temp, csp, old_stack_pointer);
2053 // We want temp <= 0 && temp >= -12. 2053 // We want temp <= 0 && temp >= -12.
2054 Cmp(temp, 0); 2054 Cmp(temp, 0);
2055 Ccmp(temp, -12, NFlag, le); 2055 Ccmp(temp, -12, NFlag, le);
2056 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall); 2056 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
2057 } 2057 }
2058 SetStackPointer(old_stack_pointer); 2058 SetStackPointer(old_stack_pointer);
2059 } 2059 }
2060 } 2060 }
2061 2061
2062 2062
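The Bic that aligns csp in CallCFunction, as plain C++; the power-of-two and minimum-alignment requirements match the DCHECKs above:

#include <cassert>
#include <cstdint>

inline uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
  assert(alignment >= 16 && (alignment & (alignment - 1)) == 0);
  return sp & ~(alignment - 1);  // Bic(csp, old_stack_pointer, alignment - 1)
}

This rounding-down is also why the post-call debug check accepts csp up to 12 bytes below jssp: with a single W register pushed on a 16-byte-aligned jssp, aligning down moves the pointer at most 12 bytes.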
2063 void MacroAssembler::Jump(Register target) { 2063 void MacroAssembler::Jump(Register target) {
2064 Br(target); 2064 Br(target);
2065 } 2065 }
2066 2066
2067 2067
2068 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) { 2068 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
2069 UseScratchRegisterScope temps(this); 2069 UseScratchRegisterScope temps(this);
2070 Register temp = temps.AcquireX(); 2070 Register temp = temps.AcquireX();
2071 Mov(temp, Operand(target, rmode)); 2071 Mov(temp, Operand(target, rmode));
2072 Br(temp); 2072 Br(temp);
2073 } 2073 }
2074 2074
2075 2075
2076 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) { 2076 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
2077 ASSERT(!RelocInfo::IsCodeTarget(rmode)); 2077 DCHECK(!RelocInfo::IsCodeTarget(rmode));
2078 Jump(reinterpret_cast<intptr_t>(target), rmode); 2078 Jump(reinterpret_cast<intptr_t>(target), rmode);
2079 } 2079 }
2080 2080
2081 2081
2082 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) { 2082 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
2083 ASSERT(RelocInfo::IsCodeTarget(rmode)); 2083 DCHECK(RelocInfo::IsCodeTarget(rmode));
2084 AllowDeferredHandleDereference embedding_raw_address; 2084 AllowDeferredHandleDereference embedding_raw_address;
2085 Jump(reinterpret_cast<intptr_t>(code.location()), rmode); 2085 Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
2086 } 2086 }
2087 2087
2088 2088
2089 void MacroAssembler::Call(Register target) { 2089 void MacroAssembler::Call(Register target) {
2090 BlockPoolsScope scope(this); 2090 BlockPoolsScope scope(this);
2091 #ifdef DEBUG 2091 #ifdef DEBUG
2092 Label start_call; 2092 Label start_call;
2093 Bind(&start_call); 2093 Bind(&start_call);
(...skipping 28 matching lines...)
2122 BlockPoolsScope scope(this); 2122 BlockPoolsScope scope(this);
2123 #ifdef DEBUG 2123 #ifdef DEBUG
2124 Label start_call; 2124 Label start_call;
2125 Bind(&start_call); 2125 Bind(&start_call);
2126 #endif 2126 #endif
2127 // Statement positions are expected to be recorded when the target 2127 // Statement positions are expected to be recorded when the target
2128 // address is loaded. 2128 // address is loaded.
2129 positions_recorder()->WriteRecordedPositions(); 2129 positions_recorder()->WriteRecordedPositions();
2130 2130
2131 // Addresses always have 64 bits, so we shouldn't encounter NONE32. 2131 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2132 ASSERT(rmode != RelocInfo::NONE32); 2132 DCHECK(rmode != RelocInfo::NONE32);
2133 2133
2134 UseScratchRegisterScope temps(this); 2134 UseScratchRegisterScope temps(this);
2135 Register temp = temps.AcquireX(); 2135 Register temp = temps.AcquireX();
2136 2136
2137 if (rmode == RelocInfo::NONE64) { 2137 if (rmode == RelocInfo::NONE64) {
2138 // Addresses are 48 bits so we never need to load the upper 16 bits. 2138 // Addresses are 48 bits so we never need to load the upper 16 bits.
2139 uint64_t imm = reinterpret_cast<uint64_t>(target); 2139 uint64_t imm = reinterpret_cast<uint64_t>(target);
2140 // If we don't use ARM tagged addresses, the upper 16 bits must be 0. 2140 // If we don't use ARM tagged addresses, the upper 16 bits must be 0.
2141 ASSERT(((imm >> 48) & 0xffff) == 0); 2141 DCHECK(((imm >> 48) & 0xffff) == 0);
2142 movz(temp, (imm >> 0) & 0xffff, 0); 2142 movz(temp, (imm >> 0) & 0xffff, 0);
2143 movk(temp, (imm >> 16) & 0xffff, 16); 2143 movk(temp, (imm >> 16) & 0xffff, 16);
2144 movk(temp, (imm >> 32) & 0xffff, 32); 2144 movk(temp, (imm >> 32) & 0xffff, 32);
2145 } else { 2145 } else {
2146 Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode)); 2146 Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
2147 } 2147 }
2148 Blr(temp); 2148 Blr(temp);
2149 #ifdef DEBUG 2149 #ifdef DEBUG
2150 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode)); 2150 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
2151 #endif 2151 #endif
(...skipping 32 matching lines...)
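What the movz/movk sequence above materializes, written as a checkable sketch: a 48-bit address rebuilt from three 16-bit halfwords at shifts 0, 16 and 32.

#include <cassert>
#include <cstdint>

inline uint64_t Rebuild48BitImm(uint64_t imm) {
  assert(((imm >> 48) & 0xffff) == 0);  // upper 16 bits must be clear
  uint64_t r = imm & 0xffff;            // movz temp, #h0
  r |= ((imm >> 16) & 0xffff) << 16;    // movk temp, #h1, lsl #16
  r |= ((imm >> 32) & 0xffff) << 32;    // movk temp, #h2, lsl #32
  return r;                             // equals imm when the assert holds
}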
2184 int MacroAssembler::CallSize(Label* target) { 2184 int MacroAssembler::CallSize(Label* target) {
2185 USE(target); 2185 USE(target);
2186 return kInstructionSize; 2186 return kInstructionSize;
2187 } 2187 }
2188 2188
2189 2189
2190 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) { 2190 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
2191 USE(target); 2191 USE(target);
2192 2192
2193 // Addresses always have 64 bits, so we shouldn't encounter NONE32. 2193 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2194 ASSERT(rmode != RelocInfo::NONE32); 2194 DCHECK(rmode != RelocInfo::NONE32);
2195 2195
2196 if (rmode == RelocInfo::NONE64) { 2196 if (rmode == RelocInfo::NONE64) {
2197 return kCallSizeWithoutRelocation; 2197 return kCallSizeWithoutRelocation;
2198 } else { 2198 } else {
2199 return kCallSizeWithRelocation; 2199 return kCallSizeWithRelocation;
2200 } 2200 }
2201 } 2201 }
2202 2202
2203 2203
2204 int MacroAssembler::CallSize(Handle<Code> code, 2204 int MacroAssembler::CallSize(Handle<Code> code,
2205 RelocInfo::Mode rmode, 2205 RelocInfo::Mode rmode,
2206 TypeFeedbackId ast_id) { 2206 TypeFeedbackId ast_id) {
2207 USE(code); 2207 USE(code);
2208 USE(ast_id); 2208 USE(ast_id);
2209 2209
2210 // Addresses always have 64 bits, so we shouldn't encounter NONE32. 2210 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2211 ASSERT(rmode != RelocInfo::NONE32); 2211 DCHECK(rmode != RelocInfo::NONE32);
2212 2212
2213 if (rmode == RelocInfo::NONE64) { 2213 if (rmode == RelocInfo::NONE64) {
2214 return kCallSizeWithoutRelocation; 2214 return kCallSizeWithoutRelocation;
2215 } else { 2215 } else {
2216 return kCallSizeWithRelocation; 2216 return kCallSizeWithRelocation;
2217 } 2217 }
2218 } 2218 }
2219 2219
2220 2220
2221 2221
2222 2222
2223 2223
2224 void MacroAssembler::JumpForHeapNumber(Register object, 2224 void MacroAssembler::JumpForHeapNumber(Register object,
2225 Register heap_number_map, 2225 Register heap_number_map,
2226 Label* on_heap_number, 2226 Label* on_heap_number,
2227 Label* on_not_heap_number) { 2227 Label* on_not_heap_number) {
2228 ASSERT(on_heap_number || on_not_heap_number); 2228 DCHECK(on_heap_number || on_not_heap_number);
2229 AssertNotSmi(object); 2229 AssertNotSmi(object);
2230 2230
2231 UseScratchRegisterScope temps(this); 2231 UseScratchRegisterScope temps(this);
2232 Register temp = temps.AcquireX(); 2232 Register temp = temps.AcquireX();
2233 2233
2234 // Load the HeapNumber map if it is not passed. 2234 // Load the HeapNumber map if it is not passed.
2235 if (heap_number_map.Is(NoReg)) { 2235 if (heap_number_map.Is(NoReg)) {
2236 heap_number_map = temps.AcquireX(); 2236 heap_number_map = temps.AcquireX();
2237 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 2237 LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2238 } else { 2238 } else {
2239 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 2239 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2240 } 2240 }
2241 2241
2242 ASSERT(!AreAliased(temp, heap_number_map)); 2242 DCHECK(!AreAliased(temp, heap_number_map));
2243 2243
2244 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); 2244 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2245 Cmp(temp, heap_number_map); 2245 Cmp(temp, heap_number_map);
2246 2246
2247 if (on_heap_number) { 2247 if (on_heap_number) {
2248 B(eq, on_heap_number); 2248 B(eq, on_heap_number);
2249 } 2249 }
2250 if (on_not_heap_number) { 2250 if (on_not_heap_number) {
2251 B(ne, on_not_heap_number); 2251 B(ne, on_not_heap_number);
2252 } 2252 }
(...skipping 19 matching lines...)
2272 on_not_heap_number); 2272 on_not_heap_number);
2273 } 2273 }
2274 2274
2275 2275
2276 void MacroAssembler::LookupNumberStringCache(Register object, 2276 void MacroAssembler::LookupNumberStringCache(Register object,
2277 Register result, 2277 Register result,
2278 Register scratch1, 2278 Register scratch1,
2279 Register scratch2, 2279 Register scratch2,
2280 Register scratch3, 2280 Register scratch3,
2281 Label* not_found) { 2281 Label* not_found) {
2282 ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3)); 2282 DCHECK(!AreAliased(object, result, scratch1, scratch2, scratch3));
2283 2283
2284 // Use of registers. Register result is used as a temporary. 2284 // Use of registers. Register result is used as a temporary.
2285 Register number_string_cache = result; 2285 Register number_string_cache = result;
2286 Register mask = scratch3; 2286 Register mask = scratch3;
2287 2287
2288 // Load the number string cache. 2288 // Load the number string cache.
2289 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); 2289 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2290 2290
2291 // Make the hash mask from the length of the number string cache. It 2291 // Make the hash mask from the length of the number string cache. It
2292 // contains two elements (number and string) for each cache entry. 2292 // contains two elements (number and string) for each cache entry.
(...skipping 85 matching lines...)
2378 2378
2379 void MacroAssembler::JumpIfMinusZero(DoubleRegister input, 2379 void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
2380 Label* on_negative_zero) { 2380 Label* on_negative_zero) {
2381 TestForMinusZero(input); 2381 TestForMinusZero(input);
2382 B(vs, on_negative_zero); 2382 B(vs, on_negative_zero);
2383 } 2383 }
2384 2384
2385 2385
2386 void MacroAssembler::JumpIfMinusZero(Register input, 2386 void MacroAssembler::JumpIfMinusZero(Register input,
2387 Label* on_negative_zero) { 2387 Label* on_negative_zero) {
2388 ASSERT(input.Is64Bits()); 2388 DCHECK(input.Is64Bits());
2389 // Floating point value is in an integer register. Detect -0.0 by subtracting 2389 // Floating point value is in an integer register. Detect -0.0 by subtracting
2390 // 1 (cmp), which will cause overflow. 2390 // 1 (cmp), which will cause overflow.
2391 Cmp(input, 1); 2391 Cmp(input, 1);
2392 B(vs, on_negative_zero); 2392 B(vs, on_negative_zero);
2393 } 2393 }
2394 2394
2395 2395
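Why Cmp(input, 1) works here: the bit pattern of -0.0 is 0x8000000000000000, i.e. INT64_MIN, and that is the only input for which subtracting 1 overflows and sets the V flag. Equivalent predicate:

#include <cstdint>

inline bool IsMinusZeroBits(uint64_t bits) {
  return bits == 0x8000000000000000ULL;  // the sole value where input - 1 sets V
}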
2396 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) { 2396 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
2397 // Clamp the value to [0..255]. 2397 // Clamp the value to [0..255].
2398 Cmp(input.W(), Operand(input.W(), UXTB)); 2398 Cmp(input.W(), Operand(input.W(), UXTB));
(...skipping 32 matching lines...)
2431 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst, 2431 void MacroAssembler::CopyFieldsLoopPairsHelper(Register dst,
2432 Register src, 2432 Register src,
2433 unsigned count, 2433 unsigned count,
2434 Register scratch1, 2434 Register scratch1,
2435 Register scratch2, 2435 Register scratch2,
2436 Register scratch3, 2436 Register scratch3,
2437 Register scratch4, 2437 Register scratch4,
2438 Register scratch5) { 2438 Register scratch5) {
2439 // Untag src and dst into scratch registers. 2439 // Untag src and dst into scratch registers.
2440 // Copy src->dst in a tight loop. 2440 // Copy src->dst in a tight loop.
2441 ASSERT(!AreAliased(dst, src, 2441 DCHECK(!AreAliased(dst, src,
2442 scratch1, scratch2, scratch3, scratch4, scratch5)); 2442 scratch1, scratch2, scratch3, scratch4, scratch5));
2443 ASSERT(count >= 2); 2443 DCHECK(count >= 2);
2444 2444
2445 const Register& remaining = scratch3; 2445 const Register& remaining = scratch3;
2446 Mov(remaining, count / 2); 2446 Mov(remaining, count / 2);
2447 2447
2448 const Register& dst_untagged = scratch1; 2448 const Register& dst_untagged = scratch1;
2449 const Register& src_untagged = scratch2; 2449 const Register& src_untagged = scratch2;
2450 Sub(dst_untagged, dst, kHeapObjectTag); 2450 Sub(dst_untagged, dst, kHeapObjectTag);
2451 Sub(src_untagged, src, kHeapObjectTag); 2451 Sub(src_untagged, src, kHeapObjectTag);
2452 2452
2453 // Copy fields in pairs. 2453 // Copy fields in pairs.
(...skipping 16 matching lines...)
2470 2470
2471 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst, 2471 void MacroAssembler::CopyFieldsUnrolledPairsHelper(Register dst,
2472 Register src, 2472 Register src,
2473 unsigned count, 2473 unsigned count,
2474 Register scratch1, 2474 Register scratch1,
2475 Register scratch2, 2475 Register scratch2,
2476 Register scratch3, 2476 Register scratch3,
2477 Register scratch4) { 2477 Register scratch4) {
2478 // Untag src and dst into scratch registers. 2478 // Untag src and dst into scratch registers.
2479 // Copy src->dst in an unrolled loop. 2479 // Copy src->dst in an unrolled loop.
2480 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4)); 2480 DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
2481 2481
2482 const Register& dst_untagged = scratch1; 2482 const Register& dst_untagged = scratch1;
2483 const Register& src_untagged = scratch2; 2483 const Register& src_untagged = scratch2;
2484 sub(dst_untagged, dst, kHeapObjectTag); 2484 sub(dst_untagged, dst, kHeapObjectTag);
2485 sub(src_untagged, src, kHeapObjectTag); 2485 sub(src_untagged, src, kHeapObjectTag);
2486 2486
2487 // Copy fields in pairs. 2487 // Copy fields in pairs.
2488 for (unsigned i = 0; i < count / 2; i++) { 2488 for (unsigned i = 0; i < count / 2; i++) {
2489 Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex)); 2489 Ldp(scratch3, scratch4, MemOperand(src_untagged, kXRegSize * 2, PostIndex));
2490 Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex)); 2490 Stp(scratch3, scratch4, MemOperand(dst_untagged, kXRegSize * 2, PostIndex));
2491 } 2491 }
2492 2492
2493 // Handle the leftovers. 2493 // Handle the leftovers.
2494 if (count & 1) { 2494 if (count & 1) {
2495 Ldr(scratch3, MemOperand(src_untagged)); 2495 Ldr(scratch3, MemOperand(src_untagged));
2496 Str(scratch3, MemOperand(dst_untagged)); 2496 Str(scratch3, MemOperand(dst_untagged));
2497 } 2497 }
2498 } 2498 }
2499 2499
2500 2500
2501 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst, 2501 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
2502 Register src, 2502 Register src,
2503 unsigned count, 2503 unsigned count,
2504 Register scratch1, 2504 Register scratch1,
2505 Register scratch2, 2505 Register scratch2,
2506 Register scratch3) { 2506 Register scratch3) {
2507 // Untag src and dst into scratch registers. 2507 // Untag src and dst into scratch registers.
2508 // Copy src->dst in an unrolled loop. 2508 // Copy src->dst in an unrolled loop.
2509 ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3)); 2509 DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3));
2510 2510
2511 const Register& dst_untagged = scratch1; 2511 const Register& dst_untagged = scratch1;
2512 const Register& src_untagged = scratch2; 2512 const Register& src_untagged = scratch2;
2513 Sub(dst_untagged, dst, kHeapObjectTag); 2513 Sub(dst_untagged, dst, kHeapObjectTag);
2514 Sub(src_untagged, src, kHeapObjectTag); 2514 Sub(src_untagged, src, kHeapObjectTag);
2515 2515
2516 // Copy fields one by one. 2516 // Copy fields one by one.
2517 for (unsigned i = 0; i < count; i++) { 2517 for (unsigned i = 0; i < count; i++) {
2518 Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex)); 2518 Ldr(scratch3, MemOperand(src_untagged, kXRegSize, PostIndex));
2519 Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex)); 2519 Str(scratch3, MemOperand(dst_untagged, kXRegSize, PostIndex));
2520 } 2520 }
2521 } 2521 }
2522 2522
2523 2523
2524 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps, 2524 void MacroAssembler::CopyFields(Register dst, Register src, CPURegList temps,
2525 unsigned count) { 2525 unsigned count) {
2526 // One of two methods is used: 2526 // One of two methods is used:
2527 // 2527 //
2528 // For high 'count' values where many scratch registers are available: 2528 // For high 'count' values where many scratch registers are available:
2529 // Untag src and dst into scratch registers. 2529 // Untag src and dst into scratch registers.
2530 // Copy src->dst in a tight loop. 2530 // Copy src->dst in a tight loop.
2531 // 2531 //
2532 // For low 'count' values or where few scratch registers are available: 2532 // For low 'count' values or where few scratch registers are available:
2533 // Untag src and dst into scratch registers. 2533 // Untag src and dst into scratch registers.
2534 // Copy src->dst in an unrolled loop. 2534 // Copy src->dst in an unrolled loop.
2535 // 2535 //
2536 // In both cases, fields are copied in pairs if possible, and left-overs are 2536 // In both cases, fields are copied in pairs if possible, and left-overs are
2537 // handled separately. 2537 // handled separately.
2538 ASSERT(!AreAliased(dst, src)); 2538 DCHECK(!AreAliased(dst, src));
2539 ASSERT(!temps.IncludesAliasOf(dst)); 2539 DCHECK(!temps.IncludesAliasOf(dst));
2540 ASSERT(!temps.IncludesAliasOf(src)); 2540 DCHECK(!temps.IncludesAliasOf(src));
2541 ASSERT(!temps.IncludesAliasOf(xzr)); 2541 DCHECK(!temps.IncludesAliasOf(xzr));
2542 2542
2543 if (emit_debug_code()) { 2543 if (emit_debug_code()) {
2544 Cmp(dst, src); 2544 Cmp(dst, src);
2545 Check(ne, kTheSourceAndDestinationAreTheSame); 2545 Check(ne, kTheSourceAndDestinationAreTheSame);
2546 } 2546 }
2547 2547
2548 // The value of 'count' at which a loop will be generated (if there are 2548 // The value of 'count' at which a loop will be generated (if there are
2549 // enough scratch registers). 2549 // enough scratch registers).
2550 static const unsigned kLoopThreshold = 8; 2550 static const unsigned kLoopThreshold = 8;
2551 2551
(...skipping 23 matching lines...)
2575 2575
2576 2576
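A hedged model of what the CopyFields* helpers compute, independent of the loop-versus-unrolled codegen choice: fields move in pairs (Ldp/Stp) with a single leftover handled last.

#include <cstdint>

inline void CopyFieldsModel(uint64_t* dst, const uint64_t* src,
                            unsigned count) {
  unsigned i = 0;
  for (; i + 1 < count; i += 2) {  // pairs, as in Ldp/Stp
    dst[i] = src[i];
    dst[i + 1] = src[i + 1];
  }
  if (count & 1) dst[i] = src[i];  // leftover, as in Ldr/Str
}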
2577 void MacroAssembler::CopyBytes(Register dst, 2577 void MacroAssembler::CopyBytes(Register dst,
2578 Register src, 2578 Register src,
2579 Register length, 2579 Register length,
2580 Register scratch, 2580 Register scratch,
2581 CopyHint hint) { 2581 CopyHint hint) {
2582 UseScratchRegisterScope temps(this); 2582 UseScratchRegisterScope temps(this);
2583 Register tmp1 = temps.AcquireX(); 2583 Register tmp1 = temps.AcquireX();
2584 Register tmp2 = temps.AcquireX(); 2584 Register tmp2 = temps.AcquireX();
2585 ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2)); 2585 DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
2586 ASSERT(!AreAliased(src, dst, csp)); 2586 DCHECK(!AreAliased(src, dst, csp));
2587 2587
2588 if (emit_debug_code()) { 2588 if (emit_debug_code()) {
2589 // Check copy length. 2589 // Check copy length.
2590 Cmp(length, 0); 2590 Cmp(length, 0);
2591 Assert(ge, kUnexpectedNegativeValue); 2591 Assert(ge, kUnexpectedNegativeValue);
2592 2592
2593 // Check src and dst buffers don't overlap. 2593 // Check src and dst buffers don't overlap.
2594 Add(scratch, src, length); // Calculate end of src buffer. 2594 Add(scratch, src, length); // Calculate end of src buffer.
2595 Cmp(scratch, dst); 2595 Cmp(scratch, dst);
2596 Add(scratch, dst, length); // Calculate end of dst buffer. 2596 Add(scratch, dst, length); // Calculate end of dst buffer.
(...skipping 28 matching lines...)
2625 Cbnz(length, &short_loop); 2625 Cbnz(length, &short_loop);
2626 2626
2627 2627
2628 Bind(&done); 2628 Bind(&done);
2629 } 2629 }
2630 2630
2631 2631
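The debug overlap check in CopyBytes appears to establish the usual disjointness condition; as a predicate (a sketch only, since part of the check is elided above):

#include <cstdint>

inline bool BuffersDisjoint(uintptr_t src, uintptr_t dst, uintptr_t length) {
  return (src + length <= dst) || (dst + length <= src);
}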
2632 void MacroAssembler::FillFields(Register dst, 2632 void MacroAssembler::FillFields(Register dst,
2633 Register field_count, 2633 Register field_count,
2634 Register filler) { 2634 Register filler) {
2635 ASSERT(!dst.Is(csp)); 2635 DCHECK(!dst.Is(csp));
2636 UseScratchRegisterScope temps(this); 2636 UseScratchRegisterScope temps(this);
2637 Register field_ptr = temps.AcquireX(); 2637 Register field_ptr = temps.AcquireX();
2638 Register counter = temps.AcquireX(); 2638 Register counter = temps.AcquireX();
2639 Label done; 2639 Label done;
2640 2640
2641 // Decrement count. If the result < zero, count was zero, and there's nothing 2641 // Decrement count. If the result < zero, count was zero, and there's nothing
2642 // to do. If count was one, flags are set to fail the gt condition at the end 2642 // to do. If count was one, flags are set to fail the gt condition at the end
2643 // of the pairs loop. 2643 // of the pairs loop.
2644 Subs(counter, field_count, 1); 2644 Subs(counter, field_count, 1);
2645 B(lt, &done); 2645 B(lt, &done);
(...skipping 24 matching lines...)
2670 Register first, 2670 Register first,
2671 Register second, 2671 Register second,
2672 Register scratch1, 2672 Register scratch1,
2673 Register scratch2, 2673 Register scratch2,
2674 Label* failure, 2674 Label* failure,
2675 SmiCheckType smi_check) { 2675 SmiCheckType smi_check) {
2676 2676
2677 if (smi_check == DO_SMI_CHECK) { 2677 if (smi_check == DO_SMI_CHECK) {
2678 JumpIfEitherSmi(first, second, failure); 2678 JumpIfEitherSmi(first, second, failure);
2679 } else if (emit_debug_code()) { 2679 } else if (emit_debug_code()) {
2680 ASSERT(smi_check == DONT_DO_SMI_CHECK); 2680 DCHECK(smi_check == DONT_DO_SMI_CHECK);
2681 Label not_smi; 2681 Label not_smi;
2682 JumpIfEitherSmi(first, second, NULL, &not_smi); 2682 JumpIfEitherSmi(first, second, NULL, &not_smi);
2683 2683
2684 // At least one input is a smi, but the flags indicated a smi check wasn't 2684 // At least one input is a smi, but the flags indicated a smi check wasn't
2685 // needed. 2685 // needed.
2686 Abort(kUnexpectedSmi); 2686 Abort(kUnexpectedSmi);
2687 2687
2688 Bind(&not_smi); 2688 Bind(&not_smi);
2689 } 2689 }
2690 2690
(...skipping 10 matching lines...)
2701 failure); 2701 failure);
2702 } 2702 }
2703 2703
2704 2704
2705 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii( 2705 void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
2706 Register first, 2706 Register first,
2707 Register second, 2707 Register second,
2708 Register scratch1, 2708 Register scratch1,
2709 Register scratch2, 2709 Register scratch2,
2710 Label* failure) { 2710 Label* failure) {
2711 ASSERT(!AreAliased(scratch1, second)); 2711 DCHECK(!AreAliased(scratch1, second));
2712 ASSERT(!AreAliased(scratch1, scratch2)); 2712 DCHECK(!AreAliased(scratch1, scratch2));
2713 static const int kFlatAsciiStringMask = 2713 static const int kFlatAsciiStringMask =
2714 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; 2714 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2715 static const int kFlatAsciiStringTag = ASCII_STRING_TYPE; 2715 static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
2716 And(scratch1, first, kFlatAsciiStringMask); 2716 And(scratch1, first, kFlatAsciiStringMask);
2717 And(scratch2, second, kFlatAsciiStringMask); 2717 And(scratch2, second, kFlatAsciiStringMask);
2718 Cmp(scratch1, kFlatAsciiStringTag); 2718 Cmp(scratch1, kFlatAsciiStringTag);
2719 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq); 2719 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
2720 B(ne, failure); 2720 B(ne, failure);
2721 } 2721 }
2722 2722
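The fused Cmp/Ccmp above, restated in C++: Ccmp performs the second comparison only when the first one set eq, and otherwise forces a failing flag state, so the ne branch fires unless both types pass the mask test.

inline bool BothFlatAscii(int first_type, int second_type, int mask, int tag) {
  return ((first_type & mask) == tag) && ((second_type & mask) == tag);
}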
(...skipping 10 matching lines...)
2733 B(ne, failure); 2733 B(ne, failure);
2734 } 2734 }
2735 2735
2736 2736
2737 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii( 2737 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
2738 Register first, 2738 Register first,
2739 Register second, 2739 Register second,
2740 Register scratch1, 2740 Register scratch1,
2741 Register scratch2, 2741 Register scratch2,
2742 Label* failure) { 2742 Label* failure) {
2743 ASSERT(!AreAliased(first, second, scratch1, scratch2)); 2743 DCHECK(!AreAliased(first, second, scratch1, scratch2));
2744 const int kFlatAsciiStringMask = 2744 const int kFlatAsciiStringMask =
2745 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; 2745 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2746 const int kFlatAsciiStringTag = 2746 const int kFlatAsciiStringTag =
2747 kStringTag | kOneByteStringTag | kSeqStringTag; 2747 kStringTag | kOneByteStringTag | kSeqStringTag;
2748 And(scratch1, first, kFlatAsciiStringMask); 2748 And(scratch1, first, kFlatAsciiStringMask);
2749 And(scratch2, second, kFlatAsciiStringMask); 2749 And(scratch2, second, kFlatAsciiStringMask);
2750 Cmp(scratch1, kFlatAsciiStringTag); 2750 Cmp(scratch1, kFlatAsciiStringTag);
2751 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq); 2751 Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
2752 B(ne, failure); 2752 B(ne, failure);
2753 } 2753 }
(...skipping 27 matching lines...)
2781 2781
2782 // Check whether the expected and actual argument counts match. If not, 2782 // Check whether the expected and actual argument counts match. If not,
2783 // set up registers according to the contract with ArgumentsAdaptorTrampoline: 2783 // set up registers according to the contract with ArgumentsAdaptorTrampoline:
2784 // x0: actual arguments count. 2784 // x0: actual arguments count.
2785 // x1: function (passed through to callee). 2785 // x1: function (passed through to callee).
2786 // x2: expected arguments count. 2786 // x2: expected arguments count.
2787 2787
2788 // The code below is made a lot easier because the calling code already sets 2788 // The code below is made a lot easier because the calling code already sets
2789 // up actual and expected registers according to the contract if values are 2789 // up actual and expected registers according to the contract if values are
2790 // passed in registers. 2790 // passed in registers.
2791 ASSERT(actual.is_immediate() || actual.reg().is(x0)); 2791 DCHECK(actual.is_immediate() || actual.reg().is(x0));
2792 ASSERT(expected.is_immediate() || expected.reg().is(x2)); 2792 DCHECK(expected.is_immediate() || expected.reg().is(x2));
2793 ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3)); 2793 DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
2794 2794
2795 if (expected.is_immediate()) { 2795 if (expected.is_immediate()) {
2796 ASSERT(actual.is_immediate()); 2796 DCHECK(actual.is_immediate());
2797 if (expected.immediate() == actual.immediate()) { 2797 if (expected.immediate() == actual.immediate()) {
2798 definitely_matches = true; 2798 definitely_matches = true;
2799 2799
2800 } else { 2800 } else {
2801 Mov(x0, actual.immediate()); 2801 Mov(x0, actual.immediate());
2802 if (expected.immediate() == 2802 if (expected.immediate() ==
2803 SharedFunctionInfo::kDontAdaptArgumentsSentinel) { 2803 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2804 // Don't worry about adapting arguments for builtins that 2804 // Don't worry about adapting arguments for builtins that
2805 // don't want that done. Skip adaptation code by making it look 2805 // don't want that done. Skip adaptation code by making it look
2806 // like we have a match between expected and actual number of 2806 // like we have a match between expected and actual number of
(...skipping 42 matching lines...)
2849 Bind(&regular_invoke); 2849 Bind(&regular_invoke);
2850 } 2850 }
2851 2851
2852 2852
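A sketch of the immediate fast path in InvokePrologue (the adaptor branch itself is elided above): equal counts match outright, and the don't-adapt sentinel also suppresses adaptation, so both cases bypass the ArgumentsAdaptorTrampoline.

inline bool DefinitelyMatches(int expected, int actual, int dont_adapt_sentinel) {
  return expected == actual || expected == dont_adapt_sentinel;
}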
2853 void MacroAssembler::InvokeCode(Register code, 2853 void MacroAssembler::InvokeCode(Register code,
2854 const ParameterCount& expected, 2854 const ParameterCount& expected,
2855 const ParameterCount& actual, 2855 const ParameterCount& actual,
2856 InvokeFlag flag, 2856 InvokeFlag flag,
2857 const CallWrapper& call_wrapper) { 2857 const CallWrapper& call_wrapper) {
2858 // You can't call a function without a valid frame. 2858 // You can't call a function without a valid frame.
2859 ASSERT(flag == JUMP_FUNCTION || has_frame()); 2859 DCHECK(flag == JUMP_FUNCTION || has_frame());
2860 2860
2861 Label done; 2861 Label done;
2862 2862
2863 bool definitely_mismatches = false; 2863 bool definitely_mismatches = false;
2864 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag, 2864 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
2865 &definitely_mismatches, call_wrapper); 2865 &definitely_mismatches, call_wrapper);
2866 2866
2867 // If we are certain that actual != expected, then we know InvokePrologue will 2867 // If we are certain that actual != expected, then we know InvokePrologue will
2868 // have handled the call through the argument adaptor mechanism. 2868 // have handled the call through the argument adaptor mechanism.
2869 // The called function expects the call kind in x5. 2869 // The called function expects the call kind in x5.
2870 if (!definitely_mismatches) { 2870 if (!definitely_mismatches) {
2871 if (flag == CALL_FUNCTION) { 2871 if (flag == CALL_FUNCTION) {
2872 call_wrapper.BeforeCall(CallSize(code)); 2872 call_wrapper.BeforeCall(CallSize(code));
2873 Call(code); 2873 Call(code);
2874 call_wrapper.AfterCall(); 2874 call_wrapper.AfterCall();
2875 } else { 2875 } else {
2876 ASSERT(flag == JUMP_FUNCTION); 2876 DCHECK(flag == JUMP_FUNCTION);
2877 Jump(code); 2877 Jump(code);
2878 } 2878 }
2879 } 2879 }
2880 2880
2881 // Continue here if InvokePrologue does handle the invocation due to 2881 // Continue here if InvokePrologue does handle the invocation due to
2882 // mismatched parameter counts. 2882 // mismatched parameter counts.
2883 Bind(&done); 2883 Bind(&done);
2884 } 2884 }
2885 2885
2886 2886
2887 void MacroAssembler::InvokeFunction(Register function, 2887 void MacroAssembler::InvokeFunction(Register function,
2888 const ParameterCount& actual, 2888 const ParameterCount& actual,
2889 InvokeFlag flag, 2889 InvokeFlag flag,
2890 const CallWrapper& call_wrapper) { 2890 const CallWrapper& call_wrapper) {
2891 // You can't call a function without a valid frame. 2891 // You can't call a function without a valid frame.
2892 ASSERT(flag == JUMP_FUNCTION || has_frame()); 2892 DCHECK(flag == JUMP_FUNCTION || has_frame());
2893 2893
2894 // Contract with called JS functions requires that function is passed in x1. 2894 // Contract with called JS functions requires that function is passed in x1.
2895 // (See FullCodeGenerator::Generate().) 2895 // (See FullCodeGenerator::Generate().)
2896 ASSERT(function.is(x1)); 2896 DCHECK(function.is(x1));
2897 2897
2898 Register expected_reg = x2; 2898 Register expected_reg = x2;
2899 Register code_reg = x3; 2899 Register code_reg = x3;
2900 2900
2901 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset)); 2901 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2902 // The number of arguments is stored as an int32_t, and -1 is a marker 2902 // The number of arguments is stored as an int32_t, and -1 is a marker
2903 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign 2903 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
2904 // extension to correctly handle it. 2904 // extension to correctly handle it.
2905 Ldr(expected_reg, FieldMemOperand(function, 2905 Ldr(expected_reg, FieldMemOperand(function,
2906 JSFunction::kSharedFunctionInfoOffset)); 2906 JSFunction::kSharedFunctionInfoOffset));
2907 Ldrsw(expected_reg, 2907 Ldrsw(expected_reg,
2908 FieldMemOperand(expected_reg, 2908 FieldMemOperand(expected_reg,
2909 SharedFunctionInfo::kFormalParameterCountOffset)); 2909 SharedFunctionInfo::kFormalParameterCountOffset));
2910 Ldr(code_reg, 2910 Ldr(code_reg,
2911 FieldMemOperand(function, JSFunction::kCodeEntryOffset)); 2911 FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2912 2912
2913 ParameterCount expected(expected_reg); 2913 ParameterCount expected(expected_reg);
2914 InvokeCode(code_reg, expected, actual, flag, call_wrapper); 2914 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
2915 } 2915 }
2916 2916
2917 2917
2918 void MacroAssembler::InvokeFunction(Register function, 2918 void MacroAssembler::InvokeFunction(Register function,
2919 const ParameterCount& expected, 2919 const ParameterCount& expected,
2920 const ParameterCount& actual, 2920 const ParameterCount& actual,
2921 InvokeFlag flag, 2921 InvokeFlag flag,
2922 const CallWrapper& call_wrapper) { 2922 const CallWrapper& call_wrapper) {
2923 // You can't call a function without a valid frame. 2923 // You can't call a function without a valid frame.
2924 ASSERT(flag == JUMP_FUNCTION || has_frame()); 2924 DCHECK(flag == JUMP_FUNCTION || has_frame());
2925 2925
2926 // Contract with called JS functions requires that function is passed in x1. 2926 // Contract with called JS functions requires that function is passed in x1.
2927 // (See FullCodeGenerator::Generate().) 2927 // (See FullCodeGenerator::Generate().)
2928 ASSERT(function.Is(x1)); 2928 DCHECK(function.Is(x1));
2929 2929
2930 Register code_reg = x3; 2930 Register code_reg = x3;
2931 2931
2932 // Set up the context. 2932 // Set up the context.
2933 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset)); 2933 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2934 2934
2935 // We call indirectly through the code field in the function to 2935 // We call indirectly through the code field in the function to
2936 // allow recompilation to take effect without changing any of the 2936 // allow recompilation to take effect without changing any of the
2937 // call sites. 2937 // call sites.
2938 Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); 2938 Ldr(code_reg, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
(...skipping 34 matching lines...)
2973 Cmp(result.X(), 1); 2973 Cmp(result.X(), 1);
2974 Ccmp(result.X(), -1, VFlag, vc); 2974 Ccmp(result.X(), -1, VFlag, vc);
2975 2975
2976 B(vc, done); 2976 B(vc, done);
2977 } 2977 }
2978 2978
2979 2979
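Rationale for the Cmp/Ccmp/B(vc) tail above (the end of TryConvertDoubleToInt64, whose header is elided; the conversion is presumably a saturating Fcvtzs): result - 1 overflows only for INT64_MIN, and result + 1 (the Ccmp against -1) only for INT64_MAX, so branching on vc accepts exactly the unsaturated conversions. Equivalent check:

#include <cstdint>

inline bool ConversionDidNotSaturate(int64_t result) {
  return result != INT64_MIN && result != INT64_MAX;
}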
2980 void MacroAssembler::TruncateDoubleToI(Register result, 2980 void MacroAssembler::TruncateDoubleToI(Register result,
2981 DoubleRegister double_input) { 2981 DoubleRegister double_input) {
2982 Label done; 2982 Label done;
2983 ASSERT(jssp.Is(StackPointer())); 2983 DCHECK(jssp.Is(StackPointer()));
2984 2984
2985 // Try to convert the double to an int64. If successful, the bottom 32 bits 2985 // Try to convert the double to an int64. If successful, the bottom 32 bits
2986 // contain our truncated int32 result. 2986 // contain our truncated int32 result.
2987 TryConvertDoubleToInt64(result, double_input, &done); 2987 TryConvertDoubleToInt64(result, double_input, &done);
2988 2988
2989 // If we fell through, the inline version didn't succeed; call the stub instead. 2989 // If we fell through, the inline version didn't succeed; call the stub instead.
2990 Push(lr, double_input); 2990 Push(lr, double_input);
2991 2991
2992 DoubleToIStub stub(isolate(), 2992 DoubleToIStub stub(isolate(),
2993 jssp, 2993 jssp,
2994 result, 2994 result,
2995 0, 2995 0,
2996 true, // is_truncating 2996 true, // is_truncating
2997 true); // skip_fastpath 2997 true); // skip_fastpath
2998 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber 2998 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2999 2999
3000 Drop(1, kDoubleSize); // Drop the double input on the stack. 3000 Drop(1, kDoubleSize); // Drop the double input on the stack.
3001 Pop(lr); 3001 Pop(lr);
3002 3002
3003 Bind(&done); 3003 Bind(&done);
3004 } 3004 }
3005 3005
3006 3006
3007 void MacroAssembler::TruncateHeapNumberToI(Register result, 3007 void MacroAssembler::TruncateHeapNumberToI(Register result,
3008 Register object) { 3008 Register object) {
3009 Label done; 3009 Label done;
3010 ASSERT(!result.is(object)); 3010 DCHECK(!result.is(object));
3011 ASSERT(jssp.Is(StackPointer())); 3011 DCHECK(jssp.Is(StackPointer()));
3012 3012
3013 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset)); 3013 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
3014 3014
3015 // Try to convert the double to an int64. If successful, the bottom 32 bits 3015 // Try to convert the double to an int64. If successful, the bottom 32 bits
3016 // contain our truncated int32 result. 3016 // contain our truncated int32 result.
3017 TryConvertDoubleToInt64(result, fp_scratch, &done); 3017 TryConvertDoubleToInt64(result, fp_scratch, &done);
3018 3018
3019 // If we fell through, the inline version didn't succeed; call the stub instead. 3019 // If we fell through, the inline version didn't succeed; call the stub instead.
3020 Push(lr); 3020 Push(lr);
3021 DoubleToIStub stub(isolate(), 3021 DoubleToIStub stub(isolate(),
3022 object, 3022 object,
3023 result, 3023 result,
3024 HeapNumber::kValueOffset - kHeapObjectTag, 3024 HeapNumber::kValueOffset - kHeapObjectTag,
3025 true, // is_truncating 3025 true, // is_truncating
3026 true); // skip_fastpath 3026 true); // skip_fastpath
3027 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber 3027 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
3028 Pop(lr); 3028 Pop(lr);
3029 3029
3030 Bind(&done); 3030 Bind(&done);
3031 } 3031 }
3032 3032
3033 3033
3034 void MacroAssembler::StubPrologue() { 3034 void MacroAssembler::StubPrologue() {
3035 ASSERT(StackPointer().Is(jssp)); 3035 DCHECK(StackPointer().Is(jssp));
3036 UseScratchRegisterScope temps(this); 3036 UseScratchRegisterScope temps(this);
3037 Register temp = temps.AcquireX(); 3037 Register temp = temps.AcquireX();
3038 __ Mov(temp, Smi::FromInt(StackFrame::STUB)); 3038 __ Mov(temp, Smi::FromInt(StackFrame::STUB));
3039 // Compiled stubs don't age, and so they don't need the predictable code 3039 // Compiled stubs don't age, and so they don't need the predictable code
3040 // ageing sequence. 3040 // ageing sequence.
3041 __ Push(lr, fp, cp, temp); 3041 __ Push(lr, fp, cp, temp);
3042 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp); 3042 __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
3043 } 3043 }
3044 3044
3045 3045
3046 void MacroAssembler::Prologue(bool code_pre_aging) { 3046 void MacroAssembler::Prologue(bool code_pre_aging) {
3047 if (code_pre_aging) { 3047 if (code_pre_aging) {
3048 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); 3048 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
3049 __ EmitCodeAgeSequence(stub); 3049 __ EmitCodeAgeSequence(stub);
3050 } else { 3050 } else {
3051 __ EmitFrameSetupForCodeAgePatching(); 3051 __ EmitFrameSetupForCodeAgePatching();
3052 } 3052 }
3053 } 3053 }
3054 3054
3055 3055
3056 void MacroAssembler::EnterFrame(StackFrame::Type type) { 3056 void MacroAssembler::EnterFrame(StackFrame::Type type) {
3057 ASSERT(jssp.Is(StackPointer())); 3057 DCHECK(jssp.Is(StackPointer()));
3058 UseScratchRegisterScope temps(this); 3058 UseScratchRegisterScope temps(this);
3059 Register type_reg = temps.AcquireX(); 3059 Register type_reg = temps.AcquireX();
3060 Register code_reg = temps.AcquireX(); 3060 Register code_reg = temps.AcquireX();
3061 3061
3062 Push(lr, fp, cp); 3062 Push(lr, fp, cp);
3063 Mov(type_reg, Smi::FromInt(type)); 3063 Mov(type_reg, Smi::FromInt(type));
3064 Mov(code_reg, Operand(CodeObject())); 3064 Mov(code_reg, Operand(CodeObject()));
3065 Push(type_reg, code_reg); 3065 Push(type_reg, code_reg);
3066 // jssp[4] : lr 3066 // jssp[4] : lr
3067 // jssp[3] : fp 3067 // jssp[3] : fp
3068 // jssp[2] : cp 3068 // jssp[2] : cp
3069 // jssp[1] : type 3069 // jssp[1] : type
3070 // jssp[0] : code object 3070 // jssp[0] : code object
3071 3071
3072 // Adjust FP to point to saved FP. 3072 // Adjust FP to point to saved FP.
3073 Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize); 3073 Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
3074 } 3074 }
3075 3075
3076 3076
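The frame EnterFrame builds, written as a hypothetical struct matching the jssp[] layout comment above (one 64-bit slot per entry; fp is left pointing at the saved-fp slot):

#include <cstdint>

struct TypedFrameModel {
  uint64_t code_object;  // jssp[0]
  uint64_t type_smi;     // jssp[1]
  uint64_t cp;           // jssp[2]
  uint64_t saved_fp;     // jssp[3]  <- fp points here after the Add
  uint64_t saved_lr;     // jssp[4]
};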
3077 void MacroAssembler::LeaveFrame(StackFrame::Type type) { 3077 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
3078 ASSERT(jssp.Is(StackPointer())); 3078 DCHECK(jssp.Is(StackPointer()));
3079 // Drop the execution stack down to the frame pointer and restore 3079 // Drop the execution stack down to the frame pointer and restore
3080 // the caller frame pointer and return address. 3080 // the caller frame pointer and return address.
3081 Mov(jssp, fp); 3081 Mov(jssp, fp);
3082 AssertStackConsistency(); 3082 AssertStackConsistency();
3083 Pop(fp, lr); 3083 Pop(fp, lr);
3084 } 3084 }
3085 3085
3086 3086
3087 void MacroAssembler::ExitFramePreserveFPRegs() { 3087 void MacroAssembler::ExitFramePreserveFPRegs() {
3088 PushCPURegList(kCallerSavedFP); 3088 PushCPURegList(kCallerSavedFP);
3089 } 3089 }
3090 3090
3091 3091
3092 void MacroAssembler::ExitFrameRestoreFPRegs() { 3092 void MacroAssembler::ExitFrameRestoreFPRegs() {
3093 // Read the registers from the stack without popping them. The stack pointer 3093 // Read the registers from the stack without popping them. The stack pointer
3094 // will be reset as part of the unwinding process. 3094 // will be reset as part of the unwinding process.
3095 CPURegList saved_fp_regs = kCallerSavedFP; 3095 CPURegList saved_fp_regs = kCallerSavedFP;
3096 ASSERT(saved_fp_regs.Count() % 2 == 0); 3096 DCHECK(saved_fp_regs.Count() % 2 == 0);
3097 3097
3098 int offset = ExitFrameConstants::kLastExitFrameField; 3098 int offset = ExitFrameConstants::kLastExitFrameField;
3099 while (!saved_fp_regs.IsEmpty()) { 3099 while (!saved_fp_regs.IsEmpty()) {
3100 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex(); 3100 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
3101 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex(); 3101 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
3102 offset -= 2 * kDRegSize; 3102 offset -= 2 * kDRegSize;
3103 Ldp(dst1, dst0, MemOperand(fp, offset)); 3103 Ldp(dst1, dst0, MemOperand(fp, offset));
3104 } 3104 }
3105 } 3105 }
3106 3106
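The loop restores the highest-numbered registers first, walking the offset down in 16-byte steps. A worked trace, assuming kDRegSize == 8 and an illustrative saved set of d0-d3:

    //   iter 1: dst0 = d3, dst1 = d2; offset -= 16; Ldp(d2, d3, MemOperand(fp, offset))
    //   iter 2: dst0 = d1, dst1 = d0; offset -= 16; Ldp(d0, d1, MemOperand(fp, offset))
    // Passing (dst1, dst0) to Ldp keeps the lower-numbered register at the
    // lower address, matching how the pairs were stored.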
3107 3107
3108 void MacroAssembler::EnterExitFrame(bool save_doubles, 3108 void MacroAssembler::EnterExitFrame(bool save_doubles,
3109 const Register& scratch, 3109 const Register& scratch,
3110 int extra_space) { 3110 int extra_space) {
3111 ASSERT(jssp.Is(StackPointer())); 3111 DCHECK(jssp.Is(StackPointer()));
3112 3112
3113 // Set up the new stack frame. 3113 // Set up the new stack frame.
3114 Mov(scratch, Operand(CodeObject())); 3114 Mov(scratch, Operand(CodeObject()));
3115 Push(lr, fp); 3115 Push(lr, fp);
3116 Mov(fp, StackPointer()); 3116 Mov(fp, StackPointer());
3117 Push(xzr, scratch); 3117 Push(xzr, scratch);
3118 // fp[8]: CallerPC (lr) 3118 // fp[8]: CallerPC (lr)
3119 // fp -> fp[0]: CallerFP (old fp) 3119 // fp -> fp[0]: CallerFP (old fp)
3120 // fp[-8]: Space reserved for SPOffset. 3120 // fp[-8]: Space reserved for SPOffset.
3121 // jssp -> fp[-16]: CodeObject() 3121 // jssp -> fp[-16]: CodeObject()
(...skipping 25 matching lines...)
3147 // fp[8]: CallerPC (lr) 3147 // fp[8]: CallerPC (lr)
3148 // fp -> fp[0]: CallerFP (old fp) 3148 // fp -> fp[0]: CallerFP (old fp)
3149 // fp[-8]: Space reserved for SPOffset. 3149 // fp[-8]: Space reserved for SPOffset.
3150 // fp[-16]: CodeObject() 3150 // fp[-16]: CodeObject()
3151 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true). 3151 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
3152 // jssp[8]: Extra space reserved for caller (if extra_space != 0). 3152 // jssp[8]: Extra space reserved for caller (if extra_space != 0).
3153 // jssp -> jssp[0]: Space reserved for the return address. 3153 // jssp -> jssp[0]: Space reserved for the return address.
3154 3154
3155 // Align and synchronize the system stack pointer with jssp. 3155 // Align and synchronize the system stack pointer with jssp.
3156 AlignAndSetCSPForFrame(); 3156 AlignAndSetCSPForFrame();
3157 ASSERT(csp.Is(StackPointer())); 3157 DCHECK(csp.Is(StackPointer()));
3158 3158
3159 // fp[8]: CallerPC (lr) 3159 // fp[8]: CallerPC (lr)
3160 // fp -> fp[0]: CallerFP (old fp) 3160 // fp -> fp[0]: CallerFP (old fp)
3161 // fp[-8]: Space reserved for SPOffset. 3161 // fp[-8]: Space reserved for SPOffset.
3162 // fp[-16]: CodeObject() 3162 // fp[-16]: CodeObject()
3163 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true). 3163 // fp[-16 - fp_size]: Saved doubles (if save_doubles is true).
3164 // csp[8]: Memory reserved for the caller if extra_space != 0. 3164 // csp[8]: Memory reserved for the caller if extra_space != 0.
3165 // Alignment padding, if necessary. 3165 // Alignment padding, if necessary.
3166 // csp -> csp[0]: Space reserved for the return address. 3166 // csp -> csp[0]: Space reserved for the return address.
3167 3167
3168 // ExitFrame::GetStateForFramePointer expects to find the return address at 3168 // ExitFrame::GetStateForFramePointer expects to find the return address at
3169 // the memory address immediately below the pointer stored in SPOffset. 3169 // the memory address immediately below the pointer stored in SPOffset.
3170 // It is not safe to derive much else from SPOffset, because the size of the 3170 // It is not safe to derive much else from SPOffset, because the size of the
3171 // padding can vary. 3171 // padding can vary.
3172 Add(scratch, csp, kXRegSize); 3172 Add(scratch, csp, kXRegSize);
3173 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset)); 3173 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
3174 } 3174 }
3175 3175
3176 3176
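Taken together, the final Add and Str give the unwinder its contract: the stored pointer is csp + kXRegSize, and the return address lives one word below it, at csp[0]:

    //   [fp + ExitFrameConstants::kSPOffset] == csp + kXRegSize
    //   csp[0] == reserved return-address slot, one word below that pointer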
3177 // Leave the current exit frame. 3177 // Leave the current exit frame.
3178 void MacroAssembler::LeaveExitFrame(bool restore_doubles, 3178 void MacroAssembler::LeaveExitFrame(bool restore_doubles,
3179 const Register& scratch, 3179 const Register& scratch,
3180 bool restore_context) { 3180 bool restore_context) {
3181 ASSERT(csp.Is(StackPointer())); 3181 DCHECK(csp.Is(StackPointer()));
3182 3182
3183 if (restore_doubles) { 3183 if (restore_doubles) {
3184 ExitFrameRestoreFPRegs(); 3184 ExitFrameRestoreFPRegs();
3185 } 3185 }
3186 3186
3187 // Restore the context pointer from the top frame. 3187 // Restore the context pointer from the top frame.
3188 if (restore_context) { 3188 if (restore_context) {
3189 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress, 3189 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
3190 isolate()))); 3190 isolate())));
3191 Ldr(cp, MemOperand(scratch)); 3191 Ldr(cp, MemOperand(scratch));
(...skipping 26 matching lines...)
3218 if (FLAG_native_code_counters && counter->Enabled()) { 3218 if (FLAG_native_code_counters && counter->Enabled()) {
3219 Mov(scratch1, value); 3219 Mov(scratch1, value);
3220 Mov(scratch2, ExternalReference(counter)); 3220 Mov(scratch2, ExternalReference(counter));
3221 Str(scratch1, MemOperand(scratch2)); 3221 Str(scratch1, MemOperand(scratch2));
3222 } 3222 }
3223 } 3223 }
3224 3224
3225 3225
3226 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, 3226 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
3227 Register scratch1, Register scratch2) { 3227 Register scratch1, Register scratch2) {
3228 ASSERT(value != 0); 3228 DCHECK(value != 0);
3229 if (FLAG_native_code_counters && counter->Enabled()) { 3229 if (FLAG_native_code_counters && counter->Enabled()) {
3230 Mov(scratch2, ExternalReference(counter)); 3230 Mov(scratch2, ExternalReference(counter));
3231 Ldr(scratch1, MemOperand(scratch2)); 3231 Ldr(scratch1, MemOperand(scratch2));
3232 Add(scratch1, scratch1, value); 3232 Add(scratch1, scratch1, value);
3233 Str(scratch1, MemOperand(scratch2)); 3233 Str(scratch1, MemOperand(scratch2));
3234 } 3234 }
3235 } 3235 }
3236 3236
3237 3237
3238 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, 3238 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
(...skipping 15 matching lines...)
3254 // cannot be allowed to destroy the context in cp). 3254 // cannot be allowed to destroy the context in cp).
3255 Mov(dst, cp); 3255 Mov(dst, cp);
3256 } 3256 }
3257 } 3257 }
3258 3258
3259 3259
3260 void MacroAssembler::DebugBreak() { 3260 void MacroAssembler::DebugBreak() {
3261 Mov(x0, 0); 3261 Mov(x0, 0);
3262 Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate())); 3262 Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
3263 CEntryStub ces(isolate(), 1); 3263 CEntryStub ces(isolate(), 1);
3264 ASSERT(AllowThisStubCall(&ces)); 3264 DCHECK(AllowThisStubCall(&ces));
3265 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); 3265 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
3266 } 3266 }
3267 3267
3268 3268
3269 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, 3269 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
3270 int handler_index) { 3270 int handler_index) {
3271 ASSERT(jssp.Is(StackPointer())); 3271 DCHECK(jssp.Is(StackPointer()));
3272 // Adjust this code if the asserts don't hold. 3272 // Adjust this code if the asserts don't hold.
3273 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); 3273 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
3274 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); 3274 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
3275 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); 3275 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
3276 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); 3276 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
3277 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); 3277 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
3278 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); 3278 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
3279 3279
3280 // For the JSEntry handler, we must preserve the live registers x0-x4. 3280 // For the JSEntry handler, we must preserve the live registers x0-x4.
3281 // (See JSEntryStub::GenerateBody().) 3281 // (See JSEntryStub::GenerateBody().)
3282 3282
3283 unsigned state = 3283 unsigned state =
3284 StackHandler::IndexField::encode(handler_index) | 3284 StackHandler::IndexField::encode(handler_index) |
3285 StackHandler::KindField::encode(kind); 3285 StackHandler::KindField::encode(kind);
3286 3286
3287 // Set up the code object and the state for pushing. 3287 // Set up the code object and the state for pushing.
3288 Mov(x10, Operand(CodeObject())); 3288 Mov(x10, Operand(CodeObject()));
3289 Mov(x11, state); 3289 Mov(x11, state);
3290 3290
3291 // Push the frame pointer, context, state, and code object. 3291 // Push the frame pointer, context, state, and code object.
3292 if (kind == StackHandler::JS_ENTRY) { 3292 if (kind == StackHandler::JS_ENTRY) {
3293 ASSERT(Smi::FromInt(0) == 0); 3293 DCHECK(Smi::FromInt(0) == 0);
3294 Push(xzr, xzr, x11, x10); 3294 Push(xzr, xzr, x11, x10);
3295 } else { 3295 } else {
3296 Push(fp, cp, x11, x10); 3296 Push(fp, cp, x11, x10);
3297 } 3297 }
3298 3298
3299 // Link the current handler as the next handler. 3299 // Link the current handler as the next handler.
3300 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate())); 3300 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3301 Ldr(x10, MemOperand(x11)); 3301 Ldr(x10, MemOperand(x11));
3302 Push(x10); 3302 Push(x10);
3303 // Set this new handler as the current one. 3303 // Set this new handler as the current one.
3304 Str(jssp, MemOperand(x11)); 3304 Str(jssp, MemOperand(x11));
3305 } 3305 }
3306 3306
3307 3307
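After both pushes, the handler frame laid out by PushTryHandler matches the StackHandlerConstants offsets asserted above:

    //   jssp[4] : fp            (xzr, i.e. 0, for JS_ENTRY)
    //   jssp[3] : cp            (xzr for JS_ENTRY)
    //   jssp[2] : state         (KindField | IndexField)
    //   jssp[1] : code object
    //   jssp[0] : next handler  (previous Isolate::kHandlerAddress value)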
3308 void MacroAssembler::PopTryHandler() { 3308 void MacroAssembler::PopTryHandler() {
3309 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); 3309 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
3310 Pop(x10); 3310 Pop(x10);
3311 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate())); 3311 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
3312 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes); 3312 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
3313 Str(x10, MemOperand(x11)); 3313 Str(x10, MemOperand(x11));
3314 } 3314 }
3315 3315
3316 3316
3317 void MacroAssembler::Allocate(int object_size, 3317 void MacroAssembler::Allocate(int object_size,
3318 Register result, 3318 Register result,
3319 Register scratch1, 3319 Register scratch1,
3320 Register scratch2, 3320 Register scratch2,
3321 Label* gc_required, 3321 Label* gc_required,
3322 AllocationFlags flags) { 3322 AllocationFlags flags) {
3323 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); 3323 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3324 if (!FLAG_inline_new) { 3324 if (!FLAG_inline_new) {
3325 if (emit_debug_code()) { 3325 if (emit_debug_code()) {
3326 // Trash the registers to simulate an allocation failure. 3326 // Trash the registers to simulate an allocation failure.
3327 // We apply salt to the original zap value to easily spot the values. 3327 // We apply salt to the original zap value to easily spot the values.
3328 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L); 3328 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3329 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); 3329 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3330 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); 3330 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
3331 } 3331 }
3332 B(gc_required); 3332 B(gc_required);
3333 return; 3333 return;
3334 } 3334 }
3335 3335
3336 UseScratchRegisterScope temps(this); 3336 UseScratchRegisterScope temps(this);
3337 Register scratch3 = temps.AcquireX(); 3337 Register scratch3 = temps.AcquireX();
3338 3338
3339 ASSERT(!AreAliased(result, scratch1, scratch2, scratch3)); 3339 DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
3340 ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits()); 3340 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3341 3341
3342 // Make object size into bytes. 3342 // Make object size into bytes.
3343 if ((flags & SIZE_IN_WORDS) != 0) { 3343 if ((flags & SIZE_IN_WORDS) != 0) {
3344 object_size *= kPointerSize; 3344 object_size *= kPointerSize;
3345 } 3345 }
3346 ASSERT(0 == (object_size & kObjectAlignmentMask)); 3346 DCHECK(0 == (object_size & kObjectAlignmentMask));
3347 3347
3348 // Check relative positions of allocation top and limit addresses. 3348 // Check relative positions of allocation top and limit addresses.
3349 // The values must be adjacent in memory to allow the use of LDP. 3349 // The values must be adjacent in memory to allow the use of LDP.
3350 ExternalReference heap_allocation_top = 3350 ExternalReference heap_allocation_top =
3351 AllocationUtils::GetAllocationTopReference(isolate(), flags); 3351 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3352 ExternalReference heap_allocation_limit = 3352 ExternalReference heap_allocation_limit =
3353 AllocationUtils::GetAllocationLimitReference(isolate(), flags); 3353 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3354 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); 3354 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3355 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); 3355 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3356 ASSERT((limit - top) == kPointerSize); 3356 DCHECK((limit - top) == kPointerSize);
3357 3357
3358 // Set up allocation top address and object size registers. 3358 // Set up allocation top address and object size registers.
3359 Register top_address = scratch1; 3359 Register top_address = scratch1;
3360 Register allocation_limit = scratch2; 3360 Register allocation_limit = scratch2;
3361 Mov(top_address, Operand(heap_allocation_top)); 3361 Mov(top_address, Operand(heap_allocation_top));
3362 3362
3363 if ((flags & RESULT_CONTAINS_TOP) == 0) { 3363 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3364 // Load allocation top into result and the allocation limit. 3364 // Load allocation top into result and the allocation limit.
3365 Ldp(result, allocation_limit, MemOperand(top_address)); 3365 Ldp(result, allocation_limit, MemOperand(top_address));
3366 } else { 3366 } else {
(...skipping 38 matching lines...)
3405 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L); 3405 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
3406 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L); 3406 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
3407 } 3407 }
3408 B(gc_required); 3408 B(gc_required);
3409 return; 3409 return;
3410 } 3410 }
3411 3411
3412 UseScratchRegisterScope temps(this); 3412 UseScratchRegisterScope temps(this);
3413 Register scratch3 = temps.AcquireX(); 3413 Register scratch3 = temps.AcquireX();
3414 3414
3415 ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3)); 3415 DCHECK(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
3416 ASSERT(object_size.Is64Bits() && result.Is64Bits() && 3416 DCHECK(object_size.Is64Bits() && result.Is64Bits() &&
3417 scratch1.Is64Bits() && scratch2.Is64Bits()); 3417 scratch1.Is64Bits() && scratch2.Is64Bits());
3418 3418
3419 // Check relative positions of allocation top and limit addresses. 3419 // Check relative positions of allocation top and limit addresses.
3420 // The values must be adjacent in memory to allow the use of LDP. 3420 // The values must be adjacent in memory to allow the use of LDP.
3421 ExternalReference heap_allocation_top = 3421 ExternalReference heap_allocation_top =
3422 AllocationUtils::GetAllocationTopReference(isolate(), flags); 3422 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3423 ExternalReference heap_allocation_limit = 3423 ExternalReference heap_allocation_limit =
3424 AllocationUtils::GetAllocationLimitReference(isolate(), flags); 3424 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3425 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address()); 3425 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3426 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address()); 3426 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3427 ASSERT((limit - top) == kPointerSize); 3427 DCHECK((limit - top) == kPointerSize);
3428 3428
3429 // Set up allocation top address and object size registers. 3429 // Set up allocation top address and object size registers.
3430 Register top_address = scratch1; 3430 Register top_address = scratch1;
3431 Register allocation_limit = scratch2; 3431 Register allocation_limit = scratch2;
3432 Mov(top_address, heap_allocation_top); 3432 Mov(top_address, heap_allocation_top);
3433 3433
3434 if ((flags & RESULT_CONTAINS_TOP) == 0) { 3434 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3435 // Load allocation top into result and the allocation limit. 3435 // Load allocation top into result and the allocation limit.
3436 Ldp(result, allocation_limit, MemOperand(top_address)); 3436 Ldp(result, allocation_limit, MemOperand(top_address));
3437 } else { 3437 } else {
(...skipping 53 matching lines...)
3491 Str(object, MemOperand(scratch)); 3491 Str(object, MemOperand(scratch));
3492 } 3492 }
3493 3493
3494 3494
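The adjacency DCHECK in both Allocate overloads exists because a single load-pair fetches the allocation top and limit at once:

    //   Ldp(result, allocation_limit, MemOperand(top_address));
    // loads two consecutive 64-bit words:
    //   result           <- [top_address + 0]  // allocation top
    //   allocation_limit <- [top_address + 8]  // allocation limit
    // hence DCHECK((limit - top) == kPointerSize).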
3495 void MacroAssembler::AllocateTwoByteString(Register result, 3495 void MacroAssembler::AllocateTwoByteString(Register result,
3496 Register length, 3496 Register length,
3497 Register scratch1, 3497 Register scratch1,
3498 Register scratch2, 3498 Register scratch2,
3499 Register scratch3, 3499 Register scratch3,
3500 Label* gc_required) { 3500 Label* gc_required) {
3501 ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3)); 3501 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3502 // Calculate the number of bytes needed for the characters in the string while 3502 // Calculate the number of bytes needed for the characters in the string while
3503 // observing object alignment. 3503 // observing object alignment.
3504 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); 3504 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3505 Add(scratch1, length, length); // Length in bytes, not chars. 3505 Add(scratch1, length, length); // Length in bytes, not chars.
3506 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize); 3506 Add(scratch1, scratch1, kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3507 Bic(scratch1, scratch1, kObjectAlignmentMask); 3507 Bic(scratch1, scratch1, kObjectAlignmentMask);
3508 3508
3509 // Allocate two-byte string in new space. 3509 // Allocate two-byte string in new space.
3510 Allocate(scratch1, 3510 Allocate(scratch1,
3511 result, 3511 result,
(...skipping 10 matching lines...)
3522 scratch2); 3522 scratch2);
3523 } 3523 }
3524 3524
3525 3525
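The Add/Add/Bic triple above rounds the string size up to object alignment. A worked example for length == 5, assuming kObjectAlignmentMask == 7 and taking SeqTwoByteString::kHeaderSize as 24 purely for illustration:

    //   bytes  = 5 + 5        = 10  // Add(scratch1, length, length)
    //   padded = 10 + 7 + 24  = 41  // add alignment mask plus header size
    //   size   = 41 & ~7      = 40  // Bic clears the low alignment bits
    // 40 covers the 24-byte header plus 10 bytes of characters, 8-byte aligned.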
3526 void MacroAssembler::AllocateAsciiString(Register result, 3526 void MacroAssembler::AllocateAsciiString(Register result,
3527 Register length, 3527 Register length,
3528 Register scratch1, 3528 Register scratch1,
3529 Register scratch2, 3529 Register scratch2,
3530 Register scratch3, 3530 Register scratch3,
3531 Label* gc_required) { 3531 Label* gc_required) {
3532 ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3)); 3532 DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
3533 // Calculate the number of bytes needed for the characters in the string while 3533 // Calculate the number of bytes needed for the characters in the string while
3534 // observing object alignment. 3534 // observing object alignment.
3535 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); 3535 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3536 STATIC_ASSERT(kCharSize == 1); 3536 STATIC_ASSERT(kCharSize == 1);
3537 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize); 3537 Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3538 Bic(scratch1, scratch1, kObjectAlignmentMask); 3538 Bic(scratch1, scratch1, kObjectAlignmentMask);
3539 3539
3540 // Allocate ASCII string in new space. 3540 // Allocate ASCII string in new space.
3541 Allocate(scratch1, 3541 Allocate(scratch1,
3542 result, 3542 result,
(...skipping 45 matching lines...)
3588 scratch1, 3588 scratch1,
3589 scratch2); 3589 scratch2);
3590 } 3590 }
3591 3591
3592 3592
3593 void MacroAssembler::AllocateTwoByteSlicedString(Register result, 3593 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
3594 Register length, 3594 Register length,
3595 Register scratch1, 3595 Register scratch1,
3596 Register scratch2, 3596 Register scratch2,
3597 Label* gc_required) { 3597 Label* gc_required) {
3598 ASSERT(!AreAliased(result, length, scratch1, scratch2)); 3598 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3599 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, 3599 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3600 TAG_OBJECT); 3600 TAG_OBJECT);
3601 3601
3602 InitializeNewString(result, 3602 InitializeNewString(result,
3603 length, 3603 length,
3604 Heap::kSlicedStringMapRootIndex, 3604 Heap::kSlicedStringMapRootIndex,
3605 scratch1, 3605 scratch1,
3606 scratch2); 3606 scratch2);
3607 } 3607 }
3608 3608
3609 3609
3610 void MacroAssembler::AllocateAsciiSlicedString(Register result, 3610 void MacroAssembler::AllocateAsciiSlicedString(Register result,
3611 Register length, 3611 Register length,
3612 Register scratch1, 3612 Register scratch1,
3613 Register scratch2, 3613 Register scratch2,
3614 Label* gc_required) { 3614 Label* gc_required) {
3615 ASSERT(!AreAliased(result, length, scratch1, scratch2)); 3615 DCHECK(!AreAliased(result, length, scratch1, scratch2));
3616 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required, 3616 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
3617 TAG_OBJECT); 3617 TAG_OBJECT);
3618 3618
3619 InitializeNewString(result, 3619 InitializeNewString(result,
3620 length, 3620 length,
3621 Heap::kSlicedAsciiStringMapRootIndex, 3621 Heap::kSlicedAsciiStringMapRootIndex,
3622 scratch1, 3622 scratch1,
3623 scratch2); 3623 scratch2);
3624 } 3624 }
3625 3625
3626 3626
3627 // Allocates a heap number or jumps to the gc_required label if the young space 3627 // Allocates a heap number or jumps to the gc_required label if the young space
3628 // is full and a scavenge is needed. 3628 // is full and a scavenge is needed.
3629 void MacroAssembler::AllocateHeapNumber(Register result, 3629 void MacroAssembler::AllocateHeapNumber(Register result,
3630 Label* gc_required, 3630 Label* gc_required,
3631 Register scratch1, 3631 Register scratch1,
3632 Register scratch2, 3632 Register scratch2,
3633 CPURegister value, 3633 CPURegister value,
3634 CPURegister heap_number_map, 3634 CPURegister heap_number_map,
3635 MutableMode mode) { 3635 MutableMode mode) {
3636 ASSERT(!value.IsValid() || value.Is64Bits()); 3636 DCHECK(!value.IsValid() || value.Is64Bits());
3637 UseScratchRegisterScope temps(this); 3637 UseScratchRegisterScope temps(this);
3638 3638
3639 // Allocate an object in the heap for the heap number and tag it as a heap 3639 // Allocate an object in the heap for the heap number and tag it as a heap
3640 // object. 3640 // object.
3641 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required, 3641 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3642 NO_ALLOCATION_FLAGS); 3642 NO_ALLOCATION_FLAGS);
3643 3643
3644 Heap::RootListIndex map_index = mode == MUTABLE 3644 Heap::RootListIndex map_index = mode == MUTABLE
3645 ? Heap::kMutableHeapNumberMapRootIndex 3645 ? Heap::kMutableHeapNumberMapRootIndex
3646 : Heap::kHeapNumberMapRootIndex; 3646 : Heap::kHeapNumberMapRootIndex;
(...skipping 160 matching lines...)
3807 // Retrieve elements_kind from bit field 2. 3807 // Retrieve elements_kind from bit field 2.
3808 DecodeField<Map::ElementsKindBits>(result); 3808 DecodeField<Map::ElementsKindBits>(result);
3809 } 3809 }
3810 3810
3811 3811
3812 void MacroAssembler::TryGetFunctionPrototype(Register function, 3812 void MacroAssembler::TryGetFunctionPrototype(Register function,
3813 Register result, 3813 Register result,
3814 Register scratch, 3814 Register scratch,
3815 Label* miss, 3815 Label* miss,
3816 BoundFunctionAction action) { 3816 BoundFunctionAction action) {
3817 ASSERT(!AreAliased(function, result, scratch)); 3817 DCHECK(!AreAliased(function, result, scratch));
3818 3818
3819 Label non_instance; 3819 Label non_instance;
3820 if (action == kMissOnBoundFunction) { 3820 if (action == kMissOnBoundFunction) {
3821 // Check that the receiver isn't a smi. 3821 // Check that the receiver isn't a smi.
3822 JumpIfSmi(function, miss); 3822 JumpIfSmi(function, miss);
3823 3823
3824 // Check that the function really is a function. Load map into result reg. 3824 // Check that the function really is a function. Load map into result reg.
3825 JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss); 3825 JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
3826 3826
3827 Register scratch_w = scratch.W(); 3827 Register scratch_w = scratch.W();
(...skipping 37 matching lines...)
3865 3865
3866 // All done. 3866 // All done.
3867 Bind(&done); 3867 Bind(&done);
3868 } 3868 }
3869 3869
3870 3870
3871 void MacroAssembler::CompareRoot(const Register& obj, 3871 void MacroAssembler::CompareRoot(const Register& obj,
3872 Heap::RootListIndex index) { 3872 Heap::RootListIndex index) {
3873 UseScratchRegisterScope temps(this); 3873 UseScratchRegisterScope temps(this);
3874 Register temp = temps.AcquireX(); 3874 Register temp = temps.AcquireX();
3875 ASSERT(!AreAliased(obj, temp)); 3875 DCHECK(!AreAliased(obj, temp));
3876 LoadRoot(temp, index); 3876 LoadRoot(temp, index);
3877 Cmp(obj, temp); 3877 Cmp(obj, temp);
3878 } 3878 }
3879 3879
3880 3880
3881 void MacroAssembler::JumpIfRoot(const Register& obj, 3881 void MacroAssembler::JumpIfRoot(const Register& obj,
3882 Heap::RootListIndex index, 3882 Heap::RootListIndex index,
3883 Label* if_equal) { 3883 Label* if_equal) {
3884 CompareRoot(obj, index); 3884 CompareRoot(obj, index);
3885 B(eq, if_equal); 3885 B(eq, if_equal);
(...skipping 80 matching lines...)
3966 3966
3967 // Note: The ARM version of this clobbers elements_reg, but this version does 3967 // Note: The ARM version of this clobbers elements_reg, but this version does
3968 // not. Some uses of this in ARM64 assume that elements_reg will be preserved. 3968 // not. Some uses of this in ARM64 assume that elements_reg will be preserved.
3969 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg, 3969 void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
3970 Register key_reg, 3970 Register key_reg,
3971 Register elements_reg, 3971 Register elements_reg,
3972 Register scratch1, 3972 Register scratch1,
3973 FPRegister fpscratch1, 3973 FPRegister fpscratch1,
3974 Label* fail, 3974 Label* fail,
3975 int elements_offset) { 3975 int elements_offset) {
3976 ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1)); 3976 DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
3977 Label store_num; 3977 Label store_num;
3978 3978
3979 // Speculatively convert the smi to a double - all smis can be exactly 3979 // Speculatively convert the smi to a double - all smis can be exactly
3980 // represented as a double. 3980 // represented as a double.
3981 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag); 3981 SmiUntagToDouble(fpscratch1, value_reg, kSpeculativeUntag);
3982 3982
3983 // If value_reg is a smi, we're done. 3983 // If value_reg is a smi, we're done.
3984 JumpIfSmi(value_reg, &store_num); 3984 JumpIfSmi(value_reg, &store_num);
3985 3985
3986 // Ensure that the object is a heap number. 3986 // Ensure that the object is a heap number.
(...skipping 18 matching lines...)
4005 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { 4005 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
4006 return has_frame_ || !stub->SometimesSetsUpAFrame(); 4006 return has_frame_ || !stub->SometimesSetsUpAFrame();
4007 } 4007 }
4008 4008
4009 4009
4010 void MacroAssembler::IndexFromHash(Register hash, Register index) { 4010 void MacroAssembler::IndexFromHash(Register hash, Register index) {
4011 // If the hash field contains an array index, pick it out. The assert checks 4011 // If the hash field contains an array index, pick it out. The assert checks
4012 // that the constants for the maximum number of digits for an array index 4012 // that the constants for the maximum number of digits for an array index
4013 // cached in the hash field and the number of bits reserved for it do not 4013 // cached in the hash field and the number of bits reserved for it do not
4014 // conflict. 4014 // conflict.
4015 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < 4015 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
4016 (1 << String::kArrayIndexValueBits)); 4016 (1 << String::kArrayIndexValueBits));
4017 DecodeField<String::ArrayIndexValueBits>(index, hash); 4017 DecodeField<String::ArrayIndexValueBits>(index, hash);
4018 SmiTag(index, index); 4018 SmiTag(index, index);
4019 } 4019 }
4020 4020
4021 4021
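That DCHECK is a pure constant comparison; with the values assumed here for this era (kMaxCachedArrayIndexLength == 7, kArrayIndexValueBits == 24) it reduces to:

    //   TenToThe(7) = 10,000,000 < 16,777,216 = 1 << 24
    // so every cacheable array index fits in the hash field's value bits.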
4022 void MacroAssembler::EmitSeqStringSetCharCheck( 4022 void MacroAssembler::EmitSeqStringSetCharCheck(
4023 Register string, 4023 Register string,
4024 Register index, 4024 Register index,
4025 SeqStringSetCharCheckIndexType index_type, 4025 SeqStringSetCharCheckIndexType index_type,
4026 Register scratch, 4026 Register scratch,
4027 uint32_t encoding_mask) { 4027 uint32_t encoding_mask) {
4028 ASSERT(!AreAliased(string, index, scratch)); 4028 DCHECK(!AreAliased(string, index, scratch));
4029 4029
4030 if (index_type == kIndexIsSmi) { 4030 if (index_type == kIndexIsSmi) {
4031 AssertSmi(index); 4031 AssertSmi(index);
4032 } 4032 }
4033 4033
4034 // Check that string is an object. 4034 // Check that string is an object.
4035 AssertNotSmi(string, kNonObject); 4035 AssertNotSmi(string, kNonObject);
4036 4036
4037 // Check that string has an appropriate map. 4037 // Check that string has an appropriate map.
4038 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); 4038 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
4039 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 4039 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
4040 4040
4041 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask); 4041 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
4042 Cmp(scratch, encoding_mask); 4042 Cmp(scratch, encoding_mask);
4043 Check(eq, kUnexpectedStringType); 4043 Check(eq, kUnexpectedStringType);
4044 4044
4045 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset)); 4045 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
4046 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch)); 4046 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
4047 Check(lt, kIndexIsTooLarge); 4047 Check(lt, kIndexIsTooLarge);
4048 4048
4049 ASSERT_EQ(0, Smi::FromInt(0)); 4049 DCHECK_EQ(0, Smi::FromInt(0));
4050 Cmp(index, 0); 4050 Cmp(index, 0);
4051 Check(ge, kIndexIsNegative); 4051 Check(ge, kIndexIsNegative);
4052 } 4052 }
4053 4053
4054 4054
4055 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, 4055 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
4056 Register scratch1, 4056 Register scratch1,
4057 Register scratch2, 4057 Register scratch2,
4058 Label* miss) { 4058 Label* miss) {
4059 ASSERT(!AreAliased(holder_reg, scratch1, scratch2)); 4059 DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
4060 Label same_contexts; 4060 Label same_contexts;
4061 4061
4062 // Load current lexical context from the stack frame. 4062 // Load current lexical context from the stack frame.
4063 Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset)); 4063 Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
4064 // In debug mode, make sure the lexical context is set. 4064 // In debug mode, make sure the lexical context is set.
4065 #ifdef DEBUG 4065 #ifdef DEBUG
4066 Cmp(scratch1, 0); 4066 Cmp(scratch1, 0);
4067 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); 4067 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
4068 #endif 4068 #endif
4069 4069
(...skipping 44 matching lines...)
4114 B(miss, ne); 4114 B(miss, ne);
4115 4115
4116 Bind(&same_contexts); 4116 Bind(&same_contexts);
4117 } 4117 }
4118 4118
4119 4119
4120 // Compute the hash code from the untagged key. This must be kept in sync with 4120 // Compute the hash code from the untagged key. This must be kept in sync with
4121 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in 4121 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
4122 // code-stub-hydrogen.cc 4122 // code-stub-hydrogen.cc
4123 void MacroAssembler::GetNumberHash(Register key, Register scratch) { 4123 void MacroAssembler::GetNumberHash(Register key, Register scratch) {
4124 ASSERT(!AreAliased(key, scratch)); 4124 DCHECK(!AreAliased(key, scratch));
4125 4125
4126 // Xor original key with a seed. 4126 // Xor original key with a seed.
4127 LoadRoot(scratch, Heap::kHashSeedRootIndex); 4127 LoadRoot(scratch, Heap::kHashSeedRootIndex);
4128 Eor(key, key, Operand::UntagSmi(scratch)); 4128 Eor(key, key, Operand::UntagSmi(scratch));
4129 4129
4130 // The algorithm uses 32-bit integer values. 4130 // The algorithm uses 32-bit integer values.
4131 key = key.W(); 4131 key = key.W();
4132 scratch = scratch.W(); 4132 scratch = scratch.W();
4133 4133
4134 // Compute the hash code from the untagged key. This must be kept in sync 4134 // Compute the hash code from the untagged key. This must be kept in sync
(...skipping 18 matching lines...)
4153 4153
4154 4154
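For reference, the 32-bit hash this assembly must mirror has the following shape; a sketch reconstructed from the Jenkins-style hash V8 uses, with the exact constants treated as assumptions rather than verified against utils.h:

    uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;  // matches the Eor with kHashSeedRootIndex
      hash = ~hash + (hash << 15);
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;          // == hash + (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash & 0x3fffffff;    // keep the result in positive Smi range
    }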
4155 void MacroAssembler::LoadFromNumberDictionary(Label* miss, 4155 void MacroAssembler::LoadFromNumberDictionary(Label* miss,
4156 Register elements, 4156 Register elements,
4157 Register key, 4157 Register key,
4158 Register result, 4158 Register result,
4159 Register scratch0, 4159 Register scratch0,
4160 Register scratch1, 4160 Register scratch1,
4161 Register scratch2, 4161 Register scratch2,
4162 Register scratch3) { 4162 Register scratch3) {
4163 ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3)); 4163 DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
4164 4164
4165 Label done; 4165 Label done;
4166 4166
4167 SmiUntag(scratch0, key); 4167 SmiUntag(scratch0, key);
4168 GetNumberHash(scratch0, scratch1); 4168 GetNumberHash(scratch0, scratch1);
4169 4169
4170 // Compute the capacity mask. 4170 // Compute the capacity mask.
4171 Ldrsw(scratch1, 4171 Ldrsw(scratch1,
4172 UntagSmiFieldMemOperand(elements, 4172 UntagSmiFieldMemOperand(elements,
4173 SeededNumberDictionary::kCapacityOffset)); 4173 SeededNumberDictionary::kCapacityOffset));
4174 Sub(scratch1, scratch1, 1); 4174 Sub(scratch1, scratch1, 1);
4175 4175
4176 // Generate an unrolled loop that performs a few probes before giving up. 4176 // Generate an unrolled loop that performs a few probes before giving up.
4177 for (int i = 0; i < kNumberDictionaryProbes; i++) { 4177 for (int i = 0; i < kNumberDictionaryProbes; i++) {
4178 // Compute the masked index: (hash + i + i * i) & mask. 4178 // Compute the masked index: (hash + i + i * i) & mask.
4179 if (i > 0) { 4179 if (i > 0) {
4180 Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i)); 4180 Add(scratch2, scratch0, SeededNumberDictionary::GetProbeOffset(i));
4181 } else { 4181 } else {
4182 Mov(scratch2, scratch0); 4182 Mov(scratch2, scratch0);
4183 } 4183 }
4184 And(scratch2, scratch2, scratch1); 4184 And(scratch2, scratch2, scratch1);
4185 4185
4186 // Scale the index by multiplying by the element size. 4186 // Scale the index by multiplying by the element size.
4187 ASSERT(SeededNumberDictionary::kEntrySize == 3); 4187 DCHECK(SeededNumberDictionary::kEntrySize == 3);
4188 Add(scratch2, scratch2, Operand(scratch2, LSL, 1)); 4188 Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
4189 4189
4190 // Check if the key is identical to the name. 4190 // Check if the key is identical to the name.
4191 Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2)); 4191 Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
4192 Ldr(scratch3, 4192 Ldr(scratch3,
4193 FieldMemOperand(scratch2, 4193 FieldMemOperand(scratch2,
4194 SeededNumberDictionary::kElementsStartOffset)); 4194 SeededNumberDictionary::kElementsStartOffset));
4195 Cmp(key, scratch3); 4195 Cmp(key, scratch3);
4196 if (i != (kNumberDictionaryProbes - 1)) { 4196 if (i != (kNumberDictionaryProbes - 1)) {
4197 B(eq, &done); 4197 B(eq, &done);
(...skipping 14 matching lines...)
4212 SeededNumberDictionary::kElementsStartOffset + kPointerSize; 4212 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
4213 Ldr(result, FieldMemOperand(scratch2, kValueOffset)); 4213 Ldr(result, FieldMemOperand(scratch2, kValueOffset));
4214 } 4214 }
4215 4215
4216 4216
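Each unrolled probe computes a quadratic-probe slot address; per probe i, in scalar form:

    //   index = (hash + i + i*i) & capacity_mask   // Add + And
    //   entry = index * 3                          // Add(x, x, Operand(x, LSL, 1))
    //   slot  = elements + (entry << kPointerSizeLog2) + kElementsStartOffset
    // The *3 via shift-add matches DCHECK(SeededNumberDictionary::kEntrySize == 3).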
4217 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. 4217 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
4218 Register address, 4218 Register address,
4219 Register scratch1, 4219 Register scratch1,
4220 SaveFPRegsMode fp_mode, 4220 SaveFPRegsMode fp_mode,
4221 RememberedSetFinalAction and_then) { 4221 RememberedSetFinalAction and_then) {
4222 ASSERT(!AreAliased(object, address, scratch1)); 4222 DCHECK(!AreAliased(object, address, scratch1));
4223 Label done, store_buffer_overflow; 4223 Label done, store_buffer_overflow;
4224 if (emit_debug_code()) { 4224 if (emit_debug_code()) {
4225 Label ok; 4225 Label ok;
4226 JumpIfNotInNewSpace(object, &ok); 4226 JumpIfNotInNewSpace(object, &ok);
4227 Abort(kRememberedSetPointerInNewSpace); 4227 Abort(kRememberedSetPointerInNewSpace);
4228 bind(&ok); 4228 bind(&ok);
4229 } 4229 }
4230 UseScratchRegisterScope temps(this); 4230 UseScratchRegisterScope temps(this);
4231 Register scratch2 = temps.AcquireX(); 4231 Register scratch2 = temps.AcquireX();
4232 4232
4233 // Load store buffer top. 4233 // Load store buffer top.
4234 Mov(scratch2, ExternalReference::store_buffer_top(isolate())); 4234 Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
4235 Ldr(scratch1, MemOperand(scratch2)); 4235 Ldr(scratch1, MemOperand(scratch2));
4236 // Store pointer to buffer and increment buffer top. 4236 // Store pointer to buffer and increment buffer top.
4237 Str(address, MemOperand(scratch1, kPointerSize, PostIndex)); 4237 Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
4238 // Write back new top of buffer. 4238 // Write back new top of buffer.
4239 Str(scratch1, MemOperand(scratch2)); 4239 Str(scratch1, MemOperand(scratch2));
4240 // Call stub on end of buffer. 4240 // Call stub on end of buffer.
4241 // Check for end of buffer. 4241 // Check for end of buffer.
4242 ASSERT(StoreBuffer::kStoreBufferOverflowBit == 4242 DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
4243 (1 << (14 + kPointerSizeLog2))); 4243 (1 << (14 + kPointerSizeLog2)));
4244 if (and_then == kFallThroughAtEnd) { 4244 if (and_then == kFallThroughAtEnd) {
4245 Tbz(scratch1, (14 + kPointerSizeLog2), &done); 4245 Tbz(scratch1, (14 + kPointerSizeLog2), &done);
4246 } else { 4246 } else {
4247 ASSERT(and_then == kReturnAtEnd); 4247 DCHECK(and_then == kReturnAtEnd);
4248 Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow); 4248 Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
4249 Ret(); 4249 Ret();
4250 } 4250 }
4251 4251
4252 Bind(&store_buffer_overflow); 4252 Bind(&store_buffer_overflow);
4253 Push(lr); 4253 Push(lr);
4254 StoreBufferOverflowStub store_buffer_overflow_stub = 4254 StoreBufferOverflowStub store_buffer_overflow_stub =
4255 StoreBufferOverflowStub(isolate(), fp_mode); 4255 StoreBufferOverflowStub(isolate(), fp_mode);
4256 CallStub(&store_buffer_overflow_stub); 4256 CallStub(&store_buffer_overflow_stub);
4257 Pop(lr); 4257 Pop(lr);
4258 4258
4259 Bind(&done); 4259 Bind(&done);
4260 if (and_then == kReturnAtEnd) { 4260 if (and_then == kReturnAtEnd) {
4261 Ret(); 4261 Ret();
4262 } 4262 }
4263 } 4263 }
4264 4264
4265 4265
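The Tbz/Tbnz pair tests a single bit of the incremented buffer top. Assuming 64-bit pointers (kPointerSizeLog2 == 3), the DCHECKed constant works out to:

    //   StoreBuffer::kStoreBufferOverflowBit == 1 << (14 + 3) == 1 << 17
    // The buffer is sized and aligned so that bit 17 of the top pointer flips
    // exactly when the buffer fills (sizing details are outside this diff).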
4266 void MacroAssembler::PopSafepointRegisters() { 4266 void MacroAssembler::PopSafepointRegisters() {
4267 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 4267 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
4268 PopXRegList(kSafepointSavedRegisters); 4268 PopXRegList(kSafepointSavedRegisters);
4269 Drop(num_unsaved); 4269 Drop(num_unsaved);
4270 } 4270 }
4271 4271
4272 4272
4273 void MacroAssembler::PushSafepointRegisters() { 4273 void MacroAssembler::PushSafepointRegisters() {
4274 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so 4274 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
4275 // adjust the stack for unsaved registers. 4275 // adjust the stack for unsaved registers.
4276 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 4276 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
4277 ASSERT(num_unsaved >= 0); 4277 DCHECK(num_unsaved >= 0);
4278 Claim(num_unsaved); 4278 Claim(num_unsaved);
4279 PushXRegList(kSafepointSavedRegisters); 4279 PushXRegList(kSafepointSavedRegisters);
4280 } 4280 }
4281 4281
4282 4282
4283 void MacroAssembler::PushSafepointRegistersAndDoubles() { 4283 void MacroAssembler::PushSafepointRegistersAndDoubles() {
4284 PushSafepointRegisters(); 4284 PushSafepointRegisters();
4285 PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 4285 PushCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
4286 FPRegister::kAllocatableFPRegisters)); 4286 FPRegister::kAllocatableFPRegisters));
4287 } 4287 }
4288 4288
4289 4289
4290 void MacroAssembler::PopSafepointRegistersAndDoubles() { 4290 void MacroAssembler::PopSafepointRegistersAndDoubles() {
4291 PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 4291 PopCPURegList(CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
4292 FPRegister::kAllocatableFPRegisters)); 4292 FPRegister::kAllocatableFPRegisters));
4293 PopSafepointRegisters(); 4293 PopSafepointRegisters();
4294 } 4294 }
4295 4295
4296 4296
4297 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { 4297 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
4298 // Make sure the safepoint registers list is what we expect. 4298 // Make sure the safepoint registers list is what we expect.
4299 ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff); 4299 DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
4300 4300
4301 // Safepoint registers are stored contiguously on the stack, but not all the 4301 // Safepoint registers are stored contiguously on the stack, but not all the
4302 // registers are saved. The following registers are excluded: 4302 // registers are saved. The following registers are excluded:
4303 // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of 4303 // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
4304 // the macro assembler. 4304 // the macro assembler.
4305 // - x28 (jssp) because the JS stack pointer doesn't need to be included in 4305 // - x28 (jssp) because the JS stack pointer doesn't need to be included in
4306 // safepoint registers. 4306 // safepoint registers.
4307 // - x31 (csp) because the system stack pointer doesn't need to be included 4307 // - x31 (csp) because the system stack pointer doesn't need to be included
4308 // in safepoint registers. 4308 // in safepoint registers.
4309 // 4309 //
(...skipping 49 matching lines...)
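Decoding the expected list 0x6ffcffff bit by bit reproduces exactly the exclusions enumerated above:

    //   0x6ffcffff = 0110 1111 1111 1100 1111 1111 1111 1111  (bit 31 .. bit 0)
    //   bits  0-15 : x0-x15             set
    //   bits 16-17 : x16, x17 (ip0/1)   clear, excluded
    //   bits 18-27 : x18-x27            set
    //   bit  28    : x28 (jssp)         clear, excluded
    //   bits 29-30 : x29 (fp), x30 (lr) set
    //   bit  31    : x31 (csp)          clear, excluded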
4359 // catch stores of Smis. 4359 // catch stores of Smis.
4360 Label done; 4360 Label done;
4361 4361
4362 // Skip the barrier if writing a smi. 4362 // Skip the barrier if writing a smi.
4363 if (smi_check == INLINE_SMI_CHECK) { 4363 if (smi_check == INLINE_SMI_CHECK) {
4364 JumpIfSmi(value, &done); 4364 JumpIfSmi(value, &done);
4365 } 4365 }
4366 4366
4367 // Although the object register is tagged, the offset is relative to the start 4367 // Although the object register is tagged, the offset is relative to the start
4368 // of the object, so offset must be a multiple of kPointerSize. 4368 // of the object, so offset must be a multiple of kPointerSize.
4369 ASSERT(IsAligned(offset, kPointerSize)); 4369 DCHECK(IsAligned(offset, kPointerSize));
4370 4370
4371 Add(scratch, object, offset - kHeapObjectTag); 4371 Add(scratch, object, offset - kHeapObjectTag);
4372 if (emit_debug_code()) { 4372 if (emit_debug_code()) {
4373 Label ok; 4373 Label ok;
4374 Tst(scratch, (1 << kPointerSizeLog2) - 1); 4374 Tst(scratch, (1 << kPointerSizeLog2) - 1);
4375 B(eq, &ok); 4375 B(eq, &ok);
4376 Abort(kUnalignedCellInWriteBarrier); 4376 Abort(kUnalignedCellInWriteBarrier);
4377 Bind(&ok); 4377 Bind(&ok);
4378 } 4378 }
4379 4379
(...skipping 18 matching lines...)
4398 4398
4399 4399
4400 // Will clobber: object, map, dst. 4400 // Will clobber: object, map, dst.
4401 // If lr_status is kLRHasBeenSaved, lr will also be clobbered. 4401 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
4402 void MacroAssembler::RecordWriteForMap(Register object, 4402 void MacroAssembler::RecordWriteForMap(Register object,
4403 Register map, 4403 Register map,
4404 Register dst, 4404 Register dst,
4405 LinkRegisterStatus lr_status, 4405 LinkRegisterStatus lr_status,
4406 SaveFPRegsMode fp_mode) { 4406 SaveFPRegsMode fp_mode) {
4407 ASM_LOCATION("MacroAssembler::RecordWrite"); 4407 ASM_LOCATION("MacroAssembler::RecordWrite");
4408 ASSERT(!AreAliased(object, map)); 4408 DCHECK(!AreAliased(object, map));
4409 4409
4410 if (emit_debug_code()) { 4410 if (emit_debug_code()) {
4411 UseScratchRegisterScope temps(this); 4411 UseScratchRegisterScope temps(this);
4412 Register temp = temps.AcquireX(); 4412 Register temp = temps.AcquireX();
4413 4413
4414 CompareMap(map, temp, isolate()->factory()->meta_map()); 4414 CompareMap(map, temp, isolate()->factory()->meta_map());
4415 Check(eq, kWrongAddressOrValuePassedToRecordWrite); 4415 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4416 } 4416 }
4417 4417
4418 if (!FLAG_incremental_marking) { 4418 if (!FLAG_incremental_marking) {
(...skipping 58 matching lines...)
4477 void MacroAssembler::RecordWrite( 4477 void MacroAssembler::RecordWrite(
4478 Register object, 4478 Register object,
4479 Register address, 4479 Register address,
4480 Register value, 4480 Register value,
4481 LinkRegisterStatus lr_status, 4481 LinkRegisterStatus lr_status,
4482 SaveFPRegsMode fp_mode, 4482 SaveFPRegsMode fp_mode,
4483 RememberedSetAction remembered_set_action, 4483 RememberedSetAction remembered_set_action,
4484 SmiCheck smi_check, 4484 SmiCheck smi_check,
4485 PointersToHereCheck pointers_to_here_check_for_value) { 4485 PointersToHereCheck pointers_to_here_check_for_value) {
4486 ASM_LOCATION("MacroAssembler::RecordWrite"); 4486 ASM_LOCATION("MacroAssembler::RecordWrite");
4487 ASSERT(!AreAliased(object, value)); 4487 DCHECK(!AreAliased(object, value));
4488 4488
4489 if (emit_debug_code()) { 4489 if (emit_debug_code()) {
4490 UseScratchRegisterScope temps(this); 4490 UseScratchRegisterScope temps(this);
4491 Register temp = temps.AcquireX(); 4491 Register temp = temps.AcquireX();
4492 4492
4493 Ldr(temp, MemOperand(address)); 4493 Ldr(temp, MemOperand(address));
4494 Cmp(temp, value); 4494 Cmp(temp, value);
4495 Check(eq, kWrongAddressOrValuePassedToRecordWrite); 4495 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
4496 } 4496 }
4497 4497
4498 // First, check if a write barrier is even needed. The tests below 4498 // First, check if a write barrier is even needed. The tests below
4499 // catch stores of smis and stores into the young generation. 4499 // catch stores of smis and stores into the young generation.
4500 Label done; 4500 Label done;
4501 4501
4502 if (smi_check == INLINE_SMI_CHECK) { 4502 if (smi_check == INLINE_SMI_CHECK) {
4503 ASSERT_EQ(0, kSmiTag); 4503 DCHECK_EQ(0, kSmiTag);
4504 JumpIfSmi(value, &done); 4504 JumpIfSmi(value, &done);
4505 } 4505 }
4506 4506
4507 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { 4507 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
4508 CheckPageFlagClear(value, 4508 CheckPageFlagClear(value,
4509 value, // Used as scratch. 4509 value, // Used as scratch.
4510 MemoryChunk::kPointersToHereAreInterestingMask, 4510 MemoryChunk::kPointersToHereAreInterestingMask,
4511 &done); 4511 &done);
4512 } 4512 }
4513 CheckPageFlagClear(object, 4513 CheckPageFlagClear(object,
(...skipping 25 matching lines...)
4539 Mov(address, Operand(BitCast<int64_t>(kZapValue + 12))); 4539 Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
4540 Mov(value, Operand(BitCast<int64_t>(kZapValue + 16))); 4540 Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
4541 } 4541 }
4542 } 4542 }
4543 4543
4544 4544
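Collapsing RecordWrite's fast path into scalar pseudo-C++ (the object-side mask is cut off by the elided lines; kPointersFromHereAreInterestingMask is assumed by analogy with the value-side check):

    // The barrier body is skipped when any of these holds:
    //   IsSmi(value)                                                   // INLINE_SMI_CHECK
    //   !(PageOf(value)->flags  & kPointersToHereAreInterestingMask)
    //   !(PageOf(object)->flags & kPointersFromHereAreInterestingMask) // assumed
    // Only heap-pointer stores into pages that track them fall through to the stub.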
4545 void MacroAssembler::AssertHasValidColor(const Register& reg) { 4545 void MacroAssembler::AssertHasValidColor(const Register& reg) {
4546 if (emit_debug_code()) { 4546 if (emit_debug_code()) {
4547 // The bit sequence is backward. The first character in the string 4547 // The bit sequence is backward. The first character in the string
4548 // represents the least significant bit. 4548 // represents the least significant bit.
4549 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); 4549 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
4550 4550
4551 Label color_is_valid; 4551 Label color_is_valid;
4552 Tbnz(reg, 0, &color_is_valid); 4552 Tbnz(reg, 0, &color_is_valid);
4553 Tbz(reg, 1, &color_is_valid); 4553 Tbz(reg, 1, &color_is_valid);
4554 Abort(kUnexpectedColorFound); 4554 Abort(kUnexpectedColorFound);
4555 Bind(&color_is_valid); 4555 Bind(&color_is_valid);
4556 } 4556 }
4557 } 4557 }
4558 4558
4559 4559
4560 void MacroAssembler::GetMarkBits(Register addr_reg, 4560 void MacroAssembler::GetMarkBits(Register addr_reg,
4561 Register bitmap_reg, 4561 Register bitmap_reg,
4562 Register shift_reg) { 4562 Register shift_reg) {
4563 ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg)); 4563 DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
4564 ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits()); 4564 DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
4565 // addr_reg is divided into fields: 4565 // addr_reg is divided into fields:
4566 // |63 page base 20|19 high 8|7 shift 3|2 0| 4566 // |63 page base 20|19 high 8|7 shift 3|2 0|
4567 // 'high' gives the index of the cell holding color bits for the object. 4567 // 'high' gives the index of the cell holding color bits for the object.
4568 // 'shift' gives the offset in the cell for this object's color. 4568 // 'shift' gives the offset in the cell for this object's color.
4569 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; 4569 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
4570 UseScratchRegisterScope temps(this); 4570 UseScratchRegisterScope temps(this);
4571 Register temp = temps.AcquireX(); 4571 Register temp = temps.AcquireX();
4572 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits); 4572 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
4573 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask); 4573 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
4574 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2)); 4574 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
4575 // bitmap_reg: 4575 // bitmap_reg:
4576 // |63 page base 20|19 zeros 15|14 high 3|2 0| 4576 // |63 page base 20|19 zeros 15|14 high 3|2 0|
4577 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); 4577 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
4578 } 4578 }
4579 4579
4580 4580
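With kPointerSizeLog2 == 3 and Bitmap::kBitsPerCellLog2 == 5 (typical 64-bit values, assumed here), kShiftBits == 8 and the two Ubfx extractions become:

    //   shift = (addr >> 3) & 0x1f   // bits [7:3], bit index within the cell
    //   high  = (addr >> 8) & 0xfff  // bits [19:8], cell index in the bitmap
    //   bitmap_reg = (addr & ~kPageAlignmentMask) + (high << kBytesPerCellLog2)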
4581 void MacroAssembler::HasColor(Register object, 4581 void MacroAssembler::HasColor(Register object,
4582 Register bitmap_scratch, 4582 Register bitmap_scratch,
4583 Register shift_scratch, 4583 Register shift_scratch,
4584 Label* has_color, 4584 Label* has_color,
4585 int first_bit, 4585 int first_bit,
4586 int second_bit) { 4586 int second_bit) {
4587 // See mark-compact.h for color definitions. 4587 // See mark-compact.h for color definitions.
4588 ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch)); 4588 DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));
4589 4589
4590 GetMarkBits(object, bitmap_scratch, shift_scratch); 4590 GetMarkBits(object, bitmap_scratch, shift_scratch);
4591 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 4591 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4592 // Shift the bitmap down to get the color of the object in bits [1:0]. 4592 // Shift the bitmap down to get the color of the object in bits [1:0].
4593 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch); 4593 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
4594 4594
4595 AssertHasValidColor(bitmap_scratch); 4595 AssertHasValidColor(bitmap_scratch);
4596 4596
4597 // These bit sequences are backwards. The first character in the string 4597 // These bit sequences are backwards. The first character in the string
4598 // represents the least significant bit. 4598 // represents the least significant bit.
4599 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); 4599 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4600 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 4600 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
4601 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); 4601 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
4602 4602
4603 // Check for the color. 4603 // Check for the color.
4604 if (first_bit == 0) { 4604 if (first_bit == 0) {
4605 // Checking for white. 4605 // Checking for white.
4606 ASSERT(second_bit == 0); 4606 DCHECK(second_bit == 0);
4607 // We only need to test the first bit. 4607 // We only need to test the first bit.
4608 Tbz(bitmap_scratch, 0, has_color); 4608 Tbz(bitmap_scratch, 0, has_color);
4609 } else { 4609 } else {
4610 Label other_color; 4610 Label other_color;
4611 // Checking for grey or black. 4611 // Checking for grey or black.
4612 Tbz(bitmap_scratch, 0, &other_color); 4612 Tbz(bitmap_scratch, 0, &other_color);
4613 if (second_bit == 0) { 4613 if (second_bit == 0) {
4614 Tbz(bitmap_scratch, 1, has_color); 4614 Tbz(bitmap_scratch, 1, has_color);
4615 } else { 4615 } else {
4616 Tbnz(bitmap_scratch, 1, has_color); 4616 Tbnz(bitmap_scratch, 1, has_color);
(...skipping 13 matching lines...)
4630 Ldrsw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset)); 4630 Ldrsw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
4631 TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated); 4631 TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
4632 } 4632 }
4633 } 4633 }
4634 4634
4635 4635
4636 void MacroAssembler::JumpIfBlack(Register object, 4636 void MacroAssembler::JumpIfBlack(Register object,
4637 Register scratch0, 4637 Register scratch0,
4638 Register scratch1, 4638 Register scratch1,
4639 Label* on_black) { 4639 Label* on_black) {
4640 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 4640 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
4641 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. 4641 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
4642 } 4642 }
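
Because the bit patterns are LSB-first, HasColor and JumpIfBlack reduce to two bit probes once the bitmap word has been shifted down; a minimal sketch:

#include <cstdint>

// Marking colors read from bits [1:0] after the Lsr in HasColor.
// Patterns are LSB-first: white "00", black "10" (bit 0 set), grey "11".
enum class Color { White, Black, Grey };

Color Classify(uint64_t shifted_bitmap) {
  bool bit0 = shifted_bitmap & 1;  // the Tbz/Tbnz on bit 0
  bool bit1 = shifted_bitmap & 2;  // the Tbz/Tbnz on bit 1
  if (!bit0) return Color::White;  // bit1 must be 0 too: "01" is ruled out
                                   // by AssertHasValidColor
  return bit1 ? Color::Grey : Color::Black;
}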
4643 4643
4644 4644
4645 void MacroAssembler::JumpIfDictionaryInPrototypeChain( 4645 void MacroAssembler::JumpIfDictionaryInPrototypeChain(
4646 Register object, 4646 Register object,
4647 Register scratch0, 4647 Register scratch0,
4648 Register scratch1, 4648 Register scratch1,
4649 Label* found) { 4649 Label* found) {
4650 ASSERT(!AreAliased(object, scratch0, scratch1)); 4650 DCHECK(!AreAliased(object, scratch0, scratch1));
4651 Factory* factory = isolate()->factory(); 4651 Factory* factory = isolate()->factory();
4652 Register current = scratch0; 4652 Register current = scratch0;
4653 Label loop_again; 4653 Label loop_again;
4654 4654
4655 // Scratch contains elements pointer. 4655 // Scratch contains elements pointer.
4656 Mov(current, object); 4656 Mov(current, object);
4657 4657
4658 // Loop based on the map going up the prototype chain. 4658 // Loop based on the map going up the prototype chain.
4659 Bind(&loop_again); 4659 Bind(&loop_again);
4660 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset)); 4660 Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
4661 Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset)); 4661 Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4662 DecodeField<Map::ElementsKindBits>(scratch1); 4662 DecodeField<Map::ElementsKindBits>(scratch1);
4663 CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found); 4663 CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
4664 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset)); 4664 Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
4665 CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again); 4665 CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
4666 } 4666 }
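
At the C++ level JumpIfDictionaryInPrototypeChain is just a walk up the maps; the struct, field names, and parameterized elements-kind value below are hypothetical stand-ins for illustration, not the real heap layout.

// Follow map->prototype until the null sentinel, branching out if any map
// along the way has dictionary-mode elements.
struct FakeMap {
  int elements_kind;         // what DecodeField<Map::ElementsKindBits> yields
  const FakeMap* prototype;  // null once we reach the null_value sentinel
};

bool HasDictionaryElementsInChain(const FakeMap* map, int kDictionaryElements) {
  for (const FakeMap* m = map; m != nullptr; m = m->prototype) {
    if (m->elements_kind == kDictionaryElements) return true;  // the eq branch
  }
  return false;
}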
4667 4667
4668 4668
4669 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, 4669 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
4670 Register result) { 4670 Register result) {
4671 ASSERT(!result.Is(ldr_location)); 4671 DCHECK(!result.Is(ldr_location));
4672 const uint32_t kLdrLitOffset_lsb = 5; 4672 const uint32_t kLdrLitOffset_lsb = 5;
4673 const uint32_t kLdrLitOffset_width = 19; 4673 const uint32_t kLdrLitOffset_width = 19;
4674 Ldr(result, MemOperand(ldr_location)); 4674 Ldr(result, MemOperand(ldr_location));
4675 if (emit_debug_code()) { 4675 if (emit_debug_code()) {
4676 And(result, result, LoadLiteralFMask); 4676 And(result, result, LoadLiteralFMask);
4677 Cmp(result, LoadLiteralFixed); 4677 Cmp(result, LoadLiteralFixed);
4678 Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral); 4678 Check(eq, kTheInstructionToPatchShouldBeAnLdrLiteral);
4679 // The instruction was clobbered. Reload it. 4679 // The instruction was clobbered. Reload it.
4680 Ldr(result, MemOperand(ldr_location)); 4680 Ldr(result, MemOperand(ldr_location));
4681 } 4681 }
4682 Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width); 4682 Sbfx(result, result, kLdrLitOffset_lsb, kLdrLitOffset_width);
4683 Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2)); 4683 Add(result, ldr_location, Operand(result, LSL, kWordSizeInBytesLog2));
4684 } 4684 }
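
For reference, the decoding that GetRelocatedValueLocation performs with Sbfx + Add: an ARM64 "LDR (literal)" carries a 19-bit signed word offset at bits [23:5], scaled by the 4-byte instruction size. A sketch:

#include <cstdint>

// Compute the literal address targeted by an LDR (literal) instruction
// located at ldr_address, mirroring the Sbfx/Add sequence above.
uint64_t LdrLiteralTarget(uint64_t ldr_address, uint32_t instruction) {
  const uint32_t kLdrLitOffset_lsb = 5;
  const uint32_t kLdrLitOffset_width = 19;
  // Sbfx: extract bits [23:5] and sign-extend.
  int32_t imm19 = (instruction >> kLdrLitOffset_lsb) &
                  ((1u << kLdrLitOffset_width) - 1);
  if (imm19 & (1 << (kLdrLitOffset_width - 1))) {
    imm19 -= 1 << kLdrLitOffset_width;
  }
  // LSL by kWordSizeInBytesLog2 (2): the offset counts 4-byte words.
  return ldr_address + (static_cast<int64_t>(imm19) << 2);
}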
4685 4685
4686 4686
4687 void MacroAssembler::EnsureNotWhite( 4687 void MacroAssembler::EnsureNotWhite(
4688 Register value, 4688 Register value,
4689 Register bitmap_scratch, 4689 Register bitmap_scratch,
4690 Register shift_scratch, 4690 Register shift_scratch,
4691 Register load_scratch, 4691 Register load_scratch,
4692 Register length_scratch, 4692 Register length_scratch,
4693 Label* value_is_white_and_not_data) { 4693 Label* value_is_white_and_not_data) {
4694 ASSERT(!AreAliased( 4694 DCHECK(!AreAliased(
4695 value, bitmap_scratch, shift_scratch, load_scratch, length_scratch)); 4695 value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
4696 4696
4697 // These bit sequences are backwards. The first character in the string 4697 // These bit sequences are backwards. The first character in the string
4698 // represents the least significant bit. 4698 // represents the least significant bit.
4699 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); 4699 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4700 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 4700 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
4701 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); 4701 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
4702 4702
4703 GetMarkBits(value, bitmap_scratch, shift_scratch); 4703 GetMarkBits(value, bitmap_scratch, shift_scratch);
4704 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 4704 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4705 Lsr(load_scratch, load_scratch, shift_scratch); 4705 Lsr(load_scratch, load_scratch, shift_scratch);
4706 4706
4707 AssertHasValidColor(load_scratch); 4707 AssertHasValidColor(load_scratch);
4708 4708
4709 // If the value is black or grey we don't need to do anything. 4709 // If the value is black or grey we don't need to do anything.
4710 // Since both black and grey have a 1 in the first position and white does 4710 // Since both black and grey have a 1 in the first position and white does
4711 // not have a 1 there we only need to check one bit. 4711 // not have a 1 there we only need to check one bit.
4712 Label done; 4712 Label done;
4713 Tbnz(load_scratch, 0, &done); 4713 Tbnz(load_scratch, 0, &done);
4714 4714
4715 // Value is white. We check whether it is data that doesn't need scanning. 4715 // Value is white. We check whether it is data that doesn't need scanning.
4716 Register map = load_scratch; // Holds map while checking type. 4716 Register map = load_scratch; // Holds map while checking type.
4717 Label is_data_object; 4717 Label is_data_object;
4718 4718
4719 // Check for heap-number. 4719 // Check for heap-number.
4720 Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset)); 4720 Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
4721 Mov(length_scratch, HeapNumber::kSize); 4721 Mov(length_scratch, HeapNumber::kSize);
4722 JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object); 4722 JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
4723 4723
4724 // Check for strings. 4724 // Check for strings.
4725 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); 4725 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
4726 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); 4726 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
4727 // If it's a string and it's not a cons string then it's an object containing 4727 // If it's a string and it's not a cons string then it's an object containing
4728 // no GC pointers. 4728 // no GC pointers.
4729 Register instance_type = load_scratch; 4729 Register instance_type = load_scratch;
4730 Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); 4730 Ldrb(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
4731 TestAndBranchIfAnySet(instance_type, 4731 TestAndBranchIfAnySet(instance_type,
4732 kIsIndirectStringMask | kIsNotStringMask, 4732 kIsIndirectStringMask | kIsNotStringMask,
4733 value_is_white_and_not_data); 4733 value_is_white_and_not_data);
4734 4734
4735 // It's a non-indirect (non-cons and non-slice) string. 4735 // It's a non-indirect (non-cons and non-slice) string.
4736 // If it's external, the length is just ExternalString::kSize. 4736 // If it's external, the length is just ExternalString::kSize.
4737 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). 4737 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
4738 // External strings are the only ones with the kExternalStringTag bit 4738 // External strings are the only ones with the kExternalStringTag bit
4739 // set. 4739 // set.
4740 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); 4740 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
4741 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); 4741 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
4742 Mov(length_scratch, ExternalString::kSize); 4742 Mov(length_scratch, ExternalString::kSize);
4743 TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object); 4743 TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
4744 4744
4745 // Sequential string, either ASCII or UC16. 4745 // Sequential string, either ASCII or UC16.
4746 // For ASCII (char-size of 1) we shift the smi tag away to get the length. 4746 // For ASCII (char-size of 1) we shift the smi tag away to get the length.
4747 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby 4747 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
4748 // getting the length multiplied by 2. 4748 // getting the length multiplied by 2.
4749 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); 4749 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
4750 Ldrsw(length_scratch, UntagSmiFieldMemOperand(value, 4750 Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
4751 String::kLengthOffset)); 4751 String::kLengthOffset));
4752 Tst(instance_type, kStringEncodingMask); 4752 Tst(instance_type, kStringEncodingMask);
4753 Cset(load_scratch, eq); 4753 Cset(load_scratch, eq);
4754 Lsl(length_scratch, length_scratch, load_scratch); 4754 Lsl(length_scratch, length_scratch, load_scratch);
4755 Add(length_scratch, 4755 Add(length_scratch,
4756 length_scratch, 4756 length_scratch,
4757 SeqString::kHeaderSize + kObjectAlignmentMask); 4757 SeqString::kHeaderSize + kObjectAlignmentMask);
4758 Bic(length_scratch, length_scratch, kObjectAlignmentMask); 4758 Bic(length_scratch, length_scratch, kObjectAlignmentMask);
4759 4759
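The Cset/Lsl pair implements the "multiply by character size" step: the shift is 0 for one-byte strings and 1 for two-byte strings. A worked sketch, with the header size and 8-byte alignment assumed for illustration:

#include <cstdint>

// Size of a sequential string as computed above: length << charsize_log2,
// plus header, rounded up to object alignment. Constants are assumptions.
uint64_t SeqStringSize(uint32_t untagged_length, bool is_one_byte) {
  const uint64_t kHeaderSize = 16;          // SeqString::kHeaderSize (assumed)
  const uint64_t kObjectAlignmentMask = 7;  // 8-byte alignment (assumed)
  uint64_t size = uint64_t{untagged_length} << (is_one_byte ? 0 : 1);  // Lsl
  size += kHeaderSize + kObjectAlignmentMask;  // Add
  // e.g. a 5-character one-byte string: (5 + 16 + 7) & ~7 == 24 bytes.
  return size & ~kObjectAlignmentMask;  // Bic completes the round-up
}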
(...skipping 205 matching lines...)
4965 4965
4966 // This is the main Printf implementation. All other Printf variants call 4966 // This is the main Printf implementation. All other Printf variants call
4967 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes. 4967 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
4968 void MacroAssembler::PrintfNoPreserve(const char * format, 4968 void MacroAssembler::PrintfNoPreserve(const char * format,
4969 const CPURegister& arg0, 4969 const CPURegister& arg0,
4970 const CPURegister& arg1, 4970 const CPURegister& arg1,
4971 const CPURegister& arg2, 4971 const CPURegister& arg2,
4972 const CPURegister& arg3) { 4972 const CPURegister& arg3) {
4973 // We cannot handle a caller-saved stack pointer. It doesn't make much sense 4973 // We cannot handle a caller-saved stack pointer. It doesn't make much sense
4974 // in most cases anyway, so this restriction shouldn't be too serious. 4974 // in most cases anyway, so this restriction shouldn't be too serious.
4975 ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer())); 4975 DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
4976 4976
4977 // The provided arguments, and their proper procedure-call standard registers. 4977 // The provided arguments, and their proper procedure-call standard registers.
4978 CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3}; 4978 CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
4979 CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg}; 4979 CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};
4980 4980
4981 int arg_count = kPrintfMaxArgCount; 4981 int arg_count = kPrintfMaxArgCount;
4982 4982
4983 // The PCS varargs registers for printf. Note that x0 is used for the printf 4983 // The PCS varargs registers for printf. Note that x0 is used for the printf
4984 // format string. 4984 // format string.
4985 static const CPURegList kPCSVarargs = 4985 static const CPURegList kPCSVarargs =
(...skipping 30 matching lines...)
5016 // Work out the proper PCS register for this argument. 5016 // Work out the proper PCS register for this argument.
5017 if (args[i].IsRegister()) { 5017 if (args[i].IsRegister()) {
5018 pcs[i] = pcs_varargs.PopLowestIndex().X(); 5018 pcs[i] = pcs_varargs.PopLowestIndex().X();
5019 // We might only need a W register here. We need to know the size of the 5019 // We might only need a W register here. We need to know the size of the
5020 // argument so we can properly encode it for the simulator call. 5020 // argument so we can properly encode it for the simulator call.
5021 if (args[i].Is32Bits()) pcs[i] = pcs[i].W(); 5021 if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
5022 } else if (args[i].IsFPRegister()) { 5022 } else if (args[i].IsFPRegister()) {
5023 // In C, floats are always cast to doubles for varargs calls. 5023 // In C, floats are always cast to doubles for varargs calls.
5024 pcs[i] = pcs_varargs_fp.PopLowestIndex().D(); 5024 pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
5025 } else { 5025 } else {
5026 ASSERT(args[i].IsNone()); 5026 DCHECK(args[i].IsNone());
5027 arg_count = i; 5027 arg_count = i;
5028 break; 5028 break;
5029 } 5029 }
5030 5030
5031 // If the argument is already in the right place, leave it where it is. 5031 // If the argument is already in the right place, leave it where it is.
5032 if (args[i].Aliases(pcs[i])) continue; 5032 if (args[i].Aliases(pcs[i])) continue;
5033 5033
5034 // Otherwise, if the argument is in a PCS argument register, allocate an 5034 // Otherwise, if the argument is in a PCS argument register, allocate an
5035 // appropriate scratch register and then move it out of the way. 5035 // appropriate scratch register and then move it out of the way.
5036 if (kPCSVarargs.IncludesAliasOf(args[i]) || 5036 if (kPCSVarargs.IncludesAliasOf(args[i]) ||
5037 kPCSVarargsFP.IncludesAliasOf(args[i])) { 5037 kPCSVarargsFP.IncludesAliasOf(args[i])) {
5038 if (args[i].IsRegister()) { 5038 if (args[i].IsRegister()) {
5039 Register old_arg = Register(args[i]); 5039 Register old_arg = Register(args[i]);
5040 Register new_arg = temps.AcquireSameSizeAs(old_arg); 5040 Register new_arg = temps.AcquireSameSizeAs(old_arg);
5041 Mov(new_arg, old_arg); 5041 Mov(new_arg, old_arg);
5042 args[i] = new_arg; 5042 args[i] = new_arg;
5043 } else { 5043 } else {
5044 FPRegister old_arg = FPRegister(args[i]); 5044 FPRegister old_arg = FPRegister(args[i]);
5045 FPRegister new_arg = temps.AcquireSameSizeAs(old_arg); 5045 FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
5046 Fmov(new_arg, old_arg); 5046 Fmov(new_arg, old_arg);
5047 args[i] = new_arg; 5047 args[i] = new_arg;
5048 } 5048 }
5049 } 5049 }
5050 } 5050 }
5051 5051
5052 // Do a second pass to move values into their final positions and perform any 5052 // Do a second pass to move values into their final positions and perform any
5053 // conversions that may be required. 5053 // conversions that may be required.
5054 for (int i = 0; i < arg_count; i++) { 5054 for (int i = 0; i < arg_count; i++) {
5055 ASSERT(pcs[i].type() == args[i].type()); 5055 DCHECK(pcs[i].type() == args[i].type());
5056 if (pcs[i].IsRegister()) { 5056 if (pcs[i].IsRegister()) {
5057 Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg); 5057 Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
5058 } else { 5058 } else {
5059 ASSERT(pcs[i].IsFPRegister()); 5059 DCHECK(pcs[i].IsFPRegister());
5060 if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) { 5060 if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
5061 Fmov(FPRegister(pcs[i]), FPRegister(args[i])); 5061 Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
5062 } else { 5062 } else {
5063 Fcvt(FPRegister(pcs[i]), FPRegister(args[i])); 5063 Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
5064 } 5064 }
5065 } 5065 }
5066 } 5066 }
5067 5067
5068 // Load the format string into x0, as per the procedure-call standard. 5068 // Load the format string into x0, as per the procedure-call standard.
5069 // 5069 //
(...skipping 33 matching lines...)
5103 hlt(kImmExceptionIsPrintf); 5103 hlt(kImmExceptionIsPrintf);
5104 dc32(arg_count); // kPrintfArgCountOffset 5104 dc32(arg_count); // kPrintfArgCountOffset
5105 5105
5106 // Determine the argument pattern. 5106 // Determine the argument pattern.
5107 uint32_t arg_pattern_list = 0; 5107 uint32_t arg_pattern_list = 0;
5108 for (int i = 0; i < arg_count; i++) { 5108 for (int i = 0; i < arg_count; i++) {
5109 uint32_t arg_pattern; 5109 uint32_t arg_pattern;
5110 if (args[i].IsRegister()) { 5110 if (args[i].IsRegister()) {
5111 arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX; 5111 arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
5112 } else { 5112 } else {
5113 ASSERT(args[i].Is64Bits()); 5113 DCHECK(args[i].Is64Bits());
5114 arg_pattern = kPrintfArgD; 5114 arg_pattern = kPrintfArgD;
5115 } 5115 }
5116 ASSERT(arg_pattern < (1 << kPrintfArgPatternBits)); 5116 DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
5117 arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i)); 5117 arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
5118 } 5118 }
5119 dc32(arg_pattern_list); // kPrintfArgPatternListOffset 5119 dc32(arg_pattern_list); // kPrintfArgPatternListOffset
5120 } 5120 }
5121 #else 5121 #else
5122 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE); 5122 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
5123 #endif 5123 #endif
5124 } 5124 }
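
On the simulator path, the hlt marker is followed by two dc32 payloads: the argument count and a packed list of per-argument type codes, each kPrintfArgPatternBits wide. A sketch of the packing; the 2-bit width and the W/X/D code values are assumptions consistent with this snippet, not quoted from the headers:

#include <cstdint>

// Packing of the simulator printf argument patterns, as emitted above with
// dc32(arg_pattern_list). Widths and code values are assumed.
const unsigned kPrintfArgPatternBits = 2;
enum PrintfArgPattern : uint32_t { kPrintfArgW = 1, kPrintfArgX = 2, kPrintfArgD = 3 };

uint32_t PackArgPatterns(const uint32_t* patterns, int arg_count) {
  uint32_t list = 0;
  for (int i = 0; i < arg_count; i++) {
    list |= patterns[i] << (kPrintfArgPatternBits * i);  // arg i: bits [2i+1:2i]
  }
  return list;
}
// Example: {kPrintfArgX, kPrintfArgD} packs to 0b1110 == 0xE.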
5125 5125
5126 5126
5127 void MacroAssembler::Printf(const char * format, 5127 void MacroAssembler::Printf(const char * format,
5128 CPURegister arg0, 5128 CPURegister arg0,
5129 CPURegister arg1, 5129 CPURegister arg1,
5130 CPURegister arg2, 5130 CPURegister arg2,
5131 CPURegister arg3) { 5131 CPURegister arg3) {
5132 // We can only print sp if it is the current stack pointer. 5132 // We can only print sp if it is the current stack pointer.
5133 if (!csp.Is(StackPointer())) { 5133 if (!csp.Is(StackPointer())) {
5134 ASSERT(!csp.Aliases(arg0)); 5134 DCHECK(!csp.Aliases(arg0));
5135 ASSERT(!csp.Aliases(arg1)); 5135 DCHECK(!csp.Aliases(arg1));
5136 ASSERT(!csp.Aliases(arg2)); 5136 DCHECK(!csp.Aliases(arg2));
5137 ASSERT(!csp.Aliases(arg3)); 5137 DCHECK(!csp.Aliases(arg3));
5138 } 5138 }
5139 5139
5140 // Printf is expected to preserve all registers, so make sure that none are 5140 // Printf is expected to preserve all registers, so make sure that none are
5141 // available as scratch registers until we've preserved them. 5141 // available as scratch registers until we've preserved them.
5142 RegList old_tmp_list = TmpList()->list(); 5142 RegList old_tmp_list = TmpList()->list();
5143 RegList old_fp_tmp_list = FPTmpList()->list(); 5143 RegList old_fp_tmp_list = FPTmpList()->list();
5144 TmpList()->set_list(0); 5144 TmpList()->set_list(0);
5145 FPTmpList()->set_list(0); 5145 FPTmpList()->set_list(0);
5146 5146
5147 // Preserve all caller-saved registers as well as NZCV. 5147 // Preserve all caller-saved registers as well as NZCV.
(...skipping 54 matching lines...)
5202 FPTmpList()->set_list(old_fp_tmp_list); 5202 FPTmpList()->set_list(old_fp_tmp_list);
5203 } 5203 }
5204 5204
5205 5205
5206 void MacroAssembler::EmitFrameSetupForCodeAgePatching() { 5206 void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
5207 // TODO(jbramley): Other architectures use the internal memcpy to copy the 5207 // TODO(jbramley): Other architectures use the internal memcpy to copy the
5208 // sequence. If this is a performance bottleneck, we should consider caching 5208 // sequence. If this is a performance bottleneck, we should consider caching
5209 // the sequence and copying it in the same way. 5209 // the sequence and copying it in the same way.
5210 InstructionAccurateScope scope(this, 5210 InstructionAccurateScope scope(this,
5211 kNoCodeAgeSequenceLength / kInstructionSize); 5211 kNoCodeAgeSequenceLength / kInstructionSize);
5212 ASSERT(jssp.Is(StackPointer())); 5212 DCHECK(jssp.Is(StackPointer()));
5213 EmitFrameSetupForCodeAgePatching(this); 5213 EmitFrameSetupForCodeAgePatching(this);
5214 } 5214 }
5215 5215
5216 5216
5217 5217
5218 void MacroAssembler::EmitCodeAgeSequence(Code* stub) { 5218 void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
5219 InstructionAccurateScope scope(this, 5219 InstructionAccurateScope scope(this,
5220 kNoCodeAgeSequenceLength / kInstructionSize); 5220 kNoCodeAgeSequenceLength / kInstructionSize);
5221 ASSERT(jssp.Is(StackPointer())); 5221 DCHECK(jssp.Is(StackPointer()));
5222 EmitCodeAgeSequence(this, stub); 5222 EmitCodeAgeSequence(this, stub);
5223 } 5223 }
5224 5224
5225 5225
5226 #undef __ 5226 #undef __
5227 #define __ assm-> 5227 #define __ assm->
5228 5228
5229 5229
5230 void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) { 5230 void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
5231 Label start; 5231 Label start;
(...skipping 32 matching lines...)
5264 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset); 5264 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
5265 if (stub) { 5265 if (stub) {
5266 __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start())); 5266 __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
5267 __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength); 5267 __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
5268 } 5268 }
5269 } 5269 }
5270 5270
5271 5271
5272 bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) { 5272 bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
5273 bool is_young = isolate->code_aging_helper()->IsYoung(sequence); 5273 bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
5274 ASSERT(is_young || 5274 DCHECK(is_young ||
5275 isolate->code_aging_helper()->IsOld(sequence)); 5275 isolate->code_aging_helper()->IsOld(sequence));
5276 return is_young; 5276 return is_young;
5277 } 5277 }
5278 5278
5279 5279
5280 void MacroAssembler::TruncatingDiv(Register result, 5280 void MacroAssembler::TruncatingDiv(Register result,
5281 Register dividend, 5281 Register dividend,
5282 int32_t divisor) { 5282 int32_t divisor) {
5283 ASSERT(!AreAliased(result, dividend)); 5283 DCHECK(!AreAliased(result, dividend));
5284 ASSERT(result.Is32Bits() && dividend.Is32Bits()); 5284 DCHECK(result.Is32Bits() && dividend.Is32Bits());
5285 MultiplierAndShift ms(divisor); 5285 MultiplierAndShift ms(divisor);
5286 Mov(result, ms.multiplier()); 5286 Mov(result, ms.multiplier());
5287 Smull(result.X(), dividend, result); 5287 Smull(result.X(), dividend, result);
5288 Asr(result.X(), result.X(), 32); 5288 Asr(result.X(), result.X(), 32);
5289 if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend); 5289 if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
5290 if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend); 5290 if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
5291 if (ms.shift() > 0) Asr(result, result, ms.shift()); 5291 if (ms.shift() > 0) Asr(result, result, ms.shift());
5292 Add(result, result, Operand(dividend, LSR, 31)); 5292 Add(result, result, Operand(dividend, LSR, 31));
5293 } 5293 }
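
TruncatingDiv is the classic multiply-and-shift division by a constant (Hacker's Delight, ch. 10): Smull/Asr take the high 32 bits of the 64-bit product, the conditional Add/Sub corrects for a multiplier whose sign differs from the divisor's, and the final Add of dividend >> 31 rounds toward zero. A checkable sketch with the magic pair for divisor 7; the pair is an assumption standing in for what MultiplierAndShift computes:

#include <cstdint>
#include <cstdio>

// Truncating n / 7 without a divide instruction, mirroring the Smull/Asr/
// Add/Asr/Add sequence in TruncatingDiv. Magic constants assumed for d = 7.
int32_t DivBy7(int32_t n) {
  const int32_t kMultiplier = static_cast<int32_t>(0x92492493);  // negative
  const int kShift = 2;
  int32_t q = static_cast<int32_t>(
      (static_cast<int64_t>(kMultiplier) * n) >> 32);  // Smull; Asr #32
  q += n;        // divisor > 0, multiplier < 0: the conditional Add path
  q >>= kShift;  // Asr by ms.shift()
  q += static_cast<uint32_t>(n) >> 31;  // round toward zero for negative n
  return q;
}

int main() {
  printf("%d %d\n", DivBy7(21), DivBy7(-22));  // prints "3 -3"
}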
5294 5294
(...skipping 16 matching lines...)
5311 FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) { 5311 FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
5312 int code = AcquireNextAvailable(availablefp_).code(); 5312 int code = AcquireNextAvailable(availablefp_).code();
5313 return FPRegister::Create(code, reg.SizeInBits()); 5313 return FPRegister::Create(code, reg.SizeInBits());
5314 } 5314 }
5315 5315
5316 5316
5317 CPURegister UseScratchRegisterScope::AcquireNextAvailable( 5317 CPURegister UseScratchRegisterScope::AcquireNextAvailable(
5318 CPURegList* available) { 5318 CPURegList* available) {
5319 CHECK(!available->IsEmpty()); 5319 CHECK(!available->IsEmpty());
5320 CPURegister result = available->PopLowestIndex(); 5320 CPURegister result = available->PopLowestIndex();
5321 ASSERT(!AreAliased(result, xzr, csp)); 5321 DCHECK(!AreAliased(result, xzr, csp));
5322 return result; 5322 return result;
5323 } 5323 }
5324 5324
5325 5325
5326 CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available, 5326 CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
5327 const CPURegister& reg) { 5327 const CPURegister& reg) {
5328 ASSERT(available->IncludesAliasOf(reg)); 5328 DCHECK(available->IncludesAliasOf(reg));
5329 available->Remove(reg); 5329 available->Remove(reg);
5330 return reg; 5330 return reg;
5331 } 5331 }
5332 5332
5333 5333
5334 #define __ masm-> 5334 #define __ masm->
5335 5335
5336 5336
5337 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg, 5337 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
5338 const Label* smi_check) { 5338 const Label* smi_check) {
5339 Assembler::BlockPoolsScope scope(masm); 5339 Assembler::BlockPoolsScope scope(masm);
5340 if (reg.IsValid()) { 5340 if (reg.IsValid()) {
5341 ASSERT(smi_check->is_bound()); 5341 DCHECK(smi_check->is_bound());
5342 ASSERT(reg.Is64Bits()); 5342 DCHECK(reg.Is64Bits());
5343 5343
5344 // Encode the register (x0-x30) in the lowest 5 bits, then the offset to 5344 // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
5345 // 'check' in the other bits. The possible offset is limited in that we 5345 // 'check' in the other bits. The possible offset is limited in that we
5346 // use BitField to pack the data, and the underlying data type is a 5346 // use BitField to pack the data, and the underlying data type is a
5347 // uint32_t. 5347 // uint32_t.
5348 uint32_t delta = __ InstructionsGeneratedSince(smi_check); 5348 uint32_t delta = __ InstructionsGeneratedSince(smi_check);
5349 __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta)); 5349 __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
5350 } else { 5350 } else {
5351 ASSERT(!smi_check->is_bound()); 5351 DCHECK(!smi_check->is_bound());
5352 5352
5353 // An offset of 0 indicates that there is no patch site. 5353 // An offset of 0 indicates that there is no patch site.
5354 __ InlineData(0); 5354 __ InlineData(0);
5355 } 5355 }
5356 } 5356 }
5357 5357
5358 5358
5359 InlineSmiCheckInfo::InlineSmiCheckInfo(Address info) 5359 InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
5360 : reg_(NoReg), smi_check_(NULL) { 5360 : reg_(NoReg), smi_check_(NULL) {
5361 InstructionSequence* inline_data = InstructionSequence::At(info); 5361 InstructionSequence* inline_data = InstructionSequence::At(info);
5362 ASSERT(inline_data->IsInlineData()); 5362 DCHECK(inline_data->IsInlineData());
5363 if (inline_data->IsInlineData()) { 5363 if (inline_data->IsInlineData()) {
5364 uint64_t payload = inline_data->InlineData(); 5364 uint64_t payload = inline_data->InlineData();
5365 // We use BitField to decode the payload, and BitField can only handle 5365 // We use BitField to decode the payload, and BitField can only handle
5366 // 32-bit values. 5366 // 32-bit values.
5367 ASSERT(is_uint32(payload)); 5367 DCHECK(is_uint32(payload));
5368 if (payload != 0) { 5368 if (payload != 0) {
5369 int reg_code = RegisterBits::decode(payload); 5369 int reg_code = RegisterBits::decode(payload);
5370 reg_ = Register::XRegFromCode(reg_code); 5370 reg_ = Register::XRegFromCode(reg_code);
5371 uint64_t smi_check_delta = DeltaBits::decode(payload); 5371 uint64_t smi_check_delta = DeltaBits::decode(payload);
5372 ASSERT(smi_check_delta != 0); 5372 DCHECK(smi_check_delta != 0);
5373 smi_check_ = inline_data->preceding(smi_check_delta); 5373 smi_check_ = inline_data->preceding(smi_check_delta);
5374 } 5374 }
5375 } 5375 }
5376 } 5376 }
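
The RegisterBits/DeltaBits packing used by InlineSmiCheckInfo amounts to a 5-bit register code in the low bits and the instruction delta above it, all within a uint32_t. A sketch; the field layout is assumed from the comments, not taken from the BitField definitions:

#include <cstdint>
#include <cassert>

// Hypothetical stand-ins for RegisterBits/DeltaBits: 5 bits of register
// code (x0-x30), then the offset back to the smi check, in a uint32_t.
uint32_t EncodeSmiCheckInfo(unsigned reg_code, uint32_t delta) {
  assert(reg_code < 32);                 // x0-x30 fit in 5 bits
  assert(delta < (uint32_t{1} << 27));   // must fit in the remaining bits
  return (delta << 5) | reg_code;
}

unsigned DecodeRegCode(uint32_t payload) { return payload & 0x1f; }
uint32_t DecodeDelta(uint32_t payload) { return payload >> 5; }
// Round trip: DecodeDelta(EncodeSmiCheckInfo(3, 42)) == 42.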
5377 5377
5378 5378
5379 #undef __ 5379 #undef __
5380 5380
5381 5381
5382 } } // namespace v8::internal 5382 } } // namespace v8::internal
5383 5383
5384 #endif // V8_TARGET_ARCH_ARM64 5384 #endif // V8_TARGET_ARCH_ARM64