OLD | NEW |
1 // Copyright (c) 1994-2006 Sun Microsystems Inc. | 1 // Copyright (c) 1994-2006 Sun Microsystems Inc. |
2 // All Rights Reserved. | 2 // All Rights Reserved. |
3 // | 3 // |
4 // Redistribution and use in source and binary forms, with or without | 4 // Redistribution and use in source and binary forms, with or without |
5 // modification, are permitted provided that the following conditions | 5 // modification, are permitted provided that the following conditions |
6 // are met: | 6 // are met: |
7 // | 7 // |
8 // - Redistributions of source code must retain the above copyright notice, | 8 // - Redistributions of source code must retain the above copyright notice, |
9 // this list of conditions and the following disclaimer. | 9 // this list of conditions and the following disclaimer. |
10 // | 10 // |
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
118 // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines. | 118 // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines. |
119 if (cpu.implementer() == base::CPU::ARM && | 119 if (cpu.implementer() == base::CPU::ARM && |
120 (cpu.part() == base::CPU::ARM_CORTEX_A5 || | 120 (cpu.part() == base::CPU::ARM_CORTEX_A5 || |
121 cpu.part() == base::CPU::ARM_CORTEX_A9)) { | 121 cpu.part() == base::CPU::ARM_CORTEX_A9)) { |
122 cache_line_size_ = 32; | 122 cache_line_size_ = 32; |
123 } | 123 } |
124 | 124 |
125 if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS; | 125 if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS; |
126 #endif | 126 #endif |
127 | 127 |
128 ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7)); | 128 DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7)); |
129 } | 129 } |
130 | 130 |
131 | 131 |
132 void CpuFeatures::PrintTarget() { | 132 void CpuFeatures::PrintTarget() { |
133 const char* arm_arch = NULL; | 133 const char* arm_arch = NULL; |
134 const char* arm_target_type = ""; | 134 const char* arm_target_type = ""; |
135 const char* arm_no_probe = ""; | 135 const char* arm_no_probe = ""; |
136 const char* arm_fpu = ""; | 136 const char* arm_fpu = ""; |
137 const char* arm_thumb = ""; | 137 const char* arm_thumb = ""; |
138 const char* arm_float_abi = NULL; | 138 const char* arm_float_abi = NULL; |
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
200 bool eabi_hardfloat = false; | 200 bool eabi_hardfloat = false; |
201 #endif | 201 #endif |
202 printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat); | 202 printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat); |
203 } | 203 } |
204 | 204 |
205 | 205 |
206 // ----------------------------------------------------------------------------- | 206 // ----------------------------------------------------------------------------- |
207 // Implementation of DwVfpRegister | 207 // Implementation of DwVfpRegister |
208 | 208 |
209 const char* DwVfpRegister::AllocationIndexToString(int index) { | 209 const char* DwVfpRegister::AllocationIndexToString(int index) { |
210 ASSERT(index >= 0 && index < NumAllocatableRegisters()); | 210 DCHECK(index >= 0 && index < NumAllocatableRegisters()); |
211 ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() == | 211 DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() == |
212 kNumReservedRegisters - 1); | 212 kNumReservedRegisters - 1); |
213 if (index >= kDoubleRegZero.code()) index += kNumReservedRegisters; | 213 if (index >= kDoubleRegZero.code()) index += kNumReservedRegisters; |
214 return VFPRegisters::Name(index, true); | 214 return VFPRegisters::Name(index, true); |
215 } | 215 } |
216 | 216 |
217 | 217 |
218 // ----------------------------------------------------------------------------- | 218 // ----------------------------------------------------------------------------- |
219 // Implementation of RelocInfo | 219 // Implementation of RelocInfo |
220 | 220 |
221 const int RelocInfo::kApplyMask = 0; | 221 const int RelocInfo::kApplyMask = 0; |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
259 // ----------------------------------------------------------------------------- | 259 // ----------------------------------------------------------------------------- |
260 // Implementation of Operand and MemOperand | 260 // Implementation of Operand and MemOperand |
261 // See assembler-arm-inl.h for inlined constructors | 261 // See assembler-arm-inl.h for inlined constructors |
262 | 262 |
263 Operand::Operand(Handle<Object> handle) { | 263 Operand::Operand(Handle<Object> handle) { |
264 AllowDeferredHandleDereference using_raw_address; | 264 AllowDeferredHandleDereference using_raw_address; |
265 rm_ = no_reg; | 265 rm_ = no_reg; |
266 // Verify all Objects referred by code are NOT in new space. | 266 // Verify all Objects referred by code are NOT in new space. |
267 Object* obj = *handle; | 267 Object* obj = *handle; |
268 if (obj->IsHeapObject()) { | 268 if (obj->IsHeapObject()) { |
269 ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); | 269 DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); |
270 imm32_ = reinterpret_cast<intptr_t>(handle.location()); | 270 imm32_ = reinterpret_cast<intptr_t>(handle.location()); |
271 rmode_ = RelocInfo::EMBEDDED_OBJECT; | 271 rmode_ = RelocInfo::EMBEDDED_OBJECT; |
272 } else { | 272 } else { |
273 // no relocation needed | 273 // no relocation needed |
274 imm32_ = reinterpret_cast<intptr_t>(obj); | 274 imm32_ = reinterpret_cast<intptr_t>(obj); |
275 rmode_ = RelocInfo::NONE32; | 275 rmode_ = RelocInfo::NONE32; |
276 } | 276 } |
277 } | 277 } |
278 | 278 |
279 | 279 |
280 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) { | 280 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) { |
281 ASSERT(is_uint5(shift_imm)); | 281 DCHECK(is_uint5(shift_imm)); |
282 | 282 |
283 rm_ = rm; | 283 rm_ = rm; |
284 rs_ = no_reg; | 284 rs_ = no_reg; |
285 shift_op_ = shift_op; | 285 shift_op_ = shift_op; |
286 shift_imm_ = shift_imm & 31; | 286 shift_imm_ = shift_imm & 31; |
287 | 287 |
288 if ((shift_op == ROR) && (shift_imm == 0)) { | 288 if ((shift_op == ROR) && (shift_imm == 0)) { |
289 // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode | 289 // ROR #0 is functionally equivalent to LSL #0 and this allows us to encode |
290 // RRX as ROR #0 (See below). | 290 // RRX as ROR #0 (See below). |
291 shift_op = LSL; | 291 shift_op = LSL; |
292 } else if (shift_op == RRX) { | 292 } else if (shift_op == RRX) { |
293 // encoded as ROR with shift_imm == 0 | 293 // encoded as ROR with shift_imm == 0 |
294 ASSERT(shift_imm == 0); | 294 DCHECK(shift_imm == 0); |
295 shift_op_ = ROR; | 295 shift_op_ = ROR; |
296 shift_imm_ = 0; | 296 shift_imm_ = 0; |
297 } | 297 } |
298 } | 298 } |
299 | 299 |
300 | 300 |
301 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) { | 301 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) { |
302 ASSERT(shift_op != RRX); | 302 DCHECK(shift_op != RRX); |
303 rm_ = rm; | 303 rm_ = rm; |
304 rs_ = no_reg; | 304 rs_ = no_reg; |
305 shift_op_ = shift_op; | 305 shift_op_ = shift_op; |
306 rs_ = rs; | 306 rs_ = rs; |
307 } | 307 } |
308 | 308 |
309 | 309 |
310 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) { | 310 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) { |
311 rn_ = rn; | 311 rn_ = rn; |
312 rm_ = no_reg; | 312 rm_ = no_reg; |
313 offset_ = offset; | 313 offset_ = offset; |
314 am_ = am; | 314 am_ = am; |
315 } | 315 } |
316 | 316 |
317 | 317 |
318 MemOperand::MemOperand(Register rn, Register rm, AddrMode am) { | 318 MemOperand::MemOperand(Register rn, Register rm, AddrMode am) { |
319 rn_ = rn; | 319 rn_ = rn; |
320 rm_ = rm; | 320 rm_ = rm; |
321 shift_op_ = LSL; | 321 shift_op_ = LSL; |
322 shift_imm_ = 0; | 322 shift_imm_ = 0; |
323 am_ = am; | 323 am_ = am; |
324 } | 324 } |
325 | 325 |
326 | 326 |
327 MemOperand::MemOperand(Register rn, Register rm, | 327 MemOperand::MemOperand(Register rn, Register rm, |
328 ShiftOp shift_op, int shift_imm, AddrMode am) { | 328 ShiftOp shift_op, int shift_imm, AddrMode am) { |
329 ASSERT(is_uint5(shift_imm)); | 329 DCHECK(is_uint5(shift_imm)); |
330 rn_ = rn; | 330 rn_ = rn; |
331 rm_ = rm; | 331 rm_ = rm; |
332 shift_op_ = shift_op; | 332 shift_op_ = shift_op; |
333 shift_imm_ = shift_imm & 31; | 333 shift_imm_ = shift_imm & 31; |
334 am_ = am; | 334 am_ = am; |
335 } | 335 } |
336 | 336 |
337 | 337 |
338 NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) { | 338 NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) { |
339 ASSERT((am == Offset) || (am == PostIndex)); | 339 DCHECK((am == Offset) || (am == PostIndex)); |
340 rn_ = rn; | 340 rn_ = rn; |
341 rm_ = (am == Offset) ? pc : sp; | 341 rm_ = (am == Offset) ? pc : sp; |
342 SetAlignment(align); | 342 SetAlignment(align); |
343 } | 343 } |
344 | 344 |
345 | 345 |
346 NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) { | 346 NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) { |
347 rn_ = rn; | 347 rn_ = rn; |
348 rm_ = rm; | 348 rm_ = rm; |
349 SetAlignment(align); | 349 SetAlignment(align); |
(...skipping 116 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
466 no_const_pool_before_ = 0; | 466 no_const_pool_before_ = 0; |
467 first_const_pool_32_use_ = -1; | 467 first_const_pool_32_use_ = -1; |
468 first_const_pool_64_use_ = -1; | 468 first_const_pool_64_use_ = -1; |
469 last_bound_pos_ = 0; | 469 last_bound_pos_ = 0; |
470 constant_pool_available_ = !FLAG_enable_ool_constant_pool; | 470 constant_pool_available_ = !FLAG_enable_ool_constant_pool; |
471 ClearRecordedAstId(); | 471 ClearRecordedAstId(); |
472 } | 472 } |
473 | 473 |
474 | 474 |
475 Assembler::~Assembler() { | 475 Assembler::~Assembler() { |
476 ASSERT(const_pool_blocked_nesting_ == 0); | 476 DCHECK(const_pool_blocked_nesting_ == 0); |
477 } | 477 } |
478 | 478 |
479 | 479 |
480 void Assembler::GetCode(CodeDesc* desc) { | 480 void Assembler::GetCode(CodeDesc* desc) { |
481 if (!FLAG_enable_ool_constant_pool) { | 481 if (!FLAG_enable_ool_constant_pool) { |
482 // Emit constant pool if necessary. | 482 // Emit constant pool if necessary. |
483 CheckConstPool(true, false); | 483 CheckConstPool(true, false); |
484 ASSERT(num_pending_32_bit_reloc_info_ == 0); | 484 DCHECK(num_pending_32_bit_reloc_info_ == 0); |
485 ASSERT(num_pending_64_bit_reloc_info_ == 0); | 485 DCHECK(num_pending_64_bit_reloc_info_ == 0); |
486 } | 486 } |
487 // Set up code descriptor. | 487 // Set up code descriptor. |
488 desc->buffer = buffer_; | 488 desc->buffer = buffer_; |
489 desc->buffer_size = buffer_size_; | 489 desc->buffer_size = buffer_size_; |
490 desc->instr_size = pc_offset(); | 490 desc->instr_size = pc_offset(); |
491 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); | 491 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); |
492 desc->origin = this; | 492 desc->origin = this; |
493 } | 493 } |
494 | 494 |
495 | 495 |
496 void Assembler::Align(int m) { | 496 void Assembler::Align(int m) { |
497 ASSERT(m >= 4 && IsPowerOf2(m)); | 497 DCHECK(m >= 4 && IsPowerOf2(m)); |
498 while ((pc_offset() & (m - 1)) != 0) { | 498 while ((pc_offset() & (m - 1)) != 0) { |
499 nop(); | 499 nop(); |
500 } | 500 } |
501 } | 501 } |
502 | 502 |
503 | 503 |
504 void Assembler::CodeTargetAlign() { | 504 void Assembler::CodeTargetAlign() { |
505 // Preferred alignment of jump targets on some ARM chips. | 505 // Preferred alignment of jump targets on some ARM chips. |
506 Align(8); | 506 Align(8); |
507 } | 507 } |
508 | 508 |
509 | 509 |
510 Condition Assembler::GetCondition(Instr instr) { | 510 Condition Assembler::GetCondition(Instr instr) { |
511 return Instruction::ConditionField(instr); | 511 return Instruction::ConditionField(instr); |
512 } | 512 } |
513 | 513 |
514 | 514 |
515 bool Assembler::IsBranch(Instr instr) { | 515 bool Assembler::IsBranch(Instr instr) { |
516 return (instr & (B27 | B25)) == (B27 | B25); | 516 return (instr & (B27 | B25)) == (B27 | B25); |
517 } | 517 } |
518 | 518 |
519 | 519 |
520 int Assembler::GetBranchOffset(Instr instr) { | 520 int Assembler::GetBranchOffset(Instr instr) { |
521 ASSERT(IsBranch(instr)); | 521 DCHECK(IsBranch(instr)); |
522 // Take the jump offset in the lower 24 bits, sign extend it and multiply it | 522 // Take the jump offset in the lower 24 bits, sign extend it and multiply it |
523 // with 4 to get the offset in bytes. | 523 // with 4 to get the offset in bytes. |
524 return ((instr & kImm24Mask) << 8) >> 6; | 524 return ((instr & kImm24Mask) << 8) >> 6; |
525 } | 525 } |
526 | 526 |
527 | 527 |
528 bool Assembler::IsLdrRegisterImmediate(Instr instr) { | 528 bool Assembler::IsLdrRegisterImmediate(Instr instr) { |
529 return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20); | 529 return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20); |
530 } | 530 } |
531 | 531 |
532 | 532 |
533 bool Assembler::IsVldrDRegisterImmediate(Instr instr) { | 533 bool Assembler::IsVldrDRegisterImmediate(Instr instr) { |
534 return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8); | 534 return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8); |
535 } | 535 } |
536 | 536 |
537 | 537 |
538 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) { | 538 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) { |
539 ASSERT(IsLdrRegisterImmediate(instr)); | 539 DCHECK(IsLdrRegisterImmediate(instr)); |
540 bool positive = (instr & B23) == B23; | 540 bool positive = (instr & B23) == B23; |
541 int offset = instr & kOff12Mask; // Zero extended offset. | 541 int offset = instr & kOff12Mask; // Zero extended offset. |
542 return positive ? offset : -offset; | 542 return positive ? offset : -offset; |
543 } | 543 } |
544 | 544 |
545 | 545 |
546 int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) { | 546 int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) { |
547 ASSERT(IsVldrDRegisterImmediate(instr)); | 547 DCHECK(IsVldrDRegisterImmediate(instr)); |
548 bool positive = (instr & B23) == B23; | 548 bool positive = (instr & B23) == B23; |
549 int offset = instr & kOff8Mask; // Zero extended offset. | 549 int offset = instr & kOff8Mask; // Zero extended offset. |
550 offset <<= 2; | 550 offset <<= 2; |
551 return positive ? offset : -offset; | 551 return positive ? offset : -offset; |
552 } | 552 } |
553 | 553 |
554 | 554 |
555 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) { | 555 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) { |
556 ASSERT(IsLdrRegisterImmediate(instr)); | 556 DCHECK(IsLdrRegisterImmediate(instr)); |
557 bool positive = offset >= 0; | 557 bool positive = offset >= 0; |
558 if (!positive) offset = -offset; | 558 if (!positive) offset = -offset; |
559 ASSERT(is_uint12(offset)); | 559 DCHECK(is_uint12(offset)); |
560 // Set bit indicating whether the offset should be added. | 560 // Set bit indicating whether the offset should be added. |
561 instr = (instr & ~B23) | (positive ? B23 : 0); | 561 instr = (instr & ~B23) | (positive ? B23 : 0); |
562 // Set the actual offset. | 562 // Set the actual offset. |
563 return (instr & ~kOff12Mask) | offset; | 563 return (instr & ~kOff12Mask) | offset; |
564 } | 564 } |
565 | 565 |
566 | 566 |
567 Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) { | 567 Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) { |
568 ASSERT(IsVldrDRegisterImmediate(instr)); | 568 DCHECK(IsVldrDRegisterImmediate(instr)); |
569 ASSERT((offset & ~3) == offset); // Must be 64-bit aligned. | 569 DCHECK((offset & ~3) == offset); // Must be 64-bit aligned. |
570 bool positive = offset >= 0; | 570 bool positive = offset >= 0; |
571 if (!positive) offset = -offset; | 571 if (!positive) offset = -offset; |
572 ASSERT(is_uint10(offset)); | 572 DCHECK(is_uint10(offset)); |
573 // Set bit indicating whether the offset should be added. | 573 // Set bit indicating whether the offset should be added. |
574 instr = (instr & ~B23) | (positive ? B23 : 0); | 574 instr = (instr & ~B23) | (positive ? B23 : 0); |
575 // Set the actual offset. Its bottom 2 bits are zero. | 575 // Set the actual offset. Its bottom 2 bits are zero. |
576 return (instr & ~kOff8Mask) | (offset >> 2); | 576 return (instr & ~kOff8Mask) | (offset >> 2); |
577 } | 577 } |
578 | 578 |
579 | 579 |
580 bool Assembler::IsStrRegisterImmediate(Instr instr) { | 580 bool Assembler::IsStrRegisterImmediate(Instr instr) { |
581 return (instr & (B27 | B26 | B25 | B22 | B20)) == B26; | 581 return (instr & (B27 | B26 | B25 | B22 | B20)) == B26; |
582 } | 582 } |
583 | 583 |
584 | 584 |
585 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) { | 585 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) { |
586 ASSERT(IsStrRegisterImmediate(instr)); | 586 DCHECK(IsStrRegisterImmediate(instr)); |
587 bool positive = offset >= 0; | 587 bool positive = offset >= 0; |
588 if (!positive) offset = -offset; | 588 if (!positive) offset = -offset; |
589 ASSERT(is_uint12(offset)); | 589 DCHECK(is_uint12(offset)); |
590 // Set bit indicating whether the offset should be added. | 590 // Set bit indicating whether the offset should be added. |
591 instr = (instr & ~B23) | (positive ? B23 : 0); | 591 instr = (instr & ~B23) | (positive ? B23 : 0); |
592 // Set the actual offset. | 592 // Set the actual offset. |
593 return (instr & ~kOff12Mask) | offset; | 593 return (instr & ~kOff12Mask) | offset; |
594 } | 594 } |
595 | 595 |
596 | 596 |
597 bool Assembler::IsAddRegisterImmediate(Instr instr) { | 597 bool Assembler::IsAddRegisterImmediate(Instr instr) { |
598 return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23); | 598 return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23); |
599 } | 599 } |
600 | 600 |
601 | 601 |
602 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) { | 602 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) { |
603 ASSERT(IsAddRegisterImmediate(instr)); | 603 DCHECK(IsAddRegisterImmediate(instr)); |
604 ASSERT(offset >= 0); | 604 DCHECK(offset >= 0); |
605 ASSERT(is_uint12(offset)); | 605 DCHECK(is_uint12(offset)); |
606 // Set the offset. | 606 // Set the offset. |
607 return (instr & ~kOff12Mask) | offset; | 607 return (instr & ~kOff12Mask) | offset; |
608 } | 608 } |
609 | 609 |
610 | 610 |
611 Register Assembler::GetRd(Instr instr) { | 611 Register Assembler::GetRd(Instr instr) { |
612 Register reg; | 612 Register reg; |
613 reg.code_ = Instruction::RdValue(instr); | 613 reg.code_ = Instruction::RdValue(instr); |
614 return reg; | 614 return reg; |
615 } | 615 } |
(...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
741 } | 741 } |
742 | 742 |
743 | 743 |
744 bool Assembler::IsCmpImmediate(Instr instr) { | 744 bool Assembler::IsCmpImmediate(Instr instr) { |
745 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == | 745 return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) == |
746 (I | CMP | S); | 746 (I | CMP | S); |
747 } | 747 } |
748 | 748 |
749 | 749 |
750 Register Assembler::GetCmpImmediateRegister(Instr instr) { | 750 Register Assembler::GetCmpImmediateRegister(Instr instr) { |
751 ASSERT(IsCmpImmediate(instr)); | 751 DCHECK(IsCmpImmediate(instr)); |
752 return GetRn(instr); | 752 return GetRn(instr); |
753 } | 753 } |
754 | 754 |
755 | 755 |
756 int Assembler::GetCmpImmediateRawImmediate(Instr instr) { | 756 int Assembler::GetCmpImmediateRawImmediate(Instr instr) { |
757 ASSERT(IsCmpImmediate(instr)); | 757 DCHECK(IsCmpImmediate(instr)); |
758 return instr & kOff12Mask; | 758 return instr & kOff12Mask; |
759 } | 759 } |
760 | 760 |
761 | 761 |
762 // Labels refer to positions in the (to be) generated code. | 762 // Labels refer to positions in the (to be) generated code. |
763 // There are bound, linked, and unused labels. | 763 // There are bound, linked, and unused labels. |
764 // | 764 // |
765 // Bound labels refer to known positions in the already | 765 // Bound labels refer to known positions in the already |
766 // generated code. pos() is the position the label refers to. | 766 // generated code. pos() is the position the label refers to. |
767 // | 767 // |
768 // Linked labels refer to unknown positions in the code | 768 // Linked labels refer to unknown positions in the code |
769 // to be generated; pos() is the position of the last | 769 // to be generated; pos() is the position of the last |
770 // instruction using the label. | 770 // instruction using the label. |
771 // | 771 // |
772 // The linked labels form a link chain by making the branch offset | 772 // The linked labels form a link chain by making the branch offset |
773 // in the instruction stream to point to the previous branch | 773 // in the instruction stream to point to the previous branch |
774 // instruction using the same label. | 774 // instruction using the same label. |
775 // | 775 // |
776 // The link chain is terminated by a branch offset pointing to the | 776 // The link chain is terminated by a branch offset pointing to the |
777 // same position. | 777 // same position. |
778 | 778 |
779 | 779 |
780 int Assembler::target_at(int pos) { | 780 int Assembler::target_at(int pos) { |
781 Instr instr = instr_at(pos); | 781 Instr instr = instr_at(pos); |
782 if (is_uint24(instr)) { | 782 if (is_uint24(instr)) { |
783 // Emitted link to a label, not part of a branch. | 783 // Emitted link to a label, not part of a branch. |
784 return instr; | 784 return instr; |
785 } | 785 } |
786 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 | 786 DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 |
787 int imm26 = ((instr & kImm24Mask) << 8) >> 6; | 787 int imm26 = ((instr & kImm24Mask) << 8) >> 6; |
788 if ((Instruction::ConditionField(instr) == kSpecialCondition) && | 788 if ((Instruction::ConditionField(instr) == kSpecialCondition) && |
789 ((instr & B24) != 0)) { | 789 ((instr & B24) != 0)) { |
790 // blx uses bit 24 to encode bit 2 of imm26 | 790 // blx uses bit 24 to encode bit 2 of imm26 |
791 imm26 += 2; | 791 imm26 += 2; |
792 } | 792 } |
793 return pos + kPcLoadDelta + imm26; | 793 return pos + kPcLoadDelta + imm26; |
794 } | 794 } |
795 | 795 |
796 | 796 |
797 void Assembler::target_at_put(int pos, int target_pos) { | 797 void Assembler::target_at_put(int pos, int target_pos) { |
798 Instr instr = instr_at(pos); | 798 Instr instr = instr_at(pos); |
799 if (is_uint24(instr)) { | 799 if (is_uint24(instr)) { |
800 ASSERT(target_pos == pos || target_pos >= 0); | 800 DCHECK(target_pos == pos || target_pos >= 0); |
801 // Emitted link to a label, not part of a branch. | 801 // Emitted link to a label, not part of a branch. |
802 // Load the position of the label relative to the generated code object | 802 // Load the position of the label relative to the generated code object |
803 // pointer in a register. | 803 // pointer in a register. |
804 | 804 |
805 // Here are the instructions we need to emit: | 805 // Here are the instructions we need to emit: |
806 // For ARMv7: target24 => target16_1:target16_0 | 806 // For ARMv7: target24 => target16_1:target16_0 |
807 // movw dst, #target16_0 | 807 // movw dst, #target16_0 |
808 // movt dst, #target16_1 | 808 // movt dst, #target16_1 |
809 // For ARMv6: target24 => target8_2:target8_1:target8_0 | 809 // For ARMv6: target24 => target8_2:target8_1:target8_0 |
810 // mov dst, #target8_0 | 810 // mov dst, #target8_0 |
811 // orr dst, dst, #target8_1 << 8 | 811 // orr dst, dst, #target8_1 << 8 |
812 // orr dst, dst, #target8_2 << 16 | 812 // orr dst, dst, #target8_2 << 16 |
813 | 813 |
814 // We extract the destination register from the emitted nop instruction. | 814 // We extract the destination register from the emitted nop instruction. |
815 Register dst = Register::from_code( | 815 Register dst = Register::from_code( |
816 Instruction::RmValue(instr_at(pos + kInstrSize))); | 816 Instruction::RmValue(instr_at(pos + kInstrSize))); |
817 ASSERT(IsNop(instr_at(pos + kInstrSize), dst.code())); | 817 DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code())); |
818 uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag); | 818 uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag); |
819 ASSERT(is_uint24(target24)); | 819 DCHECK(is_uint24(target24)); |
820 if (is_uint8(target24)) { | 820 if (is_uint8(target24)) { |
821 // If the target fits in a byte then only patch with a mov | 821 // If the target fits in a byte then only patch with a mov |
822 // instruction. | 822 // instruction. |
823 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), | 823 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), |
824 1, | 824 1, |
825 CodePatcher::DONT_FLUSH); | 825 CodePatcher::DONT_FLUSH); |
826 patcher.masm()->mov(dst, Operand(target24)); | 826 patcher.masm()->mov(dst, Operand(target24)); |
827 } else { | 827 } else { |
828 uint16_t target16_0 = target24 & kImm16Mask; | 828 uint16_t target16_0 = target24 & kImm16Mask; |
829 uint16_t target16_1 = target24 >> 16; | 829 uint16_t target16_1 = target24 >> 16; |
(...skipping 28 matching lines...) Expand all Loading... |
858 CodePatcher::DONT_FLUSH); | 858 CodePatcher::DONT_FLUSH); |
859 patcher.masm()->mov(dst, Operand(target8_0)); | 859 patcher.masm()->mov(dst, Operand(target8_0)); |
860 patcher.masm()->orr(dst, dst, Operand(target8_1 << 8)); | 860 patcher.masm()->orr(dst, dst, Operand(target8_1 << 8)); |
861 patcher.masm()->orr(dst, dst, Operand(target8_2 << 16)); | 861 patcher.masm()->orr(dst, dst, Operand(target8_2 << 16)); |
862 } | 862 } |
863 } | 863 } |
864 } | 864 } |
865 return; | 865 return; |
866 } | 866 } |
867 int imm26 = target_pos - (pos + kPcLoadDelta); | 867 int imm26 = target_pos - (pos + kPcLoadDelta); |
868 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 | 868 DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx imm24 |
869 if (Instruction::ConditionField(instr) == kSpecialCondition) { | 869 if (Instruction::ConditionField(instr) == kSpecialCondition) { |
870 // blx uses bit 24 to encode bit 2 of imm26 | 870 // blx uses bit 24 to encode bit 2 of imm26 |
871 ASSERT((imm26 & 1) == 0); | 871 DCHECK((imm26 & 1) == 0); |
872 instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24; | 872 instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24; |
873 } else { | 873 } else { |
874 ASSERT((imm26 & 3) == 0); | 874 DCHECK((imm26 & 3) == 0); |
875 instr &= ~kImm24Mask; | 875 instr &= ~kImm24Mask; |
876 } | 876 } |
877 int imm24 = imm26 >> 2; | 877 int imm24 = imm26 >> 2; |
878 ASSERT(is_int24(imm24)); | 878 DCHECK(is_int24(imm24)); |
879 instr_at_put(pos, instr | (imm24 & kImm24Mask)); | 879 instr_at_put(pos, instr | (imm24 & kImm24Mask)); |
880 } | 880 } |
881 | 881 |
882 | 882 |
883 void Assembler::print(Label* L) { | 883 void Assembler::print(Label* L) { |
884 if (L->is_unused()) { | 884 if (L->is_unused()) { |
885 PrintF("unused label\n"); | 885 PrintF("unused label\n"); |
886 } else if (L->is_bound()) { | 886 } else if (L->is_bound()) { |
887 PrintF("bound label to %d\n", L->pos()); | 887 PrintF("bound label to %d\n", L->pos()); |
888 } else if (L->is_linked()) { | 888 } else if (L->is_linked()) { |
889 Label l = *L; | 889 Label l = *L; |
890 PrintF("unbound label"); | 890 PrintF("unbound label"); |
891 while (l.is_linked()) { | 891 while (l.is_linked()) { |
892 PrintF("@ %d ", l.pos()); | 892 PrintF("@ %d ", l.pos()); |
893 Instr instr = instr_at(l.pos()); | 893 Instr instr = instr_at(l.pos()); |
894 if ((instr & ~kImm24Mask) == 0) { | 894 if ((instr & ~kImm24Mask) == 0) { |
895 PrintF("value\n"); | 895 PrintF("value\n"); |
896 } else { | 896 } else { |
897 ASSERT((instr & 7*B25) == 5*B25); // b, bl, or blx | 897 DCHECK((instr & 7*B25) == 5*B25); // b, bl, or blx |
898 Condition cond = Instruction::ConditionField(instr); | 898 Condition cond = Instruction::ConditionField(instr); |
899 const char* b; | 899 const char* b; |
900 const char* c; | 900 const char* c; |
901 if (cond == kSpecialCondition) { | 901 if (cond == kSpecialCondition) { |
902 b = "blx"; | 902 b = "blx"; |
903 c = ""; | 903 c = ""; |
904 } else { | 904 } else { |
905 if ((instr & B24) != 0) | 905 if ((instr & B24) != 0) |
906 b = "bl"; | 906 b = "bl"; |
907 else | 907 else |
(...skipping 24 matching lines...) Expand all Loading... |
932 } | 932 } |
933 next(&l); | 933 next(&l); |
934 } | 934 } |
935 } else { | 935 } else { |
936 PrintF("label in inconsistent state (pos = %d)\n", L->pos_); | 936 PrintF("label in inconsistent state (pos = %d)\n", L->pos_); |
937 } | 937 } |
938 } | 938 } |
939 | 939 |
940 | 940 |
941 void Assembler::bind_to(Label* L, int pos) { | 941 void Assembler::bind_to(Label* L, int pos) { |
942 ASSERT(0 <= pos && pos <= pc_offset()); // must have a valid binding position | 942 DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position |
943 while (L->is_linked()) { | 943 while (L->is_linked()) { |
944 int fixup_pos = L->pos(); | 944 int fixup_pos = L->pos(); |
945 next(L); // call next before overwriting link with target at fixup_pos | 945 next(L); // call next before overwriting link with target at fixup_pos |
946 target_at_put(fixup_pos, pos); | 946 target_at_put(fixup_pos, pos); |
947 } | 947 } |
948 L->bind_to(pos); | 948 L->bind_to(pos); |
949 | 949 |
950 // Keep track of the last bound label so we don't eliminate any instructions | 950 // Keep track of the last bound label so we don't eliminate any instructions |
951 // before a bound label. | 951 // before a bound label. |
952 if (pos > last_bound_pos_) | 952 if (pos > last_bound_pos_) |
953 last_bound_pos_ = pos; | 953 last_bound_pos_ = pos; |
954 } | 954 } |
955 | 955 |
956 | 956 |
957 void Assembler::bind(Label* L) { | 957 void Assembler::bind(Label* L) { |
958 ASSERT(!L->is_bound()); // label can only be bound once | 958 DCHECK(!L->is_bound()); // label can only be bound once |
959 bind_to(L, pc_offset()); | 959 bind_to(L, pc_offset()); |
960 } | 960 } |
961 | 961 |
962 | 962 |
963 void Assembler::next(Label* L) { | 963 void Assembler::next(Label* L) { |
964 ASSERT(L->is_linked()); | 964 DCHECK(L->is_linked()); |
965 int link = target_at(L->pos()); | 965 int link = target_at(L->pos()); |
966 if (link == L->pos()) { | 966 if (link == L->pos()) { |
967 // Branch target points to the same instruction. This is the end of the link | 967 // Branch target points to the same instruction. This is the end of the link |
968 // chain. | 968 // chain. |
969 L->Unuse(); | 969 L->Unuse(); |
970 } else { | 970 } else { |
971 ASSERT(link >= 0); | 971 DCHECK(link >= 0); |
972 L->link_to(link); | 972 L->link_to(link); |
973 } | 973 } |
974 } | 974 } |
975 | 975 |
976 | 976 |
977 // Low-level code emission routines depending on the addressing mode. | 977 // Low-level code emission routines depending on the addressing mode. |
978 // If this returns true then you have to use the rotate_imm and immed_8 | 978 // If this returns true then you have to use the rotate_imm and immed_8 |
979 // that it returns, because it may have already changed the instruction | 979 // that it returns, because it may have already changed the instruction |
980 // to match them! | 980 // to match them! |
981 static bool fits_shifter(uint32_t imm32, | 981 static bool fits_shifter(uint32_t imm32, |
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1047 } | 1047 } |
1048 return true; | 1048 return true; |
1049 } | 1049 } |
1050 | 1050 |
1051 | 1051 |
1052 static bool use_mov_immediate_load(const Operand& x, | 1052 static bool use_mov_immediate_load(const Operand& x, |
1053 const Assembler* assembler) { | 1053 const Assembler* assembler) { |
1054 if (assembler != NULL && !assembler->is_constant_pool_available()) { | 1054 if (assembler != NULL && !assembler->is_constant_pool_available()) { |
1055 // If there is no constant pool available, we must use a mov immediate. | 1055 // If there is no constant pool available, we must use a mov immediate. |
1056 // TODO(rmcilroy): enable ARMv6 support. | 1056 // TODO(rmcilroy): enable ARMv6 support. |
1057 ASSERT(CpuFeatures::IsSupported(ARMv7)); | 1057 DCHECK(CpuFeatures::IsSupported(ARMv7)); |
1058 return true; | 1058 return true; |
1059 } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && | 1059 } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) && |
1060 (assembler == NULL || !assembler->predictable_code_size())) { | 1060 (assembler == NULL || !assembler->predictable_code_size())) { |
1061 // Prefer movw / movt to constant pool if it is more efficient on the CPU. | 1061 // Prefer movw / movt to constant pool if it is more efficient on the CPU. |
1062 return true; | 1062 return true; |
1063 } else if (x.must_output_reloc_info(assembler)) { | 1063 } else if (x.must_output_reloc_info(assembler)) { |
1064 // Prefer constant pool if data is likely to be patched. | 1064 // Prefer constant pool if data is likely to be patched. |
1065 return false; | 1065 return false; |
1066 } else { | 1066 } else { |
1067 // Otherwise, use immediate load if movw / movt is available. | 1067 // Otherwise, use immediate load if movw / movt is available. |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1107 const Operand& x, | 1107 const Operand& x, |
1108 Condition cond) { | 1108 Condition cond) { |
1109 RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL); | 1109 RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL); |
1110 if (x.must_output_reloc_info(this)) { | 1110 if (x.must_output_reloc_info(this)) { |
1111 RecordRelocInfo(rinfo); | 1111 RecordRelocInfo(rinfo); |
1112 } | 1112 } |
1113 | 1113 |
1114 if (use_mov_immediate_load(x, this)) { | 1114 if (use_mov_immediate_load(x, this)) { |
1115 Register target = rd.code() == pc.code() ? ip : rd; | 1115 Register target = rd.code() == pc.code() ? ip : rd; |
1116 // TODO(rmcilroy): add ARMv6 support for immediate loads. | 1116 // TODO(rmcilroy): add ARMv6 support for immediate loads. |
1117 ASSERT(CpuFeatures::IsSupported(ARMv7)); | 1117 DCHECK(CpuFeatures::IsSupported(ARMv7)); |
1118 if (!FLAG_enable_ool_constant_pool && | 1118 if (!FLAG_enable_ool_constant_pool && |
1119 x.must_output_reloc_info(this)) { | 1119 x.must_output_reloc_info(this)) { |
1120 // Make sure the movw/movt doesn't get separated. | 1120 // Make sure the movw/movt doesn't get separated. |
1121 BlockConstPoolFor(2); | 1121 BlockConstPoolFor(2); |
1122 } | 1122 } |
1123 movw(target, static_cast<uint32_t>(x.imm32_ & 0xffff), cond); | 1123 movw(target, static_cast<uint32_t>(x.imm32_ & 0xffff), cond); |
1124 movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond); | 1124 movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond); |
1125 if (target.code() != rd.code()) { | 1125 if (target.code() != rd.code()) { |
1126 mov(rd, target, LeaveCC, cond); | 1126 mov(rd, target, LeaveCC, cond); |
1127 } | 1127 } |
1128 } else { | 1128 } else { |
1129 ASSERT(is_constant_pool_available()); | 1129 DCHECK(is_constant_pool_available()); |
1130 ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo); | 1130 ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo); |
1131 if (section == ConstantPoolArray::EXTENDED_SECTION) { | 1131 if (section == ConstantPoolArray::EXTENDED_SECTION) { |
1132 ASSERT(FLAG_enable_ool_constant_pool); | 1132 DCHECK(FLAG_enable_ool_constant_pool); |
1133 Register target = rd.code() == pc.code() ? ip : rd; | 1133 Register target = rd.code() == pc.code() ? ip : rd; |
1134 // Emit instructions to load constant pool offset. | 1134 // Emit instructions to load constant pool offset. |
1135 movw(target, 0, cond); | 1135 movw(target, 0, cond); |
1136 movt(target, 0, cond); | 1136 movt(target, 0, cond); |
1137 // Load from constant pool at offset. | 1137 // Load from constant pool at offset. |
1138 ldr(rd, MemOperand(pp, target), cond); | 1138 ldr(rd, MemOperand(pp, target), cond); |
1139 } else { | 1139 } else { |
1140 ASSERT(section == ConstantPoolArray::SMALL_SECTION); | 1140 DCHECK(section == ConstantPoolArray::SMALL_SECTION); |
1141 ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond); | 1141 ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond); |
1142 } | 1142 } |
1143 } | 1143 } |
1144 } | 1144 } |
1145 | 1145 |
1146 | 1146 |
1147 void Assembler::addrmod1(Instr instr, | 1147 void Assembler::addrmod1(Instr instr, |
1148 Register rn, | 1148 Register rn, |
1149 Register rd, | 1149 Register rd, |
1150 const Operand& x) { | 1150 const Operand& x) { |
1151 CheckBuffer(); | 1151 CheckBuffer(); |
1152 ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0); | 1152 DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0); |
1153 if (!x.rm_.is_valid()) { | 1153 if (!x.rm_.is_valid()) { |
1154 // Immediate. | 1154 // Immediate. |
1155 uint32_t rotate_imm; | 1155 uint32_t rotate_imm; |
1156 uint32_t immed_8; | 1156 uint32_t immed_8; |
1157 if (x.must_output_reloc_info(this) || | 1157 if (x.must_output_reloc_info(this) || |
1158 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { | 1158 !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) { |
1159 // The immediate operand cannot be encoded as a shifter operand, so load | 1159 // The immediate operand cannot be encoded as a shifter operand, so load |
1160 // it first to register ip and change the original instruction to use ip. | 1160 // it first to register ip and change the original instruction to use ip. |
1161 // However, if the original instruction is a 'mov rd, x' (not setting the | 1161 // However, if the original instruction is a 'mov rd, x' (not setting the |
1162 // condition code), then replace it with a 'ldr rd, [pc]'. | 1162 // condition code), then replace it with a 'ldr rd, [pc]'. |
1163 CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed | 1163 CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed |
1164 Condition cond = Instruction::ConditionField(instr); | 1164 Condition cond = Instruction::ConditionField(instr); |
1165 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set | 1165 if ((instr & ~kCondMask) == 13*B21) { // mov, S not set |
1166 move_32_bit_immediate(rd, x, cond); | 1166 move_32_bit_immediate(rd, x, cond); |
1167 } else { | 1167 } else { |
1168 mov(ip, x, LeaveCC, cond); | 1168 mov(ip, x, LeaveCC, cond); |
1169 addrmod1(instr, rn, rd, Operand(ip)); | 1169 addrmod1(instr, rn, rd, Operand(ip)); |
1170 } | 1170 } |
1171 return; | 1171 return; |
1172 } | 1172 } |
1173 instr |= I | rotate_imm*B8 | immed_8; | 1173 instr |= I | rotate_imm*B8 | immed_8; |
1174 } else if (!x.rs_.is_valid()) { | 1174 } else if (!x.rs_.is_valid()) { |
1175 // Immediate shift. | 1175 // Immediate shift. |
1176 instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); | 1176 instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); |
1177 } else { | 1177 } else { |
1178 // Register shift. | 1178 // Register shift. |
1179 ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc)); | 1179 DCHECK(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc)); |
1180 instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code(); | 1180 instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code(); |
1181 } | 1181 } |
1182 emit(instr | rn.code()*B16 | rd.code()*B12); | 1182 emit(instr | rn.code()*B16 | rd.code()*B12); |
1183 if (rn.is(pc) || x.rm_.is(pc)) { | 1183 if (rn.is(pc) || x.rm_.is(pc)) { |
1184 // Block constant pool emission for one instruction after reading pc. | 1184 // Block constant pool emission for one instruction after reading pc. |
1185 BlockConstPoolFor(1); | 1185 BlockConstPoolFor(1); |
1186 } | 1186 } |
1187 } | 1187 } |
1188 | 1188 |
1189 | 1189 |
1190 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { | 1190 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) { |
1191 ASSERT((instr & ~(kCondMask | B | L)) == B26); | 1191 DCHECK((instr & ~(kCondMask | B | L)) == B26); |
1192 int am = x.am_; | 1192 int am = x.am_; |
1193 if (!x.rm_.is_valid()) { | 1193 if (!x.rm_.is_valid()) { |
1194 // Immediate offset. | 1194 // Immediate offset. |
1195 int offset_12 = x.offset_; | 1195 int offset_12 = x.offset_; |
1196 if (offset_12 < 0) { | 1196 if (offset_12 < 0) { |
1197 offset_12 = -offset_12; | 1197 offset_12 = -offset_12; |
1198 am ^= U; | 1198 am ^= U; |
1199 } | 1199 } |
1200 if (!is_uint12(offset_12)) { | 1200 if (!is_uint12(offset_12)) { |
1201 // Immediate offset cannot be encoded, load it first to register ip | 1201 // Immediate offset cannot be encoded, load it first to register ip |
1202 // rn (and rd in a load) should never be ip, or will be trashed. | 1202 // rn (and rd in a load) should never be ip, or will be trashed. |
1203 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); | 1203 DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); |
1204 mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr)); | 1204 mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr)); |
1205 addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_)); | 1205 addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_)); |
1206 return; | 1206 return; |
1207 } | 1207 } |
1208 ASSERT(offset_12 >= 0); // no masking needed | 1208 DCHECK(offset_12 >= 0); // no masking needed |
1209 instr |= offset_12; | 1209 instr |= offset_12; |
1210 } else { | 1210 } else { |
1211 // Register offset (shift_imm_ and shift_op_ are 0) or scaled | 1211 // Register offset (shift_imm_ and shift_op_ are 0) or scaled |
1212 // register offset the constructors make sure that both shift_imm_ | 1212 // register offset the constructors make sure that both shift_imm_ |
1213 // and shift_op_ are initialized. | 1213 // and shift_op_ are initialized. |
1214 ASSERT(!x.rm_.is(pc)); | 1214 DCHECK(!x.rm_.is(pc)); |
1215 instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); | 1215 instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code(); |
1216 } | 1216 } |
1217 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback | 1217 DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback |
1218 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); | 1218 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); |
1219 } | 1219 } |
1220 | 1220 |
1221 | 1221 |
1222 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) { | 1222 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) { |
1223 ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7)); | 1223 DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7)); |
1224 ASSERT(x.rn_.is_valid()); | 1224 DCHECK(x.rn_.is_valid()); |
1225 int am = x.am_; | 1225 int am = x.am_; |
1226 if (!x.rm_.is_valid()) { | 1226 if (!x.rm_.is_valid()) { |
1227 // Immediate offset. | 1227 // Immediate offset. |
1228 int offset_8 = x.offset_; | 1228 int offset_8 = x.offset_; |
1229 if (offset_8 < 0) { | 1229 if (offset_8 < 0) { |
1230 offset_8 = -offset_8; | 1230 offset_8 = -offset_8; |
1231 am ^= U; | 1231 am ^= U; |
1232 } | 1232 } |
1233 if (!is_uint8(offset_8)) { | 1233 if (!is_uint8(offset_8)) { |
1234 // Immediate offset cannot be encoded, load it first to register ip | 1234 // Immediate offset cannot be encoded, load it first to register ip |
1235 // rn (and rd in a load) should never be ip, or will be trashed. | 1235 // rn (and rd in a load) should never be ip, or will be trashed. |
1236 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); | 1236 DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); |
1237 mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr)); | 1237 mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr)); |
1238 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); | 1238 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); |
1239 return; | 1239 return; |
1240 } | 1240 } |
1241 ASSERT(offset_8 >= 0); // no masking needed | 1241 DCHECK(offset_8 >= 0); // no masking needed |
1242 instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf); | 1242 instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf); |
1243 } else if (x.shift_imm_ != 0) { | 1243 } else if (x.shift_imm_ != 0) { |
1244 // Scaled register offset not supported, load index first | 1244 // Scaled register offset not supported, load index first |
1245 // rn (and rd in a load) should never be ip, or will be trashed. | 1245 // rn (and rd in a load) should never be ip, or will be trashed. |
1246 ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); | 1246 DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip))); |
1247 mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC, | 1247 mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC, |
1248 Instruction::ConditionField(instr)); | 1248 Instruction::ConditionField(instr)); |
1249 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); | 1249 addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_)); |
1250 return; | 1250 return; |
1251 } else { | 1251 } else { |
1252 // Register offset. | 1252 // Register offset. |
1253 ASSERT((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback | 1253 DCHECK((am & (P|W)) == P || !x.rm_.is(pc)); // no pc index with writeback |
1254 instr |= x.rm_.code(); | 1254 instr |= x.rm_.code(); |
1255 } | 1255 } |
1256 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback | 1256 DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback |
1257 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); | 1257 emit(instr | am | x.rn_.code()*B16 | rd.code()*B12); |
1258 } | 1258 } |
1259 | 1259 |
1260 | 1260 |
1261 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) { | 1261 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) { |
1262 ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27); | 1262 DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27); |
1263 ASSERT(rl != 0); | 1263 DCHECK(rl != 0); |
1264 ASSERT(!rn.is(pc)); | 1264 DCHECK(!rn.is(pc)); |
1265 emit(instr | rn.code()*B16 | rl); | 1265 emit(instr | rn.code()*B16 | rl); |
1266 } | 1266 } |
1267 | 1267 |
1268 | 1268 |
1269 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) { | 1269 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) { |
1270 // Unindexed addressing is not encoded by this function. | 1270 // Unindexed addressing is not encoded by this function. |
1271 ASSERT_EQ((B27 | B26), | 1271 DCHECK_EQ((B27 | B26), |
1272 (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L))); | 1272 (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L))); |
1273 ASSERT(x.rn_.is_valid() && !x.rm_.is_valid()); | 1273 DCHECK(x.rn_.is_valid() && !x.rm_.is_valid()); |
1274 int am = x.am_; | 1274 int am = x.am_; |
1275 int offset_8 = x.offset_; | 1275 int offset_8 = x.offset_; |
1276 ASSERT((offset_8 & 3) == 0); // offset must be an aligned word offset | 1276 DCHECK((offset_8 & 3) == 0); // offset must be an aligned word offset |
1277 offset_8 >>= 2; | 1277 offset_8 >>= 2; |
1278 if (offset_8 < 0) { | 1278 if (offset_8 < 0) { |
1279 offset_8 = -offset_8; | 1279 offset_8 = -offset_8; |
1280 am ^= U; | 1280 am ^= U; |
1281 } | 1281 } |
1282 ASSERT(is_uint8(offset_8)); // unsigned word offset must fit in a byte | 1282 DCHECK(is_uint8(offset_8)); // unsigned word offset must fit in a byte |
1283 ASSERT((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback | 1283 DCHECK((am & (P|W)) == P || !x.rn_.is(pc)); // no pc base with writeback |
1284 | 1284 |
1285 // Post-indexed addressing requires W == 1; different than in addrmod2/3. | 1285 // Post-indexed addressing requires W == 1; different than in addrmod2/3. |
1286 if ((am & P) == 0) | 1286 if ((am & P) == 0) |
1287 am |= W; | 1287 am |= W; |
1288 | 1288 |
1289 ASSERT(offset_8 >= 0); // no masking needed | 1289 DCHECK(offset_8 >= 0); // no masking needed |
1290 emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8); | 1290 emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8); |
1291 } | 1291 } |
1292 | 1292 |
1293 | 1293 |
1294 int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { | 1294 int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) { |
1295 int target_pos; | 1295 int target_pos; |
1296 if (L->is_bound()) { | 1296 if (L->is_bound()) { |
1297 target_pos = L->pos(); | 1297 target_pos = L->pos(); |
1298 } else { | 1298 } else { |
1299 if (L->is_linked()) { | 1299 if (L->is_linked()) { |
1300 // Point to previous instruction that uses the link. | 1300 // Point to previous instruction that uses the link. |
1301 target_pos = L->pos(); | 1301 target_pos = L->pos(); |
1302 } else { | 1302 } else { |
1303 // First entry of the link chain points to itself. | 1303 // First entry of the link chain points to itself. |
1304 target_pos = pc_offset(); | 1304 target_pos = pc_offset(); |
1305 } | 1305 } |
1306 L->link_to(pc_offset()); | 1306 L->link_to(pc_offset()); |
1307 } | 1307 } |
1308 | 1308 |
1309 // Block the emission of the constant pool, since the branch instruction must | 1309 // Block the emission of the constant pool, since the branch instruction must |
1310 // be emitted at the pc offset recorded by the label. | 1310 // be emitted at the pc offset recorded by the label. |
1311 BlockConstPoolFor(1); | 1311 BlockConstPoolFor(1); |
1312 return target_pos - (pc_offset() + kPcLoadDelta); | 1312 return target_pos - (pc_offset() + kPcLoadDelta); |
1313 } | 1313 } |
1314 | 1314 |
1315 | 1315 |
1316 // Branch instructions. | 1316 // Branch instructions. |
1317 void Assembler::b(int branch_offset, Condition cond) { | 1317 void Assembler::b(int branch_offset, Condition cond) { |
1318 ASSERT((branch_offset & 3) == 0); | 1318 DCHECK((branch_offset & 3) == 0); |
1319 int imm24 = branch_offset >> 2; | 1319 int imm24 = branch_offset >> 2; |
1320 ASSERT(is_int24(imm24)); | 1320 DCHECK(is_int24(imm24)); |
1321 emit(cond | B27 | B25 | (imm24 & kImm24Mask)); | 1321 emit(cond | B27 | B25 | (imm24 & kImm24Mask)); |
1322 | 1322 |
1323 if (cond == al) { | 1323 if (cond == al) { |
1324 // Dead code is a good location to emit the constant pool. | 1324 // Dead code is a good location to emit the constant pool. |
1325 CheckConstPool(false, false); | 1325 CheckConstPool(false, false); |
1326 } | 1326 } |
1327 } | 1327 } |
1328 | 1328 |
1329 | 1329 |
1330 void Assembler::bl(int branch_offset, Condition cond) { | 1330 void Assembler::bl(int branch_offset, Condition cond) { |
1331 positions_recorder()->WriteRecordedPositions(); | 1331 positions_recorder()->WriteRecordedPositions(); |
1332 ASSERT((branch_offset & 3) == 0); | 1332 DCHECK((branch_offset & 3) == 0); |
1333 int imm24 = branch_offset >> 2; | 1333 int imm24 = branch_offset >> 2; |
1334 ASSERT(is_int24(imm24)); | 1334 DCHECK(is_int24(imm24)); |
1335 emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask)); | 1335 emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask)); |
1336 } | 1336 } |
1337 | 1337 |
1338 | 1338 |
1339 void Assembler::blx(int branch_offset) { // v5 and above | 1339 void Assembler::blx(int branch_offset) { // v5 and above |
1340 positions_recorder()->WriteRecordedPositions(); | 1340 positions_recorder()->WriteRecordedPositions(); |
1341 ASSERT((branch_offset & 1) == 0); | 1341 DCHECK((branch_offset & 1) == 0); |
1342 int h = ((branch_offset & 2) >> 1)*B24; | 1342 int h = ((branch_offset & 2) >> 1)*B24; |
1343 int imm24 = branch_offset >> 2; | 1343 int imm24 = branch_offset >> 2; |
1344 ASSERT(is_int24(imm24)); | 1344 DCHECK(is_int24(imm24)); |
1345 emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask)); | 1345 emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask)); |
1346 } | 1346 } |
1347 | 1347 |
1348 | 1348 |
1349 void Assembler::blx(Register target, Condition cond) { // v5 and above | 1349 void Assembler::blx(Register target, Condition cond) { // v5 and above |
1350 positions_recorder()->WriteRecordedPositions(); | 1350 positions_recorder()->WriteRecordedPositions(); |
1351 ASSERT(!target.is(pc)); | 1351 DCHECK(!target.is(pc)); |
1352 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code()); | 1352 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code()); |
1353 } | 1353 } |
1354 | 1354 |
1355 | 1355 |
1356 void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t | 1356 void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t |
1357 positions_recorder()->WriteRecordedPositions(); | 1357 positions_recorder()->WriteRecordedPositions(); |
1358 ASSERT(!target.is(pc)); // use of pc is actually allowed, but discouraged | 1358 DCHECK(!target.is(pc)); // use of pc is actually allowed, but discouraged |
1359 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code()); | 1359 emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code()); |
1360 } | 1360 } |
1361 | 1361 |
1362 | 1362 |
1363 // Data-processing instructions. | 1363 // Data-processing instructions. |
1364 | 1364 |
1365 void Assembler::and_(Register dst, Register src1, const Operand& src2, | 1365 void Assembler::and_(Register dst, Register src1, const Operand& src2, |
1366 SBit s, Condition cond) { | 1366 SBit s, Condition cond) { |
1367 addrmod1(cond | AND | s, src1, dst, src2); | 1367 addrmod1(cond | AND | s, src1, dst, src2); |
1368 } | 1368 } |
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1420 } | 1420 } |
1421 | 1421 |
1422 | 1422 |
1423 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) { | 1423 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) { |
1424 addrmod1(cond | CMP | S, src1, r0, src2); | 1424 addrmod1(cond | CMP | S, src1, r0, src2); |
1425 } | 1425 } |
1426 | 1426 |
1427 | 1427 |
1428 void Assembler::cmp_raw_immediate( | 1428 void Assembler::cmp_raw_immediate( |
1429 Register src, int raw_immediate, Condition cond) { | 1429 Register src, int raw_immediate, Condition cond) { |
1430 ASSERT(is_uint12(raw_immediate)); | 1430 DCHECK(is_uint12(raw_immediate)); |
1431 emit(cond | I | CMP | S | src.code() << 16 | raw_immediate); | 1431 emit(cond | I | CMP | S | src.code() << 16 | raw_immediate); |
1432 } | 1432 } |
1433 | 1433 |
1434 | 1434 |
1435 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) { | 1435 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) { |
1436 addrmod1(cond | CMN | S, src1, r0, src2); | 1436 addrmod1(cond | CMN | S, src1, r0, src2); |
1437 } | 1437 } |
1438 | 1438 |
1439 | 1439 |
1440 void Assembler::orr(Register dst, Register src1, const Operand& src2, | 1440 void Assembler::orr(Register dst, Register src1, const Operand& src2, |
1441 SBit s, Condition cond) { | 1441 SBit s, Condition cond) { |
1442 addrmod1(cond | ORR | s, src1, dst, src2); | 1442 addrmod1(cond | ORR | s, src1, dst, src2); |
1443 } | 1443 } |
1444 | 1444 |
1445 | 1445 |
1446 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) { | 1446 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) { |
1447 if (dst.is(pc)) { | 1447 if (dst.is(pc)) { |
1448 positions_recorder()->WriteRecordedPositions(); | 1448 positions_recorder()->WriteRecordedPositions(); |
1449 } | 1449 } |
1450 // Don't allow nop instructions in the form mov rn, rn to be generated using | 1450 // Don't allow nop instructions in the form mov rn, rn to be generated using |
1451 // the mov instruction. They must be generated using nop(int/NopMarkerTypes) | 1451 // the mov instruction. They must be generated using nop(int/NopMarkerTypes) |
1452 // or MarkCode(int/NopMarkerTypes) pseudo instructions. | 1452 // or MarkCode(int/NopMarkerTypes) pseudo instructions. |
1453 ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al)); | 1453 DCHECK(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al)); |
1454 addrmod1(cond | MOV | s, r0, dst, src); | 1454 addrmod1(cond | MOV | s, r0, dst, src); |
1455 } | 1455 } |
1456 | 1456 |
1457 | 1457 |
1458 void Assembler::mov_label_offset(Register dst, Label* label) { | 1458 void Assembler::mov_label_offset(Register dst, Label* label) { |
1459 if (label->is_bound()) { | 1459 if (label->is_bound()) { |
1460 mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag))); | 1460 mov(dst, Operand(label->pos() + (Code::kHeaderSize - kHeapObjectTag))); |
1461 } else { | 1461 } else { |
1462 // Emit the link to the label in the code stream followed by extra nop | 1462 // Emit the link to the label in the code stream followed by extra nop |
1463 // instructions. | 1463 // instructions. |
(...skipping 12 matching lines...) Expand all Loading... |
1476 // For ARMv7: | 1476 // For ARMv7: |
1477 // link | 1477 // link |
1478 // mov dst, dst | 1478 // mov dst, dst |
1479 // For ARMv6: | 1479 // For ARMv6: |
1480 // link | 1480 // link |
1481 // mov dst, dst | 1481 // mov dst, dst |
1482 // mov dst, dst | 1482 // mov dst, dst |
1483 // | 1483 // |
1484 // When the label gets bound: target_at extracts the link and target_at_put | 1484 // When the label gets bound: target_at extracts the link and target_at_put |
1485 // patches the instructions. | 1485 // patches the instructions. |
1486 ASSERT(is_uint24(link)); | 1486 DCHECK(is_uint24(link)); |
1487 BlockConstPoolScope block_const_pool(this); | 1487 BlockConstPoolScope block_const_pool(this); |
1488 emit(link); | 1488 emit(link); |
1489 nop(dst.code()); | 1489 nop(dst.code()); |
1490 if (!CpuFeatures::IsSupported(ARMv7)) { | 1490 if (!CpuFeatures::IsSupported(ARMv7)) { |
1491 nop(dst.code()); | 1491 nop(dst.code()); |
1492 } | 1492 } |
1493 } | 1493 } |
1494 } | 1494 } |
1495 | 1495 |
1496 | 1496 |
1497 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) { | 1497 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) { |
1498 ASSERT(CpuFeatures::IsSupported(ARMv7)); | 1498 DCHECK(CpuFeatures::IsSupported(ARMv7)); |
1499 emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate)); | 1499 emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate)); |
1500 } | 1500 } |
1501 | 1501 |
1502 | 1502 |
1503 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) { | 1503 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) { |
1504 ASSERT(CpuFeatures::IsSupported(ARMv7)); | 1504 DCHECK(CpuFeatures::IsSupported(ARMv7)); |
1505 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate)); | 1505 emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate)); |
1506 } | 1506 } |
1507 | 1507 |
1508 | 1508 |
1509 void Assembler::bic(Register dst, Register src1, const Operand& src2, | 1509 void Assembler::bic(Register dst, Register src1, const Operand& src2, |
1510 SBit s, Condition cond) { | 1510 SBit s, Condition cond) { |
1511 addrmod1(cond | BIC | s, src1, dst, src2); | 1511 addrmod1(cond | BIC | s, src1, dst, src2); |
1512 } | 1512 } |
1513 | 1513 |
1514 | 1514 |
1515 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) { | 1515 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) { |
1516 addrmod1(cond | MVN | s, r0, dst, src); | 1516 addrmod1(cond | MVN | s, r0, dst, src); |
1517 } | 1517 } |
1518 | 1518 |
1519 | 1519 |
1520 // Multiply instructions. | 1520 // Multiply instructions. |
1521 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA, | 1521 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA, |
1522 SBit s, Condition cond) { | 1522 SBit s, Condition cond) { |
1523 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); | 1523 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); |
1524 emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 | | 1524 emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 | |
1525 src2.code()*B8 | B7 | B4 | src1.code()); | 1525 src2.code()*B8 | B7 | B4 | src1.code()); |
1526 } | 1526 } |
1527 | 1527 |
1528 | 1528 |
1529 void Assembler::mls(Register dst, Register src1, Register src2, Register srcA, | 1529 void Assembler::mls(Register dst, Register src1, Register src2, Register srcA, |
1530 Condition cond) { | 1530 Condition cond) { |
1531 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); | 1531 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc)); |
1532 ASSERT(IsEnabled(MLS)); | 1532 DCHECK(IsEnabled(MLS)); |
1533 emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 | | 1533 emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 | |
1534 src2.code()*B8 | B7 | B4 | src1.code()); | 1534 src2.code()*B8 | B7 | B4 | src1.code()); |
1535 } | 1535 } |
1536 | 1536 |
1537 | 1537 |
1538 void Assembler::sdiv(Register dst, Register src1, Register src2, | 1538 void Assembler::sdiv(Register dst, Register src1, Register src2, |
1539 Condition cond) { | 1539 Condition cond) { |
1540 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); | 1540 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); |
1541 ASSERT(IsEnabled(SUDIV)); | 1541 DCHECK(IsEnabled(SUDIV)); |
1542 emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 | | 1542 emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 | |
1543 src2.code()*B8 | B4 | src1.code()); | 1543 src2.code()*B8 | B4 | src1.code()); |
1544 } | 1544 } |
1545 | 1545 |
1546 | 1546 |
1547 void Assembler::udiv(Register dst, Register src1, Register src2, | 1547 void Assembler::udiv(Register dst, Register src1, Register src2, |
1548 Condition cond) { | 1548 Condition cond) { |
1549 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); | 1549 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); |
1550 ASSERT(IsEnabled(SUDIV)); | 1550 DCHECK(IsEnabled(SUDIV)); |
1551 emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 | | 1551 emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 | |
1552 src2.code() * B8 | B4 | src1.code()); | 1552 src2.code() * B8 | B4 | src1.code()); |
1553 } | 1553 } |
1554 | 1554 |
1555 | 1555 |
1556 void Assembler::mul(Register dst, Register src1, Register src2, | 1556 void Assembler::mul(Register dst, Register src1, Register src2, |
1557 SBit s, Condition cond) { | 1557 SBit s, Condition cond) { |
1558 ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); | 1558 DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc)); |
1559 // dst goes in bits 16-19 for this instruction! | 1559 // dst goes in bits 16-19 for this instruction! |
1560 emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code()); | 1560 emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code()); |
1561 } | 1561 } |
1562 | 1562 |
1563 | 1563 |
1564 void Assembler::smlal(Register dstL, | 1564 void Assembler::smlal(Register dstL, |
1565 Register dstH, | 1565 Register dstH, |
1566 Register src1, | 1566 Register src1, |
1567 Register src2, | 1567 Register src2, |
1568 SBit s, | 1568 SBit s, |
1569 Condition cond) { | 1569 Condition cond) { |
1570 ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); | 1570 DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc)); |
1571 ASSERT(!dstL.is(dstH)); | 1571 DCHECK(!dstL.is(dstH)); |
1572 emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 | | 1572 emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 | |
1573 src2.code()*B8 | B7 | B4 | src1.code()); | 1573 src2.code()*B8 | B7 | B4 | src1.code()); |
1574 } | 1574 } |
1575 | 1575 |
1576 | 1576 |
// Signed multiply long: {dstH:dstL} = src1 * src2 (64-bit result).
void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  // The two halves of the 64-bit destination must be distinct registers.
  DCHECK(!dstL.is(dstH));
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1588 | 1588 |
1589 | 1589 |
// Unsigned multiply-accumulate long: {dstH:dstL} += src1 * src2.
void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  // The two halves of the 64-bit destination must be distinct registers.
  DCHECK(!dstL.is(dstH));
  // A selects the accumulate variant of the long-multiply encoding.
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1601 | 1601 |
1602 | 1602 |
// Unsigned multiply long: {dstH:dstL} = src1 * src2 (64-bit result).
void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  // The two halves of the 64-bit destination must be distinct registers.
  DCHECK(!dstL.is(dstH));
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
1614 | 1614 |
1615 | 1615 |
1616 // Miscellaneous arithmetic instructions. | 1616 // Miscellaneous arithmetic instructions. |
1617 void Assembler::clz(Register dst, Register src, Condition cond) { | 1617 void Assembler::clz(Register dst, Register src, Condition cond) { |
1618 // v5 and above. | 1618 // v5 and above. |
1619 ASSERT(!dst.is(pc) && !src.is(pc)); | 1619 DCHECK(!dst.is(pc) && !src.is(pc)); |
1620 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 | | 1620 emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 | |
1621 15*B8 | CLZ | src.code()); | 1621 15*B8 | CLZ | src.code()); |
1622 } | 1622 } |
1623 | 1623 |
1624 | 1624 |
1625 // Saturating instructions. | 1625 // Saturating instructions. |
1626 | 1626 |
1627 // Unsigned saturate. | 1627 // Unsigned saturate. |
1628 void Assembler::usat(Register dst, | 1628 void Assembler::usat(Register dst, |
1629 int satpos, | 1629 int satpos, |
1630 const Operand& src, | 1630 const Operand& src, |
1631 Condition cond) { | 1631 Condition cond) { |
1632 // v6 and above. | 1632 // v6 and above. |
1633 ASSERT(CpuFeatures::IsSupported(ARMv7)); | 1633 DCHECK(CpuFeatures::IsSupported(ARMv7)); |
1634 ASSERT(!dst.is(pc) && !src.rm_.is(pc)); | 1634 DCHECK(!dst.is(pc) && !src.rm_.is(pc)); |
1635 ASSERT((satpos >= 0) && (satpos <= 31)); | 1635 DCHECK((satpos >= 0) && (satpos <= 31)); |
1636 ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL)); | 1636 DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL)); |
1637 ASSERT(src.rs_.is(no_reg)); | 1637 DCHECK(src.rs_.is(no_reg)); |
1638 | 1638 |
1639 int sh = 0; | 1639 int sh = 0; |
1640 if (src.shift_op_ == ASR) { | 1640 if (src.shift_op_ == ASR) { |
1641 sh = 1; | 1641 sh = 1; |
1642 } | 1642 } |
1643 | 1643 |
1644 emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 | | 1644 emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 | |
1645 src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code()); | 1645 src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code()); |
1646 } | 1646 } |
1647 | 1647 |
1648 | 1648 |
1649 // Bitfield manipulation instructions. | 1649 // Bitfield manipulation instructions. |
1650 | 1650 |
1651 // Unsigned bit field extract. | 1651 // Unsigned bit field extract. |
1652 // Extracts #width adjacent bits from position #lsb in a register, and | 1652 // Extracts #width adjacent bits from position #lsb in a register, and |
1653 // writes them to the low bits of a destination register. | 1653 // writes them to the low bits of a destination register. |
1654 // ubfx dst, src, #lsb, #width | 1654 // ubfx dst, src, #lsb, #width |
// Unsigned bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them (zero-extended) to the low bits of a destination register.
// ubfx dst, src, #lsb, #width
void Assembler::ubfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  DCHECK(!dst.is(pc) && !src.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  // The field must fit entirely within the 32-bit register.
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  // The encoding stores width - 1, not width.
  emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1668 | 1668 |
1669 | 1669 |
1670 // Signed bit field extract. | 1670 // Signed bit field extract. |
1671 // Extracts #width adjacent bits from position #lsb in a register, and | 1671 // Extracts #width adjacent bits from position #lsb in a register, and |
1672 // writes them to the low bits of a destination register. The extracted | 1672 // writes them to the low bits of a destination register. The extracted |
1673 // value is sign extended to fill the destination register. | 1673 // value is sign extended to fill the destination register. |
1674 // sbfx dst, src, #lsb, #width | 1674 // sbfx dst, src, #lsb, #width |
// Signed bit field extract.
// Extracts #width adjacent bits from position #lsb in a register, and
// writes them to the low bits of a destination register. The extracted
// value is sign extended to fill the destination register.
// sbfx dst, src, #lsb, #width
void Assembler::sbfx(Register dst,
                     Register src,
                     int lsb,
                     int width,
                     Condition cond) {
  // v7 and above.
  DCHECK(CpuFeatures::IsSupported(ARMv7));
  DCHECK(!dst.is(pc) && !src.is(pc));
  DCHECK((lsb >= 0) && (lsb <= 31));
  // The field must fit entirely within the 32-bit register.
  DCHECK((width >= 1) && (width <= (32 - lsb)));
  // The encoding stores width - 1, not width.
  emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
       lsb*B7 | B6 | B4 | src.code());
}
1688 | 1688 |
1689 | 1689 |
1690 // Bit field clear. | 1690 // Bit field clear. |
1691 // Sets #width adjacent bits at position #lsb in the destination register | 1691 // Sets #width adjacent bits at position #lsb in the destination register |
1692 // to zero, preserving the value of the other bits. | 1692 // to zero, preserving the value of the other bits. |
1693 // bfc dst, #lsb, #width | 1693 // bfc dst, #lsb, #width |
1694 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) { | 1694 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) { |
1695 // v7 and above. | 1695 // v7 and above. |
1696 ASSERT(CpuFeatures::IsSupported(ARMv7)); | 1696 DCHECK(CpuFeatures::IsSupported(ARMv7)); |
1697 ASSERT(!dst.is(pc)); | 1697 DCHECK(!dst.is(pc)); |
1698 ASSERT((lsb >= 0) && (lsb <= 31)); | 1698 DCHECK((lsb >= 0) && (lsb <= 31)); |
1699 ASSERT((width >= 1) && (width <= (32 - lsb))); | 1699 DCHECK((width >= 1) && (width <= (32 - lsb))); |
1700 int msb = lsb + width - 1; | 1700 int msb = lsb + width - 1; |
1701 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf); | 1701 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf); |
1702 } | 1702 } |
1703 | 1703 |
1704 | 1704 |
1705 // Bit field insert. | 1705 // Bit field insert. |
1706 // Inserts #width adjacent bits from the low bits of the source register | 1706 // Inserts #width adjacent bits from the low bits of the source register |
1707 // into position #lsb of the destination register. | 1707 // into position #lsb of the destination register. |
1708 // bfi dst, src, #lsb, #width | 1708 // bfi dst, src, #lsb, #width |
1709 void Assembler::bfi(Register dst, | 1709 void Assembler::bfi(Register dst, |
1710 Register src, | 1710 Register src, |
1711 int lsb, | 1711 int lsb, |
1712 int width, | 1712 int width, |
1713 Condition cond) { | 1713 Condition cond) { |
1714 // v7 and above. | 1714 // v7 and above. |
1715 ASSERT(CpuFeatures::IsSupported(ARMv7)); | 1715 DCHECK(CpuFeatures::IsSupported(ARMv7)); |
1716 ASSERT(!dst.is(pc) && !src.is(pc)); | 1716 DCHECK(!dst.is(pc) && !src.is(pc)); |
1717 ASSERT((lsb >= 0) && (lsb <= 31)); | 1717 DCHECK((lsb >= 0) && (lsb <= 31)); |
1718 ASSERT((width >= 1) && (width <= (32 - lsb))); | 1718 DCHECK((width >= 1) && (width <= (32 - lsb))); |
1719 int msb = lsb + width - 1; | 1719 int msb = lsb + width - 1; |
1720 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | | 1720 emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | |
1721 src.code()); | 1721 src.code()); |
1722 } | 1722 } |
1723 | 1723 |
1724 | 1724 |
// Pack halfword bottom-top: combine the bottom halfword of src1 with the
// top halfword of the (LSL-shifted) src2.
void Assembler::pkhbt(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond ) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
  // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.rm().is(pc));
  // Only the immediate-shifted register form of Operand is encodable here.
  DCHECK(!src2.rm().is(no_reg));
  DCHECK(src2.rs().is(no_reg));
  DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
  // PKHBT takes an LSL shift; the TB variant (pkhtb) takes ASR.
  DCHECK(src2.shift_op() == LSL);
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       src2.shift_imm_*B7 | B4 | src2.rm().code());
}
1742 | 1742 |
1743 | 1743 |
// Pack halfword top-bottom: combine the top halfword of src1 with the
// bottom halfword of the (ASR-shifted) src2.
void Assembler::pkhtb(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.125.
  // cond(31-28) | 01101000(27-20) | Rn(19-16) |
  // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.rm().is(pc));
  // Only the immediate-shifted register form of Operand is encodable here.
  DCHECK(!src2.rm().is(no_reg));
  DCHECK(src2.rs().is(no_reg));
  DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
  DCHECK(src2.shift_op() == ASR);
  // ASR #32 is encoded as an imm5 field of 0.
  int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
  emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
       asr*B7 | B6 | B4 | src2.rm().code());
}
1762 | 1762 |
1763 | 1763 |
// Unsigned extend byte: zero-extend the low byte of the rotated source
// into the destination register.
void Assembler::uxtb(Register dst,
                     const Operand& src,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.274.
  // cond(31-28) | 01101110(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src.rm().is(pc));
  DCHECK(!src.rm().is(no_reg));
  DCHECK(src.rs().is(no_reg));
  // Only byte-aligned rotations are encodable (two-bit rotate field).
  DCHECK((src.shift_imm_ == 0) ||
         (src.shift_imm_ == 8) ||
         (src.shift_imm_ == 16) ||
         (src.shift_imm_ == 24));
  // Operand maps ROR #0 to LSL #0.
  DCHECK((src.shift_op() == ROR) ||
         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
  // (shift_imm >> 1) & 0xC scales {0,8,16,24} down to the rotate(11-10) field.
  emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
1784 | 1784 |
1785 | 1785 |
// Unsigned extend and add byte: dst = src1 + zero-extended low byte of the
// rotated src2.
void Assembler::uxtab(Register dst,
                      Register src1,
                      const Operand& src2,
                      Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.271.
  // cond(31-28) | 01101110(27-20) | Rn(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src1.is(pc));
  DCHECK(!src2.rm().is(pc));
  DCHECK(!src2.rm().is(no_reg));
  DCHECK(src2.rs().is(no_reg));
  // Only byte-aligned rotations are encodable (two-bit rotate field).
  DCHECK((src2.shift_imm_ == 0) ||
         (src2.shift_imm_ == 8) ||
         (src2.shift_imm_ == 16) ||
         (src2.shift_imm_ == 24));
  // Operand maps ROR #0 to LSL #0.
  DCHECK((src2.shift_op() == ROR) ||
         ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
  // (shift_imm >> 1) & 0xC scales {0,8,16,24} down to the rotate(11-10) field.
  emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
       ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
}
1808 | 1808 |
1809 | 1809 |
// Unsigned extend byte 16: zero-extend bytes 0 and 2 of the rotated source
// into the two halfwords of the destination register.
void Assembler::uxtb16(Register dst,
                       const Operand& src,
                       Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.275.
  // cond(31-28) | 01101100(27-20) | 1111(19-16) |
  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
  DCHECK(!dst.is(pc));
  DCHECK(!src.rm().is(pc));
  DCHECK(!src.rm().is(no_reg));
  DCHECK(src.rs().is(no_reg));
  // Only byte-aligned rotations are encodable (two-bit rotate field).
  DCHECK((src.shift_imm_ == 0) ||
         (src.shift_imm_ == 8) ||
         (src.shift_imm_ == 16) ||
         (src.shift_imm_ == 24));
  // Operand maps ROR #0 to LSL #0.
  DCHECK((src.shift_op() == ROR) ||
         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
  // (shift_imm >> 1) & 0xC scales {0,8,16,24} down to the rotate(11-10) field.
  emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
}
1830 | 1830 |
1831 | 1831 |
1832 // Status register access instructions. | 1832 // Status register access instructions. |
1833 void Assembler::mrs(Register dst, SRegister s, Condition cond) { | 1833 void Assembler::mrs(Register dst, SRegister s, Condition cond) { |
1834 ASSERT(!dst.is(pc)); | 1834 DCHECK(!dst.is(pc)); |
1835 emit(cond | B24 | s | 15*B16 | dst.code()*B12); | 1835 emit(cond | B24 | s | 15*B16 | dst.code()*B12); |
1836 } | 1836 } |
1837 | 1837 |
1838 | 1838 |
// Write the selected fields of a status register from an immediate or a
// plain register operand.
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  DCHECK(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (src.must_output_reloc_info(this) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip,
      // then retry as the register form of msr.
      move_32_bit_immediate(ip, src);
      msr(fields, Operand(ip), cond);
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    DCHECK(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
1861 | 1861 |
1862 | 1862 |
1863 // Load/Store instructions. | 1863 // Load/Store instructions. |
1864 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) { | 1864 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) { |
1865 if (dst.is(pc)) { | 1865 if (dst.is(pc)) { |
1866 positions_recorder()->WriteRecordedPositions(); | 1866 positions_recorder()->WriteRecordedPositions(); |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1899 } | 1899 } |
1900 | 1900 |
1901 | 1901 |
// Load a signed halfword from memory, sign-extended into dst.
void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
  addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
}
1905 | 1905 |
1906 | 1906 |
// Load a doubleword from memory into the register pair {dst1, dst2}.
void Assembler::ldrd(Register dst1, Register dst2,
                     const MemOperand& src, Condition cond) {
  DCHECK(IsEnabled(ARMv7));
  // Only the immediate-offset addressing form is supported here.
  DCHECK(src.rm().is(no_reg));
  DCHECK(!dst1.is(lr));  // r14.
  // LDRD requires an even-numbered first register paired with the next one.
  DCHECK_EQ(0, dst1.code() % 2);
  DCHECK_EQ(dst1.code() + 1, dst2.code());
  addrmod3(cond | B7 | B6 | B4, dst1, src);
}
1916 | 1916 |
1917 | 1917 |
1918 void Assembler::strd(Register src1, Register src2, | 1918 void Assembler::strd(Register src1, Register src2, |
1919 const MemOperand& dst, Condition cond) { | 1919 const MemOperand& dst, Condition cond) { |
1920 ASSERT(dst.rm().is(no_reg)); | 1920 DCHECK(dst.rm().is(no_reg)); |
1921 ASSERT(!src1.is(lr)); // r14. | 1921 DCHECK(!src1.is(lr)); // r14. |
1922 ASSERT_EQ(0, src1.code() % 2); | 1922 DCHECK_EQ(0, src1.code() % 2); |
1923 ASSERT_EQ(src1.code() + 1, src2.code()); | 1923 DCHECK_EQ(src1.code() + 1, src2.code()); |
1924 ASSERT(IsEnabled(ARMv7)); | 1924 DCHECK(IsEnabled(ARMv7)); |
1925 addrmod3(cond | B7 | B6 | B5 | B4, src1, dst); | 1925 addrmod3(cond | B7 | B6 | B5 | B4, src1, dst); |
1926 } | 1926 } |
1927 | 1927 |
1928 | 1928 |
1929 // Preload instructions. | 1929 // Preload instructions. |
1930 void Assembler::pld(const MemOperand& address) { | 1930 void Assembler::pld(const MemOperand& address) { |
1931 // Instruction details available in ARM DDI 0406C.b, A8.8.128. | 1931 // Instruction details available in ARM DDI 0406C.b, A8.8.128. |
1932 // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) | | 1932 // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) | |
1933 // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) | | 1933 // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) | |
1934 ASSERT(address.rm().is(no_reg)); | 1934 DCHECK(address.rm().is(no_reg)); |
1935 ASSERT(address.am() == Offset); | 1935 DCHECK(address.am() == Offset); |
1936 int U = B23; | 1936 int U = B23; |
1937 int offset = address.offset(); | 1937 int offset = address.offset(); |
1938 if (offset < 0) { | 1938 if (offset < 0) { |
1939 offset = -offset; | 1939 offset = -offset; |
1940 U = 0; | 1940 U = 0; |
1941 } | 1941 } |
1942 ASSERT(offset < 4096); | 1942 DCHECK(offset < 4096); |
1943 emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 | | 1943 emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 | |
1944 0xf*B12 | offset); | 1944 0xf*B12 | offset); |
1945 } | 1945 } |
1946 | 1946 |
1947 | 1947 |
1948 // Load/Store multiple instructions. | 1948 // Load/Store multiple instructions. |
// Load multiple registers from consecutive memory locations (ldmxx).
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..} base != sp is not restartable.
  DCHECK(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}
1968 | 1968 |
1969 | 1969 |
// Store multiple registers to consecutive memory locations (stmxx).
void Assembler::stm(BlockAddrMode am,
                    Register base,
                    RegList src,
                    Condition cond) {
  addrmod4(cond | B27 | am, base, src);
}
1976 | 1976 |
1977 | 1977 |
1978 // Exception-generating instructions and debugging support. | 1978 // Exception-generating instructions and debugging support. |
1979 // Stops with a non-negative code less than kNumOfWatchedStops support | 1979 // Stops with a non-negative code less than kNumOfWatchedStops support |
1980 // enabling/disabling and a counter feature. See simulator-arm.h . | 1980 // enabling/disabling and a counter feature. See simulator-arm.h . |
// Emit a stop marker: on the simulator (non-ARM host), an svc with the stop
// code followed by the message address; on real hardware, a breakpoint.
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
  DCHECK(code >= kDefaultStopCode);
  {
    // The Simulator will handle the stop instruction and get the message
    // address. It expects to find the address just after the svc instruction.
    // Block the constant pool so nothing is emitted between them.
    BlockConstPoolScope block_const_pool(this);
    if (code >= 0) {
      svc(kStopCode + code, cond);
    } else {
      // Negative codes fall back to the maximum (unwatched) stop code.
      svc(kStopCode + kMaxStopCode, cond);
    }
    // The message pointer is emitted inline as instruction-sized data.
    emit(reinterpret_cast<Instr>(msg));
  }
#else  // def __arm__
  // On hardware there is no simulator to decode the message; emit an
  // unconditional bkpt, branching around it when cond is not met.
  if (cond != al) {
    Label skip;
    b(&skip, NegateCondition(cond));
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
#endif  // def __arm__
}
2006 | 2006 |
2007 | 2007 |
2008 void Assembler::bkpt(uint32_t imm16) { // v5 and above | 2008 void Assembler::bkpt(uint32_t imm16) { // v5 and above |
2009 ASSERT(is_uint16(imm16)); | 2009 DCHECK(is_uint16(imm16)); |
2010 emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf)); | 2010 emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf)); |
2011 } | 2011 } |
2012 | 2012 |
2013 | 2013 |
// Supervisor call (software interrupt) with a 24-bit immediate.
void Assembler::svc(uint32_t imm24, Condition cond) {
  DCHECK(is_uint24(imm24));
  emit(cond | 15*B24 | imm24);
}
2018 | 2018 |
2019 | 2019 |
2020 // Coprocessor instructions. | 2020 // Coprocessor instructions. |
// Coprocessor data processing operation (cdp).
void Assembler::cdp(Coprocessor coproc,
                    int opcode_1,
                    CRegister crd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  // opcode_1 is a 4-bit field for cdp (vs 3-bit for mcr/mrc).
  DCHECK(is_uint4(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}
2032 | 2032 |
2033 | 2033 |
// Unconditional coprocessor data processing (cdp2): cdp with the special
// (0b1111) condition field.
void Assembler::cdp2(Coprocessor coproc,
                     int opcode_1,
                     CRegister crd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
}
2042 | 2042 |
2043 | 2043 |
// Move from ARM register rd to coprocessor register (mcr).
void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  // Both opcode fields are 3 bits wide for mcr.
  DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
2055 | 2055 |
2056 | 2056 |
// Unconditional register-to-coprocessor move (mcr2): mcr with the special
// (0b1111) condition field.
void Assembler::mcr2(Coprocessor coproc,
                     int opcode_1,
                     Register rd,
                     CRegister crn,
                     CRegister crm,
                     int opcode_2) {  // v5 and above
  mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
}
2065 | 2065 |
2066 | 2066 |
// Move from coprocessor register to ARM register rd (mrc).
void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  // Both opcode fields are 3 bits wide for mrc.
  DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
  // L distinguishes mrc (coprocessor-to-register) from mcr.
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
2078 | 2078 |
2079 | 2079 |
2080 void Assembler::mrc2(Coprocessor coproc, | 2080 void Assembler::mrc2(Coprocessor coproc, |
2081 int opcode_1, | 2081 int opcode_1, |
2082 Register rd, | 2082 Register rd, |
2083 CRegister crn, | 2083 CRegister crn, |
2084 CRegister crm, | 2084 CRegister crm, |
(...skipping 11 matching lines...) Expand all Loading... |
2096 } | 2096 } |
2097 | 2097 |
2098 | 2098 |
// Coprocessor load (LDC), unindexed addressing form: the 8-bit |option|
// field is passed through to the coprocessor rather than being used as a
// memory offset.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  DCHECK(is_uint8(option));
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
2110 | 2110 |
2111 | 2111 |
2112 void Assembler::ldc2(Coprocessor coproc, | 2112 void Assembler::ldc2(Coprocessor coproc, |
2113 CRegister crd, | 2113 CRegister crd, |
2114 const MemOperand& src, | 2114 const MemOperand& src, |
2115 LFlag l) { // v5 and above | 2115 LFlag l) { // v5 and above |
2116 ldc(coproc, crd, src, l, kSpecialCondition); | 2116 ldc(coproc, crd, src, l, kSpecialCondition); |
(...skipping 20 matching lines...) Expand all Loading... |
2137 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) | | 2137 // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) | |
2138 // Vd(15-12) | 1011(11-8) | offset | 2138 // Vd(15-12) | 1011(11-8) | offset |
2139 int u = 1; | 2139 int u = 1; |
2140 if (offset < 0) { | 2140 if (offset < 0) { |
2141 offset = -offset; | 2141 offset = -offset; |
2142 u = 0; | 2142 u = 0; |
2143 } | 2143 } |
2144 int vd, d; | 2144 int vd, d; |
2145 dst.split_code(&vd, &d); | 2145 dst.split_code(&vd, &d); |
2146 | 2146 |
2147 ASSERT(offset >= 0); | 2147 DCHECK(offset >= 0); |
2148 if ((offset % 4) == 0 && (offset / 4) < 256) { | 2148 if ((offset % 4) == 0 && (offset / 4) < 256) { |
2149 emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 | | 2149 emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 | |
2150 0xB*B8 | ((offset / 4) & 255)); | 2150 0xB*B8 | ((offset / 4) & 255)); |
2151 } else { | 2151 } else { |
2152 // Larger offsets must be handled by computing the correct address | 2152 // Larger offsets must be handled by computing the correct address |
2153 // in the ip register. | 2153 // in the ip register. |
2154 ASSERT(!base.is(ip)); | 2154 DCHECK(!base.is(ip)); |
2155 if (u == 1) { | 2155 if (u == 1) { |
2156 add(ip, base, Operand(offset)); | 2156 add(ip, base, Operand(offset)); |
2157 } else { | 2157 } else { |
2158 sub(ip, base, Operand(offset)); | 2158 sub(ip, base, Operand(offset)); |
2159 } | 2159 } |
2160 emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8); | 2160 emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8); |
2161 } | 2161 } |
2162 } | 2162 } |
2163 | 2163 |
2164 | 2164 |
2165 void Assembler::vldr(const DwVfpRegister dst, | 2165 void Assembler::vldr(const DwVfpRegister dst, |
2166 const MemOperand& operand, | 2166 const MemOperand& operand, |
2167 const Condition cond) { | 2167 const Condition cond) { |
2168 ASSERT(operand.am_ == Offset); | 2168 DCHECK(operand.am_ == Offset); |
2169 if (operand.rm().is_valid()) { | 2169 if (operand.rm().is_valid()) { |
2170 add(ip, operand.rn(), | 2170 add(ip, operand.rn(), |
2171 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); | 2171 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); |
2172 vldr(dst, ip, 0, cond); | 2172 vldr(dst, ip, 0, cond); |
2173 } else { | 2173 } else { |
2174 vldr(dst, operand.rn(), operand.offset(), cond); | 2174 vldr(dst, operand.rn(), operand.offset(), cond); |
2175 } | 2175 } |
2176 } | 2176 } |
2177 | 2177 |
2178 | 2178 |
void Assembler::vldr(const SwVfpRegister dst,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // Sdst = MEM(Rbase + offset).
  // Instruction details available in ARM DDI 0406A, A8-628.
  // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | offset
  // A negative offset is encoded as a positive magnitude with the U (add)
  // bit cleared.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  dst.split_code(&sd, &d);
  DCHECK(offset >= 0);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    // The offset fits the instruction's 8-bit, word-scaled immediate field.
    emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    DCHECK(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    // Load from ip with a zero immediate offset.
    emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}
2211 | 2211 |
2212 | 2212 |
2213 void Assembler::vldr(const SwVfpRegister dst, | 2213 void Assembler::vldr(const SwVfpRegister dst, |
2214 const MemOperand& operand, | 2214 const MemOperand& operand, |
2215 const Condition cond) { | 2215 const Condition cond) { |
2216 ASSERT(operand.am_ == Offset); | 2216 DCHECK(operand.am_ == Offset); |
2217 if (operand.rm().is_valid()) { | 2217 if (operand.rm().is_valid()) { |
2218 add(ip, operand.rn(), | 2218 add(ip, operand.rn(), |
2219 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); | 2219 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); |
2220 vldr(dst, ip, 0, cond); | 2220 vldr(dst, ip, 0, cond); |
2221 } else { | 2221 } else { |
2222 vldr(dst, operand.rn(), operand.offset(), cond); | 2222 vldr(dst, operand.rn(), operand.offset(), cond); |
2223 } | 2223 } |
2224 } | 2224 } |
2225 | 2225 |
2226 | 2226 |
void Assembler::vstr(const DwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = Dsrc.
  // Instruction details available in ARM DDI 0406C.b, A8-1082.
  // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
  // Vd(15-12) | 1011(11-8) | (offset/4)
  // A negative offset is encoded as a positive magnitude with the U (add)
  // bit cleared.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  DCHECK(offset >= 0);
  int vd, d;
  src.split_code(&vd, &d);

  if ((offset % 4) == 0 && (offset / 4) < 256) {
    // The offset fits the instruction's 8-bit, word-scaled immediate field.
    emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
         ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    DCHECK(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    // Store to ip with a zero immediate offset.
    emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
  }
}
2259 | 2259 |
2260 | 2260 |
2261 void Assembler::vstr(const DwVfpRegister src, | 2261 void Assembler::vstr(const DwVfpRegister src, |
2262 const MemOperand& operand, | 2262 const MemOperand& operand, |
2263 const Condition cond) { | 2263 const Condition cond) { |
2264 ASSERT(operand.am_ == Offset); | 2264 DCHECK(operand.am_ == Offset); |
2265 if (operand.rm().is_valid()) { | 2265 if (operand.rm().is_valid()) { |
2266 add(ip, operand.rn(), | 2266 add(ip, operand.rn(), |
2267 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); | 2267 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); |
2268 vstr(src, ip, 0, cond); | 2268 vstr(src, ip, 0, cond); |
2269 } else { | 2269 } else { |
2270 vstr(src, operand.rn(), operand.offset(), cond); | 2270 vstr(src, operand.rn(), operand.offset(), cond); |
2271 } | 2271 } |
2272 } | 2272 } |
2273 | 2273 |
2274 | 2274 |
void Assembler::vstr(const SwVfpRegister src,
                     const Register base,
                     int offset,
                     const Condition cond) {
  // MEM(Rbase + offset) = SSrc.
  // Instruction details available in ARM DDI 0406A, A8-786.
  // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
  // Vdst(15-12) | 1010(11-8) | (offset/4)
  // A negative offset is encoded as a positive magnitude with the U (add)
  // bit cleared.
  int u = 1;
  if (offset < 0) {
    offset = -offset;
    u = 0;
  }
  int sd, d;
  src.split_code(&sd, &d);
  DCHECK(offset >= 0);
  if ((offset % 4) == 0 && (offset / 4) < 256) {
    // The offset fits the instruction's 8-bit, word-scaled immediate field.
    emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
         0xA*B8 | ((offset / 4) & 255));
  } else {
    // Larger offsets must be handled by computing the correct address
    // in the ip register.
    DCHECK(!base.is(ip));
    if (u == 1) {
      add(ip, base, Operand(offset));
    } else {
      sub(ip, base, Operand(offset));
    }
    // Store to ip with a zero immediate offset.
    emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
  }
}
2306 | 2306 |
2307 | 2307 |
2308 void Assembler::vstr(const SwVfpRegister src, | 2308 void Assembler::vstr(const SwVfpRegister src, |
2309 const MemOperand& operand, | 2309 const MemOperand& operand, |
2310 const Condition cond) { | 2310 const Condition cond) { |
2311 ASSERT(operand.am_ == Offset); | 2311 DCHECK(operand.am_ == Offset); |
2312 if (operand.rm().is_valid()) { | 2312 if (operand.rm().is_valid()) { |
2313 add(ip, operand.rn(), | 2313 add(ip, operand.rn(), |
2314 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); | 2314 Operand(operand.rm(), operand.shift_op_, operand.shift_imm_)); |
2315 vstr(src, ip, 0, cond); | 2315 vstr(src, ip, 0, cond); |
2316 } else { | 2316 } else { |
2317 vstr(src, operand.rn(), operand.offset(), cond); | 2317 vstr(src, operand.rn(), operand.offset(), cond); |
2318 } | 2318 } |
2319 } | 2319 } |
2320 | 2320 |
2321 | 2321 |
// Loads the consecutive D-registers [first, last] from memory at |base|.
void Assembler::vldm(BlockAddrMode am,
                     Register base,
                     DwVfpRegister first,
                     DwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-922.
  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
  // first(15-12) | 1011(11-8) | (count * 2)
  DCHECK_LE(first.code(), last.code());
  // Only increment-after and the writeback addressing modes are supported.
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  // The immediate field encodes count * 2 words, limiting the range to
  // 16 D-registers.
  DCHECK(count <= 16);
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}
2341 | 2341 |
2342 | 2342 |
// Stores the consecutive D-registers [first, last] to memory at |base|.
void Assembler::vstm(BlockAddrMode am,
                     Register base,
                     DwVfpRegister first,
                     DwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-1080.
  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
  // first(15-12) | 1011(11-8) | (count * 2)
  DCHECK_LE(first.code(), last.code());
  // Only increment-after and the writeback addressing modes are supported.
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  int count = last.code() - first.code() + 1;
  // The immediate field encodes count * 2 words, limiting the range to
  // 16 D-registers.
  DCHECK(count <= 16);
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xB*B8 | count*2);
}
2362 | 2362 |
// Loads the consecutive S-registers [first, last] from memory at |base|.
void Assembler::vldm(BlockAddrMode am,
                     Register base,
                     SwVfpRegister first,
                     SwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-626.
  // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
  // first(15-12) | 1010(11-8) | (count/2)
  DCHECK_LE(first.code(), last.code());
  // Only increment-after and the writeback addressing modes are supported.
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  // For S-registers the immediate field holds the register count directly.
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}
2381 | 2381 |
2382 | 2382 |
// Stores the consecutive S-registers [first, last] to memory at |base|.
void Assembler::vstm(BlockAddrMode am,
                     Register base,
                     SwVfpRegister first,
                     SwVfpRegister last,
                     Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
  // first(15-12) | 1011(11-8) | (count/2)
  DCHECK_LE(first.code(), last.code());
  // Only increment-after and the writeback addressing modes are supported.
  DCHECK(am == ia || am == ia_w || am == db_w);
  DCHECK(!base.is(pc));

  int sd, d;
  first.split_code(&sd, &d);
  // For S-registers the immediate field holds the register count directly.
  int count = last.code() - first.code() + 1;
  emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
       0xA*B8 | count);
}
2401 | 2401 |
2402 | 2402 |
// Reinterprets the bit pattern of |d| as a 64-bit integer and returns its
// low and high 32-bit halves through |lo| and |hi|.
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));

  *lo = static_cast<uint32_t>(bits & 0xffffffff);
  *hi = static_cast<uint32_t>(bits >> 32);
}
2410 | 2410 |
2411 | 2411 |
2412 // Only works for little endian floating point formats. | 2412 // Only works for little endian floating point formats. |
2413 // We don't support VFP on the mixed endian floating point platform. | 2413 // We don't support VFP on the mixed endian floating point platform. |
2414 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) { | 2414 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) { |
2415 ASSERT(CpuFeatures::IsSupported(VFP3)); | 2415 DCHECK(CpuFeatures::IsSupported(VFP3)); |
2416 | 2416 |
2417 // VMOV can accept an immediate of the form: | 2417 // VMOV can accept an immediate of the form: |
2418 // | 2418 // |
2419 // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7 | 2419 // +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7 |
2420 // | 2420 // |
2421 // The immediate is encoded using an 8-bit quantity, comprised of two | 2421 // The immediate is encoded using an 8-bit quantity, comprised of two |
2422 // 4-bit fields. For an 8-bit immediate of the form: | 2422 // 4-bit fields. For an 8-bit immediate of the form: |
2423 // | 2423 // |
2424 // [abcdefgh] | 2424 // [abcdefgh] |
2425 // | 2425 // |
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2486 // We could also add a few peepholes here like detecting 0.0 and | 2486 // We could also add a few peepholes here like detecting 0.0 and |
2487 // -0.0 and doing a vmov from the sequestered d14, forcing denorms | 2487 // -0.0 and doing a vmov from the sequestered d14, forcing denorms |
2488 // to zero (we set flush-to-zero), and normalizing NaN values. | 2488 // to zero (we set flush-to-zero), and normalizing NaN values. |
2489 // We could also detect redundant values. | 2489 // We could also detect redundant values. |
2490 // The code could also randomize the order of values, though | 2490 // The code could also randomize the order of values, though |
2491 // that's tricky because vldr has a limited reach. Furthermore | 2491 // that's tricky because vldr has a limited reach. Furthermore |
2492 // it breaks load locality. | 2492 // it breaks load locality. |
2493 RelocInfo rinfo(pc_, imm); | 2493 RelocInfo rinfo(pc_, imm); |
2494 ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo); | 2494 ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo); |
2495 if (section == ConstantPoolArray::EXTENDED_SECTION) { | 2495 if (section == ConstantPoolArray::EXTENDED_SECTION) { |
2496 ASSERT(FLAG_enable_ool_constant_pool); | 2496 DCHECK(FLAG_enable_ool_constant_pool); |
2497 // Emit instructions to load constant pool offset. | 2497 // Emit instructions to load constant pool offset. |
2498 movw(ip, 0); | 2498 movw(ip, 0); |
2499 movt(ip, 0); | 2499 movt(ip, 0); |
2500 // Load from constant pool at offset. | 2500 // Load from constant pool at offset. |
2501 vldr(dst, MemOperand(pp, ip)); | 2501 vldr(dst, MemOperand(pp, ip)); |
2502 } else { | 2502 } else { |
2503 ASSERT(section == ConstantPoolArray::SMALL_SECTION); | 2503 DCHECK(section == ConstantPoolArray::SMALL_SECTION); |
2504 vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0)); | 2504 vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0)); |
2505 } | 2505 } |
2506 } else { | 2506 } else { |
2507 // Synthesise the double from ARM immediates. | 2507 // Synthesise the double from ARM immediates. |
2508 uint32_t lo, hi; | 2508 uint32_t lo, hi; |
2509 DoubleAsTwoUInt32(imm, &lo, &hi); | 2509 DoubleAsTwoUInt32(imm, &lo, &hi); |
2510 | 2510 |
2511 if (scratch.is(no_reg)) { | 2511 if (scratch.is(no_reg)) { |
2512 if (dst.code() < 16) { | 2512 if (dst.code() < 16) { |
2513 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); | 2513 const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code()); |
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2569 | 2569 |
2570 | 2570 |
// Moves a core register into one 32-bit lane of a D-register.
void Assembler::vmov(const DwVfpRegister dst,
                     const VmovIndex index,
                     const Register src,
                     const Condition cond) {
  // Dd[index] = Rt
  // Instruction details available in ARM DDI 0406C.b, A8-940.
  // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
  // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
  // Only the two 32-bit lanes of a D-register are addressable.
  DCHECK(index.index == 0 || index.index == 1);
  int vd, d;
  dst.split_code(&vd, &d);
  emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
       d*B7 | B4);
}
2585 | 2585 |
2586 | 2586 |
// Moves one 32-bit lane of a D-register into a core register.
void Assembler::vmov(const Register dst,
                     const VmovIndex index,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Rt = Dn[index]  (the original comment said "Dd[index] = Rt", which is
  // the store direction; this is the read form, with B20/op=1 set).
  // Instruction details available in ARM DDI 0406C.b, A8.8.342.
  // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
  // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
  // Only the two 32-bit lanes of a D-register are addressable.
  DCHECK(index.index == 0 || index.index == 1);
  int vn, n;
  src.split_code(&vn, &n);
  emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
       0xB*B8 | n*B7 | B4);
}
2601 | 2601 |
2602 | 2602 |
// Moves a pair of core registers into a D-register.
void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>.
  // Instruction details available in ARM DDI 0406C.b, A8-948.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  DCHECK(!src1.is(pc) && !src2.is(pc));
  int vm, m;
  dst.split_code(&vm, &m);
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
2617 | 2617 |
2618 | 2618 |
// Moves a D-register into a pair of core registers.
void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm.
  // Instruction details available in ARM DDI 0406C.b, A8-948.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  DCHECK(!dst1.is(pc) && !dst2.is(pc));
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
}
2633 | 2633 |
2634 | 2634 |
// Moves a core register into a single-precision VFP register.
void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  DCHECK(!src.is(pc));
  int sn, n;
  dst.split_code(&sn, &n);
  emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2647 | 2647 |
2648 | 2648 |
// Moves a single-precision VFP register into a core register.
void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  DCHECK(!dst.is(pc));
  int sn, n;
  src.split_code(&sn, &n);
  emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
}
2661 | 2661 |
2662 | 2662 |
// Type of data to read from or write to VFP register.
// Used as specifier in generic vcvt instruction.
// S32/U32 name 32-bit signed/unsigned integer data; F32/F64 name
// single-/double-precision floating point.
enum VFPType { S32, U32, F32, F64 };
2666 | 2666 |
(...skipping 40 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2707 | 2707 |
2708 | 2708 |
2709 // Split five bit reg_code based on size of reg_type. | 2709 // Split five bit reg_code based on size of reg_type. |
2710 // 32-bit register codes are Vm:M | 2710 // 32-bit register codes are Vm:M |
2711 // 64-bit register codes are M:Vm | 2711 // 64-bit register codes are M:Vm |
2712 // where Vm is four bits, and M is a single bit. | 2712 // where Vm is four bits, and M is a single bit. |
2713 static void SplitRegCode(VFPType reg_type, | 2713 static void SplitRegCode(VFPType reg_type, |
2714 int reg_code, | 2714 int reg_code, |
2715 int* vm, | 2715 int* vm, |
2716 int* m) { | 2716 int* m) { |
2717 ASSERT((reg_code >= 0) && (reg_code <= 31)); | 2717 DCHECK((reg_code >= 0) && (reg_code <= 31)); |
2718 if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) { | 2718 if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) { |
2719 // 32 bit type. | 2719 // 32 bit type. |
2720 *m = reg_code & 0x1; | 2720 *m = reg_code & 0x1; |
2721 *vm = reg_code >> 1; | 2721 *vm = reg_code >> 1; |
2722 } else { | 2722 } else { |
2723 // 64 bit type. | 2723 // 64 bit type. |
2724 *m = (reg_code & 0x10) >> 4; | 2724 *m = (reg_code & 0x10) >> 4; |
2725 *vm = reg_code & 0x0F; | 2725 *vm = reg_code & 0x0F; |
2726 } | 2726 } |
2727 } | 2727 } |
2728 | 2728 |
2729 | 2729 |
2730 // Encode vcvt.src_type.dst_type instruction. | 2730 // Encode vcvt.src_type.dst_type instruction. |
2731 static Instr EncodeVCVT(const VFPType dst_type, | 2731 static Instr EncodeVCVT(const VFPType dst_type, |
2732 const int dst_code, | 2732 const int dst_code, |
2733 const VFPType src_type, | 2733 const VFPType src_type, |
2734 const int src_code, | 2734 const int src_code, |
2735 VFPConversionMode mode, | 2735 VFPConversionMode mode, |
2736 const Condition cond) { | 2736 const Condition cond) { |
2737 ASSERT(src_type != dst_type); | 2737 DCHECK(src_type != dst_type); |
2738 int D, Vd, M, Vm; | 2738 int D, Vd, M, Vm; |
2739 SplitRegCode(src_type, src_code, &Vm, &M); | 2739 SplitRegCode(src_type, src_code, &Vm, &M); |
2740 SplitRegCode(dst_type, dst_code, &Vd, &D); | 2740 SplitRegCode(dst_type, dst_code, &Vd, &D); |
2741 | 2741 |
2742 if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) { | 2742 if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) { |
2743 // Conversion between IEEE floating point and 32-bit integer. | 2743 // Conversion between IEEE floating point and 32-bit integer. |
2744 // Instruction details available in ARM DDI 0406B, A8.6.295. | 2744 // Instruction details available in ARM DDI 0406B, A8.6.295. |
2745 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) | | 2745 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) | |
2746 // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0) | 2746 // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0) |
2747 ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type)); | 2747 DCHECK(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type)); |
2748 | 2748 |
2749 int sz, opc2, op; | 2749 int sz, opc2, op; |
2750 | 2750 |
2751 if (IsIntegerVFPType(dst_type)) { | 2751 if (IsIntegerVFPType(dst_type)) { |
2752 opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4; | 2752 opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4; |
2753 sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0; | 2753 sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0; |
2754 op = mode; | 2754 op = mode; |
2755 } else { | 2755 } else { |
2756 ASSERT(IsIntegerVFPType(src_type)); | 2756 DCHECK(IsIntegerVFPType(src_type)); |
2757 opc2 = 0x0; | 2757 opc2 = 0x0; |
2758 sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0; | 2758 sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0; |
2759 op = IsSignedVFPType(src_type) ? 0x1 : 0x0; | 2759 op = IsSignedVFPType(src_type) ? 0x1 : 0x0; |
2760 } | 2760 } |
2761 | 2761 |
2762 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 | | 2762 return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 | |
2763 Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm); | 2763 Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm); |
2764 } else { | 2764 } else { |
2765 // Conversion between IEEE double and single precision. | 2765 // Conversion between IEEE double and single precision. |
2766 // Instruction details available in ARM DDI 0406B, A8.6.298. | 2766 // Instruction details available in ARM DDI 0406B, A8.6.298. |
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2828 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond)); | 2828 emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond)); |
2829 } | 2829 } |
2830 | 2830 |
2831 | 2831 |
2832 void Assembler::vcvt_f64_s32(const DwVfpRegister dst, | 2832 void Assembler::vcvt_f64_s32(const DwVfpRegister dst, |
2833 int fraction_bits, | 2833 int fraction_bits, |
2834 const Condition cond) { | 2834 const Condition cond) { |
2835 // Instruction details available in ARM DDI 0406C.b, A8-874. | 2835 // Instruction details available in ARM DDI 0406C.b, A8-874. |
2836 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) | | 2836 // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) | |
2837 // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0) | 2837 // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0) |
2838 ASSERT(fraction_bits > 0 && fraction_bits <= 32); | 2838 DCHECK(fraction_bits > 0 && fraction_bits <= 32); |
2839 ASSERT(CpuFeatures::IsSupported(VFP3)); | 2839 DCHECK(CpuFeatures::IsSupported(VFP3)); |
2840 int vd, d; | 2840 int vd, d; |
2841 dst.split_code(&vd, &d); | 2841 dst.split_code(&vd, &d); |
2842 int imm5 = 32 - fraction_bits; | 2842 int imm5 = 32 - fraction_bits; |
2843 int i = imm5 & 1; | 2843 int i = imm5 & 1; |
2844 int imm4 = (imm5 >> 1) & 0xf; | 2844 int imm4 = (imm5 >> 1) & 0xf; |
2845 emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 | | 2845 emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 | |
2846 vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4); | 2846 vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4); |
2847 } | 2847 } |
2848 | 2848 |
2849 | 2849 |
(...skipping 160 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3010 } | 3010 } |
3011 | 3011 |
3012 | 3012 |
3013 void Assembler::vcmp(const DwVfpRegister src1, | 3013 void Assembler::vcmp(const DwVfpRegister src1, |
3014 const double src2, | 3014 const double src2, |
3015 const Condition cond) { | 3015 const Condition cond) { |
3016 // vcmp(Dd, #0.0) double precision floating point comparison. | 3016 // vcmp(Dd, #0.0) double precision floating point comparison. |
3017 // Instruction details available in ARM DDI 0406C.b, A8-864. | 3017 // Instruction details available in ARM DDI 0406C.b, A8-864. |
3018 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) | | 3018 // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) | |
3019 // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0) | 3019 // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0) |
3020 ASSERT(src2 == 0.0); | 3020 DCHECK(src2 == 0.0); |
3021 int vd, d; | 3021 int vd, d; |
3022 src1.split_code(&vd, &d); | 3022 src1.split_code(&vd, &d); |
3023 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6); | 3023 emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6); |
3024 } | 3024 } |
3025 | 3025 |
3026 | 3026 |
3027 void Assembler::vmsr(Register dst, Condition cond) { | 3027 void Assembler::vmsr(Register dst, Condition cond) { |
3028 // Instruction details available in ARM DDI 0406A, A8-652. | 3028 // Instruction details available in ARM DDI 0406A, A8-652. |
3029 // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) | | 3029 // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) | |
3030 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) | 3030 // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0) |
(...skipping 27 matching lines...) Expand all Loading... |
3058 | 3058 |
3059 | 3059 |
3060 // Support for NEON. | 3060 // Support for NEON. |
3061 | 3061 |
3062 void Assembler::vld1(NeonSize size, | 3062 void Assembler::vld1(NeonSize size, |
3063 const NeonListOperand& dst, | 3063 const NeonListOperand& dst, |
3064 const NeonMemOperand& src) { | 3064 const NeonMemOperand& src) { |
3065 // Instruction details available in ARM DDI 0406C.b, A8.8.320. | 3065 // Instruction details available in ARM DDI 0406C.b, A8.8.320. |
3066 // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) | | 3066 // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) | |
3067 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0) | 3067 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0) |
3068 ASSERT(CpuFeatures::IsSupported(NEON)); | 3068 DCHECK(CpuFeatures::IsSupported(NEON)); |
3069 int vd, d; | 3069 int vd, d; |
3070 dst.base().split_code(&vd, &d); | 3070 dst.base().split_code(&vd, &d); |
3071 emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 | | 3071 emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 | |
3072 dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code()); | 3072 dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code()); |
3073 } | 3073 } |
3074 | 3074 |
3075 | 3075 |
3076 void Assembler::vst1(NeonSize size, | 3076 void Assembler::vst1(NeonSize size, |
3077 const NeonListOperand& src, | 3077 const NeonListOperand& src, |
3078 const NeonMemOperand& dst) { | 3078 const NeonMemOperand& dst) { |
3079 // Instruction details available in ARM DDI 0406C.b, A8.8.404. | 3079 // Instruction details available in ARM DDI 0406C.b, A8.8.404. |
3080 // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) | | 3080 // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) | |
3081 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0) | 3081 // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0) |
3082 ASSERT(CpuFeatures::IsSupported(NEON)); | 3082 DCHECK(CpuFeatures::IsSupported(NEON)); |
3083 int vd, d; | 3083 int vd, d; |
3084 src.base().split_code(&vd, &d); | 3084 src.base().split_code(&vd, &d); |
3085 emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 | | 3085 emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 | |
3086 size*B6 | dst.align()*B4 | dst.rm().code()); | 3086 size*B6 | dst.align()*B4 | dst.rm().code()); |
3087 } | 3087 } |
3088 | 3088 |
3089 | 3089 |
3090 void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) { | 3090 void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) { |
3091 // Instruction details available in ARM DDI 0406C.b, A8.8.346. | 3091 // Instruction details available in ARM DDI 0406C.b, A8.8.346. |
3092 // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) | | 3092 // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) | |
3093 // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0) | 3093 // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0) |
3094 ASSERT(CpuFeatures::IsSupported(NEON)); | 3094 DCHECK(CpuFeatures::IsSupported(NEON)); |
3095 int vd, d; | 3095 int vd, d; |
3096 dst.split_code(&vd, &d); | 3096 dst.split_code(&vd, &d); |
3097 int vm, m; | 3097 int vm, m; |
3098 src.split_code(&vm, &m); | 3098 src.split_code(&vm, &m); |
3099 emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 | | 3099 emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 | |
3100 (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm); | 3100 (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm); |
3101 } | 3101 } |
3102 | 3102 |
3103 | 3103 |
3104 // Pseudo instructions. | 3104 // Pseudo instructions. |
3105 void Assembler::nop(int type) { | 3105 void Assembler::nop(int type) { |
3106 // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes | 3106 // ARMv6{K/T2} and v7 have an actual NOP instruction but it serializes |
3107 // some of the CPU's pipeline and has to issue. Older ARM chips simply used | 3107 // some of the CPU's pipeline and has to issue. Older ARM chips simply used |
3108 // MOV Rx, Rx as NOP and it performs better even in newer CPUs. | 3108 // MOV Rx, Rx as NOP and it performs better even in newer CPUs. |
3109 // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode | 3109 // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode |
3110 // a type. | 3110 // a type. |
3111 ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop. | 3111 DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop. |
3112 emit(al | 13*B21 | type*B12 | type); | 3112 emit(al | 13*B21 | type*B12 | type); |
3113 } | 3113 } |
3114 | 3114 |
3115 | 3115 |
3116 bool Assembler::IsMovT(Instr instr) { | 3116 bool Assembler::IsMovT(Instr instr) { |
3117 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions | 3117 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions |
3118 ((kNumRegisters-1)*B12) | // mask out register | 3118 ((kNumRegisters-1)*B12) | // mask out register |
3119 EncodeMovwImmediate(0xFFFF)); // mask out immediate value | 3119 EncodeMovwImmediate(0xFFFF)); // mask out immediate value |
3120 return instr == kMovtPattern; | 3120 return instr == kMovtPattern; |
3121 } | 3121 } |
3122 | 3122 |
3123 | 3123 |
3124 bool Assembler::IsMovW(Instr instr) { | 3124 bool Assembler::IsMovW(Instr instr) { |
3125 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions | 3125 instr &= ~(((kNumberOfConditions - 1) << 28) | // Mask off conditions |
3126 ((kNumRegisters-1)*B12) | // mask out destination | 3126 ((kNumRegisters-1)*B12) | // mask out destination |
3127 EncodeMovwImmediate(0xFFFF)); // mask out immediate value | 3127 EncodeMovwImmediate(0xFFFF)); // mask out immediate value |
3128 return instr == kMovwPattern; | 3128 return instr == kMovwPattern; |
3129 } | 3129 } |
3130 | 3130 |
3131 | 3131 |
3132 Instr Assembler::GetMovTPattern() { return kMovtPattern; } | 3132 Instr Assembler::GetMovTPattern() { return kMovtPattern; } |
3133 | 3133 |
3134 | 3134 |
3135 Instr Assembler::GetMovWPattern() { return kMovwPattern; } | 3135 Instr Assembler::GetMovWPattern() { return kMovwPattern; } |
3136 | 3136 |
3137 | 3137 |
3138 Instr Assembler::EncodeMovwImmediate(uint32_t immediate) { | 3138 Instr Assembler::EncodeMovwImmediate(uint32_t immediate) { |
3139 ASSERT(immediate < 0x10000); | 3139 DCHECK(immediate < 0x10000); |
3140 return ((immediate & 0xf000) << 4) | (immediate & 0xfff); | 3140 return ((immediate & 0xf000) << 4) | (immediate & 0xfff); |
3141 } | 3141 } |
3142 | 3142 |
3143 | 3143 |
3144 Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) { | 3144 Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) { |
3145 instruction &= ~EncodeMovwImmediate(0xffff); | 3145 instruction &= ~EncodeMovwImmediate(0xffff); |
3146 return instruction | EncodeMovwImmediate(immediate); | 3146 return instruction | EncodeMovwImmediate(immediate); |
3147 } | 3147 } |
3148 | 3148 |
3149 | 3149 |
3150 bool Assembler::IsNop(Instr instr, int type) { | 3150 bool Assembler::IsNop(Instr instr, int type) { |
3151 ASSERT(0 <= type && type <= 14); // mov pc, pc isn't a nop. | 3151 DCHECK(0 <= type && type <= 14); // mov pc, pc isn't a nop. |
3152 // Check for mov rx, rx where x = type. | 3152 // Check for mov rx, rx where x = type. |
3153 return instr == (al | 13*B21 | type*B12 | type); | 3153 return instr == (al | 13*B21 | type*B12 | type); |
3154 } | 3154 } |
3155 | 3155 |
3156 | 3156 |
3157 // static | 3157 // static |
3158 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { | 3158 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) { |
3159 uint32_t dummy1; | 3159 uint32_t dummy1; |
3160 uint32_t dummy2; | 3160 uint32_t dummy2; |
3161 return fits_shifter(imm32, &dummy1, &dummy2, NULL); | 3161 return fits_shifter(imm32, &dummy1, &dummy2, NULL); |
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3232 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, | 3232 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta, |
3233 reloc_info_writer.last_pc() + pc_delta); | 3233 reloc_info_writer.last_pc() + pc_delta); |
3234 | 3234 |
3235 // None of our relocation types are pc relative pointing outside the code | 3235 // None of our relocation types are pc relative pointing outside the code |
3236 // buffer nor pc absolute pointing inside the code buffer, so there is no need | 3236 // buffer nor pc absolute pointing inside the code buffer, so there is no need |
3237 // to relocate any emitted relocation entries. | 3237 // to relocate any emitted relocation entries. |
3238 | 3238 |
3239 // Relocate pending relocation entries. | 3239 // Relocate pending relocation entries. |
3240 for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) { | 3240 for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) { |
3241 RelocInfo& rinfo = pending_32_bit_reloc_info_[i]; | 3241 RelocInfo& rinfo = pending_32_bit_reloc_info_[i]; |
3242 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && | 3242 DCHECK(rinfo.rmode() != RelocInfo::COMMENT && |
3243 rinfo.rmode() != RelocInfo::POSITION); | 3243 rinfo.rmode() != RelocInfo::POSITION); |
3244 if (rinfo.rmode() != RelocInfo::JS_RETURN) { | 3244 if (rinfo.rmode() != RelocInfo::JS_RETURN) { |
3245 rinfo.set_pc(rinfo.pc() + pc_delta); | 3245 rinfo.set_pc(rinfo.pc() + pc_delta); |
3246 } | 3246 } |
3247 } | 3247 } |
3248 for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) { | 3248 for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) { |
3249 RelocInfo& rinfo = pending_64_bit_reloc_info_[i]; | 3249 RelocInfo& rinfo = pending_64_bit_reloc_info_[i]; |
3250 ASSERT(rinfo.rmode() == RelocInfo::NONE64); | 3250 DCHECK(rinfo.rmode() == RelocInfo::NONE64); |
3251 rinfo.set_pc(rinfo.pc() + pc_delta); | 3251 rinfo.set_pc(rinfo.pc() + pc_delta); |
3252 } | 3252 } |
3253 constant_pool_builder_.Relocate(pc_delta); | 3253 constant_pool_builder_.Relocate(pc_delta); |
3254 } | 3254 } |
3255 | 3255 |
3256 | 3256 |
// Emits a single raw byte of data at the current assembly position.
void Assembler::db(uint8_t data) {
  // No relocation info should be pending while using db. db is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using db.
  DCHECK(num_pending_32_bit_reloc_info_ == 0);
  DCHECK(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();  // Grow the buffer first if we are running out of space.
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}
3267 | 3267 |
3268 | 3268 |
// Emits a raw 32-bit word of data at the current assembly position.
void Assembler::dd(uint32_t data) {
  // No relocation info should be pending while using dd. dd is used
  // to write pure data with no pointers and the constant pool should
  // be emitted before using dd.
  DCHECK(num_pending_32_bit_reloc_info_ == 0);
  DCHECK(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();  // Grow the buffer first if we are running out of space.
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}
3279 | 3279 |
3280 | 3280 |
3281 void Assembler::emit_code_stub_address(Code* stub) { | 3281 void Assembler::emit_code_stub_address(Code* stub) { |
3282 CheckBuffer(); | 3282 CheckBuffer(); |
3283 *reinterpret_cast<uint32_t*>(pc_) = | 3283 *reinterpret_cast<uint32_t*>(pc_) = |
3284 reinterpret_cast<uint32_t>(stub->instruction_start()); | 3284 reinterpret_cast<uint32_t>(stub->instruction_start()); |
3285 pc_ += sizeof(uint32_t); | 3285 pc_ += sizeof(uint32_t); |
3286 } | 3286 } |
3287 | 3287 |
3288 | 3288 |
3289 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { | 3289 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { |
3290 RelocInfo rinfo(pc_, rmode, data, NULL); | 3290 RelocInfo rinfo(pc_, rmode, data, NULL); |
3291 RecordRelocInfo(rinfo); | 3291 RecordRelocInfo(rinfo); |
3292 } | 3292 } |
3293 | 3293 |
3294 | 3294 |
3295 void Assembler::RecordRelocInfo(const RelocInfo& rinfo) { | 3295 void Assembler::RecordRelocInfo(const RelocInfo& rinfo) { |
3296 if (!RelocInfo::IsNone(rinfo.rmode())) { | 3296 if (!RelocInfo::IsNone(rinfo.rmode())) { |
3297 // Don't record external references unless the heap will be serialized. | 3297 // Don't record external references unless the heap will be serialized. |
3298 if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE && | 3298 if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE && |
3299 !serializer_enabled() && !emit_debug_code()) { | 3299 !serializer_enabled() && !emit_debug_code()) { |
3300 return; | 3300 return; |
3301 } | 3301 } |
3302 ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here | 3302 DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here |
3303 if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) { | 3303 if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) { |
3304 RelocInfo reloc_info_with_ast_id(rinfo.pc(), | 3304 RelocInfo reloc_info_with_ast_id(rinfo.pc(), |
3305 rinfo.rmode(), | 3305 rinfo.rmode(), |
3306 RecordedAstId().ToInt(), | 3306 RecordedAstId().ToInt(), |
3307 NULL); | 3307 NULL); |
3308 ClearRecordedAstId(); | 3308 ClearRecordedAstId(); |
3309 reloc_info_writer.Write(&reloc_info_with_ast_id); | 3309 reloc_info_writer.Write(&reloc_info_with_ast_id); |
3310 } else { | 3310 } else { |
3311 reloc_info_writer.Write(&rinfo); | 3311 reloc_info_writer.Write(&rinfo); |
3312 } | 3312 } |
3313 } | 3313 } |
3314 } | 3314 } |
3315 | 3315 |
3316 | 3316 |
// Queues a constant pool entry described by |rinfo| and returns the layout
// section it was placed in. With the out-of-line constant pool the builder
// decides placement; otherwise entries accumulate in the pending arrays
// until CheckConstPool emits them.
ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
    const RelocInfo& rinfo) {
  if (FLAG_enable_ool_constant_pool) {
    // Out-of-line pool: delegate to the builder, which chooses the section.
    return constant_pool_builder_.AddEntry(this, rinfo);
  } else {
    if (rinfo.rmode() == RelocInfo::NONE64) {
      // 64-bit entry.
      DCHECK(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
      if (num_pending_64_bit_reloc_info_ == 0) {
        // Track the first use so pool-distance checks can be made later.
        first_const_pool_64_use_ = pc_offset();
      }
      pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
    } else {
      // 32-bit entry.
      DCHECK(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
      if (num_pending_32_bit_reloc_info_ == 0) {
        first_const_pool_32_use_ = pc_offset();
      }
      pending_32_bit_reloc_info_[num_pending_32_bit_reloc_info_++] = rinfo;
    }
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
    return ConstantPoolArray::SMALL_SECTION;
  }
}
3341 | 3341 |
3342 | 3342 |
// Forbids constant pool emission for the next |instructions| instructions,
// so that short instruction sequences are not split by pool emission.
void Assembler::BlockConstPoolFor(int instructions) {
  if (FLAG_enable_ool_constant_pool) {
    // Should be a no-op if using an out-of-line constant pool.
    DCHECK(num_pending_32_bit_reloc_info_ == 0);
    DCHECK(num_pending_64_bit_reloc_info_ == 0);
    return;
  }

  // First pc offset at which the pool may be emitted again.
  int pc_limit = pc_offset() + instructions * kInstrSize;
  if (no_const_pool_before_ < pc_limit) {
    // Max pool start (if we need a jump and an alignment).
#ifdef DEBUG
    int start = pc_limit + kInstrSize + 2 * kPointerSize;
    // Blocking must not push any pending entry past its addressing range.
    DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
           (start - first_const_pool_32_use_ +
            num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
    DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
           (start - first_const_pool_64_use_ < kMaxDistToFPPool));
#endif
    no_const_pool_before_ = pc_limit;
  }

  // Also delay the next pool check until the blocked region has passed.
  if (next_buffer_check_ < no_const_pool_before_) {
    next_buffer_check_ = no_const_pool_before_;
  }
}
3369 | 3369 |
3370 | 3370 |
3371 void Assembler::CheckConstPool(bool force_emit, bool require_jump) { | 3371 void Assembler::CheckConstPool(bool force_emit, bool require_jump) { |
3372 if (FLAG_enable_ool_constant_pool) { | 3372 if (FLAG_enable_ool_constant_pool) { |
3373 // Should be a no-op if using an out-of-line constant pool. | 3373 // Should be a no-op if using an out-of-line constant pool. |
3374 ASSERT(num_pending_32_bit_reloc_info_ == 0); | 3374 DCHECK(num_pending_32_bit_reloc_info_ == 0); |
3375 ASSERT(num_pending_64_bit_reloc_info_ == 0); | 3375 DCHECK(num_pending_64_bit_reloc_info_ == 0); |
3376 return; | 3376 return; |
3377 } | 3377 } |
3378 | 3378 |
3379 // Some short sequence of instruction mustn't be broken up by constant pool | 3379 // Some short sequence of instruction mustn't be broken up by constant pool |
3380 // emission, such sequences are protected by calls to BlockConstPoolFor and | 3380 // emission, such sequences are protected by calls to BlockConstPoolFor and |
3381 // BlockConstPoolScope. | 3381 // BlockConstPoolScope. |
3382 if (is_const_pool_blocked()) { | 3382 if (is_const_pool_blocked()) { |
3383 // Something is wrong if emission is forced and blocked at the same time. | 3383 // Something is wrong if emission is forced and blocked at the same time. |
3384 ASSERT(!force_emit); | 3384 DCHECK(!force_emit); |
3385 return; | 3385 return; |
3386 } | 3386 } |
3387 | 3387 |
3388 // There is nothing to do if there are no pending constant pool entries. | 3388 // There is nothing to do if there are no pending constant pool entries. |
3389 if ((num_pending_32_bit_reloc_info_ == 0) && | 3389 if ((num_pending_32_bit_reloc_info_ == 0) && |
3390 (num_pending_64_bit_reloc_info_ == 0)) { | 3390 (num_pending_64_bit_reloc_info_ == 0)) { |
3391 // Calculate the offset of the next check. | 3391 // Calculate the offset of the next check. |
3392 next_buffer_check_ = pc_offset() + kCheckPoolInterval; | 3392 next_buffer_check_ = pc_offset() + kCheckPoolInterval; |
3393 return; | 3393 return; |
3394 } | 3394 } |
(...skipping 18 matching lines...) Expand all Loading... |
3413 | 3413 |
3414 // We emit a constant pool when: | 3414 // We emit a constant pool when: |
3415 // * requested to do so by parameter force_emit (e.g. after each function). | 3415 // * requested to do so by parameter force_emit (e.g. after each function). |
3416 // * the distance from the first instruction accessing the constant pool to | 3416 // * the distance from the first instruction accessing the constant pool to |
3417 // any of the constant pool entries will exceed its limit the next | 3417 // any of the constant pool entries will exceed its limit the next |
3418 // time the pool is checked. This is overly restrictive, but we don't emit | 3418 // time the pool is checked. This is overly restrictive, but we don't emit |
3419 // constant pool entries in-order so it's conservatively correct. | 3419 // constant pool entries in-order so it's conservatively correct. |
3420 // * the instruction doesn't require a jump after itself to jump over the | 3420 // * the instruction doesn't require a jump after itself to jump over the |
3421 // constant pool, and we're getting close to running out of range. | 3421 // constant pool, and we're getting close to running out of range. |
3422 if (!force_emit) { | 3422 if (!force_emit) { |
3423 ASSERT((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0)); | 3423 DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0)); |
3424 bool need_emit = false; | 3424 bool need_emit = false; |
3425 if (has_fp_values) { | 3425 if (has_fp_values) { |
3426 int dist64 = pc_offset() + | 3426 int dist64 = pc_offset() + |
3427 size - | 3427 size - |
3428 num_pending_32_bit_reloc_info_ * kPointerSize - | 3428 num_pending_32_bit_reloc_info_ * kPointerSize - |
3429 first_const_pool_64_use_; | 3429 first_const_pool_64_use_; |
3430 if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) || | 3430 if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) || |
3431 (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) { | 3431 (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) { |
3432 need_emit = true; | 3432 need_emit = true; |
3433 } | 3433 } |
(...skipping 29 matching lines...) Expand all Loading... |
3463 | 3463 |
3464 if (require_64_bit_align) { | 3464 if (require_64_bit_align) { |
3465 emit(kConstantPoolMarker); | 3465 emit(kConstantPoolMarker); |
3466 } | 3466 } |
3467 | 3467 |
3468 // Emit 64-bit constant pool entries first: their range is smaller than | 3468 // Emit 64-bit constant pool entries first: their range is smaller than |
3469 // 32-bit entries. | 3469 // 32-bit entries. |
3470 for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) { | 3470 for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) { |
3471 RelocInfo& rinfo = pending_64_bit_reloc_info_[i]; | 3471 RelocInfo& rinfo = pending_64_bit_reloc_info_[i]; |
3472 | 3472 |
3473 ASSERT(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment. | 3473 DCHECK(!((uintptr_t)pc_ & 0x7)); // Check 64-bit alignment. |
3474 | 3474 |
3475 Instr instr = instr_at(rinfo.pc()); | 3475 Instr instr = instr_at(rinfo.pc()); |
3476 // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0. | 3476 // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0. |
3477 ASSERT((IsVldrDPcImmediateOffset(instr) && | 3477 DCHECK((IsVldrDPcImmediateOffset(instr) && |
3478 GetVldrDRegisterImmediateOffset(instr) == 0)); | 3478 GetVldrDRegisterImmediateOffset(instr) == 0)); |
3479 | 3479 |
3480 int delta = pc_ - rinfo.pc() - kPcLoadDelta; | 3480 int delta = pc_ - rinfo.pc() - kPcLoadDelta; |
3481 ASSERT(is_uint10(delta)); | 3481 DCHECK(is_uint10(delta)); |
3482 | 3482 |
3483 bool found = false; | 3483 bool found = false; |
3484 uint64_t value = rinfo.raw_data64(); | 3484 uint64_t value = rinfo.raw_data64(); |
3485 for (int j = 0; j < i; j++) { | 3485 for (int j = 0; j < i; j++) { |
3486 RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j]; | 3486 RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j]; |
3487 if (value == rinfo2.raw_data64()) { | 3487 if (value == rinfo2.raw_data64()) { |
3488 found = true; | 3488 found = true; |
3489 ASSERT(rinfo2.rmode() == RelocInfo::NONE64); | 3489 DCHECK(rinfo2.rmode() == RelocInfo::NONE64); |
3490 Instr instr2 = instr_at(rinfo2.pc()); | 3490 Instr instr2 = instr_at(rinfo2.pc()); |
3491 ASSERT(IsVldrDPcImmediateOffset(instr2)); | 3491 DCHECK(IsVldrDPcImmediateOffset(instr2)); |
3492 delta = GetVldrDRegisterImmediateOffset(instr2); | 3492 delta = GetVldrDRegisterImmediateOffset(instr2); |
3493 delta += rinfo2.pc() - rinfo.pc(); | 3493 delta += rinfo2.pc() - rinfo.pc(); |
3494 break; | 3494 break; |
3495 } | 3495 } |
3496 } | 3496 } |
3497 | 3497 |
3498 instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta)); | 3498 instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta)); |
3499 | 3499 |
3500 if (!found) { | 3500 if (!found) { |
3501 uint64_t uint_data = rinfo.raw_data64(); | 3501 uint64_t uint_data = rinfo.raw_data64(); |
3502 emit(uint_data & 0xFFFFFFFF); | 3502 emit(uint_data & 0xFFFFFFFF); |
3503 emit(uint_data >> 32); | 3503 emit(uint_data >> 32); |
3504 } | 3504 } |
3505 } | 3505 } |
3506 | 3506 |
3507 // Emit 32-bit constant pool entries. | 3507 // Emit 32-bit constant pool entries. |
3508 for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) { | 3508 for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) { |
3509 RelocInfo& rinfo = pending_32_bit_reloc_info_[i]; | 3509 RelocInfo& rinfo = pending_32_bit_reloc_info_[i]; |
3510 ASSERT(rinfo.rmode() != RelocInfo::COMMENT && | 3510 DCHECK(rinfo.rmode() != RelocInfo::COMMENT && |
3511 rinfo.rmode() != RelocInfo::POSITION && | 3511 rinfo.rmode() != RelocInfo::POSITION && |
3512 rinfo.rmode() != RelocInfo::STATEMENT_POSITION && | 3512 rinfo.rmode() != RelocInfo::STATEMENT_POSITION && |
3513 rinfo.rmode() != RelocInfo::CONST_POOL && | 3513 rinfo.rmode() != RelocInfo::CONST_POOL && |
3514 rinfo.rmode() != RelocInfo::NONE64); | 3514 rinfo.rmode() != RelocInfo::NONE64); |
3515 | 3515 |
3516 Instr instr = instr_at(rinfo.pc()); | 3516 Instr instr = instr_at(rinfo.pc()); |
3517 | 3517 |
3518 // 64-bit loads shouldn't get here. | 3518 // 64-bit loads shouldn't get here. |
3519 ASSERT(!IsVldrDPcImmediateOffset(instr)); | 3519 DCHECK(!IsVldrDPcImmediateOffset(instr)); |
3520 | 3520 |
3521 if (IsLdrPcImmediateOffset(instr) && | 3521 if (IsLdrPcImmediateOffset(instr) && |
3522 GetLdrRegisterImmediateOffset(instr) == 0) { | 3522 GetLdrRegisterImmediateOffset(instr) == 0) { |
3523 int delta = pc_ - rinfo.pc() - kPcLoadDelta; | 3523 int delta = pc_ - rinfo.pc() - kPcLoadDelta; |
3524 ASSERT(is_uint12(delta)); | 3524 DCHECK(is_uint12(delta)); |
3525 // 0 is the smallest delta: | 3525 // 0 is the smallest delta: |
3526 // ldr rd, [pc, #0] | 3526 // ldr rd, [pc, #0] |
3527 // constant pool marker | 3527 // constant pool marker |
3528 // data | 3528 // data |
3529 | 3529 |
3530 bool found = false; | 3530 bool found = false; |
3531 if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) { | 3531 if (!serializer_enabled() && rinfo.rmode() >= RelocInfo::CELL) { |
3532 for (int j = 0; j < i; j++) { | 3532 for (int j = 0; j < i; j++) { |
3533 RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j]; | 3533 RelocInfo& rinfo2 = pending_32_bit_reloc_info_[j]; |
3534 | 3534 |
3535 if ((rinfo2.data() == rinfo.data()) && | 3535 if ((rinfo2.data() == rinfo.data()) && |
3536 (rinfo2.rmode() == rinfo.rmode())) { | 3536 (rinfo2.rmode() == rinfo.rmode())) { |
3537 Instr instr2 = instr_at(rinfo2.pc()); | 3537 Instr instr2 = instr_at(rinfo2.pc()); |
3538 if (IsLdrPcImmediateOffset(instr2)) { | 3538 if (IsLdrPcImmediateOffset(instr2)) { |
3539 delta = GetLdrRegisterImmediateOffset(instr2); | 3539 delta = GetLdrRegisterImmediateOffset(instr2); |
3540 delta += rinfo2.pc() - rinfo.pc(); | 3540 delta += rinfo2.pc() - rinfo.pc(); |
3541 found = true; | 3541 found = true; |
3542 break; | 3542 break; |
3543 } | 3543 } |
3544 } | 3544 } |
3545 } | 3545 } |
3546 } | 3546 } |
3547 | 3547 |
3548 instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta)); | 3548 instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta)); |
3549 | 3549 |
3550 if (!found) { | 3550 if (!found) { |
3551 emit(rinfo.data()); | 3551 emit(rinfo.data()); |
3552 } | 3552 } |
3553 } else { | 3553 } else { |
3554 ASSERT(IsMovW(instr)); | 3554 DCHECK(IsMovW(instr)); |
3555 } | 3555 } |
3556 } | 3556 } |
3557 | 3557 |
3558 num_pending_32_bit_reloc_info_ = 0; | 3558 num_pending_32_bit_reloc_info_ = 0; |
3559 num_pending_64_bit_reloc_info_ = 0; | 3559 num_pending_64_bit_reloc_info_ = 0; |
3560 first_const_pool_32_use_ = -1; | 3560 first_const_pool_32_use_ = -1; |
3561 first_const_pool_64_use_ = -1; | 3561 first_const_pool_64_use_ = -1; |
3562 | 3562 |
3563 RecordComment("]"); | 3563 RecordComment("]"); |
3564 | 3564 |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3597 | 3597 |
3598 ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType( | 3598 ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType( |
3599 RelocInfo::Mode rmode) { | 3599 RelocInfo::Mode rmode) { |
3600 if (rmode == RelocInfo::NONE64) { | 3600 if (rmode == RelocInfo::NONE64) { |
3601 return ConstantPoolArray::INT64; | 3601 return ConstantPoolArray::INT64; |
3602 } else if (!RelocInfo::IsGCRelocMode(rmode)) { | 3602 } else if (!RelocInfo::IsGCRelocMode(rmode)) { |
3603 return ConstantPoolArray::INT32; | 3603 return ConstantPoolArray::INT32; |
3604 } else if (RelocInfo::IsCodeTarget(rmode)) { | 3604 } else if (RelocInfo::IsCodeTarget(rmode)) { |
3605 return ConstantPoolArray::CODE_PTR; | 3605 return ConstantPoolArray::CODE_PTR; |
3606 } else { | 3606 } else { |
3607 ASSERT(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode)); | 3607 DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode)); |
3608 return ConstantPoolArray::HEAP_PTR; | 3608 return ConstantPoolArray::HEAP_PTR; |
3609 } | 3609 } |
3610 } | 3610 } |
3611 | 3611 |
3612 | 3612 |
3613 ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry( | 3613 ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry( |
3614 Assembler* assm, const RelocInfo& rinfo) { | 3614 Assembler* assm, const RelocInfo& rinfo) { |
3615 RelocInfo::Mode rmode = rinfo.rmode(); | 3615 RelocInfo::Mode rmode = rinfo.rmode(); |
3616 ASSERT(rmode != RelocInfo::COMMENT && | 3616 DCHECK(rmode != RelocInfo::COMMENT && |
3617 rmode != RelocInfo::POSITION && | 3617 rmode != RelocInfo::POSITION && |
3618 rmode != RelocInfo::STATEMENT_POSITION && | 3618 rmode != RelocInfo::STATEMENT_POSITION && |
3619 rmode != RelocInfo::CONST_POOL); | 3619 rmode != RelocInfo::CONST_POOL); |
3620 | 3620 |
3621 // Try to merge entries which won't be patched. | 3621 // Try to merge entries which won't be patched. |
3622 int merged_index = -1; | 3622 int merged_index = -1; |
3623 ConstantPoolArray::LayoutSection entry_section = current_section_; | 3623 ConstantPoolArray::LayoutSection entry_section = current_section_; |
3624 if (RelocInfo::IsNone(rmode) || | 3624 if (RelocInfo::IsNone(rmode) || |
3625 (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) { | 3625 (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) { |
3626 size_t i; | 3626 size_t i; |
3627 std::vector<ConstantPoolEntry>::const_iterator it; | 3627 std::vector<ConstantPoolEntry>::const_iterator it; |
3628 for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) { | 3628 for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) { |
3629 if (RelocInfo::IsEqual(rinfo, it->rinfo_)) { | 3629 if (RelocInfo::IsEqual(rinfo, it->rinfo_)) { |
3630 // Merge with found entry. | 3630 // Merge with found entry. |
3631 merged_index = i; | 3631 merged_index = i; |
3632 entry_section = entries_[i].section_; | 3632 entry_section = entries_[i].section_; |
3633 break; | 3633 break; |
3634 } | 3634 } |
3635 } | 3635 } |
3636 } | 3636 } |
3637 ASSERT(entry_section <= current_section_); | 3637 DCHECK(entry_section <= current_section_); |
3638 entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index)); | 3638 entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index)); |
3639 | 3639 |
3640 if (merged_index == -1) { | 3640 if (merged_index == -1) { |
3641 // Not merged, so update the appropriate count. | 3641 // Not merged, so update the appropriate count. |
3642 number_of_entries_[entry_section].increment(GetConstantPoolType(rmode)); | 3642 number_of_entries_[entry_section].increment(GetConstantPoolType(rmode)); |
3643 } | 3643 } |
3644 | 3644 |
3645 // Check if we still have room for another entry in the small section | 3645 // Check if we still have room for another entry in the small section |
3646 // given Arm's ldr and vldr immediate offset range. | 3646 // given Arm's ldr and vldr immediate offset range. |
3647 if (current_section_ == ConstantPoolArray::SMALL_SECTION && | 3647 if (current_section_ == ConstantPoolArray::SMALL_SECTION && |
3648 !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) && | 3648 !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) && |
3649 is_uint10(ConstantPoolArray::MaxInt64Offset( | 3649 is_uint10(ConstantPoolArray::MaxInt64Offset( |
3650 small_entries()->count_of(ConstantPoolArray::INT64))))) { | 3650 small_entries()->count_of(ConstantPoolArray::INT64))))) { |
3651 current_section_ = ConstantPoolArray::EXTENDED_SECTION; | 3651 current_section_ = ConstantPoolArray::EXTENDED_SECTION; |
3652 } | 3652 } |
3653 return entry_section; | 3653 return entry_section; |
3654 } | 3654 } |
3655 | 3655 |
3656 | 3656 |
3657 void ConstantPoolBuilder::Relocate(int pc_delta) { | 3657 void ConstantPoolBuilder::Relocate(int pc_delta) { |
3658 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin(); | 3658 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin(); |
3659 entry != entries_.end(); entry++) { | 3659 entry != entries_.end(); entry++) { |
3660 ASSERT(entry->rinfo_.rmode() != RelocInfo::JS_RETURN); | 3660 DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN); |
3661 entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta); | 3661 entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta); |
3662 } | 3662 } |
3663 } | 3663 } |
3664 | 3664 |
3665 | 3665 |
3666 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) { | 3666 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) { |
3667 if (IsEmpty()) { | 3667 if (IsEmpty()) { |
3668 return isolate->factory()->empty_constant_pool_array(); | 3668 return isolate->factory()->empty_constant_pool_array(); |
3669 } else if (extended_entries()->is_empty()) { | 3669 } else if (extended_entries()->is_empty()) { |
3670 return isolate->factory()->NewConstantPoolArray(*small_entries()); | 3670 return isolate->factory()->NewConstantPoolArray(*small_entries()); |
3671 } else { | 3671 } else { |
3672 ASSERT(current_section_ == ConstantPoolArray::EXTENDED_SECTION); | 3672 DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION); |
3673 return isolate->factory()->NewExtendedConstantPoolArray( | 3673 return isolate->factory()->NewExtendedConstantPoolArray( |
3674 *small_entries(), *extended_entries()); | 3674 *small_entries(), *extended_entries()); |
3675 } | 3675 } |
3676 } | 3676 } |
3677 | 3677 |
3678 | 3678 |
3679 void ConstantPoolBuilder::Populate(Assembler* assm, | 3679 void ConstantPoolBuilder::Populate(Assembler* assm, |
3680 ConstantPoolArray* constant_pool) { | 3680 ConstantPoolArray* constant_pool) { |
3681 ASSERT_EQ(extended_entries()->is_empty(), | 3681 DCHECK_EQ(extended_entries()->is_empty(), |
3682 !constant_pool->is_extended_layout()); | 3682 !constant_pool->is_extended_layout()); |
3683 ASSERT(small_entries()->equals(ConstantPoolArray::NumberOfEntries( | 3683 DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries( |
3684 constant_pool, ConstantPoolArray::SMALL_SECTION))); | 3684 constant_pool, ConstantPoolArray::SMALL_SECTION))); |
3685 if (constant_pool->is_extended_layout()) { | 3685 if (constant_pool->is_extended_layout()) { |
3686 ASSERT(extended_entries()->equals(ConstantPoolArray::NumberOfEntries( | 3686 DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries( |
3687 constant_pool, ConstantPoolArray::EXTENDED_SECTION))); | 3687 constant_pool, ConstantPoolArray::EXTENDED_SECTION))); |
3688 } | 3688 } |
3689 | 3689 |
3690 // Set up initial offsets. | 3690 // Set up initial offsets. |
3691 int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS] | 3691 int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS] |
3692 [ConstantPoolArray::NUMBER_OF_TYPES]; | 3692 [ConstantPoolArray::NUMBER_OF_TYPES]; |
3693 for (int section = 0; section <= constant_pool->final_section(); section++) { | 3693 for (int section = 0; section <= constant_pool->final_section(); section++) { |
3694 int section_start = (section == ConstantPoolArray::EXTENDED_SECTION) | 3694 int section_start = (section == ConstantPoolArray::EXTENDED_SECTION) |
3695 ? small_entries()->total_count() | 3695 ? small_entries()->total_count() |
3696 : 0; | 3696 : 0; |
(...skipping 19 matching lines...) Expand all Loading... |
3716 offsets[entry->section_][type] += ConstantPoolArray::entry_size(type); | 3716 offsets[entry->section_][type] += ConstantPoolArray::entry_size(type); |
3717 if (type == ConstantPoolArray::INT64) { | 3717 if (type == ConstantPoolArray::INT64) { |
3718 constant_pool->set_at_offset(offset, rinfo.data64()); | 3718 constant_pool->set_at_offset(offset, rinfo.data64()); |
3719 } else if (type == ConstantPoolArray::INT32) { | 3719 } else if (type == ConstantPoolArray::INT32) { |
3720 constant_pool->set_at_offset(offset, | 3720 constant_pool->set_at_offset(offset, |
3721 static_cast<int32_t>(rinfo.data())); | 3721 static_cast<int32_t>(rinfo.data())); |
3722 } else if (type == ConstantPoolArray::CODE_PTR) { | 3722 } else if (type == ConstantPoolArray::CODE_PTR) { |
3723 constant_pool->set_at_offset(offset, | 3723 constant_pool->set_at_offset(offset, |
3724 reinterpret_cast<Address>(rinfo.data())); | 3724 reinterpret_cast<Address>(rinfo.data())); |
3725 } else { | 3725 } else { |
3726 ASSERT(type == ConstantPoolArray::HEAP_PTR); | 3726 DCHECK(type == ConstantPoolArray::HEAP_PTR); |
3727 constant_pool->set_at_offset(offset, | 3727 constant_pool->set_at_offset(offset, |
3728 reinterpret_cast<Object*>(rinfo.data())); | 3728 reinterpret_cast<Object*>(rinfo.data())); |
3729 } | 3729 } |
3730 offset -= kHeapObjectTag; | 3730 offset -= kHeapObjectTag; |
3731 entry->merged_index_ = offset; // Stash offset for merged entries. | 3731 entry->merged_index_ = offset; // Stash offset for merged entries. |
3732 } else { | 3732 } else { |
3733 ASSERT(entry->merged_index_ < (entry - entries_.begin())); | 3733 DCHECK(entry->merged_index_ < (entry - entries_.begin())); |
3734 offset = entries_[entry->merged_index_].merged_index_; | 3734 offset = entries_[entry->merged_index_].merged_index_; |
3735 } | 3735 } |
3736 | 3736 |
3737 // Patch vldr/ldr instruction with correct offset. | 3737 // Patch vldr/ldr instruction with correct offset. |
3738 Instr instr = assm->instr_at(rinfo.pc()); | 3738 Instr instr = assm->instr_at(rinfo.pc()); |
3739 if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) { | 3739 if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) { |
3740 // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]. | 3740 // Instructions to patch must be 'movw rd, [#0]' and 'movt rd, [#0]. |
3741 Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize); | 3741 Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize); |
3742 ASSERT((Assembler::IsMovW(instr) && | 3742 DCHECK((Assembler::IsMovW(instr) && |
3743 Instruction::ImmedMovwMovtValue(instr) == 0)); | 3743 Instruction::ImmedMovwMovtValue(instr) == 0)); |
3744 ASSERT((Assembler::IsMovT(next_instr) && | 3744 DCHECK((Assembler::IsMovT(next_instr) && |
3745 Instruction::ImmedMovwMovtValue(next_instr) == 0)); | 3745 Instruction::ImmedMovwMovtValue(next_instr) == 0)); |
3746 assm->instr_at_put(rinfo.pc(), | 3746 assm->instr_at_put(rinfo.pc(), |
3747 Assembler::PatchMovwImmediate(instr, offset & 0xffff)); | 3747 Assembler::PatchMovwImmediate(instr, offset & 0xffff)); |
3748 assm->instr_at_put( | 3748 assm->instr_at_put( |
3749 rinfo.pc() + Assembler::kInstrSize, | 3749 rinfo.pc() + Assembler::kInstrSize, |
3750 Assembler::PatchMovwImmediate(next_instr, offset >> 16)); | 3750 Assembler::PatchMovwImmediate(next_instr, offset >> 16)); |
3751 } else if (type == ConstantPoolArray::INT64) { | 3751 } else if (type == ConstantPoolArray::INT64) { |
3752 // Instruction to patch must be 'vldr rd, [pp, #0]'. | 3752 // Instruction to patch must be 'vldr rd, [pp, #0]'. |
3753 ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) && | 3753 DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) && |
3754 Assembler::GetVldrDRegisterImmediateOffset(instr) == 0)); | 3754 Assembler::GetVldrDRegisterImmediateOffset(instr) == 0)); |
3755 ASSERT(is_uint10(offset)); | 3755 DCHECK(is_uint10(offset)); |
3756 assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset( | 3756 assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset( |
3757 instr, offset)); | 3757 instr, offset)); |
3758 } else { | 3758 } else { |
3759 // Instruction to patch must be 'ldr rd, [pp, #0]'. | 3759 // Instruction to patch must be 'ldr rd, [pp, #0]'. |
3760 ASSERT((Assembler::IsLdrPpImmediateOffset(instr) && | 3760 DCHECK((Assembler::IsLdrPpImmediateOffset(instr) && |
3761 Assembler::GetLdrRegisterImmediateOffset(instr) == 0)); | 3761 Assembler::GetLdrRegisterImmediateOffset(instr) == 0)); |
3762 ASSERT(is_uint12(offset)); | 3762 DCHECK(is_uint12(offset)); |
3763 assm->instr_at_put( | 3763 assm->instr_at_put( |
3764 rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset)); | 3764 rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset)); |
3765 } | 3765 } |
3766 } | 3766 } |
3767 } | 3767 } |
3768 | 3768 |
3769 | 3769 |
3770 } } // namespace v8::internal | 3770 } } // namespace v8::internal |
3771 | 3771 |
3772 #endif // V8_TARGET_ARCH_ARM | 3772 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |