Chromium Code Reviews

Side by Side Diff: src/arm64/assembler-arm64.cc

Issue 430503007: Rename ASSERT* to DCHECK*. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: REBASE and fixes (created 6 years, 4 months ago)
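Both ASSERT and its replacement DCHECK are debug-only checks: they abort on failure in debug builds and compile to nothing in release builds, so none of the substitutions below change generated code. A minimal sketch of the pattern, not V8's exact definition (the real macro lives in V8's logging header and reports through V8_Fatal):

    #ifdef DEBUG
    #define DCHECK(condition)                                              \
      do {                                                                 \
        if (!(condition)) {                                                \
          V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", #condition);   \
        }                                                                  \
      } while (false)
    #else
    #define DCHECK(condition) ((void) 0)
    #endif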
OLD | NEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // 2 //
3 // Redistribution and use in source and binary forms, with or without 3 // Redistribution and use in source and binary forms, with or without
4 // modification, are permitted provided that the following conditions are 4 // modification, are permitted provided that the following conditions are
5 // met: 5 // met:
6 // 6 //
7 // * Redistributions of source code must retain the above copyright 7 // * Redistributions of source code must retain the above copyright
8 // notice, this list of conditions and the following disclaimer. 8 // notice, this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above 9 // * Redistributions in binary form must reproduce the above
10 // copyright notice, this list of conditions and the following 10 // copyright notice, this list of conditions and the following
(...skipping 47 matching lines...)
58 58
59 59
60 void CpuFeatures::PrintTarget() { } 60 void CpuFeatures::PrintTarget() { }
61 void CpuFeatures::PrintFeatures() { } 61 void CpuFeatures::PrintFeatures() { }
62 62
63 63
64 // ----------------------------------------------------------------------------- 64 // -----------------------------------------------------------------------------
65 // CPURegList utilities. 65 // CPURegList utilities.
66 66
67 CPURegister CPURegList::PopLowestIndex() { 67 CPURegister CPURegList::PopLowestIndex() {
68 ASSERT(IsValid()); 68 DCHECK(IsValid());
69 if (IsEmpty()) { 69 if (IsEmpty()) {
70 return NoCPUReg; 70 return NoCPUReg;
71 } 71 }
72 int index = CountTrailingZeros(list_, kRegListSizeInBits); 72 int index = CountTrailingZeros(list_, kRegListSizeInBits);
73 ASSERT((1 << index) & list_); 73 DCHECK((1 << index) & list_);
74 Remove(index); 74 Remove(index);
75 return CPURegister::Create(index, size_, type_); 75 return CPURegister::Create(index, size_, type_);
76 } 76 }
77 77
78 78
79 CPURegister CPURegList::PopHighestIndex() { 79 CPURegister CPURegList::PopHighestIndex() {
80 ASSERT(IsValid()); 80 DCHECK(IsValid());
81 if (IsEmpty()) { 81 if (IsEmpty()) {
82 return NoCPUReg; 82 return NoCPUReg;
83 } 83 }
84 int index = CountLeadingZeros(list_, kRegListSizeInBits); 84 int index = CountLeadingZeros(list_, kRegListSizeInBits);
85 index = kRegListSizeInBits - 1 - index; 85 index = kRegListSizeInBits - 1 - index;
86 ASSERT((1 << index) & list_); 86 DCHECK((1 << index) & list_);
87 Remove(index); 87 Remove(index);
88 return CPURegister::Create(index, size_, type_); 88 return CPURegister::Create(index, size_, type_);
89 } 89 }
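A worked example of the two pop helpers above, assuming the raw-list constructor from the header: for a 64-bit register list whose bit pattern is 0x0a (0b1010), bits 1 and 3 are set, so:

    CPURegList list(CPURegister::kRegister, kXRegSizeInBits, 0x0a);
    CPURegister lo = list.PopLowestIndex();   // x1: CountTrailingZeros(0b1010) == 1
    CPURegister hi = list.PopHighestIndex();  // x3: 64 - 1 - CountLeadingZeros(0b1010) == 3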
90 90
91 91
92 void CPURegList::RemoveCalleeSaved() { 92 void CPURegList::RemoveCalleeSaved() {
93 if (type() == CPURegister::kRegister) { 93 if (type() == CPURegister::kRegister) {
94 Remove(GetCalleeSaved(RegisterSizeInBits())); 94 Remove(GetCalleeSaved(RegisterSizeInBits()));
95 } else if (type() == CPURegister::kFPRegister) { 95 } else if (type() == CPURegister::kFPRegister) {
96 Remove(GetCalleeSavedFP(RegisterSizeInBits())); 96 Remove(GetCalleeSavedFP(RegisterSizeInBits()));
97 } else { 97 } else {
98 ASSERT(type() == CPURegister::kNoRegister); 98 DCHECK(type() == CPURegister::kNoRegister);
99 ASSERT(IsEmpty()); 99 DCHECK(IsEmpty());
100 // The list must already be empty, so do nothing. 100 // The list must already be empty, so do nothing.
101 } 101 }
102 } 102 }
103 103
104 104
105 CPURegList CPURegList::GetCalleeSaved(unsigned size) { 105 CPURegList CPURegList::GetCalleeSaved(unsigned size) {
106 return CPURegList(CPURegister::kRegister, size, 19, 29); 106 return CPURegList(CPURegister::kRegister, size, 19, 29);
107 } 107 }
108 108
109 109
(...skipping 118 matching lines...)
228 const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8}; 228 const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
229 229
230 for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) { 230 for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
231 if (regs[i].IsRegister()) { 231 if (regs[i].IsRegister()) {
232 number_of_valid_regs++; 232 number_of_valid_regs++;
233 unique_regs |= regs[i].Bit(); 233 unique_regs |= regs[i].Bit();
234 } else if (regs[i].IsFPRegister()) { 234 } else if (regs[i].IsFPRegister()) {
235 number_of_valid_fpregs++; 235 number_of_valid_fpregs++;
236 unique_fpregs |= regs[i].Bit(); 236 unique_fpregs |= regs[i].Bit();
237 } else { 237 } else {
238 ASSERT(!regs[i].IsValid()); 238 DCHECK(!regs[i].IsValid());
239 } 239 }
240 } 240 }
241 241
242 int number_of_unique_regs = 242 int number_of_unique_regs =
243 CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte); 243 CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
244 int number_of_unique_fpregs = 244 int number_of_unique_fpregs =
245 CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte); 245 CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);
246 246
247 ASSERT(number_of_valid_regs >= number_of_unique_regs); 247 DCHECK(number_of_valid_regs >= number_of_unique_regs);
248 ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs); 248 DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs);
249 249
250 return (number_of_valid_regs != number_of_unique_regs) || 250 return (number_of_valid_regs != number_of_unique_regs) ||
251 (number_of_valid_fpregs != number_of_unique_fpregs); 251 (number_of_valid_fpregs != number_of_unique_fpregs);
252 } 252 }
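In effect, AreAliased() reports true exactly when some valid register appears more than once: a repeated register sets the same bit in unique_regs but is counted twice among the valid registers. For example (trailing parameters are assumed to default to invalid registers in the header):

    AreAliased(x0, x1, x0);  // true:  3 valid registers, but only 2 unique bits
    AreAliased(x0, x1, x2);  // false: 3 valid registers, 3 unique bits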
253 253
254 254
255 bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2, 255 bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
256 const CPURegister& reg3, const CPURegister& reg4, 256 const CPURegister& reg3, const CPURegister& reg4,
257 const CPURegister& reg5, const CPURegister& reg6, 257 const CPURegister& reg5, const CPURegister& reg6,
258 const CPURegister& reg7, const CPURegister& reg8) { 258 const CPURegister& reg7, const CPURegister& reg8) {
259 ASSERT(reg1.IsValid()); 259 DCHECK(reg1.IsValid());
260 bool match = true; 260 bool match = true;
261 match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1); 261 match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
262 match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1); 262 match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
263 match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1); 263 match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
264 match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1); 264 match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
265 match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1); 265 match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
266 match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1); 266 match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
267 match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1); 267 match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
268 return match; 268 return match;
269 } 269 }
270 270
271 271
272 void Immediate::InitializeHandle(Handle<Object> handle) { 272 void Immediate::InitializeHandle(Handle<Object> handle) {
273 AllowDeferredHandleDereference using_raw_address; 273 AllowDeferredHandleDereference using_raw_address;
274 274
275 // Verify all Objects referred to by code are NOT in new space. 275 // Verify all Objects referred to by code are NOT in new space.
276 Object* obj = *handle; 276 Object* obj = *handle;
277 if (obj->IsHeapObject()) { 277 if (obj->IsHeapObject()) {
278 ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj)); 278 DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
279 value_ = reinterpret_cast<intptr_t>(handle.location()); 279 value_ = reinterpret_cast<intptr_t>(handle.location());
280 rmode_ = RelocInfo::EMBEDDED_OBJECT; 280 rmode_ = RelocInfo::EMBEDDED_OBJECT;
281 } else { 281 } else {
282 STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t)); 282 STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
283 value_ = reinterpret_cast<intptr_t>(obj); 283 value_ = reinterpret_cast<intptr_t>(obj);
284 rmode_ = RelocInfo::NONE64; 284 rmode_ = RelocInfo::NONE64;
285 } 285 }
286 } 286 }
287 287
288 288
289 bool Operand::NeedsRelocation(const Assembler* assembler) const { 289 bool Operand::NeedsRelocation(const Assembler* assembler) const {
290 RelocInfo::Mode rmode = immediate_.rmode(); 290 RelocInfo::Mode rmode = immediate_.rmode();
291 291
292 if (rmode == RelocInfo::EXTERNAL_REFERENCE) { 292 if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
293 return assembler->serializer_enabled(); 293 return assembler->serializer_enabled();
294 } 294 }
295 295
296 return !RelocInfo::IsNone(rmode); 296 return !RelocInfo::IsNone(rmode);
297 } 297 }
298 298
299 299
300 // Constant Pool. 300 // Constant Pool.
301 void ConstPool::RecordEntry(intptr_t data, 301 void ConstPool::RecordEntry(intptr_t data,
302 RelocInfo::Mode mode) { 302 RelocInfo::Mode mode) {
303 ASSERT(mode != RelocInfo::COMMENT && 303 DCHECK(mode != RelocInfo::COMMENT &&
304 mode != RelocInfo::POSITION && 304 mode != RelocInfo::POSITION &&
305 mode != RelocInfo::STATEMENT_POSITION && 305 mode != RelocInfo::STATEMENT_POSITION &&
306 mode != RelocInfo::CONST_POOL && 306 mode != RelocInfo::CONST_POOL &&
307 mode != RelocInfo::VENEER_POOL && 307 mode != RelocInfo::VENEER_POOL &&
308 mode != RelocInfo::CODE_AGE_SEQUENCE); 308 mode != RelocInfo::CODE_AGE_SEQUENCE);
309 309
310 uint64_t raw_data = static_cast<uint64_t>(data); 310 uint64_t raw_data = static_cast<uint64_t>(data);
311 int offset = assm_->pc_offset(); 311 int offset = assm_->pc_offset();
312 if (IsEmpty()) { 312 if (IsEmpty()) {
313 first_use_ = offset; 313 first_use_ = offset;
(...skipping 10 matching lines...)
324 } 324 }
325 325
326 if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) { 326 if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
327 // Request constant pool emission after the next instruction. 327 // Request constant pool emission after the next instruction.
328 assm_->SetNextConstPoolCheckIn(1); 328 assm_->SetNextConstPoolCheckIn(1);
329 } 329 }
330 } 330 }
331 331
332 332
333 int ConstPool::DistanceToFirstUse() { 333 int ConstPool::DistanceToFirstUse() {
334 ASSERT(first_use_ >= 0); 334 DCHECK(first_use_ >= 0);
335 return assm_->pc_offset() - first_use_; 335 return assm_->pc_offset() - first_use_;
336 } 336 }
337 337
338 338
339 int ConstPool::MaxPcOffset() { 339 int ConstPool::MaxPcOffset() {
340 // There are no pending entries in the pool so we can never get out of 340 // There are no pending entries in the pool so we can never get out of
341 // range. 341 // range.
342 if (IsEmpty()) return kMaxInt; 342 if (IsEmpty()) return kMaxInt;
343 343
344 // Entries are not necessarily emitted in the order they are added so in the 344 // Entries are not necessarily emitted in the order they are added so in the
(...skipping 27 matching lines...)
372 prologue_size += 2 * kInstructionSize; 372 prologue_size += 2 * kInstructionSize;
373 prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ? 373 prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ?
374 0 : kInstructionSize; 374 0 : kInstructionSize;
375 375
376 // All entries are 64-bit for now. 376 // All entries are 64-bit for now.
377 return prologue_size + EntryCount() * kPointerSize; 377 return prologue_size + EntryCount() * kPointerSize;
378 } 378 }
379 379
380 380
381 void ConstPool::Emit(bool require_jump) { 381 void ConstPool::Emit(bool require_jump) {
382 ASSERT(!assm_->is_const_pool_blocked()); 382 DCHECK(!assm_->is_const_pool_blocked());
383 // Prevent recursive pool emission and protect from veneer pools. 383 // Prevent recursive pool emission and protect from veneer pools.
384 Assembler::BlockPoolsScope block_pools(assm_); 384 Assembler::BlockPoolsScope block_pools(assm_);
385 385
386 int size = SizeIfEmittedAtCurrentPc(require_jump); 386 int size = SizeIfEmittedAtCurrentPc(require_jump);
387 Label size_check; 387 Label size_check;
388 assm_->bind(&size_check); 388 assm_->bind(&size_check);
389 389
390 assm_->RecordConstPool(size); 390 assm_->RecordConstPool(size);
391 // Emit the constant pool. It is preceded by an optional branch if 391 // Emit the constant pool. It is preceded by an optional branch if
392 // require_jump and a header which will: 392 // require_jump and a header which will:
(...skipping 28 matching lines...)
421 // Emit constant pool entries. 421 // Emit constant pool entries.
422 // TODO(all): currently each relocated constant is 64 bits, consider adding 422 // TODO(all): currently each relocated constant is 64 bits, consider adding
423 // support for 32-bit entries. 423 // support for 32-bit entries.
424 EmitEntries(); 424 EmitEntries();
425 assm_->RecordComment("]"); 425 assm_->RecordComment("]");
426 426
427 if (after_pool.is_linked()) { 427 if (after_pool.is_linked()) {
428 assm_->bind(&after_pool); 428 assm_->bind(&after_pool);
429 } 429 }
430 430
431 ASSERT(assm_->SizeOfCodeGeneratedSince(&size_check) == 431 DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
432 static_cast<unsigned>(size)); 432 static_cast<unsigned>(size));
433 } 433 }
434 434
435 435
436 void ConstPool::Clear() { 436 void ConstPool::Clear() {
437 shared_entries_.clear(); 437 shared_entries_.clear();
438 shared_entries_count = 0; 438 shared_entries_count = 0;
439 unique_entries_.clear(); 439 unique_entries_.clear();
440 first_use_ = -1; 440 first_use_ = -1;
441 } 441 }
442 442
443 443
444 bool ConstPool::CanBeShared(RelocInfo::Mode mode) { 444 bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
445 // Constant pool currently does not support 32-bit entries. 445 // Constant pool currently does not support 32-bit entries.
446 ASSERT(mode != RelocInfo::NONE32); 446 DCHECK(mode != RelocInfo::NONE32);
447 447
448 return RelocInfo::IsNone(mode) || 448 return RelocInfo::IsNone(mode) ||
449 (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL)); 449 (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL));
450 } 450 }
451 451
452 452
453 void ConstPool::EmitMarker() { 453 void ConstPool::EmitMarker() {
454 // A constant pool size is expressed as a number of 32-bit words. 454 // A constant pool size is expressed as a number of 32-bit words.
455 // Currently all entries are 64-bit. 455 // Currently all entries are 64-bit.
456 // + 1 is for the crash guard. 456 // + 1 is for the crash guard.
457 // + 0/1 for alignment. 457 // + 0/1 for alignment.
458 int word_count = EntryCount() * 2 + 1 + 458 int word_count = EntryCount() * 2 + 1 +
459 (IsAligned(assm_->pc_offset(), 8) ? 0 : 1); 459 (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
460 assm_->Emit(LDR_x_lit | 460 assm_->Emit(LDR_x_lit |
461 Assembler::ImmLLiteral(word_count) | 461 Assembler::ImmLLiteral(word_count) |
462 Assembler::Rt(xzr)); 462 Assembler::Rt(xzr));
463 } 463 }
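A worked instance of the marker arithmetic above: with 3 pending entries and a pc that is 4-byte but not 8-byte aligned, word_count = 3 * 2 + 1 + 1 = 8, so the ldr-literal marker advertises eight 32-bit words of pool body: the crash-guard word, one alignment word, and six words holding the three 64-bit entries.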
464 464
465 465
466 MemOperand::PairResult MemOperand::AreConsistentForPair( 466 MemOperand::PairResult MemOperand::AreConsistentForPair(
467 const MemOperand& operandA, 467 const MemOperand& operandA,
468 const MemOperand& operandB, 468 const MemOperand& operandB,
469 int access_size_log2) { 469 int access_size_log2) {
470 ASSERT(access_size_log2 >= 0); 470 DCHECK(access_size_log2 >= 0);
471 ASSERT(access_size_log2 <= 3); 471 DCHECK(access_size_log2 <= 3);
472 // Step one: check that they share the same base, that the mode is Offset 472 // Step one: check that they share the same base, that the mode is Offset
473 // and that the offset is a multiple of access size. 473 // and that the offset is a multiple of access size.
474 if (!operandA.base().Is(operandB.base()) || 474 if (!operandA.base().Is(operandB.base()) ||
475 (operandA.addrmode() != Offset) || 475 (operandA.addrmode() != Offset) ||
476 (operandB.addrmode() != Offset) || 476 (operandB.addrmode() != Offset) ||
477 ((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) { 477 ((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) {
478 return kNotPair; 478 return kNotPair;
479 } 479 }
480 // Step two: check that the offsets are contiguous and that the range 480 // Step two: check that the offsets are contiguous and that the range
481 // is OK for ldp/stp. 481 // is OK for ldp/stp.
482 if ((operandB.offset() == operandA.offset() + (1 << access_size_log2)) && 482 if ((operandB.offset() == operandA.offset() + (1 << access_size_log2)) &&
483 is_int7(operandA.offset() >> access_size_log2)) { 483 is_int7(operandA.offset() >> access_size_log2)) {
484 return kPairAB; 484 return kPairAB;
485 } 485 }
486 if ((operandA.offset() == operandB.offset() + (1 << access_size_log2)) && 486 if ((operandA.offset() == operandB.offset() + (1 << access_size_log2)) &&
487 is_int7(operandB.offset() >> access_size_log2)) { 487 is_int7(operandB.offset() >> access_size_log2)) {
488 return kPairBA; 488 return kPairBA;
489 } 489 }
490 return kNotPair; 490 return kNotPair;
491 } 491 }
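A sketch of the pair check in use, with access_size_log2 == 3 (64-bit accesses) and the plain base-plus-offset MemOperand constructor assumed:

    MemOperand a(x0, 8);
    MemOperand b(x0, 16);
    MemOperand c(x0, 12);
    MemOperand::AreConsistentForPair(a, b, 3);  // kPairAB: same base, contiguous, aligned
    MemOperand::AreConsistentForPair(b, a, 3);  // kPairBA: the same two slots, swapped
    MemOperand::AreConsistentForPair(a, c, 3);  // kNotPair: 8 and 12 are not one 8-byte slot apart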
492 492
493 493
494 void ConstPool::EmitGuard() { 494 void ConstPool::EmitGuard() {
495 #ifdef DEBUG 495 #ifdef DEBUG
496 Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc()); 496 Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
497 ASSERT(instr->preceding()->IsLdrLiteralX() && 497 DCHECK(instr->preceding()->IsLdrLiteralX() &&
498 instr->preceding()->Rt() == xzr.code()); 498 instr->preceding()->Rt() == xzr.code());
499 #endif 499 #endif
500 assm_->EmitPoolGuard(); 500 assm_->EmitPoolGuard();
501 } 501 }
502 502
503 503
504 void ConstPool::EmitEntries() { 504 void ConstPool::EmitEntries() {
505 ASSERT(IsAligned(assm_->pc_offset(), 8)); 505 DCHECK(IsAligned(assm_->pc_offset(), 8));
506 506
507 typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator; 507 typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator;
508 SharedEntriesIterator value_it; 508 SharedEntriesIterator value_it;
509 // Iterate through the keys (constant pool values). 509 // Iterate through the keys (constant pool values).
510 for (value_it = shared_entries_.begin(); 510 for (value_it = shared_entries_.begin();
511 value_it != shared_entries_.end(); 511 value_it != shared_entries_.end();
512 value_it = shared_entries_.upper_bound(value_it->first)) { 512 value_it = shared_entries_.upper_bound(value_it->first)) {
513 std::pair<SharedEntriesIterator, SharedEntriesIterator> range; 513 std::pair<SharedEntriesIterator, SharedEntriesIterator> range;
514 uint64_t data = value_it->first; 514 uint64_t data = value_it->first;
515 range = shared_entries_.equal_range(data); 515 range = shared_entries_.equal_range(data);
516 SharedEntriesIterator offset_it; 516 SharedEntriesIterator offset_it;
517 // Iterate through the offsets of a given key. 517 // Iterate through the offsets of a given key.
518 for (offset_it = range.first; offset_it != range.second; offset_it++) { 518 for (offset_it = range.first; offset_it != range.second; offset_it++) {
519 Instruction* instr = assm_->InstructionAt(offset_it->second); 519 Instruction* instr = assm_->InstructionAt(offset_it->second);
520 520
521 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. 521 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
522 ASSERT(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); 522 DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
523 instr->SetImmPCOffsetTarget(assm_->pc()); 523 instr->SetImmPCOffsetTarget(assm_->pc());
524 } 524 }
525 assm_->dc64(data); 525 assm_->dc64(data);
526 } 526 }
527 shared_entries_.clear(); 527 shared_entries_.clear();
528 shared_entries_count = 0; 528 shared_entries_count = 0;
529 529
530 // Emit unique entries. 530 // Emit unique entries.
531 std::vector<std::pair<uint64_t, int> >::const_iterator unique_it; 531 std::vector<std::pair<uint64_t, int> >::const_iterator unique_it;
532 for (unique_it = unique_entries_.begin(); 532 for (unique_it = unique_entries_.begin();
533 unique_it != unique_entries_.end(); 533 unique_it != unique_entries_.end();
534 unique_it++) { 534 unique_it++) {
535 Instruction* instr = assm_->InstructionAt(unique_it->second); 535 Instruction* instr = assm_->InstructionAt(unique_it->second);
536 536
537 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0. 537 // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
538 ASSERT(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0); 538 DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
539 instr->SetImmPCOffsetTarget(assm_->pc()); 539 instr->SetImmPCOffsetTarget(assm_->pc());
540 assm_->dc64(unique_it->first); 540 assm_->dc64(unique_it->first);
541 } 541 }
542 unique_entries_.clear(); 542 unique_entries_.clear();
543 first_use_ = -1; 543 first_use_ = -1;
544 } 544 }
545 545
546 546
547 // Assembler 547 // Assembler
548 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size) 548 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
549 : AssemblerBase(isolate, buffer, buffer_size), 549 : AssemblerBase(isolate, buffer, buffer_size),
550 constpool_(this), 550 constpool_(this),
551 recorded_ast_id_(TypeFeedbackId::None()), 551 recorded_ast_id_(TypeFeedbackId::None()),
552 unresolved_branches_(), 552 unresolved_branches_(),
553 positions_recorder_(this) { 553 positions_recorder_(this) {
554 const_pool_blocked_nesting_ = 0; 554 const_pool_blocked_nesting_ = 0;
555 veneer_pool_blocked_nesting_ = 0; 555 veneer_pool_blocked_nesting_ = 0;
556 Reset(); 556 Reset();
557 } 557 }
558 558
559 559
560 Assembler::~Assembler() { 560 Assembler::~Assembler() {
561 ASSERT(constpool_.IsEmpty()); 561 DCHECK(constpool_.IsEmpty());
562 ASSERT(const_pool_blocked_nesting_ == 0); 562 DCHECK(const_pool_blocked_nesting_ == 0);
563 ASSERT(veneer_pool_blocked_nesting_ == 0); 563 DCHECK(veneer_pool_blocked_nesting_ == 0);
564 } 564 }
565 565
566 566
567 void Assembler::Reset() { 567 void Assembler::Reset() {
568 #ifdef DEBUG 568 #ifdef DEBUG
569 ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_)); 569 DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
570 ASSERT(const_pool_blocked_nesting_ == 0); 570 DCHECK(const_pool_blocked_nesting_ == 0);
571 ASSERT(veneer_pool_blocked_nesting_ == 0); 571 DCHECK(veneer_pool_blocked_nesting_ == 0);
572 ASSERT(unresolved_branches_.empty()); 572 DCHECK(unresolved_branches_.empty());
573 memset(buffer_, 0, pc_ - buffer_); 573 memset(buffer_, 0, pc_ - buffer_);
574 #endif 574 #endif
575 pc_ = buffer_; 575 pc_ = buffer_;
576 reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_), 576 reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
577 reinterpret_cast<byte*>(pc_)); 577 reinterpret_cast<byte*>(pc_));
578 constpool_.Clear(); 578 constpool_.Clear();
579 next_constant_pool_check_ = 0; 579 next_constant_pool_check_ = 0;
580 next_veneer_pool_check_ = kMaxInt; 580 next_veneer_pool_check_ = kMaxInt;
581 no_const_pool_before_ = 0; 581 no_const_pool_before_ = 0;
582 ClearRecordedAstId(); 582 ClearRecordedAstId();
583 } 583 }
584 584
585 585
586 void Assembler::GetCode(CodeDesc* desc) { 586 void Assembler::GetCode(CodeDesc* desc) {
587 // Emit constant pool if necessary. 587 // Emit constant pool if necessary.
588 CheckConstPool(true, false); 588 CheckConstPool(true, false);
589 ASSERT(constpool_.IsEmpty()); 589 DCHECK(constpool_.IsEmpty());
590 590
591 // Set up code descriptor. 591 // Set up code descriptor.
592 if (desc) { 592 if (desc) {
593 desc->buffer = reinterpret_cast<byte*>(buffer_); 593 desc->buffer = reinterpret_cast<byte*>(buffer_);
594 desc->buffer_size = buffer_size_; 594 desc->buffer_size = buffer_size_;
595 desc->instr_size = pc_offset(); 595 desc->instr_size = pc_offset();
596 desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) - 596 desc->reloc_size = (reinterpret_cast<byte*>(buffer_) + buffer_size_) -
597 reloc_info_writer.pos(); 597 reloc_info_writer.pos();
598 desc->origin = this; 598 desc->origin = this;
599 } 599 }
600 } 600 }
601 601
602 602
603 void Assembler::Align(int m) { 603 void Assembler::Align(int m) {
604 ASSERT(m >= 4 && IsPowerOf2(m)); 604 DCHECK(m >= 4 && IsPowerOf2(m));
605 while ((pc_offset() & (m - 1)) != 0) { 605 while ((pc_offset() & (m - 1)) != 0) {
606 nop(); 606 nop();
607 } 607 }
608 } 608 }
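For example, with pc_offset() == 12, Align(8) emits a single nop (instructions are 4 bytes each) to reach offset 16, while Align(4) would emit nothing.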
609 609
610 610
611 void Assembler::CheckLabelLinkChain(Label const * label) { 611 void Assembler::CheckLabelLinkChain(Label const * label) {
612 #ifdef DEBUG 612 #ifdef DEBUG
613 if (label->is_linked()) { 613 if (label->is_linked()) {
614 int linkoffset = label->pos(); 614 int linkoffset = label->pos();
615 bool end_of_chain = false; 615 bool end_of_chain = false;
616 while (!end_of_chain) { 616 while (!end_of_chain) {
617 Instruction * link = InstructionAt(linkoffset); 617 Instruction * link = InstructionAt(linkoffset);
618 int linkpcoffset = link->ImmPCOffset(); 618 int linkpcoffset = link->ImmPCOffset();
619 int prevlinkoffset = linkoffset + linkpcoffset; 619 int prevlinkoffset = linkoffset + linkpcoffset;
620 620
621 end_of_chain = (linkoffset == prevlinkoffset); 621 end_of_chain = (linkoffset == prevlinkoffset);
622 linkoffset = linkoffset + linkpcoffset; 622 linkoffset = linkoffset + linkpcoffset;
623 } 623 }
624 } 624 }
625 #endif 625 #endif
626 } 626 }
627 627
628 628
629 void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch, 629 void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
630 Label* label, 630 Label* label,
631 Instruction* label_veneer) { 631 Instruction* label_veneer) {
632 ASSERT(label->is_linked()); 632 DCHECK(label->is_linked());
633 633
634 CheckLabelLinkChain(label); 634 CheckLabelLinkChain(label);
635 635
636 Instruction* link = InstructionAt(label->pos()); 636 Instruction* link = InstructionAt(label->pos());
637 Instruction* prev_link = link; 637 Instruction* prev_link = link;
638 Instruction* next_link; 638 Instruction* next_link;
639 bool end_of_chain = false; 639 bool end_of_chain = false;
640 640
641 while (link != branch && !end_of_chain) { 641 while (link != branch && !end_of_chain) {
642 next_link = link->ImmPCOffsetTarget(); 642 next_link = link->ImmPCOffsetTarget();
643 end_of_chain = (link == next_link); 643 end_of_chain = (link == next_link);
644 prev_link = link; 644 prev_link = link;
645 link = next_link; 645 link = next_link;
646 } 646 }
647 647
648 ASSERT(branch == link); 648 DCHECK(branch == link);
649 next_link = branch->ImmPCOffsetTarget(); 649 next_link = branch->ImmPCOffsetTarget();
650 650
651 if (branch == prev_link) { 651 if (branch == prev_link) {
652 // The branch is the first instruction in the chain. 652 // The branch is the first instruction in the chain.
653 if (branch == next_link) { 653 if (branch == next_link) {
654 // It is also the last instruction in the chain, so it is the only branch 654 // It is also the last instruction in the chain, so it is the only branch
655 // currently referring to this label. 655 // currently referring to this label.
656 label->Unuse(); 656 label->Unuse();
657 } else { 657 } else {
658 label->link_to(reinterpret_cast<byte*>(next_link) - buffer_); 658 label->link_to(reinterpret_cast<byte*>(next_link) - buffer_);
(...skipping 45 matching lines...)
704 704
705 CheckLabelLinkChain(label); 705 CheckLabelLinkChain(label);
706 } 706 }
707 707
708 708
709 void Assembler::bind(Label* label) { 709 void Assembler::bind(Label* label) {
710 // Bind label to the address at pc_. All instructions (most likely branches) 710 // Bind label to the address at pc_. All instructions (most likely branches)
711 // that are linked to this label will be updated to point to the newly-bound 711 // that are linked to this label will be updated to point to the newly-bound
712 // label. 712 // label.
713 713
714 ASSERT(!label->is_near_linked()); 714 DCHECK(!label->is_near_linked());
715 ASSERT(!label->is_bound()); 715 DCHECK(!label->is_bound());
716 716
717 DeleteUnresolvedBranchInfoForLabel(label); 717 DeleteUnresolvedBranchInfoForLabel(label);
718 718
719 // If the label is linked, the link chain looks something like this: 719 // If the label is linked, the link chain looks something like this:
720 // 720 //
721 // |--I----I-------I-------L 721 // |--I----I-------I-------L
722 // |---------------------->| pc_offset 722 // |---------------------->| pc_offset
723 // |-------------->| linkoffset = label->pos() 723 // |-------------->| linkoffset = label->pos()
724 // |<------| link->ImmPCOffset() 724 // |<------| link->ImmPCOffset()
725 // |------>| prevlinkoffset = linkoffset + link->ImmPCOffset() 725 // |------>| prevlinkoffset = linkoffset + link->ImmPCOffset()
726 // 726 //
727 // On each iteration, the last link is updated and then removed from the 727 // On each iteration, the last link is updated and then removed from the
728 // chain until only one remains. At that point, the label is bound. 728 // chain until only one remains. At that point, the label is bound.
729 // 729 //
730 // If the label is not linked, no preparation is required before binding. 730 // If the label is not linked, no preparation is required before binding.
731 while (label->is_linked()) { 731 while (label->is_linked()) {
732 int linkoffset = label->pos(); 732 int linkoffset = label->pos();
733 Instruction* link = InstructionAt(linkoffset); 733 Instruction* link = InstructionAt(linkoffset);
734 int prevlinkoffset = linkoffset + link->ImmPCOffset(); 734 int prevlinkoffset = linkoffset + link->ImmPCOffset();
735 735
736 CheckLabelLinkChain(label); 736 CheckLabelLinkChain(label);
737 737
738 ASSERT(linkoffset >= 0); 738 DCHECK(linkoffset >= 0);
739 ASSERT(linkoffset < pc_offset()); 739 DCHECK(linkoffset < pc_offset());
740 ASSERT((linkoffset > prevlinkoffset) || 740 DCHECK((linkoffset > prevlinkoffset) ||
741 (linkoffset - prevlinkoffset == kStartOfLabelLinkChain)); 741 (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
742 ASSERT(prevlinkoffset >= 0); 742 DCHECK(prevlinkoffset >= 0);
743 743
744 // Update the link to point to the label. 744 // Update the link to point to the label.
745 link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_)); 745 link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
746 746
747 // Link the label to the previous link in the chain. 747 // Link the label to the previous link in the chain.
748 if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) { 748 if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
749 // We hit kStartOfLabelLinkChain, so the chain is fully processed. 749 // We hit kStartOfLabelLinkChain, so the chain is fully processed.
750 label->Unuse(); 750 label->Unuse();
751 } else { 751 } else {
752 // Update the label for the next iteration. 752 // Update the label for the next iteration.
753 label->link_to(prevlinkoffset); 753 label->link_to(prevlinkoffset);
754 } 754 }
755 } 755 }
756 label->bind_to(pc_offset()); 756 label->bind_to(pc_offset());
757 757
758 ASSERT(label->is_bound()); 758 DCHECK(label->is_bound());
759 ASSERT(!label->is_linked()); 759 DCHECK(!label->is_linked());
760 } 760 }
761 761
762 762
763 int Assembler::LinkAndGetByteOffsetTo(Label* label) { 763 int Assembler::LinkAndGetByteOffsetTo(Label* label) {
764 ASSERT(sizeof(*pc_) == 1); 764 DCHECK(sizeof(*pc_) == 1);
765 CheckLabelLinkChain(label); 765 CheckLabelLinkChain(label);
766 766
767 int offset; 767 int offset;
768 if (label->is_bound()) { 768 if (label->is_bound()) {
769 // The label is bound, so it does not need to be updated. Referring 769 // The label is bound, so it does not need to be updated. Referring
770 // instructions must link directly to the label as they will not be 770 // instructions must link directly to the label as they will not be
771 // updated. 771 // updated.
772 // 772 //
773 // In this case, label->pos() returns the offset of the label from the 773 // In this case, label->pos() returns the offset of the label from the
774 // start of the buffer. 774 // start of the buffer.
775 // 775 //
776 // Note that offset can be zero for self-referential instructions. (This 776 // Note that offset can be zero for self-referential instructions. (This
777 // could be useful for ADR, for example.) 777 // could be useful for ADR, for example.)
778 offset = label->pos() - pc_offset(); 778 offset = label->pos() - pc_offset();
779 ASSERT(offset <= 0); 779 DCHECK(offset <= 0);
780 } else { 780 } else {
781 if (label->is_linked()) { 781 if (label->is_linked()) {
782 // The label is linked, so the referring instruction should be added onto 782 // The label is linked, so the referring instruction should be added onto
783 // the end of the label's link chain. 783 // the end of the label's link chain.
784 // 784 //
785 // In this case, label->pos() returns the offset of the last linked 785 // In this case, label->pos() returns the offset of the last linked
786 // instruction from the start of the buffer. 786 // instruction from the start of the buffer.
787 offset = label->pos() - pc_offset(); 787 offset = label->pos() - pc_offset();
788 ASSERT(offset != kStartOfLabelLinkChain); 788 DCHECK(offset != kStartOfLabelLinkChain);
789 // Note that the offset here needs to be PC-relative only so that the 789 // Note that the offset here needs to be PC-relative only so that the
790 // first instruction in a buffer can link to an unbound label. Otherwise, 790 // first instruction in a buffer can link to an unbound label. Otherwise,
791 // the offset would be 0 for this case, and 0 is reserved for 791 // the offset would be 0 for this case, and 0 is reserved for
792 // kStartOfLabelLinkChain. 792 // kStartOfLabelLinkChain.
793 } else { 793 } else {
794 // The label is unused, so it now becomes linked and the referring 794 // The label is unused, so it now becomes linked and the referring
795 // instruction is at the start of the new link chain. 795 // instruction is at the start of the new link chain.
796 offset = kStartOfLabelLinkChain; 796 offset = kStartOfLabelLinkChain;
797 } 797 }
798 // The instruction at pc is now the last link in the label's chain. 798 // The instruction at pc is now the last link in the label's chain.
799 label->link_to(pc_offset()); 799 label->link_to(pc_offset());
800 } 800 }
801 801
802 return offset; 802 return offset;
803 } 803 }
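The chain that LinkAndGetByteOffsetTo() and bind() maintain is threaded through the branch instructions themselves, so no side table is needed. A worked trace, assuming three branches to the same label at offsets 0, 8 and 20:

    b label    ; at 0:  stores 0 (kStartOfLabelLinkChain: the first link points at itself)
    b label    ; at 8:  stores -8   (0 - 8: the previous link is at offset 0)
    b label    ; at 20: stores -12  (8 - 20: the previous link is at offset 8)

label->pos() is then 20, and bind() walks 20 -> 8 -> 0, retargeting each branch to the newly bound pc and stopping at the self-referential first link.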
804 804
805 805
806 void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) { 806 void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
807 ASSERT(label->is_linked()); 807 DCHECK(label->is_linked());
808 CheckLabelLinkChain(label); 808 CheckLabelLinkChain(label);
809 809
810 int link_offset = label->pos(); 810 int link_offset = label->pos();
811 int link_pcoffset; 811 int link_pcoffset;
812 bool end_of_chain = false; 812 bool end_of_chain = false;
813 813
814 while (!end_of_chain) { 814 while (!end_of_chain) {
815 Instruction * link = InstructionAt(link_offset); 815 Instruction * link = InstructionAt(link_offset);
816 link_pcoffset = link->ImmPCOffset(); 816 link_pcoffset = link->ImmPCOffset();
817 817
(...skipping 14 matching lines...)
832 } 832 }
833 833
834 end_of_chain = (link_pcoffset == 0); 834 end_of_chain = (link_pcoffset == 0);
835 link_offset = link_offset + link_pcoffset; 835 link_offset = link_offset + link_pcoffset;
836 } 836 }
837 } 837 }
838 838
839 839
840 void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) { 840 void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
841 if (unresolved_branches_.empty()) { 841 if (unresolved_branches_.empty()) {
842 ASSERT(next_veneer_pool_check_ == kMaxInt); 842 DCHECK(next_veneer_pool_check_ == kMaxInt);
843 return; 843 return;
844 } 844 }
845 845
846 if (label->is_linked()) { 846 if (label->is_linked()) {
847 // Branches to this label will be resolved when the label is bound, normally 847 // Branches to this label will be resolved when the label is bound, normally
848 // just after all the associated info has been deleted. 848 // just after all the associated info has been deleted.
849 DeleteUnresolvedBranchInfoForLabelTraverse(label); 849 DeleteUnresolvedBranchInfoForLabelTraverse(label);
850 } 850 }
851 if (unresolved_branches_.empty()) { 851 if (unresolved_branches_.empty()) {
852 next_veneer_pool_check_ = kMaxInt; 852 next_veneer_pool_check_ = kMaxInt;
853 } else { 853 } else {
854 next_veneer_pool_check_ = 854 next_veneer_pool_check_ =
855 unresolved_branches_first_limit() - kVeneerDistanceCheckMargin; 855 unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
856 } 856 }
857 } 857 }
858 858
859 859
860 void Assembler::StartBlockConstPool() { 860 void Assembler::StartBlockConstPool() {
861 if (const_pool_blocked_nesting_++ == 0) { 861 if (const_pool_blocked_nesting_++ == 0) {
862 // Prevent constant pool checks from happening by setting the next check to 862 // Prevent constant pool checks from happening by setting the next check to
863 // the biggest possible offset. 863 // the biggest possible offset.
864 next_constant_pool_check_ = kMaxInt; 864 next_constant_pool_check_ = kMaxInt;
865 } 865 }
866 } 866 }
867 867
868 868
869 void Assembler::EndBlockConstPool() { 869 void Assembler::EndBlockConstPool() {
870 if (--const_pool_blocked_nesting_ == 0) { 870 if (--const_pool_blocked_nesting_ == 0) {
871 // Check the constant pool hasn't been blocked for too long. 871 // Check the constant pool hasn't been blocked for too long.
872 ASSERT(pc_offset() < constpool_.MaxPcOffset()); 872 DCHECK(pc_offset() < constpool_.MaxPcOffset());
873 // Two cases: 873 // Two cases:
874 // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is 874 // * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
875 // still blocked 875 // still blocked
876 // * no_const_pool_before_ < next_constant_pool_check_ and the next emit 876 // * no_const_pool_before_ < next_constant_pool_check_ and the next emit
877 // will trigger a check. 877 // will trigger a check.
878 next_constant_pool_check_ = no_const_pool_before_; 878 next_constant_pool_check_ = no_const_pool_before_;
879 } 879 }
880 } 880 }
881 881
882 882
883 bool Assembler::is_const_pool_blocked() const { 883 bool Assembler::is_const_pool_blocked() const {
884 return (const_pool_blocked_nesting_ > 0) || 884 return (const_pool_blocked_nesting_ > 0) ||
885 (pc_offset() < no_const_pool_before_); 885 (pc_offset() < no_const_pool_before_);
886 } 886 }
887 887
888 888
889 bool Assembler::IsConstantPoolAt(Instruction* instr) { 889 bool Assembler::IsConstantPoolAt(Instruction* instr) {
890 // The constant pool marker is made of two instructions. These instructions 890 // The constant pool marker is made of two instructions. These instructions
891 // will never be emitted by the JIT, so checking for the first one is enough: 891 // will never be emitted by the JIT, so checking for the first one is enough:
892 // 0: ldr xzr, #<size of pool> 892 // 0: ldr xzr, #<size of pool>
893 bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code()); 893 bool result = instr->IsLdrLiteralX() && (instr->Rt() == xzr.code());
894 894
895 // It is still worth asserting the marker is complete. 895 // It is still worth asserting the marker is complete.
896 // 4: blr xzr 896 // 4: blr xzr
897 ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() && 897 DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
898 instr->following()->Rn() == xzr.code())); 898 instr->following()->Rn() == xzr.code()));
899 899
900 return result; 900 return result;
901 } 901 }
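Putting Emit(), EmitMarker() and EmitGuard() together, an emitted pool looks like this in the instruction stream (a sketch; the leading branch appears only when require_jump is set):

    b after_pool        ; optional: skip the pool when execution can reach it
    ldr xzr, #<words>   ; marker tested above: pool size in 32-bit words
    blr xzr             ; guard: faults if execution ever falls into the pool
    <padding word>      ; only when needed to 8-byte-align the entries
    .quad <entry>       ; 64-bit entries; ldr-literal users are patched to point here
    ...
    after_pool: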
902 902
903 903
904 int Assembler::ConstantPoolSizeAt(Instruction* instr) { 904 int Assembler::ConstantPoolSizeAt(Instruction* instr) {
905 #ifdef USE_SIMULATOR 905 #ifdef USE_SIMULATOR
906 // Assembler::debug() embeds constants directly into the instruction stream. 906 // Assembler::debug() embeds constants directly into the instruction stream.
907 // Although this is not a genuine constant pool, treat it like one to avoid 907 // Although this is not a genuine constant pool, treat it like one to avoid
(...skipping 28 matching lines...)
936 936
937 937
938 void Assembler::StartBlockVeneerPool() { 938 void Assembler::StartBlockVeneerPool() {
939 ++veneer_pool_blocked_nesting_; 939 ++veneer_pool_blocked_nesting_;
940 } 940 }
941 941
942 942
943 void Assembler::EndBlockVeneerPool() { 943 void Assembler::EndBlockVeneerPool() {
944 if (--veneer_pool_blocked_nesting_ == 0) { 944 if (--veneer_pool_blocked_nesting_ == 0) {
945 // Check the veneer pool hasn't been blocked for too long. 945 // Check the veneer pool hasn't been blocked for too long.
946 ASSERT(unresolved_branches_.empty() || 946 DCHECK(unresolved_branches_.empty() ||
947 (pc_offset() < unresolved_branches_first_limit())); 947 (pc_offset() < unresolved_branches_first_limit()));
948 } 948 }
949 } 949 }
950 950
951 951
952 void Assembler::br(const Register& xn) { 952 void Assembler::br(const Register& xn) {
953 positions_recorder()->WriteRecordedPositions(); 953 positions_recorder()->WriteRecordedPositions();
954 ASSERT(xn.Is64Bits()); 954 DCHECK(xn.Is64Bits());
955 Emit(BR | Rn(xn)); 955 Emit(BR | Rn(xn));
956 } 956 }
957 957
958 958
959 void Assembler::blr(const Register& xn) { 959 void Assembler::blr(const Register& xn) {
960 positions_recorder()->WriteRecordedPositions(); 960 positions_recorder()->WriteRecordedPositions();
961 ASSERT(xn.Is64Bits()); 961 DCHECK(xn.Is64Bits());
962 // The pattern 'blr xzr' is used as a guard to detect when execution falls 962 // The pattern 'blr xzr' is used as a guard to detect when execution falls
963 // through the constant pool. It should not be emitted. 963 // through the constant pool. It should not be emitted.
964 ASSERT(!xn.Is(xzr)); 964 DCHECK(!xn.Is(xzr));
965 Emit(BLR | Rn(xn)); 965 Emit(BLR | Rn(xn));
966 } 966 }
967 967
968 968
969 void Assembler::ret(const Register& xn) { 969 void Assembler::ret(const Register& xn) {
970 positions_recorder()->WriteRecordedPositions(); 970 positions_recorder()->WriteRecordedPositions();
971 ASSERT(xn.Is64Bits()); 971 DCHECK(xn.Is64Bits());
972 Emit(RET | Rn(xn)); 972 Emit(RET | Rn(xn));
973 } 973 }
974 974
975 975
976 void Assembler::b(int imm26) { 976 void Assembler::b(int imm26) {
977 Emit(B | ImmUncondBranch(imm26)); 977 Emit(B | ImmUncondBranch(imm26));
978 } 978 }
979 979
980 980
981 void Assembler::b(Label* label) { 981 void Assembler::b(Label* label) {
(...skipping 50 matching lines...)
1032 Label* label) { 1032 Label* label) {
1033 positions_recorder()->WriteRecordedPositions(); 1033 positions_recorder()->WriteRecordedPositions();
1034 cbnz(rt, LinkAndGetInstructionOffsetTo(label)); 1034 cbnz(rt, LinkAndGetInstructionOffsetTo(label));
1035 } 1035 }
1036 1036
1037 1037
1038 void Assembler::tbz(const Register& rt, 1038 void Assembler::tbz(const Register& rt,
1039 unsigned bit_pos, 1039 unsigned bit_pos,
1040 int imm14) { 1040 int imm14) {
1041 positions_recorder()->WriteRecordedPositions(); 1041 positions_recorder()->WriteRecordedPositions();
1042 ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits))); 1042 DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
1043 Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); 1043 Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
1044 } 1044 }
1045 1045
1046 1046
1047 void Assembler::tbz(const Register& rt, 1047 void Assembler::tbz(const Register& rt,
1048 unsigned bit_pos, 1048 unsigned bit_pos,
1049 Label* label) { 1049 Label* label) {
1050 positions_recorder()->WriteRecordedPositions(); 1050 positions_recorder()->WriteRecordedPositions();
1051 tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label)); 1051 tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
1052 } 1052 }
1053 1053
1054 1054
1055 void Assembler::tbnz(const Register& rt, 1055 void Assembler::tbnz(const Register& rt,
1056 unsigned bit_pos, 1056 unsigned bit_pos,
1057 int imm14) { 1057 int imm14) {
1058 positions_recorder()->WriteRecordedPositions(); 1058 positions_recorder()->WriteRecordedPositions();
1059 ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits))); 1059 DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
1060 Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt)); 1060 Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
1061 } 1061 }
1062 1062
1063 1063
1064 void Assembler::tbnz(const Register& rt, 1064 void Assembler::tbnz(const Register& rt,
1065 unsigned bit_pos, 1065 unsigned bit_pos,
1066 Label* label) { 1066 Label* label) {
1067 positions_recorder()->WriteRecordedPositions(); 1067 positions_recorder()->WriteRecordedPositions();
1068 tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label)); 1068 tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
1069 } 1069 }
1070 1070
1071 1071
1072 void Assembler::adr(const Register& rd, int imm21) { 1072 void Assembler::adr(const Register& rd, int imm21) {
1073 ASSERT(rd.Is64Bits()); 1073 DCHECK(rd.Is64Bits());
1074 Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd)); 1074 Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
1075 } 1075 }
1076 1076
1077 1077
1078 void Assembler::adr(const Register& rd, Label* label) { 1078 void Assembler::adr(const Register& rd, Label* label) {
1079 adr(rd, LinkAndGetByteOffsetTo(label)); 1079 adr(rd, LinkAndGetByteOffsetTo(label));
1080 } 1080 }
1081 1081
1082 1082
1083 void Assembler::add(const Register& rd, 1083 void Assembler::add(const Register& rd,
(...skipping 148 matching lines...)
1232 void Assembler::eon(const Register& rd, 1232 void Assembler::eon(const Register& rd,
1233 const Register& rn, 1233 const Register& rn,
1234 const Operand& operand) { 1234 const Operand& operand) {
1235 Logical(rd, rn, operand, EON); 1235 Logical(rd, rn, operand, EON);
1236 } 1236 }
1237 1237
1238 1238
1239 void Assembler::lslv(const Register& rd, 1239 void Assembler::lslv(const Register& rd,
1240 const Register& rn, 1240 const Register& rn,
1241 const Register& rm) { 1241 const Register& rm) {
1242 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 1242 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1243 ASSERT(rd.SizeInBits() == rm.SizeInBits()); 1243 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1244 Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd)); 1244 Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
1245 } 1245 }
1246 1246
1247 1247
1248 void Assembler::lsrv(const Register& rd, 1248 void Assembler::lsrv(const Register& rd,
1249 const Register& rn, 1249 const Register& rn,
1250 const Register& rm) { 1250 const Register& rm) {
1251 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 1251 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1252 ASSERT(rd.SizeInBits() == rm.SizeInBits()); 1252 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1253 Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd)); 1253 Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
1254 } 1254 }
1255 1255
1256 1256
1257 void Assembler::asrv(const Register& rd, 1257 void Assembler::asrv(const Register& rd,
1258 const Register& rn, 1258 const Register& rn,
1259 const Register& rm) { 1259 const Register& rm) {
1260 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 1260 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1261 ASSERT(rd.SizeInBits() == rm.SizeInBits()); 1261 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1262 Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd)); 1262 Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
1263 } 1263 }
1264 1264
1265 1265
1266 void Assembler::rorv(const Register& rd, 1266 void Assembler::rorv(const Register& rd,
1267 const Register& rn, 1267 const Register& rn,
1268 const Register& rm) { 1268 const Register& rm) {
1269 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 1269 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1270 ASSERT(rd.SizeInBits() == rm.SizeInBits()); 1270 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1271 Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd)); 1271 Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
1272 } 1272 }
1273 1273
1274 1274
1275 // Bitfield operations. 1275 // Bitfield operations.
1276 void Assembler::bfm(const Register& rd, 1276 void Assembler::bfm(const Register& rd,
1277 const Register& rn, 1277 const Register& rn,
1278 unsigned immr, 1278 unsigned immr,
1279 unsigned imms) { 1279 unsigned imms) {
1280 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 1280 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1281 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); 1281 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
1282 Emit(SF(rd) | BFM | N | 1282 Emit(SF(rd) | BFM | N |
1283 ImmR(immr, rd.SizeInBits()) | 1283 ImmR(immr, rd.SizeInBits()) |
1284 ImmS(imms, rn.SizeInBits()) | 1284 ImmS(imms, rn.SizeInBits()) |
1285 Rn(rn) | Rd(rd)); 1285 Rn(rn) | Rd(rd));
1286 } 1286 }
1287 1287
1288 1288
1289 void Assembler::sbfm(const Register& rd, 1289 void Assembler::sbfm(const Register& rd,
1290 const Register& rn, 1290 const Register& rn,
1291 unsigned immr, 1291 unsigned immr,
1292 unsigned imms) { 1292 unsigned imms) {
1293 ASSERT(rd.Is64Bits() || rn.Is32Bits()); 1293 DCHECK(rd.Is64Bits() || rn.Is32Bits());
1294 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); 1294 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
1295 Emit(SF(rd) | SBFM | N | 1295 Emit(SF(rd) | SBFM | N |
1296 ImmR(immr, rd.SizeInBits()) | 1296 ImmR(immr, rd.SizeInBits()) |
1297 ImmS(imms, rn.SizeInBits()) | 1297 ImmS(imms, rn.SizeInBits()) |
1298 Rn(rn) | Rd(rd)); 1298 Rn(rn) | Rd(rd));
1299 } 1299 }
1300 1300
1301 1301
1302 void Assembler::ubfm(const Register& rd, 1302 void Assembler::ubfm(const Register& rd,
1303 const Register& rn, 1303 const Register& rn,
1304 unsigned immr, 1304 unsigned immr,
1305 unsigned imms) { 1305 unsigned imms) {
1306 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 1306 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1307 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); 1307 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
1308 Emit(SF(rd) | UBFM | N | 1308 Emit(SF(rd) | UBFM | N |
1309 ImmR(immr, rd.SizeInBits()) | 1309 ImmR(immr, rd.SizeInBits()) |
1310 ImmS(imms, rn.SizeInBits()) | 1310 ImmS(imms, rn.SizeInBits()) |
1311 Rn(rn) | Rd(rd)); 1311 Rn(rn) | Rd(rd));
1312 } 1312 }
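bfm, sbfm and ubfm are the raw encodings behind the familiar shift and bitfield aliases, which the corresponding header builds on top of these calls. The standard AArch64 mappings, with size the register width in bits:

    lsl  rd, rn, #s        ->  ubfm rd, rn, #((size - s) % size), #(size - 1 - s)
    lsr  rd, rn, #s        ->  ubfm rd, rn, #s, #(size - 1)
    asr  rd, rn, #s        ->  sbfm rd, rn, #s, #(size - 1)
    ubfx rd, rn, #lsb, #w  ->  ubfm rd, rn, #lsb, #(lsb + w - 1)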
1313 1313
1314 1314
1315 void Assembler::extr(const Register& rd, 1315 void Assembler::extr(const Register& rd,
1316 const Register& rn, 1316 const Register& rn,
1317 const Register& rm, 1317 const Register& rm,
1318 unsigned lsb) { 1318 unsigned lsb) {
1319 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 1319 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1320 ASSERT(rd.SizeInBits() == rm.SizeInBits()); 1320 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1321 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset); 1321 Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
1322 Emit(SF(rd) | EXTR | N | Rm(rm) | 1322 Emit(SF(rd) | EXTR | N | Rm(rm) |
1323 ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd)); 1323 ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
1324 } 1324 }
1325 1325
1326 1326
1327 void Assembler::csel(const Register& rd, 1327 void Assembler::csel(const Register& rd,
1328 const Register& rn, 1328 const Register& rn,
1329 const Register& rm, 1329 const Register& rm,
1330 Condition cond) { 1330 Condition cond) {
(...skipping 19 matching lines...)
1350 1350
1351 void Assembler::csneg(const Register& rd, 1351 void Assembler::csneg(const Register& rd,
1352 const Register& rn, 1352 const Register& rn,
1353 const Register& rm, 1353 const Register& rm,
1354 Condition cond) { 1354 Condition cond) {
1355 ConditionalSelect(rd, rn, rm, cond, CSNEG); 1355 ConditionalSelect(rd, rn, rm, cond, CSNEG);
1356 } 1356 }
1357 1357
1358 1358
1359 void Assembler::cset(const Register &rd, Condition cond) { 1359 void Assembler::cset(const Register &rd, Condition cond) {
1360 ASSERT((cond != al) && (cond != nv)); 1360 DCHECK((cond != al) && (cond != nv));
1361 Register zr = AppropriateZeroRegFor(rd); 1361 Register zr = AppropriateZeroRegFor(rd);
1362 csinc(rd, zr, zr, NegateCondition(cond)); 1362 csinc(rd, zr, zr, NegateCondition(cond));
1363 } 1363 }
1364 1364
1365 1365
1366 void Assembler::csetm(const Register &rd, Condition cond) { 1366 void Assembler::csetm(const Register &rd, Condition cond) {
1367 ASSERT((cond != al) && (cond != nv)); 1367 DCHECK((cond != al) && (cond != nv));
1368 Register zr = AppropriateZeroRegFor(rd); 1368 Register zr = AppropriateZeroRegFor(rd);
1369 csinv(rd, zr, zr, NegateCondition(cond)); 1369 csinv(rd, zr, zr, NegateCondition(cond));
1370 } 1370 }
1371 1371
1372 1372
1373 void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) { 1373 void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
1374 ASSERT((cond != al) && (cond != nv)); 1374 DCHECK((cond != al) && (cond != nv));
1375 csinc(rd, rn, rn, NegateCondition(cond)); 1375 csinc(rd, rn, rn, NegateCondition(cond));
1376 } 1376 }
1377 1377
1378 1378
1379 void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) { 1379 void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
1380 ASSERT((cond != al) && (cond != nv)); 1380 DCHECK((cond != al) && (cond != nv));
1381 csinv(rd, rn, rn, NegateCondition(cond)); 1381 csinv(rd, rn, rn, NegateCondition(cond));
1382 } 1382 }
1383 1383
1384 1384
1385 void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) { 1385 void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
1386 ASSERT((cond != al) && (cond != nv)); 1386 DCHECK((cond != al) && (cond != nv));
1387 csneg(rd, rn, rn, NegateCondition(cond)); 1387 csneg(rd, rn, rn, NegateCondition(cond));
1388 } 1388 }
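All five aliases above follow one pattern: select between the source (or the zero register) and a transformed copy of it, with the condition negated. Concretely:

    cset  rd, cond      ->  csinc rd, zr, zr, !cond   ; rd = cond ? 1 : 0
    csetm rd, cond      ->  csinv rd, zr, zr, !cond   ; rd = cond ? -1 : 0
    cinc  rd, rn, cond  ->  csinc rd, rn, rn, !cond   ; rd = cond ? rn + 1 : rn
    cinv  rd, rn, cond  ->  csinv rd, rn, rn, !cond   ; rd = cond ? ~rn : rn
    cneg  rd, rn, cond  ->  csneg rd, rn, rn, !cond   ; rd = cond ? -rn : rn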
1389 1389
1390 1390
1391 void Assembler::ConditionalSelect(const Register& rd, 1391 void Assembler::ConditionalSelect(const Register& rd,
1392 const Register& rn, 1392 const Register& rn,
1393 const Register& rm, 1393 const Register& rm,
1394 Condition cond, 1394 Condition cond,
1395 ConditionalSelectOp op) { 1395 ConditionalSelectOp op) {
1396 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 1396 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1397 ASSERT(rd.SizeInBits() == rm.SizeInBits()); 1397 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1398 Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd)); 1398 Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
1399 } 1399 }
1400 1400
1401 1401
1402 void Assembler::ccmn(const Register& rn, 1402 void Assembler::ccmn(const Register& rn,
1403 const Operand& operand, 1403 const Operand& operand,
1404 StatusFlags nzcv, 1404 StatusFlags nzcv,
1405 Condition cond) { 1405 Condition cond) {
1406 ConditionalCompare(rn, operand, nzcv, cond, CCMN); 1406 ConditionalCompare(rn, operand, nzcv, cond, CCMN);
1407 } 1407 }
(...skipping 12 matching lines...)
1420 const Register& rm, 1420 const Register& rm,
1421 const Register& ra, 1421 const Register& ra,
1422 DataProcessing3SourceOp op) { 1422 DataProcessing3SourceOp op) {
1423 Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd)); 1423 Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
1424 } 1424 }
1425 1425
1426 1426
1427 void Assembler::mul(const Register& rd, 1427 void Assembler::mul(const Register& rd,
1428 const Register& rn, 1428 const Register& rn,
1429 const Register& rm) { 1429 const Register& rm) {
1430 ASSERT(AreSameSizeAndType(rd, rn, rm)); 1430 DCHECK(AreSameSizeAndType(rd, rn, rm));
1431 Register zr = AppropriateZeroRegFor(rn); 1431 Register zr = AppropriateZeroRegFor(rn);
1432 DataProcessing3Source(rd, rn, rm, zr, MADD); 1432 DataProcessing3Source(rd, rn, rm, zr, MADD);
1433 } 1433 }
1434 1434
1435 1435
1436 void Assembler::madd(const Register& rd, 1436 void Assembler::madd(const Register& rd,
1437 const Register& rn, 1437 const Register& rn,
1438 const Register& rm, 1438 const Register& rm,
1439 const Register& ra) { 1439 const Register& ra) {
1440 ASSERT(AreSameSizeAndType(rd, rn, rm, ra)); 1440 DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
1441 DataProcessing3Source(rd, rn, rm, ra, MADD); 1441 DataProcessing3Source(rd, rn, rm, ra, MADD);
1442 } 1442 }
1443 1443
1444 1444
1445 void Assembler::mneg(const Register& rd, 1445 void Assembler::mneg(const Register& rd,
1446 const Register& rn, 1446 const Register& rn,
1447 const Register& rm) { 1447 const Register& rm) {
1448 ASSERT(AreSameSizeAndType(rd, rn, rm)); 1448 DCHECK(AreSameSizeAndType(rd, rn, rm));
1449 Register zr = AppropriateZeroRegFor(rn); 1449 Register zr = AppropriateZeroRegFor(rn);
1450 DataProcessing3Source(rd, rn, rm, zr, MSUB); 1450 DataProcessing3Source(rd, rn, rm, zr, MSUB);
1451 } 1451 }
1452 1452
1453 1453
1454 void Assembler::msub(const Register& rd, 1454 void Assembler::msub(const Register& rd,
1455 const Register& rn, 1455 const Register& rn,
1456 const Register& rm, 1456 const Register& rm,
1457 const Register& ra) { 1457 const Register& ra) {
1458 ASSERT(AreSameSizeAndType(rd, rn, rm, ra)); 1458 DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
1459 DataProcessing3Source(rd, rn, rm, ra, MSUB); 1459 DataProcessing3Source(rd, rn, rm, ra, MSUB);
1460 } 1460 }
1461 1461
1462 1462
1463 void Assembler::smaddl(const Register& rd, 1463 void Assembler::smaddl(const Register& rd,
1464 const Register& rn, 1464 const Register& rn,
1465 const Register& rm, 1465 const Register& rm,
1466 const Register& ra) { 1466 const Register& ra) {
1467 ASSERT(rd.Is64Bits() && ra.Is64Bits()); 1467 DCHECK(rd.Is64Bits() && ra.Is64Bits());
1468 ASSERT(rn.Is32Bits() && rm.Is32Bits()); 1468 DCHECK(rn.Is32Bits() && rm.Is32Bits());
1469 DataProcessing3Source(rd, rn, rm, ra, SMADDL_x); 1469 DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
1470 } 1470 }
1471 1471
1472 1472
1473 void Assembler::smsubl(const Register& rd, 1473 void Assembler::smsubl(const Register& rd,
1474 const Register& rn, 1474 const Register& rn,
1475 const Register& rm, 1475 const Register& rm,
1476 const Register& ra) { 1476 const Register& ra) {
1477 ASSERT(rd.Is64Bits() && ra.Is64Bits()); 1477 DCHECK(rd.Is64Bits() && ra.Is64Bits());
1478 ASSERT(rn.Is32Bits() && rm.Is32Bits()); 1478 DCHECK(rn.Is32Bits() && rm.Is32Bits());
1479 DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x); 1479 DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
1480 } 1480 }
1481 1481
1482 1482
1483 void Assembler::umaddl(const Register& rd, 1483 void Assembler::umaddl(const Register& rd,
1484 const Register& rn, 1484 const Register& rn,
1485 const Register& rm, 1485 const Register& rm,
1486 const Register& ra) { 1486 const Register& ra) {
1487 ASSERT(rd.Is64Bits() && ra.Is64Bits()); 1487 DCHECK(rd.Is64Bits() && ra.Is64Bits());
1488 ASSERT(rn.Is32Bits() && rm.Is32Bits()); 1488 DCHECK(rn.Is32Bits() && rm.Is32Bits());
1489 DataProcessing3Source(rd, rn, rm, ra, UMADDL_x); 1489 DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
1490 } 1490 }
1491 1491
1492 1492
1493 void Assembler::umsubl(const Register& rd, 1493 void Assembler::umsubl(const Register& rd,
1494 const Register& rn, 1494 const Register& rn,
1495 const Register& rm, 1495 const Register& rm,
1496 const Register& ra) { 1496 const Register& ra) {
1497 ASSERT(rd.Is64Bits() && ra.Is64Bits()); 1497 DCHECK(rd.Is64Bits() && ra.Is64Bits());
1498 ASSERT(rn.Is32Bits() && rm.Is32Bits()); 1498 DCHECK(rn.Is32Bits() && rm.Is32Bits());
1499 DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x); 1499 DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
1500 } 1500 }
1501 1501
1502 1502
1503 void Assembler::smull(const Register& rd, 1503 void Assembler::smull(const Register& rd,
1504 const Register& rn, 1504 const Register& rn,
1505 const Register& rm) { 1505 const Register& rm) {
1506 ASSERT(rd.Is64Bits()); 1506 DCHECK(rd.Is64Bits());
1507 ASSERT(rn.Is32Bits() && rm.Is32Bits()); 1507 DCHECK(rn.Is32Bits() && rm.Is32Bits());
1508 DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x); 1508 DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
1509 } 1509 }
1510 1510
1511 1511
1512 void Assembler::smulh(const Register& rd, 1512 void Assembler::smulh(const Register& rd,
1513 const Register& rn, 1513 const Register& rn,
1514 const Register& rm) { 1514 const Register& rm) {
1515 ASSERT(AreSameSizeAndType(rd, rn, rm)); 1515 DCHECK(AreSameSizeAndType(rd, rn, rm));
1516 DataProcessing3Source(rd, rn, rm, xzr, SMULH_x); 1516 DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
1517 } 1517 }
1518 1518
1519 1519
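The widening forms above take 32-bit sources with a 64-bit accumulator, and smulh keeps the high 64 bits of the full 128-bit product. Host-side semantics as a sketch (names illustrative; __int128 is a compiler extension):

  #include <cstdint>

  inline int64_t ModelSmaddl(int64_t ra, int32_t rn, int32_t rm) {
    return ra + static_cast<int64_t>(rn) * static_cast<int64_t>(rm);
  }
  inline int64_t ModelSmulh(int64_t rn, int64_t rm) {
    return static_cast<int64_t>((static_cast<__int128>(rn) * rm) >> 64);
  }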
1520 void Assembler::sdiv(const Register& rd, 1520 void Assembler::sdiv(const Register& rd,
1521 const Register& rn, 1521 const Register& rn,
1522 const Register& rm) { 1522 const Register& rm) {
1523 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 1523 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1524 ASSERT(rd.SizeInBits() == rm.SizeInBits()); 1524 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1525 Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd)); 1525 Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
1526 } 1526 }
1527 1527
1528 1528
1529 void Assembler::udiv(const Register& rd, 1529 void Assembler::udiv(const Register& rd,
1530 const Register& rn, 1530 const Register& rn,
1531 const Register& rm) { 1531 const Register& rm) {
1532 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 1532 DCHECK(rd.SizeInBits() == rn.SizeInBits());
1533 ASSERT(rd.SizeInBits() == rm.SizeInBits()); 1533 DCHECK(rd.SizeInBits() == rm.SizeInBits());
1534 Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd)); 1534 Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
1535 } 1535 }
1536 1536
1537 1537
1538 void Assembler::rbit(const Register& rd, 1538 void Assembler::rbit(const Register& rd,
1539 const Register& rn) { 1539 const Register& rn) {
1540 DataProcessing1Source(rd, rn, RBIT); 1540 DataProcessing1Source(rd, rn, RBIT);
1541 } 1541 }
1542 1542
1543 1543
1544 void Assembler::rev16(const Register& rd, 1544 void Assembler::rev16(const Register& rd,
1545 const Register& rn) { 1545 const Register& rn) {
1546 DataProcessing1Source(rd, rn, REV16); 1546 DataProcessing1Source(rd, rn, REV16);
1547 } 1547 }
1548 1548
1549 1549
1550 void Assembler::rev32(const Register& rd, 1550 void Assembler::rev32(const Register& rd,
1551 const Register& rn) { 1551 const Register& rn) {
1552 ASSERT(rd.Is64Bits()); 1552 DCHECK(rd.Is64Bits());
1553 DataProcessing1Source(rd, rn, REV); 1553 DataProcessing1Source(rd, rn, REV);
1554 } 1554 }
1555 1555
1556 1556
1557 void Assembler::rev(const Register& rd, 1557 void Assembler::rev(const Register& rd,
1558 const Register& rn) { 1558 const Register& rn) {
1559 DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w); 1559 DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
1560 } 1560 }
1561 1561
1562 1562
(...skipping 19 matching lines...)
1582 void Assembler::stp(const CPURegister& rt, 1582 void Assembler::stp(const CPURegister& rt,
1583 const CPURegister& rt2, 1583 const CPURegister& rt2,
1584 const MemOperand& dst) { 1584 const MemOperand& dst) {
1585 LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2)); 1585 LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
1586 } 1586 }
1587 1587
1588 1588
1589 void Assembler::ldpsw(const Register& rt, 1589 void Assembler::ldpsw(const Register& rt,
1590 const Register& rt2, 1590 const Register& rt2,
1591 const MemOperand& src) { 1591 const MemOperand& src) {
1592 ASSERT(rt.Is64Bits()); 1592 DCHECK(rt.Is64Bits());
1593 LoadStorePair(rt, rt2, src, LDPSW_x); 1593 LoadStorePair(rt, rt2, src, LDPSW_x);
1594 } 1594 }
1595 1595
1596 1596
1597 void Assembler::LoadStorePair(const CPURegister& rt, 1597 void Assembler::LoadStorePair(const CPURegister& rt,
1598 const CPURegister& rt2, 1598 const CPURegister& rt2,
1599 const MemOperand& addr, 1599 const MemOperand& addr,
1600 LoadStorePairOp op) { 1600 LoadStorePairOp op) {
1601 // 'rt' and 'rt2' can only be aliased for stores. 1601 // 'rt' and 'rt2' can only be aliased for stores.
1602 ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2)); 1602 DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
1603 ASSERT(AreSameSizeAndType(rt, rt2)); 1603 DCHECK(AreSameSizeAndType(rt, rt2));
1604 1604
1605 Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | 1605 Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
1606 ImmLSPair(addr.offset(), CalcLSPairDataSize(op)); 1606 ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
1607 1607
1608 Instr addrmodeop; 1608 Instr addrmodeop;
1609 if (addr.IsImmediateOffset()) { 1609 if (addr.IsImmediateOffset()) {
1610 addrmodeop = LoadStorePairOffsetFixed; 1610 addrmodeop = LoadStorePairOffsetFixed;
1611 } else { 1611 } else {
1612 // Pre-index and post-index modes. 1612 // Pre-index and post-index modes.
1613 ASSERT(!rt.Is(addr.base())); 1613 DCHECK(!rt.Is(addr.base()));
1614 ASSERT(!rt2.Is(addr.base())); 1614 DCHECK(!rt2.Is(addr.base()));
1615 ASSERT(addr.offset() != 0); 1615 DCHECK(addr.offset() != 0);
1616 if (addr.IsPreIndex()) { 1616 if (addr.IsPreIndex()) {
1617 addrmodeop = LoadStorePairPreIndexFixed; 1617 addrmodeop = LoadStorePairPreIndexFixed;
1618 } else { 1618 } else {
1619 ASSERT(addr.IsPostIndex()); 1619 DCHECK(addr.IsPostIndex());
1620 addrmodeop = LoadStorePairPostIndexFixed; 1620 addrmodeop = LoadStorePairPostIndexFixed;
1621 } 1621 }
1622 } 1622 }
1623 Emit(addrmodeop | memop); 1623 Emit(addrmodeop | memop);
1624 } 1624 }
1625 1625
1626 1626
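For context, the three pair addressing modes handled above correspond to familiar call sites; a usage sketch, assuming the usual MemOperand constructors taking an AddrMode:

  // __ stp(x0, x1, MemOperand(sp, 16));             // immediate offset
  // __ stp(x0, x1, MemOperand(sp, -16, PreIndex));  // push-like, base written back
  // __ ldp(x0, x1, MemOperand(sp, 16, PostIndex));  // pop-like, base written back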
1627 void Assembler::ldnp(const CPURegister& rt, 1627 void Assembler::ldnp(const CPURegister& rt,
1628 const CPURegister& rt2, 1628 const CPURegister& rt2,
1629 const MemOperand& src) { 1629 const MemOperand& src) {
1630 LoadStorePairNonTemporal(rt, rt2, src, 1630 LoadStorePairNonTemporal(rt, rt2, src,
1631 LoadPairNonTemporalOpFor(rt, rt2)); 1631 LoadPairNonTemporalOpFor(rt, rt2));
1632 } 1632 }
1633 1633
1634 1634
1635 void Assembler::stnp(const CPURegister& rt, 1635 void Assembler::stnp(const CPURegister& rt,
1636 const CPURegister& rt2, 1636 const CPURegister& rt2,
1637 const MemOperand& dst) { 1637 const MemOperand& dst) {
1638 LoadStorePairNonTemporal(rt, rt2, dst, 1638 LoadStorePairNonTemporal(rt, rt2, dst,
1639 StorePairNonTemporalOpFor(rt, rt2)); 1639 StorePairNonTemporalOpFor(rt, rt2));
1640 } 1640 }
1641 1641
1642 1642
1643 void Assembler::LoadStorePairNonTemporal(const CPURegister& rt, 1643 void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
1644 const CPURegister& rt2, 1644 const CPURegister& rt2,
1645 const MemOperand& addr, 1645 const MemOperand& addr,
1646 LoadStorePairNonTemporalOp op) { 1646 LoadStorePairNonTemporalOp op) {
1647 ASSERT(!rt.Is(rt2)); 1647 DCHECK(!rt.Is(rt2));
1648 ASSERT(AreSameSizeAndType(rt, rt2)); 1648 DCHECK(AreSameSizeAndType(rt, rt2));
1649 ASSERT(addr.IsImmediateOffset()); 1649 DCHECK(addr.IsImmediateOffset());
1650 1650
1651 LSDataSize size = CalcLSPairDataSize( 1651 LSDataSize size = CalcLSPairDataSize(
1652 static_cast<LoadStorePairOp>(op & LoadStorePairMask)); 1652 static_cast<LoadStorePairOp>(op & LoadStorePairMask));
1653 Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | 1653 Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
1654 ImmLSPair(addr.offset(), size)); 1654 ImmLSPair(addr.offset(), size));
1655 } 1655 }
1656 1656
1657 1657
1658 // Memory instructions. 1658 // Memory instructions.
1659 void Assembler::ldrb(const Register& rt, const MemOperand& src) { 1659 void Assembler::ldrb(const Register& rt, const MemOperand& src) {
(...skipping 30 matching lines...)
1690 LoadStore(rt, src, LoadOpFor(rt)); 1690 LoadStore(rt, src, LoadOpFor(rt));
1691 } 1691 }
1692 1692
1693 1693
1694 void Assembler::str(const CPURegister& rt, const MemOperand& src) { 1694 void Assembler::str(const CPURegister& rt, const MemOperand& src) {
1695 LoadStore(rt, src, StoreOpFor(rt)); 1695 LoadStore(rt, src, StoreOpFor(rt));
1696 } 1696 }
1697 1697
1698 1698
1699 void Assembler::ldrsw(const Register& rt, const MemOperand& src) { 1699 void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
1700 ASSERT(rt.Is64Bits()); 1700 DCHECK(rt.Is64Bits());
1701 LoadStore(rt, src, LDRSW_x); 1701 LoadStore(rt, src, LDRSW_x);
1702 } 1702 }
1703 1703
1704 1704
1705 void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) { 1705 void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
1706 // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a 1706 // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
1707 // constant pool. It should not be emitted. 1707 // constant pool. It should not be emitted.
1708 ASSERT(!rt.IsZero()); 1708 DCHECK(!rt.IsZero());
1709 Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt)); 1709 Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
1710 } 1710 }
1711 1711
1712 1712
1713 void Assembler::ldr(const CPURegister& rt, const Immediate& imm) { 1713 void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
1714 // Currently we only support 64-bit literals. 1714 // Currently we only support 64-bit literals.
1715 ASSERT(rt.Is64Bits()); 1715 DCHECK(rt.Is64Bits());
1716 1716
1717 RecordRelocInfo(imm.rmode(), imm.value()); 1717 RecordRelocInfo(imm.rmode(), imm.value());
1718 BlockConstPoolFor(1); 1718 BlockConstPoolFor(1);
1719 // The load will be patched when the constpool is emitted; patching code 1719 // The load will be patched when the constpool is emitted; patching code
1720 // expects a load literal with offset 0. 1720 // expects a load literal with offset 0.
1721 ldr_pcrel(rt, 0); 1721 ldr_pcrel(rt, 0);
1722 } 1722 }
1723 1723
1724 1724
1725 void Assembler::mov(const Register& rd, const Register& rm) { 1725 void Assembler::mov(const Register& rd, const Register& rm) {
1726 // Moves involving the stack pointer are encoded as add immediate with 1726 // Moves involving the stack pointer are encoded as add immediate with
1727 // second operand of zero. Otherwise, orr with first operand zr is 1727 // second operand of zero. Otherwise, orr with first operand zr is
1728 // used. 1728 // used.
1729 if (rd.IsSP() || rm.IsSP()) { 1729 if (rd.IsSP() || rm.IsSP()) {
1730 add(rd, rm, 0); 1730 add(rd, rm, 0);
1731 } else { 1731 } else {
1732 orr(rd, AppropriateZeroRegFor(rd), rm); 1732 orr(rd, AppropriateZeroRegFor(rd), rm);
1733 } 1733 }
1734 } 1734 }
1735 1735
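Register number 31 decodes as the zero register in orr's Rn/Rm/Rd fields but as the stack pointer in add-immediate's RnSP/RdSP fields, which is why the two branches emit different instructions. Illustrative expansions (a sketch):

  // __ mov(x0, x1);   // emits: orr x0, xzr, x1
  // __ mov(sp, x1);   // emits: add sp, x1, #0  (orr cannot name sp)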
1736 1736
1737 void Assembler::mvn(const Register& rd, const Operand& operand) { 1737 void Assembler::mvn(const Register& rd, const Operand& operand) {
1738 orn(rd, AppropriateZeroRegFor(rd), operand); 1738 orn(rd, AppropriateZeroRegFor(rd), operand);
1739 } 1739 }
1740 1740
1741 1741
1742 void Assembler::mrs(const Register& rt, SystemRegister sysreg) { 1742 void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
1743 ASSERT(rt.Is64Bits()); 1743 DCHECK(rt.Is64Bits());
1744 Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt)); 1744 Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
1745 } 1745 }
1746 1746
1747 1747
1748 void Assembler::msr(SystemRegister sysreg, const Register& rt) { 1748 void Assembler::msr(SystemRegister sysreg, const Register& rt) {
1749 ASSERT(rt.Is64Bits()); 1749 DCHECK(rt.Is64Bits());
1750 Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg)); 1750 Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
1751 } 1751 }
1752 1752
1753 1753
1754 void Assembler::hint(SystemHint code) { 1754 void Assembler::hint(SystemHint code) {
1755 Emit(HINT | ImmHint(code) | Rt(xzr)); 1755 Emit(HINT | ImmHint(code) | Rt(xzr));
1756 } 1756 }
1757 1757
1758 1758
1759 void Assembler::dmb(BarrierDomain domain, BarrierType type) { 1759 void Assembler::dmb(BarrierDomain domain, BarrierType type) {
1760 Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type)); 1760 Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
1761 } 1761 }
1762 1762
1763 1763
1764 void Assembler::dsb(BarrierDomain domain, BarrierType type) { 1764 void Assembler::dsb(BarrierDomain domain, BarrierType type) {
1765 Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type)); 1765 Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
1766 } 1766 }
1767 1767
1768 1768
1769 void Assembler::isb() { 1769 void Assembler::isb() {
1770 Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll)); 1770 Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
1771 } 1771 }
1772 1772
1773 1773
1774 void Assembler::fmov(FPRegister fd, double imm) { 1774 void Assembler::fmov(FPRegister fd, double imm) {
1775 ASSERT(fd.Is64Bits()); 1775 DCHECK(fd.Is64Bits());
1776 ASSERT(IsImmFP64(imm)); 1776 DCHECK(IsImmFP64(imm));
1777 Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm)); 1777 Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
1778 } 1778 }
1779 1779
1780 1780
1781 void Assembler::fmov(FPRegister fd, float imm) { 1781 void Assembler::fmov(FPRegister fd, float imm) {
1782 ASSERT(fd.Is32Bits()); 1782 DCHECK(fd.Is32Bits());
1783 ASSERT(IsImmFP32(imm)); 1783 DCHECK(IsImmFP32(imm));
1784 Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm)); 1784 Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
1785 } 1785 }
1786 1786
1787 1787
1788 void Assembler::fmov(Register rd, FPRegister fn) { 1788 void Assembler::fmov(Register rd, FPRegister fn) {
1789 ASSERT(rd.SizeInBits() == fn.SizeInBits()); 1789 DCHECK(rd.SizeInBits() == fn.SizeInBits());
1790 FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd; 1790 FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
1791 Emit(op | Rd(rd) | Rn(fn)); 1791 Emit(op | Rd(rd) | Rn(fn));
1792 } 1792 }
1793 1793
1794 1794
1795 void Assembler::fmov(FPRegister fd, Register rn) { 1795 void Assembler::fmov(FPRegister fd, Register rn) {
1796 ASSERT(fd.SizeInBits() == rn.SizeInBits()); 1796 DCHECK(fd.SizeInBits() == rn.SizeInBits());
1797 FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx; 1797 FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
1798 Emit(op | Rd(fd) | Rn(rn)); 1798 Emit(op | Rd(fd) | Rn(rn));
1799 } 1799 }
1800 1800
1801 1801
1802 void Assembler::fmov(FPRegister fd, FPRegister fn) { 1802 void Assembler::fmov(FPRegister fd, FPRegister fn) {
1803 ASSERT(fd.SizeInBits() == fn.SizeInBits()); 1803 DCHECK(fd.SizeInBits() == fn.SizeInBits());
1804 Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn)); 1804 Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
1805 } 1805 }
1806 1806
1807 1807
1808 void Assembler::fadd(const FPRegister& fd, 1808 void Assembler::fadd(const FPRegister& fd,
1809 const FPRegister& fn, 1809 const FPRegister& fn,
1810 const FPRegister& fm) { 1810 const FPRegister& fm) {
1811 FPDataProcessing2Source(fd, fn, fm, FADD); 1811 FPDataProcessing2Source(fd, fn, fm, FADD);
1812 } 1812 }
1813 1813
(...skipping 74 matching lines...)
1888 1888
1889 void Assembler::fminnm(const FPRegister& fd, 1889 void Assembler::fminnm(const FPRegister& fd,
1890 const FPRegister& fn, 1890 const FPRegister& fn,
1891 const FPRegister& fm) { 1891 const FPRegister& fm) {
1892 FPDataProcessing2Source(fd, fn, fm, FMINNM); 1892 FPDataProcessing2Source(fd, fn, fm, FMINNM);
1893 } 1893 }
1894 1894
1895 1895
1896 void Assembler::fabs(const FPRegister& fd, 1896 void Assembler::fabs(const FPRegister& fd,
1897 const FPRegister& fn) { 1897 const FPRegister& fn) {
1898 ASSERT(fd.SizeInBits() == fn.SizeInBits()); 1898 DCHECK(fd.SizeInBits() == fn.SizeInBits());
1899 FPDataProcessing1Source(fd, fn, FABS); 1899 FPDataProcessing1Source(fd, fn, FABS);
1900 } 1900 }
1901 1901
1902 1902
1903 void Assembler::fneg(const FPRegister& fd, 1903 void Assembler::fneg(const FPRegister& fd,
1904 const FPRegister& fn) { 1904 const FPRegister& fn) {
1905 ASSERT(fd.SizeInBits() == fn.SizeInBits()); 1905 DCHECK(fd.SizeInBits() == fn.SizeInBits());
1906 FPDataProcessing1Source(fd, fn, FNEG); 1906 FPDataProcessing1Source(fd, fn, FNEG);
1907 } 1907 }
1908 1908
1909 1909
1910 void Assembler::fsqrt(const FPRegister& fd, 1910 void Assembler::fsqrt(const FPRegister& fd,
1911 const FPRegister& fn) { 1911 const FPRegister& fn) {
1912 ASSERT(fd.SizeInBits() == fn.SizeInBits()); 1912 DCHECK(fd.SizeInBits() == fn.SizeInBits());
1913 FPDataProcessing1Source(fd, fn, FSQRT); 1913 FPDataProcessing1Source(fd, fn, FSQRT);
1914 } 1914 }
1915 1915
1916 1916
1917 void Assembler::frinta(const FPRegister& fd, 1917 void Assembler::frinta(const FPRegister& fd,
1918 const FPRegister& fn) { 1918 const FPRegister& fn) {
1919 ASSERT(fd.SizeInBits() == fn.SizeInBits()); 1919 DCHECK(fd.SizeInBits() == fn.SizeInBits());
1920 FPDataProcessing1Source(fd, fn, FRINTA); 1920 FPDataProcessing1Source(fd, fn, FRINTA);
1921 } 1921 }
1922 1922
1923 1923
1924 void Assembler::frintm(const FPRegister& fd, 1924 void Assembler::frintm(const FPRegister& fd,
1925 const FPRegister& fn) { 1925 const FPRegister& fn) {
1926 ASSERT(fd.SizeInBits() == fn.SizeInBits()); 1926 DCHECK(fd.SizeInBits() == fn.SizeInBits());
1927 FPDataProcessing1Source(fd, fn, FRINTM); 1927 FPDataProcessing1Source(fd, fn, FRINTM);
1928 } 1928 }
1929 1929
1930 1930
1931 void Assembler::frintn(const FPRegister& fd, 1931 void Assembler::frintn(const FPRegister& fd,
1932 const FPRegister& fn) { 1932 const FPRegister& fn) {
1933 ASSERT(fd.SizeInBits() == fn.SizeInBits()); 1933 DCHECK(fd.SizeInBits() == fn.SizeInBits());
1934 FPDataProcessing1Source(fd, fn, FRINTN); 1934 FPDataProcessing1Source(fd, fn, FRINTN);
1935 } 1935 }
1936 1936
1937 1937
1938 void Assembler::frintz(const FPRegister& fd, 1938 void Assembler::frintz(const FPRegister& fd,
1939 const FPRegister& fn) { 1939 const FPRegister& fn) {
1940 ASSERT(fd.SizeInBits() == fn.SizeInBits()); 1940 DCHECK(fd.SizeInBits() == fn.SizeInBits());
1941 FPDataProcessing1Source(fd, fn, FRINTZ); 1941 FPDataProcessing1Source(fd, fn, FRINTZ);
1942 } 1942 }
1943 1943
1944 1944
1945 void Assembler::fcmp(const FPRegister& fn, 1945 void Assembler::fcmp(const FPRegister& fn,
1946 const FPRegister& fm) { 1946 const FPRegister& fm) {
1947 ASSERT(fn.SizeInBits() == fm.SizeInBits()); 1947 DCHECK(fn.SizeInBits() == fm.SizeInBits());
1948 Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn)); 1948 Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
1949 } 1949 }
1950 1950
1951 1951
1952 void Assembler::fcmp(const FPRegister& fn, 1952 void Assembler::fcmp(const FPRegister& fn,
1953 double value) { 1953 double value) {
1954 USE(value); 1954 USE(value);
1955 // Although the fcmp instruction strictly accepts only an immediate value of 1955 // Although the fcmp instruction strictly accepts only an immediate value of
1956 // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't 1956 // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
1957 // affect the result of the comparison. 1957 // affect the result of the comparison.
1958 ASSERT(value == 0.0); 1958 DCHECK(value == 0.0);
1959 Emit(FPType(fn) | FCMP_zero | Rn(fn)); 1959 Emit(FPType(fn) | FCMP_zero | Rn(fn));
1960 } 1960 }
1961 1961
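The DCHECK above deliberately admits -0.0 as well, because IEEE-754 comparison ignores the sign of zero; a standalone check of that premise:

  #include <cassert>

  int main() {
    assert(-0.0 == 0.0);  // the sign of zero does not affect equality
    return 0;
  }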
1962 1962
1963 void Assembler::fccmp(const FPRegister& fn, 1963 void Assembler::fccmp(const FPRegister& fn,
1964 const FPRegister& fm, 1964 const FPRegister& fm,
1965 StatusFlags nzcv, 1965 StatusFlags nzcv,
1966 Condition cond) { 1966 Condition cond) {
1967 ASSERT(fn.SizeInBits() == fm.SizeInBits()); 1967 DCHECK(fn.SizeInBits() == fm.SizeInBits());
1968 Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv)); 1968 Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
1969 } 1969 }
1970 1970
1971 1971
1972 void Assembler::fcsel(const FPRegister& fd, 1972 void Assembler::fcsel(const FPRegister& fd,
1973 const FPRegister& fn, 1973 const FPRegister& fn,
1974 const FPRegister& fm, 1974 const FPRegister& fm,
1975 Condition cond) { 1975 Condition cond) {
1976 ASSERT(fd.SizeInBits() == fn.SizeInBits()); 1976 DCHECK(fd.SizeInBits() == fn.SizeInBits());
1977 ASSERT(fd.SizeInBits() == fm.SizeInBits()); 1977 DCHECK(fd.SizeInBits() == fm.SizeInBits());
1978 Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd)); 1978 Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
1979 } 1979 }
1980 1980
1981 1981
1982 void Assembler::FPConvertToInt(const Register& rd, 1982 void Assembler::FPConvertToInt(const Register& rd,
1983 const FPRegister& fn, 1983 const FPRegister& fn,
1984 FPIntegerConvertOp op) { 1984 FPIntegerConvertOp op) {
1985 Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd)); 1985 Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
1986 } 1986 }
1987 1987
1988 1988
1989 void Assembler::fcvt(const FPRegister& fd, 1989 void Assembler::fcvt(const FPRegister& fd,
1990 const FPRegister& fn) { 1990 const FPRegister& fn) {
1991 if (fd.Is64Bits()) { 1991 if (fd.Is64Bits()) {
1992 // Convert float to double. 1992 // Convert float to double.
1993 ASSERT(fn.Is32Bits()); 1993 DCHECK(fn.Is32Bits());
1994 FPDataProcessing1Source(fd, fn, FCVT_ds); 1994 FPDataProcessing1Source(fd, fn, FCVT_ds);
1995 } else { 1995 } else {
1996 // Convert double to float. 1996 // Convert double to float.
1997 ASSERT(fn.Is64Bits()); 1997 DCHECK(fn.Is64Bits());
1998 FPDataProcessing1Source(fd, fn, FCVT_sd); 1998 FPDataProcessing1Source(fd, fn, FCVT_sd);
1999 } 1999 }
2000 } 2000 }
2001 2001
2002 2002
2003 void Assembler::fcvtau(const Register& rd, const FPRegister& fn) { 2003 void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
2004 FPConvertToInt(rd, fn, FCVTAU); 2004 FPConvertToInt(rd, fn, FCVTAU);
2005 } 2005 }
2006 2006
2007 2007
(...skipping 54 matching lines...)
2062 Rd(fd)); 2062 Rd(fd));
2063 } 2063 }
2064 } 2064 }
2065 2065
2066 2066
2067 // Note: 2067 // Note:
2068 // Below, a difference in case for the same letter indicates a 2068 // Below, a difference in case for the same letter indicates a
2069 // negated bit. 2069 // negated bit.
2070 // If b is 1, then B is 0. 2070 // If b is 1, then B is 0.
2071 Instr Assembler::ImmFP32(float imm) { 2071 Instr Assembler::ImmFP32(float imm) {
2072 ASSERT(IsImmFP32(imm)); 2072 DCHECK(IsImmFP32(imm));
2073 // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000 2073 // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
2074 uint32_t bits = float_to_rawbits(imm); 2074 uint32_t bits = float_to_rawbits(imm);
2075 // bit7: a000.0000 2075 // bit7: a000.0000
2076 uint32_t bit7 = ((bits >> 31) & 0x1) << 7; 2076 uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
2077 // bit6: 0b00.0000 2077 // bit6: 0b00.0000
2078 uint32_t bit6 = ((bits >> 29) & 0x1) << 6; 2078 uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
2079 // bit5_to_0: 00cd.efgh 2079 // bit5_to_0: 00cd.efgh
2080 uint32_t bit5_to_0 = (bits >> 19) & 0x3f; 2080 uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
2081 2081
2082 return (bit7 | bit6 | bit5_to_0) << ImmFP_offset; 2082 return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
2083 } 2083 }
2084 2084
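The same extraction, repackaged as a standalone helper for experimentation (a sketch, not V8 code; the caller must ensure the IsImmFP32 precondition holds):

  #include <cstdint>
  #include <cstring>

  uint32_t ImmFP32BitsSketch(float imm) {
    uint32_t bits;
    std::memcpy(&bits, &imm, sizeof(bits));      // float_to_rawbits equivalent
    uint32_t bit7 = ((bits >> 31) & 0x1) << 7;   // a: sign
    uint32_t bit6 = ((bits >> 29) & 0x1) << 6;   // b: exponent MSB (B is implied)
    uint32_t bit5_to_0 = (bits >> 19) & 0x3f;    // cdefgh: exponent + mantissa bits
    return bit7 | bit6 | bit5_to_0;              // e.g. 1.0f -> 0x70, 2.0f -> 0x00
  }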
2085 2085
2086 Instr Assembler::ImmFP64(double imm) { 2086 Instr Assembler::ImmFP64(double imm) {
2087 ASSERT(IsImmFP64(imm)); 2087 DCHECK(IsImmFP64(imm));
2088 // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000 2088 // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
2089 // 0000.0000.0000.0000.0000.0000.0000.0000 2089 // 0000.0000.0000.0000.0000.0000.0000.0000
2090 uint64_t bits = double_to_rawbits(imm); 2090 uint64_t bits = double_to_rawbits(imm);
2091 // bit7: a000.0000 2091 // bit7: a000.0000
2092 uint32_t bit7 = ((bits >> 63) & 0x1) << 7; 2092 uint32_t bit7 = ((bits >> 63) & 0x1) << 7;
2093 // bit6: 0b00.0000 2093 // bit6: 0b00.0000
2094 uint32_t bit6 = ((bits >> 61) & 0x1) << 6; 2094 uint32_t bit6 = ((bits >> 61) & 0x1) << 6;
2095 // bit5_to_0: 00cd.efgh 2095 // bit5_to_0: 00cd.efgh
2096 uint32_t bit5_to_0 = (bits >> 48) & 0x3f; 2096 uint32_t bit5_to_0 = (bits >> 48) & 0x3f;
2097 2097
2098 return (bit7 | bit6 | bit5_to_0) << ImmFP_offset; 2098 return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
2099 } 2099 }
2100 2100
2101 2101
2102 // Code generation helpers. 2102 // Code generation helpers.
2103 void Assembler::MoveWide(const Register& rd, 2103 void Assembler::MoveWide(const Register& rd,
2104 uint64_t imm, 2104 uint64_t imm,
2105 int shift, 2105 int shift,
2106 MoveWideImmediateOp mov_op) { 2106 MoveWideImmediateOp mov_op) {
2107 // Ignore the top 32 bits of an immediate if we're moving to a W register. 2107 // Ignore the top 32 bits of an immediate if we're moving to a W register.
2108 if (rd.Is32Bits()) { 2108 if (rd.Is32Bits()) {
2109 // Check that the top 32 bits are zero (a positive 32-bit number) or the top 2109 // Check that the top 32 bits are zero (a positive 32-bit number) or the top
2110 // 33 bits are one (a negative 32-bit number, sign-extended to 64 bits). 2110 // 33 bits are one (a negative 32-bit number, sign-extended to 64 bits).
2111 ASSERT(((imm >> kWRegSizeInBits) == 0) || 2111 DCHECK(((imm >> kWRegSizeInBits) == 0) ||
2112 ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff)); 2112 ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff));
2113 imm &= kWRegMask; 2113 imm &= kWRegMask;
2114 } 2114 }
2115 2115
2116 if (shift >= 0) { 2116 if (shift >= 0) {
2117 // Explicit shift specified. 2117 // Explicit shift specified.
2118 ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48)); 2118 DCHECK((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
2119 ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16)); 2119 DCHECK(rd.Is64Bits() || (shift == 0) || (shift == 16));
2120 shift /= 16; 2120 shift /= 16;
2121 } else { 2121 } else {
2122 // Calculate a new immediate and shift combination to encode the immediate 2122 // Calculate a new immediate and shift combination to encode the immediate
2123 // argument. 2123 // argument.
2124 shift = 0; 2124 shift = 0;
2125 if ((imm & ~0xffffUL) == 0) { 2125 if ((imm & ~0xffffUL) == 0) {
2126 // Nothing to do. 2126 // Nothing to do.
2127 } else if ((imm & ~(0xffffUL << 16)) == 0) { 2127 } else if ((imm & ~(0xffffUL << 16)) == 0) {
2128 imm >>= 16; 2128 imm >>= 16;
2129 shift = 1; 2129 shift = 1;
2130 } else if ((imm & ~(0xffffUL << 32)) == 0) { 2130 } else if ((imm & ~(0xffffUL << 32)) == 0) {
2131 ASSERT(rd.Is64Bits()); 2131 DCHECK(rd.Is64Bits());
2132 imm >>= 32; 2132 imm >>= 32;
2133 shift = 2; 2133 shift = 2;
2134 } else if ((imm & ~(0xffffUL << 48)) == 0) { 2134 } else if ((imm & ~(0xffffUL << 48)) == 0) {
2135 ASSERT(rd.Is64Bits()); 2135 DCHECK(rd.Is64Bits());
2136 imm >>= 48; 2136 imm >>= 48;
2137 shift = 3; 2137 shift = 3;
2138 } 2138 }
2139 } 2139 }
2140 2140
2141 ASSERT(is_uint16(imm)); 2141 DCHECK(is_uint16(imm));
2142 2142
2143 Emit(SF(rd) | MoveWideImmediateFixed | mov_op | 2143 Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
2144 Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift)); 2144 Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
2145 } 2145 }
2146 2146
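The implicit-shift search above, isolated as a standalone helper (a sketch): it finds which 16-bit halfword carries the payload, mirroring the cascade of masks.

  #include <cstdint>

  int MoveWideShiftSketch(uint64_t imm) {
    for (int shift = 0; shift < 4; ++shift) {
      if ((imm & ~(0xffffULL << (16 * shift))) == 0) return shift;  // one halfword
    }
    return -1;  // needs several MOVZ/MOVK instructions; the macro assembler splits it
  }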
2147 2147
2148 void Assembler::AddSub(const Register& rd, 2148 void Assembler::AddSub(const Register& rd,
2149 const Register& rn, 2149 const Register& rn,
2150 const Operand& operand, 2150 const Operand& operand,
2151 FlagsUpdate S, 2151 FlagsUpdate S,
2152 AddSubOp op) { 2152 AddSubOp op) {
2153 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 2153 DCHECK(rd.SizeInBits() == rn.SizeInBits());
2154 ASSERT(!operand.NeedsRelocation(this)); 2154 DCHECK(!operand.NeedsRelocation(this));
2155 if (operand.IsImmediate()) { 2155 if (operand.IsImmediate()) {
2156 int64_t immediate = operand.ImmediateValue(); 2156 int64_t immediate = operand.ImmediateValue();
2157 ASSERT(IsImmAddSub(immediate)); 2157 DCHECK(IsImmAddSub(immediate));
2158 Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd); 2158 Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
2159 Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) | 2159 Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
2160 ImmAddSub(immediate) | dest_reg | RnSP(rn)); 2160 ImmAddSub(immediate) | dest_reg | RnSP(rn));
2161 } else if (operand.IsShiftedRegister()) { 2161 } else if (operand.IsShiftedRegister()) {
2162 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); 2162 DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
2163 ASSERT(operand.shift() != ROR); 2163 DCHECK(operand.shift() != ROR);
2164 2164
2165 // For instructions of the form: 2165 // For instructions of the form:
2166 // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ] 2166 // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
2167 // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ] 2167 // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ]
2168 // add/sub wsp, wsp, <Wm> [, LSL #0-3 ] 2168 // add/sub wsp, wsp, <Wm> [, LSL #0-3 ]
2169 // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ] 2169 // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
2170 // or their 64-bit register equivalents, convert the operand from shifted to 2170 // or their 64-bit register equivalents, convert the operand from shifted to
2171 // extended register mode, and emit an add/sub extended instruction. 2171 // extended register mode, and emit an add/sub extended instruction.
2172 if (rn.IsSP() || rd.IsSP()) { 2172 if (rn.IsSP() || rd.IsSP()) {
2173 ASSERT(!(rd.IsSP() && (S == SetFlags))); 2173 DCHECK(!(rd.IsSP() && (S == SetFlags)));
2174 DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S, 2174 DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
2175 AddSubExtendedFixed | op); 2175 AddSubExtendedFixed | op);
2176 } else { 2176 } else {
2177 DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op); 2177 DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
2178 } 2178 }
2179 } else { 2179 } else {
2180 ASSERT(operand.IsExtendedRegister()); 2180 DCHECK(operand.IsExtendedRegister());
2181 DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op); 2181 DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
2182 } 2182 }
2183 } 2183 }
2184 2184
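The SP special case exists because the shifted-register encoding reuses register 31 for the zero register, not sp; rewriting LSL #n as UXTX #n keeps the operand's value while switching to the extended encoding, whose RnSP/RdSP fields can name sp. Illustrative expansion (a sketch):

  // __ add(sp, x0, Operand(x1, LSL, 2));
  //   -> operand rewritten to (x1, UXTX, 2); emitted as: add sp, x0, x1, uxtx #2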
2185 2185
2186 void Assembler::AddSubWithCarry(const Register& rd, 2186 void Assembler::AddSubWithCarry(const Register& rd,
2187 const Register& rn, 2187 const Register& rn,
2188 const Operand& operand, 2188 const Operand& operand,
2189 FlagsUpdate S, 2189 FlagsUpdate S,
2190 AddSubWithCarryOp op) { 2190 AddSubWithCarryOp op) {
2191 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 2191 DCHECK(rd.SizeInBits() == rn.SizeInBits());
2192 ASSERT(rd.SizeInBits() == operand.reg().SizeInBits()); 2192 DCHECK(rd.SizeInBits() == operand.reg().SizeInBits());
2193 ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); 2193 DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
2194 ASSERT(!operand.NeedsRelocation(this)); 2194 DCHECK(!operand.NeedsRelocation(this));
2195 Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd)); 2195 Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
2196 } 2196 }
2197 2197
2198 2198
2199 void Assembler::hlt(int code) { 2199 void Assembler::hlt(int code) {
2200 ASSERT(is_uint16(code)); 2200 DCHECK(is_uint16(code));
2201 Emit(HLT | ImmException(code)); 2201 Emit(HLT | ImmException(code));
2202 } 2202 }
2203 2203
2204 2204
2205 void Assembler::brk(int code) { 2205 void Assembler::brk(int code) {
2206 ASSERT(is_uint16(code)); 2206 DCHECK(is_uint16(code));
2207 Emit(BRK | ImmException(code)); 2207 Emit(BRK | ImmException(code));
2208 } 2208 }
2209 2209
2210 2210
2211 void Assembler::debug(const char* message, uint32_t code, Instr params) { 2211 void Assembler::debug(const char* message, uint32_t code, Instr params) {
2212 #ifdef USE_SIMULATOR 2212 #ifdef USE_SIMULATOR
2213 // Don't generate simulator-specific code if we are building a snapshot, which 2213 // Don't generate simulator-specific code if we are building a snapshot, which
2214 // might be run on real hardware. 2214 // might be run on real hardware.
2215 if (!serializer_enabled()) { 2215 if (!serializer_enabled()) {
2216 // The arguments to the debug marker need to be contiguous in memory, so 2216 // The arguments to the debug marker need to be contiguous in memory, so
2217 // make sure we don't try to emit pools. 2217 // make sure we don't try to emit pools.
2218 BlockPoolsScope scope(this); 2218 BlockPoolsScope scope(this);
2219 2219
2220 Label start; 2220 Label start;
2221 bind(&start); 2221 bind(&start);
2222 2222
2223 // Refer to instructions-arm64.h for a description of the marker and its 2223 // Refer to instructions-arm64.h for a description of the marker and its
2224 // arguments. 2224 // arguments.
2225 hlt(kImmExceptionIsDebug); 2225 hlt(kImmExceptionIsDebug);
2226 ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset); 2226 DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
2227 dc32(code); 2227 dc32(code);
2228 ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset); 2228 DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
2229 dc32(params); 2229 dc32(params);
2230 ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset); 2230 DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
2231 EmitStringData(message); 2231 EmitStringData(message);
2232 hlt(kImmExceptionIsUnreachable); 2232 hlt(kImmExceptionIsUnreachable);
2233 2233
2234 return; 2234 return;
2235 } 2235 }
2236 // Fall through if the serializer is enabled. 2236 // Fall through if the serializer is enabled.
2237 #endif 2237 #endif
2238 2238
2239 if (params & BREAK) { 2239 if (params & BREAK) {
2240 hlt(kImmExceptionIsDebug); 2240 hlt(kImmExceptionIsDebug);
2241 } 2241 }
2242 } 2242 }
2243 2243
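Under the simulator, the sequence emitted above lays the marker's arguments out contiguously after the hlt, at the offsets named by the kDebug*Offset constants; roughly (a layout sketch):

  // hlt  #kImmExceptionIsDebug        ; simulator traps here
  // .word code                        ; at kDebugCodeOffset
  // .word params                      ; at kDebugParamsOffset
  // .asciz "message"                  ; from kDebugMessageOffset onwards
  // hlt  #kImmExceptionIsUnreachable  ; never falls through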
2244 2244
2245 void Assembler::Logical(const Register& rd, 2245 void Assembler::Logical(const Register& rd,
2246 const Register& rn, 2246 const Register& rn,
2247 const Operand& operand, 2247 const Operand& operand,
2248 LogicalOp op) { 2248 LogicalOp op) {
2249 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 2249 DCHECK(rd.SizeInBits() == rn.SizeInBits());
2250 ASSERT(!operand.NeedsRelocation(this)); 2250 DCHECK(!operand.NeedsRelocation(this));
2251 if (operand.IsImmediate()) { 2251 if (operand.IsImmediate()) {
2252 int64_t immediate = operand.ImmediateValue(); 2252 int64_t immediate = operand.ImmediateValue();
2253 unsigned reg_size = rd.SizeInBits(); 2253 unsigned reg_size = rd.SizeInBits();
2254 2254
2255 ASSERT(immediate != 0); 2255 DCHECK(immediate != 0);
2256 ASSERT(immediate != -1); 2256 DCHECK(immediate != -1);
2257 ASSERT(rd.Is64Bits() || is_uint32(immediate)); 2257 DCHECK(rd.Is64Bits() || is_uint32(immediate));
2258 2258
2259 // If the operation is NOT, invert the operation and immediate. 2259 // If the operation is NOT, invert the operation and immediate.
2260 if ((op & NOT) == NOT) { 2260 if ((op & NOT) == NOT) {
2261 op = static_cast<LogicalOp>(op & ~NOT); 2261 op = static_cast<LogicalOp>(op & ~NOT);
2262 immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask); 2262 immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
2263 } 2263 }
2264 2264
2265 unsigned n, imm_s, imm_r; 2265 unsigned n, imm_s, imm_r;
2266 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) { 2266 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
2267 // Immediate can be encoded in the instruction. 2267 // Immediate can be encoded in the instruction.
2268 LogicalImmediate(rd, rn, n, imm_s, imm_r, op); 2268 LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
2269 } else { 2269 } else {
2270 // This case is handled in the macro assembler. 2270 // This case is handled in the macro assembler.
2271 UNREACHABLE(); 2271 UNREACHABLE();
2272 } 2272 }
2273 } else { 2273 } else {
2274 ASSERT(operand.IsShiftedRegister()); 2274 DCHECK(operand.IsShiftedRegister());
2275 ASSERT(operand.reg().SizeInBits() == rd.SizeInBits()); 2275 DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
2276 Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed); 2276 Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
2277 DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op); 2277 DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
2278 } 2278 }
2279 } 2279 }
2280 2280
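The NOT folding above rewrites bic/orn/eon immediates as and/orr/eor with the inverted immediate; a host-side model of the identity (a sketch):

  #include <cstdint>

  inline uint64_t ModelAnd(uint64_t rn, uint64_t imm) { return rn & imm; }
  inline uint64_t ModelBic(uint64_t rn, uint64_t imm) { return rn & ~imm; }
  // ModelBic(rn, imm) == ModelAnd(rn, ~imm) for all rn and imm.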
2281 2281
2282 void Assembler::LogicalImmediate(const Register& rd, 2282 void Assembler::LogicalImmediate(const Register& rd,
2283 const Register& rn, 2283 const Register& rn,
2284 unsigned n, 2284 unsigned n,
2285 unsigned imm_s, 2285 unsigned imm_s,
2286 unsigned imm_r, 2286 unsigned imm_r,
2287 LogicalOp op) { 2287 LogicalOp op) {
2288 unsigned reg_size = rd.SizeInBits(); 2288 unsigned reg_size = rd.SizeInBits();
2289 Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd); 2289 Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
2290 Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) | 2290 Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
2291 ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg | 2291 ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
2292 Rn(rn)); 2292 Rn(rn));
2293 } 2293 }
2294 2294
2295 2295
2296 void Assembler::ConditionalCompare(const Register& rn, 2296 void Assembler::ConditionalCompare(const Register& rn,
2297 const Operand& operand, 2297 const Operand& operand,
2298 StatusFlags nzcv, 2298 StatusFlags nzcv,
2299 Condition cond, 2299 Condition cond,
2300 ConditionalCompareOp op) { 2300 ConditionalCompareOp op) {
2301 Instr ccmpop; 2301 Instr ccmpop;
2302 ASSERT(!operand.NeedsRelocation(this)); 2302 DCHECK(!operand.NeedsRelocation(this));
2303 if (operand.IsImmediate()) { 2303 if (operand.IsImmediate()) {
2304 int64_t immediate = operand.ImmediateValue(); 2304 int64_t immediate = operand.ImmediateValue();
2305 ASSERT(IsImmConditionalCompare(immediate)); 2305 DCHECK(IsImmConditionalCompare(immediate));
2306 ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate); 2306 ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
2307 } else { 2307 } else {
2308 ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0)); 2308 DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
2309 ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg()); 2309 ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
2310 } 2310 }
2311 Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv)); 2311 Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
2312 } 2312 }
2313 2313
2314 2314
2315 void Assembler::DataProcessing1Source(const Register& rd, 2315 void Assembler::DataProcessing1Source(const Register& rd,
2316 const Register& rn, 2316 const Register& rn,
2317 DataProcessing1SourceOp op) { 2317 DataProcessing1SourceOp op) {
2318 ASSERT(rd.SizeInBits() == rn.SizeInBits()); 2318 DCHECK(rd.SizeInBits() == rn.SizeInBits());
2319 Emit(SF(rn) | op | Rn(rn) | Rd(rd)); 2319 Emit(SF(rn) | op | Rn(rn) | Rd(rd));
2320 } 2320 }
2321 2321
2322 2322
2323 void Assembler::FPDataProcessing1Source(const FPRegister& fd, 2323 void Assembler::FPDataProcessing1Source(const FPRegister& fd,
2324 const FPRegister& fn, 2324 const FPRegister& fn,
2325 FPDataProcessing1SourceOp op) { 2325 FPDataProcessing1SourceOp op) {
2326 Emit(FPType(fn) | op | Rn(fn) | Rd(fd)); 2326 Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
2327 } 2327 }
2328 2328
2329 2329
2330 void Assembler::FPDataProcessing2Source(const FPRegister& fd, 2330 void Assembler::FPDataProcessing2Source(const FPRegister& fd,
2331 const FPRegister& fn, 2331 const FPRegister& fn,
2332 const FPRegister& fm, 2332 const FPRegister& fm,
2333 FPDataProcessing2SourceOp op) { 2333 FPDataProcessing2SourceOp op) {
2334 ASSERT(fd.SizeInBits() == fn.SizeInBits()); 2334 DCHECK(fd.SizeInBits() == fn.SizeInBits());
2335 ASSERT(fd.SizeInBits() == fm.SizeInBits()); 2335 DCHECK(fd.SizeInBits() == fm.SizeInBits());
2336 Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd)); 2336 Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
2337 } 2337 }
2338 2338
2339 2339
2340 void Assembler::FPDataProcessing3Source(const FPRegister& fd, 2340 void Assembler::FPDataProcessing3Source(const FPRegister& fd,
2341 const FPRegister& fn, 2341 const FPRegister& fn,
2342 const FPRegister& fm, 2342 const FPRegister& fm,
2343 const FPRegister& fa, 2343 const FPRegister& fa,
2344 FPDataProcessing3SourceOp op) { 2344 FPDataProcessing3SourceOp op) {
2345 ASSERT(AreSameSizeAndType(fd, fn, fm, fa)); 2345 DCHECK(AreSameSizeAndType(fd, fn, fm, fa));
2346 Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa)); 2346 Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
2347 } 2347 }
2348 2348
2349 2349
2350 void Assembler::EmitShift(const Register& rd, 2350 void Assembler::EmitShift(const Register& rd,
2351 const Register& rn, 2351 const Register& rn,
2352 Shift shift, 2352 Shift shift,
2353 unsigned shift_amount) { 2353 unsigned shift_amount) {
2354 switch (shift) { 2354 switch (shift) {
2355 case LSL: 2355 case LSL:
(...skipping 11 matching lines...) Expand all
2367 default: 2367 default:
2368 UNREACHABLE(); 2368 UNREACHABLE();
2369 } 2369 }
2370 } 2370 }
2371 2371
2372 2372
2373 void Assembler::EmitExtendShift(const Register& rd, 2373 void Assembler::EmitExtendShift(const Register& rd,
2374 const Register& rn, 2374 const Register& rn,
2375 Extend extend, 2375 Extend extend,
2376 unsigned left_shift) { 2376 unsigned left_shift) {
2377 ASSERT(rd.SizeInBits() >= rn.SizeInBits()); 2377 DCHECK(rd.SizeInBits() >= rn.SizeInBits());
2378 unsigned reg_size = rd.SizeInBits(); 2378 unsigned reg_size = rd.SizeInBits();
2379 // Use the correct size of register. 2379 // Use the correct size of register.
2380 Register rn_ = Register::Create(rn.code(), rd.SizeInBits()); 2380 Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
2381 // Bits extracted are high_bit:0. 2381 // Bits extracted are high_bit:0.
2382 unsigned high_bit = (8 << (extend & 0x3)) - 1; 2382 unsigned high_bit = (8 << (extend & 0x3)) - 1;
2383 // Number of bits left in the result that are not introduced by the shift. 2383 // Number of bits left in the result that are not introduced by the shift.
2384 unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1); 2384 unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
2385 2385
2386 if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) { 2386 if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
2387 switch (extend) { 2387 switch (extend) {
2388 case UXTB: 2388 case UXTB:
2389 case UXTH: 2389 case UXTH:
2390 case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break; 2390 case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
2391 case SXTB: 2391 case SXTB:
2392 case SXTH: 2392 case SXTH:
2393 case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break; 2393 case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
2394 case UXTX: 2394 case UXTX:
2395 case SXTX: { 2395 case SXTX: {
2396 ASSERT(rn.SizeInBits() == kXRegSizeInBits); 2396 DCHECK(rn.SizeInBits() == kXRegSizeInBits);
2397 // Nothing to extend. Just shift. 2397 // Nothing to extend. Just shift.
2398 lsl(rd, rn_, left_shift); 2398 lsl(rd, rn_, left_shift);
2399 break; 2399 break;
2400 } 2400 }
2401 default: UNREACHABLE(); 2401 default: UNREACHABLE();
2402 } 2402 }
2403 } else { 2403 } else {
2404 // No need to extend as the extended bits would be shifted away. 2404 // No need to extend as the extended bits would be shifted away.
2405 lsl(rd, rn_, left_shift); 2405 lsl(rd, rn_, left_shift);
2406 } 2406 }
2407 } 2407 }
2408 2408
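A worked instance of the arithmetic above, for a 32-bit UXTB with left_shift 4: non_shift_bits = (32 - 4) & 31 = 28 and high_bit = 7, so 28 > 7 selects the bitfield path (a sketch):

  // (w1, UXTB, 4) materialized into w0:
  //   ubfm w0, w1, #28, #7    // i.e. ubfiz w0, w1, #4, #8: extract the byte, shift left 4
  // With left_shift 28 instead, non_shift_bits = 4 <= high_bit, so the extended
  // bits would be shifted out anyway and a plain lsl(w0, w1, 28) suffices.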
2409 2409
2410 void Assembler::DataProcShiftedRegister(const Register& rd, 2410 void Assembler::DataProcShiftedRegister(const Register& rd,
2411 const Register& rn, 2411 const Register& rn,
2412 const Operand& operand, 2412 const Operand& operand,
2413 FlagsUpdate S, 2413 FlagsUpdate S,
2414 Instr op) { 2414 Instr op) {
2415 ASSERT(operand.IsShiftedRegister()); 2415 DCHECK(operand.IsShiftedRegister());
2416 ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount()))); 2416 DCHECK(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
2417 ASSERT(!operand.NeedsRelocation(this)); 2417 DCHECK(!operand.NeedsRelocation(this));
2418 Emit(SF(rd) | op | Flags(S) | 2418 Emit(SF(rd) | op | Flags(S) |
2419 ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) | 2419 ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
2420 Rm(operand.reg()) | Rn(rn) | Rd(rd)); 2420 Rm(operand.reg()) | Rn(rn) | Rd(rd));
2421 } 2421 }
2422 2422
2423 2423
2424 void Assembler::DataProcExtendedRegister(const Register& rd, 2424 void Assembler::DataProcExtendedRegister(const Register& rd,
2425 const Register& rn, 2425 const Register& rn,
2426 const Operand& operand, 2426 const Operand& operand,
2427 FlagsUpdate S, 2427 FlagsUpdate S,
2428 Instr op) { 2428 Instr op) {
2429 ASSERT(!operand.NeedsRelocation(this)); 2429 DCHECK(!operand.NeedsRelocation(this));
2430 Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd); 2430 Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
2431 Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | 2431 Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
2432 ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) | 2432 ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
2433 dest_reg | RnSP(rn)); 2433 dest_reg | RnSP(rn));
2434 } 2434 }
2435 2435
2436 2436
2437 bool Assembler::IsImmAddSub(int64_t immediate) { 2437 bool Assembler::IsImmAddSub(int64_t immediate) {
2438 return is_uint12(immediate) || 2438 return is_uint12(immediate) ||
2439 (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0)); 2439 (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
(...skipping 23 matching lines...)
2463 Shift shift = addr.shift(); 2463 Shift shift = addr.shift();
2464 unsigned shift_amount = addr.shift_amount(); 2464 unsigned shift_amount = addr.shift_amount();
2465 2465
2466 // LSL is encoded in the option field as UXTX. 2466 // LSL is encoded in the option field as UXTX.
2467 if (shift == LSL) { 2467 if (shift == LSL) {
2468 ext = UXTX; 2468 ext = UXTX;
2469 } 2469 }
2470 2470
2471 // Shifts are encoded in one bit, indicating a left shift by the memory 2471 // Shifts are encoded in one bit, indicating a left shift by the memory
2472 // access size. 2472 // access size.
2473 ASSERT((shift_amount == 0) || 2473 DCHECK((shift_amount == 0) ||
2474 (shift_amount == static_cast<unsigned>(CalcLSDataSize(op)))); 2474 (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
2475 Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) | 2475 Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
2476 ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0)); 2476 ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
2477 } else { 2477 } else {
2478 // Pre-index and post-index modes. 2478 // Pre-index and post-index modes.
2479 ASSERT(!rt.Is(addr.base())); 2479 DCHECK(!rt.Is(addr.base()));
2480 if (IsImmLSUnscaled(offset)) { 2480 if (IsImmLSUnscaled(offset)) {
2481 if (addr.IsPreIndex()) { 2481 if (addr.IsPreIndex()) {
2482 Emit(LoadStorePreIndexFixed | memop | ImmLS(offset)); 2482 Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
2483 } else { 2483 } else {
2484 ASSERT(addr.IsPostIndex()); 2484 DCHECK(addr.IsPostIndex());
2485 Emit(LoadStorePostIndexFixed | memop | ImmLS(offset)); 2485 Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
2486 } 2486 }
2487 } else { 2487 } else {
2488 // This case is handled in the macro assembler. 2488 // This case is handled in the macro assembler.
2489 UNREACHABLE(); 2489 UNREACHABLE();
2490 } 2490 }
2491 } 2491 }
2492 } 2492 }
2493 2493
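The register-offset branch above accepts either an LSL by exactly the access size (encoded in a single bit) or an extend of a 32-bit index; a usage sketch, assuming the standard MemOperand constructors:

  // __ ldr(x0, MemOperand(x1, x2, LSL, 3));   // 8-byte access: LSL #3 (or #0)
  // __ ldr(w0, MemOperand(x1, w2, UXTW));     // 32-bit index, zero-extended
  // __ str(x0, MemOperand(x1, 8, PreIndex));  // pre-index path: base written back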
2494 2494
(...skipping 13 matching lines...)
2508 // If it can be encoded, the function returns true, and values pointed to by n, 2508 // If it can be encoded, the function returns true, and values pointed to by n,
2509 // imm_s and imm_r are updated with immediates encoded in the format required 2509 // imm_s and imm_r are updated with immediates encoded in the format required
2510 // by the corresponding fields in the logical instruction. 2510 // by the corresponding fields in the logical instruction.
2511 // If it cannot be encoded, the function returns false, and the values pointed 2511 // If it cannot be encoded, the function returns false, and the values pointed
2512 // to by n, imm_s and imm_r are undefined. 2512 // to by n, imm_s and imm_r are undefined.
2513 bool Assembler::IsImmLogical(uint64_t value, 2513 bool Assembler::IsImmLogical(uint64_t value,
2514 unsigned width, 2514 unsigned width,
2515 unsigned* n, 2515 unsigned* n,
2516 unsigned* imm_s, 2516 unsigned* imm_s,
2517 unsigned* imm_r) { 2517 unsigned* imm_r) {
2518 ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL)); 2518 DCHECK((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
2519 ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits)); 2519 DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
2520 2520
2521 bool negate = false; 2521 bool negate = false;
2522 2522
2523 // Logical immediates are encoded using parameters n, imm_s and imm_r using 2523 // Logical immediates are encoded using parameters n, imm_s and imm_r using
2524 // the following table: 2524 // the following table:
2525 // 2525 //
2526 // N imms immr size S R 2526 // N imms immr size S R
2527 // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr) 2527 // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
2528 // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr) 2528 // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
2529 // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr) 2529 // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
(...skipping 120 matching lines...)
2650 static const uint64_t multipliers[] = { 2650 static const uint64_t multipliers[] = {
2651 0x0000000000000001UL, 2651 0x0000000000000001UL,
2652 0x0000000100000001UL, 2652 0x0000000100000001UL,
2653 0x0001000100010001UL, 2653 0x0001000100010001UL,
2654 0x0101010101010101UL, 2654 0x0101010101010101UL,
2655 0x1111111111111111UL, 2655 0x1111111111111111UL,
2656 0x5555555555555555UL, 2656 0x5555555555555555UL,
2657 }; 2657 };
2658 int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57; 2658 int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
2659 // Ensure that the index to the multipliers array is within bounds. 2659 // Ensure that the index to the multipliers array is within bounds.
2660 ASSERT((multiplier_idx >= 0) && 2660 DCHECK((multiplier_idx >= 0) &&
2661 (static_cast<size_t>(multiplier_idx) < 2661 (static_cast<size_t>(multiplier_idx) <
2662 (sizeof(multipliers) / sizeof(multipliers[0])))); 2662 (sizeof(multipliers) / sizeof(multipliers[0]))));
2663 uint64_t multiplier = multipliers[multiplier_idx]; 2663 uint64_t multiplier = multipliers[multiplier_idx];
2664 uint64_t candidate = (b - a) * multiplier; 2664 uint64_t candidate = (b - a) * multiplier;
2665 2665
2666 if (value != candidate) { 2666 if (value != candidate) {
2667 // The candidate pattern doesn't match our input value, so fail. 2667 // The candidate pattern doesn't match our input value, so fail.
2668 return false; 2668 return false;
2669 } 2669 }
2670 2670
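The multiplier table turns one period of the repeating pattern back into the full 64-bit value; a standalone sketch of the trick:

  #include <cstdint>

  uint64_t ReplicatePeriodSketch(uint64_t period_bits, int period) {
    uint64_t comb = 0;  // e.g. period 4 -> 0x1111111111111111
    for (int i = 0; i < 64; i += period) comb |= (uint64_t{1} << i);
    return period_bits * comb;  // ReplicatePeriodSketch(0x5, 4) == 0x5555555555555555
  }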
(...skipping 143 matching lines...)
2814 2814
2815 2815
2816 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) { 2816 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2817 // We do not try to reuse pool constants. 2817 // We do not try to reuse pool constants.
2818 RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL); 2818 RelocInfo rinfo(reinterpret_cast<byte*>(pc_), rmode, data, NULL);
2819 if (((rmode >= RelocInfo::JS_RETURN) && 2819 if (((rmode >= RelocInfo::JS_RETURN) &&
2820 (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) || 2820 (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
2821 (rmode == RelocInfo::CONST_POOL) || 2821 (rmode == RelocInfo::CONST_POOL) ||
2822 (rmode == RelocInfo::VENEER_POOL)) { 2822 (rmode == RelocInfo::VENEER_POOL)) {
2823 // Adjust code for new modes. 2823 // Adjust code for new modes.
2824 ASSERT(RelocInfo::IsDebugBreakSlot(rmode) 2824 DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
2825 || RelocInfo::IsJSReturn(rmode) 2825 || RelocInfo::IsJSReturn(rmode)
2826 || RelocInfo::IsComment(rmode) 2826 || RelocInfo::IsComment(rmode)
2827 || RelocInfo::IsPosition(rmode) 2827 || RelocInfo::IsPosition(rmode)
2828 || RelocInfo::IsConstPool(rmode) 2828 || RelocInfo::IsConstPool(rmode)
2829 || RelocInfo::IsVeneerPool(rmode)); 2829 || RelocInfo::IsVeneerPool(rmode));
2830 // These modes do not need an entry in the constant pool. 2830 // These modes do not need an entry in the constant pool.
2831 } else { 2831 } else {
2832 constpool_.RecordEntry(data, rmode); 2832 constpool_.RecordEntry(data, rmode);
2833 // Make sure the constant pool is not emitted in place of the next 2833 // Make sure the constant pool is not emitted in place of the next
2834 // instruction for which we just recorded relocation info. 2834 // instruction for which we just recorded relocation info.
2835 BlockConstPoolFor(1); 2835 BlockConstPoolFor(1);
2836 } 2836 }
2837 2837
2838 if (!RelocInfo::IsNone(rmode)) { 2838 if (!RelocInfo::IsNone(rmode)) {
2839 // Don't record external references unless the heap will be serialized. 2839 // Don't record external references unless the heap will be serialized.
2840 if (rmode == RelocInfo::EXTERNAL_REFERENCE && 2840 if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
2841 !serializer_enabled() && !emit_debug_code()) { 2841 !serializer_enabled() && !emit_debug_code()) {
2842 return; 2842 return;
2843 } 2843 }
2844 ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here 2844 DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
2845 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) { 2845 if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
2846 RelocInfo reloc_info_with_ast_id( 2846 RelocInfo reloc_info_with_ast_id(
2847 reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL); 2847 reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
2848 ClearRecordedAstId(); 2848 ClearRecordedAstId();
2849 reloc_info_writer.Write(&reloc_info_with_ast_id); 2849 reloc_info_writer.Write(&reloc_info_with_ast_id);
2850 } else { 2850 } else {
2851 reloc_info_writer.Write(&rinfo); 2851 reloc_info_writer.Write(&rinfo);
2852 } 2852 }
2853 } 2853 }
2854 } 2854 }
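
A rough standalone model of the classification RecordRelocInfo performs
above, assuming only that "administrative" modes (positions, comments, pool
markers) carry metadata and need no constant-pool entry, while the remaining
modes embed a value the generated code must load. The enum and helper below
are invented for illustration, not V8's API.

#include <cassert>

// Invented stand-ins for a few RelocInfo::Mode values.
enum class Mode {
  kComment, kPosition, kConstPool, kVeneerPool,
  kEmbeddedObject, kExternalReference
};

bool NeedsConstPoolEntry(Mode m) {
  switch (m) {
    case Mode::kComment:
    case Mode::kPosition:
    case Mode::kConstPool:
    case Mode::kVeneerPool:
      return false;  // metadata only; nothing for the code to load
    default:
      return true;   // a constant the code loads via the pool
  }
}

int main() {
  assert(!NeedsConstPoolEntry(Mode::kConstPool));
  assert(NeedsConstPoolEntry(Mode::kEmbeddedObject));
}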
2855 2855
2856 2856
2857 void Assembler::BlockConstPoolFor(int instructions) { 2857 void Assembler::BlockConstPoolFor(int instructions) {
2858 int pc_limit = pc_offset() + instructions * kInstructionSize; 2858 int pc_limit = pc_offset() + instructions * kInstructionSize;
2859 if (no_const_pool_before_ < pc_limit) { 2859 if (no_const_pool_before_ < pc_limit) {
2860 no_const_pool_before_ = pc_limit; 2860 no_const_pool_before_ = pc_limit;
2861 // Make sure the pool won't be blocked for too long. 2861 // Make sure the pool won't be blocked for too long.
2862 ASSERT(pc_limit < constpool_.MaxPcOffset()); 2862 DCHECK(pc_limit < constpool_.MaxPcOffset());
2863 } 2863 }
2864 2864
2865 if (next_constant_pool_check_ < no_const_pool_before_) { 2865 if (next_constant_pool_check_ < no_const_pool_before_) {
2866 next_constant_pool_check_ = no_const_pool_before_; 2866 next_constant_pool_check_ = no_const_pool_before_;
2867 } 2867 }
2868 } 2868 }
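
A minimal sketch of the bookkeeping above: BlockConstPoolFor pushes the
blocking limit forward and defers the next pool check to at least that
limit, so the pool cannot be emitted inside a protected sequence. All names
here are illustrative stand-ins, not V8's API.

#include <algorithm>
#include <cassert>

constexpr int kInstructionSize = 4;  // AArch64 instructions are 4 bytes

struct PoolBlocker {
  int pc_offset = 0;             // current offset into the code buffer
  int no_const_pool_before = 0;  // pool emission is blocked before this
  int next_check = 0;            // next offset at which to check the pool

  void BlockConstPoolFor(int instructions) {
    int pc_limit = pc_offset + instructions * kInstructionSize;
    no_const_pool_before = std::max(no_const_pool_before, pc_limit);
    // The check can be deferred to the end of the blocked region, since
    // emission cannot happen before it anyway.
    next_check = std::max(next_check, no_const_pool_before);
  }
};

int main() {
  PoolBlocker b;
  b.pc_offset = 100;
  b.BlockConstPoolFor(1);  // protect the next instruction
  assert(b.no_const_pool_before == 104);
  assert(b.next_check == 104);
}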
2869 2869
2870 2870
2871 void Assembler::CheckConstPool(bool force_emit, bool require_jump) { 2871 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
2872 // Some short sequences of instructions mustn't be broken up by constant 2872 // Some short sequences of instructions mustn't be broken up by constant
2873 // pool emission; such sequences are protected by calls to 2873 // pool emission; such sequences are protected by calls to
2874 // BlockConstPoolFor and BlockConstPoolScope. 2874 // BlockConstPoolFor and BlockConstPoolScope.
2875 if (is_const_pool_blocked()) { 2875 if (is_const_pool_blocked()) {
2876 // Something is wrong if emission is forced and blocked at the same time. 2876 // Something is wrong if emission is forced and blocked at the same time.
2877 ASSERT(!force_emit); 2877 DCHECK(!force_emit);
2878 return; 2878 return;
2879 } 2879 }
2880 2880
2881 // There is nothing to do if there are no pending constant pool entries. 2881 // There is nothing to do if there are no pending constant pool entries.
2882 if (constpool_.IsEmpty()) { 2882 if (constpool_.IsEmpty()) {
2883 // Calculate the offset of the next check. 2883 // Calculate the offset of the next check.
2884 SetNextConstPoolCheckIn(kCheckConstPoolInterval); 2884 SetNextConstPoolCheckIn(kCheckConstPoolInterval);
2885 return; 2885 return;
2886 } 2886 }
2887 2887
(...skipping 20 matching lines...)
2908 // Check that the code buffer is large enough before emitting the constant 2908 // Check that the code buffer is large enough before emitting the constant
2909 // pool (this includes the gap to the relocation information). 2909 // pool (this includes the gap to the relocation information).
2910 int needed_space = worst_case_size + kGap + 1 * kInstructionSize; 2910 int needed_space = worst_case_size + kGap + 1 * kInstructionSize;
2911 while (buffer_space() <= needed_space) { 2911 while (buffer_space() <= needed_space) {
2912 GrowBuffer(); 2912 GrowBuffer();
2913 } 2913 }
2914 2914
2915 Label size_check; 2915 Label size_check;
2916 bind(&size_check); 2916 bind(&size_check);
2917 constpool_.Emit(require_jump); 2917 constpool_.Emit(require_jump);
2918 ASSERT(SizeOfCodeGeneratedSince(&size_check) <= 2918 DCHECK(SizeOfCodeGeneratedSince(&size_check) <=
2919 static_cast<unsigned>(worst_case_size)); 2919 static_cast<unsigned>(worst_case_size));
2920 2920
2921 // Since a constant pool was just emitted, move the check offset forward by 2921 // Since a constant pool was just emitted, move the check offset forward by
2922 // the standard interval. 2922 // the standard interval.
2923 SetNextConstPoolCheckIn(kCheckConstPoolInterval); 2923 SetNextConstPoolCheckIn(kCheckConstPoolInterval);
2924 } 2924 }
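
A sketch of the grow-until-it-fits step above, assuming (for illustration
only) a doubling growth policy; kGap and the other constants are stand-ins,
not V8's values.

#include <cassert>

struct Buffer {
  int capacity = 4096;
  int used = 4000;
  int space() const { return capacity - used; }
  void Grow() { capacity *= 2; }  // assumed policy, for illustration
};

int main() {
  Buffer buf;
  const int worst_case_size = 400;   // stand-in for the pool's worst case
  const int kGap = 64;               // stand-in for the reloc-info gap
  const int kInstructionSize = 4;
  int needed_space = worst_case_size + kGap + 1 * kInstructionSize;
  while (buf.space() <= needed_space) buf.Grow();  // mirrors the loop above
  assert(buf.space() > needed_space);
}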
2925 2925
2926 2926
2927 bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) { 2927 bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
2928 // Account for the branch around the veneers and the guard. 2928 // Account for the branch around the veneers and the guard.
(...skipping 44 matching lines...)
2973 #ifdef DEBUG 2973 #ifdef DEBUG
2974 bind(&veneer_size_check); 2974 bind(&veneer_size_check);
2975 #endif 2975 #endif
2976 // Patch the branch to point to the current position, and emit a branch 2976 // Patch the branch to point to the current position, and emit a branch
2977 // to the label. 2977 // to the label.
2978 Instruction* veneer = reinterpret_cast<Instruction*>(pc_); 2978 Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
2979 RemoveBranchFromLabelLinkChain(branch, label, veneer); 2979 RemoveBranchFromLabelLinkChain(branch, label, veneer);
2980 branch->SetImmPCOffsetTarget(veneer); 2980 branch->SetImmPCOffsetTarget(veneer);
2981 b(label); 2981 b(label);
2982 #ifdef DEBUG 2982 #ifdef DEBUG
2983 ASSERT(SizeOfCodeGeneratedSince(&veneer_size_check) <= 2983 DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
2984 static_cast<uint64_t>(kMaxVeneerCodeSize)); 2984 static_cast<uint64_t>(kMaxVeneerCodeSize));
2985 veneer_size_check.Unuse(); 2985 veneer_size_check.Unuse();
2986 #endif 2986 #endif
2987 2987
2988 it_to_delete = it++; 2988 it_to_delete = it++;
2989 unresolved_branches_.erase(it_to_delete); 2989 unresolved_branches_.erase(it_to_delete);
2990 } else { 2990 } else {
2991 ++it; 2991 ++it;
2992 } 2992 }
2993 } 2993 }
(...skipping 12 matching lines...)
3006 bind(&end); 3006 bind(&end);
3007 3007
3008 RecordComment("]"); 3008 RecordComment("]");
3009 } 3009 }
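
A toy model of the emission decision behind ShouldEmitVeneer: a conditional
branch recorded at some pc can reach only about +/-1MB on AArch64, so once
the emitter comes within `margin` bytes of a branch's maximum reachable pc,
a veneer (an unconditional trampoline branch) must be planted. The range
constant and the decision rule here are illustrative assumptions, not the
exact V8 logic.

#include <cassert>
#include <cstdint>

constexpr int64_t kCondBranchRange = INT64_C(1) << 20;  // ~1MB for b.cond

bool ShouldEmitVeneerSketch(int64_t max_reachable_pc, int64_t pc,
                            int64_t margin) {
  return pc + margin >= max_reachable_pc;
}

int main() {
  int64_t branch_pc = 0;
  int64_t max_reachable = branch_pc + kCondBranchRange;
  assert(!ShouldEmitVeneerSketch(max_reachable, 4, 1024));  // still far away
  assert(ShouldEmitVeneerSketch(max_reachable,              // too close
                                kCondBranchRange - 512, 1024));
}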
3010 3010
3011 3011
3012 void Assembler::CheckVeneerPool(bool force_emit, bool require_jump, 3012 void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
3013 int margin) { 3013 int margin) {
3014 // There is nothing to do if there are no pending veneer pool entries. 3014 // There is nothing to do if there are no pending veneer pool entries.
3015 if (unresolved_branches_.empty()) { 3015 if (unresolved_branches_.empty()) {
3016 ASSERT(next_veneer_pool_check_ == kMaxInt); 3016 DCHECK(next_veneer_pool_check_ == kMaxInt);
3017 return; 3017 return;
3018 } 3018 }
3019 3019
3020 ASSERT(pc_offset() < unresolved_branches_first_limit()); 3020 DCHECK(pc_offset() < unresolved_branches_first_limit());
3021 3021
3022 // Some short sequences of instructions mustn't be broken up by veneer 3022 // Some short sequences of instructions mustn't be broken up by veneer
3023 // pool emission; such sequences are protected by calls to 3023 // pool emission; such sequences are protected by calls to
3024 // BlockVeneerPoolFor and BlockVeneerPoolScope. 3024 // BlockVeneerPoolFor and BlockVeneerPoolScope.
3025 if (is_veneer_pool_blocked()) { 3025 if (is_veneer_pool_blocked()) {
3026 ASSERT(!force_emit); 3026 DCHECK(!force_emit);
3027 return; 3027 return;
3028 } 3028 }
3029 3029
3030 if (!require_jump) { 3030 if (!require_jump) {
3031 // Prefer emitting veneers protected by an existing instruction. 3031 // Prefer emitting veneers protected by an existing instruction.
3032 margin *= kVeneerNoProtectionFactor; 3032 margin *= kVeneerNoProtectionFactor;
3033 } 3033 }
3034 if (force_emit || ShouldEmitVeneers(margin)) { 3034 if (force_emit || ShouldEmitVeneers(margin)) {
3035 EmitVeneers(force_emit, require_jump, margin); 3035 EmitVeneers(force_emit, require_jump, margin);
3036 } else { 3036 } else {
(...skipping 32 matching lines...)
3069 3069
3070 void Assembler::RecordConstPool(int size) { 3070 void Assembler::RecordConstPool(int size) {
3071 // We only need this for debugger support, to correctly compute offsets in the 3071 // We only need this for debugger support, to correctly compute offsets in the
3072 // code. 3072 // code.
3073 RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size)); 3073 RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
3074 } 3074 }
3075 3075
3076 3076
3077 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) { 3077 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
3078 // No out-of-line constant pool support. 3078 // No out-of-line constant pool support.
3079 ASSERT(!FLAG_enable_ool_constant_pool); 3079 DCHECK(!FLAG_enable_ool_constant_pool);
3080 return isolate->factory()->empty_constant_pool_array(); 3080 return isolate->factory()->empty_constant_pool_array();
3081 } 3081 }
3082 3082
3083 3083
3084 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) { 3084 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
3085 // No out-of-line constant pool support. 3085 // No out-of-line constant pool support.
3086 ASSERT(!FLAG_enable_ool_constant_pool); 3086 DCHECK(!FLAG_enable_ool_constant_pool);
3087 return; 3087 return;
3088 } 3088 }
3089 3089
3090 3090
3091 void PatchingAssembler::PatchAdrFar(ptrdiff_t target_offset) { 3091 void PatchingAssembler::PatchAdrFar(ptrdiff_t target_offset) {
3092 // The code at the current instruction should be: 3092 // The code at the current instruction should be:
3093 // adr rd, 0 3093 // adr rd, 0
3094 // nop (adr_far) 3094 // nop (adr_far)
3095 // nop (adr_far) 3095 // nop (adr_far)
3096 // movz scratch, 0 3096 // movz scratch, 0
(...skipping 12 matching lines...)
3109 (expected_movz->ShiftMoveWide() == 0)); 3109 (expected_movz->ShiftMoveWide() == 0));
3110 int scratch_code = expected_movz->Rd(); 3110 int scratch_code = expected_movz->Rd();
3111 3111
3112 // Patch to load the correct address. 3112 // Patch to load the correct address.
3113 Register rd = Register::XRegFromCode(rd_code); 3113 Register rd = Register::XRegFromCode(rd_code);
3114 Register scratch = Register::XRegFromCode(scratch_code); 3114 Register scratch = Register::XRegFromCode(scratch_code);
3115 // Addresses are only 48 bits. 3115 // Addresses are only 48 bits.
3116 adr(rd, target_offset & 0xFFFF); 3116 adr(rd, target_offset & 0xFFFF);
3117 movz(scratch, (target_offset >> 16) & 0xFFFF, 16); 3117 movz(scratch, (target_offset >> 16) & 0xFFFF, 16);
3118 movk(scratch, (target_offset >> 32) & 0xFFFF, 32); 3118 movk(scratch, (target_offset >> 32) & 0xFFFF, 32);
3119 ASSERT((target_offset >> 48) == 0); 3119 DCHECK((target_offset >> 48) == 0);
3120 add(rd, rd, scratch); 3120 add(rd, rd, scratch);
3121 } 3121 }
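
The patched sequence above materializes a 48-bit pc-relative offset: adr
supplies the low 16 bits (plus the pc itself), movz/movk build bits 16-47 in
the scratch register, and the final add recombines them. A standalone check
of that split (the value is an arbitrary example):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t target_offset = INT64_C(0x0000123456789ABC);
  assert((target_offset >> 48) == 0);  // addresses are only 48 bits

  int64_t adr_part = target_offset & 0xFFFF;                  // adr rd, lo16
  int64_t scratch = ((target_offset >> 16) & 0xFFFF) << 16;   // movz ..., 16
  scratch |= ((target_offset >> 32) & 0xFFFF) << 32;          // movk ..., 32

  assert(adr_part + scratch == target_offset);  // add rd, rd, scratch
}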
3122 3122
3123 3123
3124 } } // namespace v8::internal 3124 } } // namespace v8::internal
3125 3125
3126 #endif // V8_TARGET_ARCH_ARM64 3126 #endif // V8_TARGET_ARCH_ARM64