Chromium Code Reviews

Side by Side Diff: src/a64/assembler-a64-inl.h

Issue 181453002: Reset trunk to 3.24.35.4 (Closed) Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 6 years, 9 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #ifndef V8_A64_ASSEMBLER_A64_INL_H_
29 #define V8_A64_ASSEMBLER_A64_INL_H_
30
31 #include "a64/assembler-a64.h"
32 #include "cpu.h"
33 #include "debug.h"
34
35
36 namespace v8 {
37 namespace internal {
38
39
40 void RelocInfo::apply(intptr_t delta) {
41 UNIMPLEMENTED();
42 }
43
44
45 void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
46 ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
47 Assembler::set_target_address_at(pc_, target);
48 if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
49 Object* target_code = Code::GetCodeFromTargetAddress(target);
50 host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
51 host(), this, HeapObject::cast(target_code));
52 }
53 }
54
55
56 inline unsigned CPURegister::code() const {
57 ASSERT(IsValid());
58 return reg_code;
59 }
60
61
62 inline CPURegister::RegisterType CPURegister::type() const {
63 ASSERT(IsValidOrNone());
64 return reg_type;
65 }
66
67
68 inline RegList CPURegister::Bit() const {
69 ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
70 return IsValid() ? 1UL << reg_code : 0;
71 }
72
73
74 inline unsigned CPURegister::SizeInBits() const {
75 ASSERT(IsValid());
76 return reg_size;
77 }
78
79
80 inline int CPURegister::SizeInBytes() const {
81 ASSERT(IsValid());
82 ASSERT(SizeInBits() % 8 == 0);
83 return reg_size / 8;
84 }
85
86
87 inline bool CPURegister::Is32Bits() const {
88 ASSERT(IsValid());
89 return reg_size == 32;
90 }
91
92
93 inline bool CPURegister::Is64Bits() const {
94 ASSERT(IsValid());
95 return reg_size == 64;
96 }
97
98
99 inline bool CPURegister::IsValid() const {
100 if (IsValidRegister() || IsValidFPRegister()) {
101 ASSERT(!IsNone());
102 return true;
103 } else {
104 ASSERT(IsNone());
105 return false;
106 }
107 }
108
109
110 inline bool CPURegister::IsValidRegister() const {
111 return IsRegister() &&
112 ((reg_size == kWRegSize) || (reg_size == kXRegSize)) &&
113 ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
114 }
115
116
117 inline bool CPURegister::IsValidFPRegister() const {
118 return IsFPRegister() &&
119 ((reg_size == kSRegSize) || (reg_size == kDRegSize)) &&
120 (reg_code < kNumberOfFPRegisters);
121 }
122
123
124 inline bool CPURegister::IsNone() const {
125 // kNoRegister types should always have size 0 and code 0.
126 ASSERT((reg_type != kNoRegister) || (reg_code == 0));
127 ASSERT((reg_type != kNoRegister) || (reg_size == 0));
128
129 return reg_type == kNoRegister;
130 }
131
132
133 inline bool CPURegister::Is(const CPURegister& other) const {
134 ASSERT(IsValidOrNone() && other.IsValidOrNone());
135 return (reg_code == other.reg_code) && (reg_size == other.reg_size) &&
136 (reg_type == other.reg_type);
137 }
138
139
140 inline bool CPURegister::IsRegister() const {
141 return reg_type == kRegister;
142 }
143
144
145 inline bool CPURegister::IsFPRegister() const {
146 return reg_type == kFPRegister;
147 }
148
149
150 inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
151 return (reg_size == other.reg_size) && (reg_type == other.reg_type);
152 }
153
154
155 inline bool CPURegister::IsValidOrNone() const {
156 return IsValid() || IsNone();
157 }
158
159
160 inline bool CPURegister::IsZero() const {
161 ASSERT(IsValid());
162 return IsRegister() && (reg_code == kZeroRegCode);
163 }
164
165
166 inline bool CPURegister::IsSP() const {
167 ASSERT(IsValid());
168 return IsRegister() && (reg_code == kSPRegInternalCode);
169 }
170
171
172 inline void CPURegList::Combine(const CPURegList& other) {
173 ASSERT(IsValid());
174 ASSERT(other.type() == type_);
175 ASSERT(other.RegisterSizeInBits() == size_);
176 list_ |= other.list();
177 }
178
179
180 inline void CPURegList::Remove(const CPURegList& other) {
181 ASSERT(IsValid());
182 ASSERT(other.type() == type_);
183 ASSERT(other.RegisterSizeInBits() == size_);
184 list_ &= ~other.list();
185 }
186
187
188 inline void CPURegList::Combine(const CPURegister& other) {
189 ASSERT(other.type() == type_);
190 ASSERT(other.SizeInBits() == size_);
191 Combine(other.code());
192 }
193
194
195 inline void CPURegList::Remove(const CPURegister& other) {
196 ASSERT(other.type() == type_);
197 ASSERT(other.SizeInBits() == size_);
198 Remove(other.code());
199 }
200
201
202 inline void CPURegList::Combine(int code) {
203 ASSERT(IsValid());
204 ASSERT(CPURegister::Create(code, size_, type_).IsValid());
205 list_ |= (1UL << code);
206 }
207
208
209 inline void CPURegList::Remove(int code) {
210 ASSERT(IsValid());
211 ASSERT(CPURegister::Create(code, size_, type_).IsValid());
212 list_ &= ~(1UL << code);
213 }
214
215
216 inline Register Register::XRegFromCode(unsigned code) {
217 // This function returns the zero register when code = 31. The stack pointer
218 // cannot be returned.
219 ASSERT(code < kNumberOfRegisters);
220 return Register::Create(code, kXRegSize);
221 }
222
223
224 inline Register Register::WRegFromCode(unsigned code) {
225 ASSERT(code < kNumberOfRegisters);
226 return Register::Create(code, kWRegSize);
227 }
228
229
230 inline FPRegister FPRegister::SRegFromCode(unsigned code) {
231 ASSERT(code < kNumberOfFPRegisters);
232 return FPRegister::Create(code, kSRegSize);
233 }
234
235
236 inline FPRegister FPRegister::DRegFromCode(unsigned code) {
237 ASSERT(code < kNumberOfFPRegisters);
238 return FPRegister::Create(code, kDRegSize);
239 }
240
241
242 inline Register CPURegister::W() const {
243 ASSERT(IsValidRegister());
244 return Register::WRegFromCode(reg_code);
245 }
246
247
248 inline Register CPURegister::X() const {
249 ASSERT(IsValidRegister());
250 return Register::XRegFromCode(reg_code);
251 }
252
253
254 inline FPRegister CPURegister::S() const {
255 ASSERT(IsValidFPRegister());
256 return FPRegister::SRegFromCode(reg_code);
257 }
258
259
260 inline FPRegister CPURegister::D() const {
261 ASSERT(IsValidFPRegister());
262 return FPRegister::DRegFromCode(reg_code);
263 }
264
265
266 // Operand.
267 template<typename T>
268 Operand::Operand(Handle<T> value) : reg_(NoReg) {
269 initialize_handle(value);
270 }
271
272
273 // The default initializer is for integer types.
274 template<typename int_t>
275 struct OperandInitializer {
276 static const bool kIsIntType = true;
277 static inline RelocInfo::Mode rmode_for(int_t) {
278 return sizeof(int_t) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
279 }
280 static inline int64_t immediate_for(int_t t) {
281 STATIC_ASSERT(sizeof(int_t) <= 8);
282 return t;
283 }
284 };
285
286
287 template<>
288 struct OperandInitializer<Smi*> {
289 static const bool kIsIntType = false;
290 static inline RelocInfo::Mode rmode_for(Smi* t) {
291 return RelocInfo::NONE64;
292 }
293 static inline int64_t immediate_for(Smi* t) {
294 return reinterpret_cast<int64_t>(t);
295 }
296 };
297
298
299 template<>
300 struct OperandInitializer<ExternalReference> {
301 static const bool kIsIntType = false;
302 static inline RelocInfo::Mode rmode_for(ExternalReference t) {
303 return RelocInfo::EXTERNAL_REFERENCE;
304 }
305 static inline int64_t immediate_for(ExternalReference t) {
306 return reinterpret_cast<int64_t>(t.address());
307 }
308 };
309
310
311 template<typename T>
312 Operand::Operand(T t)
313 : immediate_(OperandInitializer<T>::immediate_for(t)),
314 reg_(NoReg),
315 rmode_(OperandInitializer<T>::rmode_for(t)) {}
316
317
318 template<typename T>
319 Operand::Operand(T t, RelocInfo::Mode rmode)
320 : immediate_(OperandInitializer<T>::immediate_for(t)),
321 reg_(NoReg),
322 rmode_(rmode) {
323 STATIC_ASSERT(OperandInitializer<T>::kIsIntType);
324 }
325
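A rough sketch of how these constructors resolve through OperandInitializer (hypothetical call sites, not taken from this file; some_external_reference stands in for any ExternalReference value):

  Operand a(42);                          // int: rmode_for picks RelocInfo::NONE32
  Operand b(static_cast<int64_t>(42));    // 8-byte integer type: RelocInfo::NONE64
  Operand c(Smi::FromInt(7));             // Smi* specialization: NONE64, pointer bits used as the immediate
  Operand d(some_external_reference);     // ExternalReference specialization: EXTERNAL_REFERENCE rmode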
326
327 Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
328 : reg_(reg),
329 shift_(shift),
330 extend_(NO_EXTEND),
331 shift_amount_(shift_amount),
332 rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
333 ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
334 ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
335 ASSERT(!reg.IsSP());
336 }
337
338
339 Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
340 : reg_(reg),
341 shift_(NO_SHIFT),
342 extend_(extend),
343 shift_amount_(shift_amount),
344 rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
345 ASSERT(reg.IsValid());
346 ASSERT(shift_amount <= 4);
347 ASSERT(!reg.IsSP());
348
349 // Extend modes SXTX and UXTX require a 64-bit register.
350 ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
351 }
352
353
354 bool Operand::IsImmediate() const {
355 return reg_.Is(NoReg);
356 }
357
358
359 bool Operand::IsShiftedRegister() const {
360 return reg_.IsValid() && (shift_ != NO_SHIFT);
361 }
362
363
364 bool Operand::IsExtendedRegister() const {
365 return reg_.IsValid() && (extend_ != NO_EXTEND);
366 }
367
368
369 bool Operand::IsZero() const {
370 if (IsImmediate()) {
371 return immediate() == 0;
372 } else {
373 return reg().IsZero();
374 }
375 }
376
377
378 Operand Operand::ToExtendedRegister() const {
379 ASSERT(IsShiftedRegister());
380 ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
381 return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
382 }
383
384
385 int64_t Operand::immediate() const {
386 ASSERT(IsImmediate());
387 return immediate_;
388 }
389
390
391 Register Operand::reg() const {
392 ASSERT(IsShiftedRegister() || IsExtendedRegister());
393 return reg_;
394 }
395
396
397 Shift Operand::shift() const {
398 ASSERT(IsShiftedRegister());
399 return shift_;
400 }
401
402
403 Extend Operand::extend() const {
404 ASSERT(IsExtendedRegister());
405 return extend_;
406 }
407
408
409 unsigned Operand::shift_amount() const {
410 ASSERT(IsShiftedRegister() || IsExtendedRegister());
411 return shift_amount_;
412 }
413
414
415 Operand Operand::UntagSmi(Register smi) {
416 ASSERT(smi.Is64Bits());
417 return Operand(smi, ASR, kSmiShift);
418 }
419
420
421 Operand Operand::UntagSmiAndScale(Register smi, int scale) {
422 ASSERT(smi.Is64Bits());
423 ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
424 if (scale > kSmiShift) {
425 return Operand(smi, LSL, scale - kSmiShift);
426 } else if (scale < kSmiShift) {
427 return Operand(smi, ASR, kSmiShift - scale);
428 }
429 return Operand(smi);
430 }
431
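Worked values for the two helpers above, assuming the 64-bit Smi layout this port uses (kSmiValueSize == 32, so kSmiShift == 32); illustrative only:

  UntagSmi(x0)              // Operand(x0, ASR, 32): shift the value out of the tag
  UntagSmiAndScale(x0, 3)   // Operand(x0, ASR, 29): untag and multiply by 8 in a single shift
  UntagSmiAndScale(x0, 32)  // Operand(x0): the scale exactly cancels the tag shift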
432
433 MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
434 : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
435 shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
436 ASSERT(base.Is64Bits() && !base.IsZero());
437 }
438
439
440 MemOperand::MemOperand(Register base,
441 Register regoffset,
442 Extend extend,
443 unsigned shift_amount)
444 : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
445 shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
446 ASSERT(base.Is64Bits() && !base.IsZero());
447 ASSERT(!regoffset.IsSP());
448 ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
449
450 // SXTX extend mode requires a 64-bit offset register.
451 ASSERT(regoffset.Is64Bits() || (extend != SXTX));
452 }
453
454
455 MemOperand::MemOperand(Register base,
456 Register regoffset,
457 Shift shift,
458 unsigned shift_amount)
459 : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
460 shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
461 ASSERT(base.Is64Bits() && !base.IsZero());
462 ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
463 ASSERT(shift == LSL);
464 }
465
466
467 MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
468 : base_(base), addrmode_(addrmode) {
469 ASSERT(base.Is64Bits() && !base.IsZero());
470
471 if (offset.IsImmediate()) {
472 offset_ = offset.immediate();
473
474 regoffset_ = NoReg;
475 } else if (offset.IsShiftedRegister()) {
476 ASSERT(addrmode == Offset);
477
478 regoffset_ = offset.reg();
479 shift_ = offset.shift();
480 shift_amount_ = offset.shift_amount();
481
482 extend_ = NO_EXTEND;
483 offset_ = 0;
484
485 // These assertions match those in the shifted-register constructor.
486 ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
487 ASSERT(shift_ == LSL);
488 } else {
489 ASSERT(offset.IsExtendedRegister());
490 ASSERT(addrmode == Offset);
491
492 regoffset_ = offset.reg();
493 extend_ = offset.extend();
494 shift_amount_ = offset.shift_amount();
495
496 shift_ = NO_SHIFT;
497 offset_ = 0;
498
499 // These assertions match those in the extended-register constructor.
500 ASSERT(!regoffset_.IsSP());
501 ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
502 ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
503 }
504 }
505
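Hypothetical uses of the three offset forms this constructor accepts (not from this file):

  MemOperand(x0, Operand(16), Offset);           // immediate offset
  MemOperand(x0, Operand(x1, LSL, 3), Offset);   // shifted register offset (LSL only)
  MemOperand(x0, Operand(w1, SXTW, 2), Offset);  // extended register offset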
506 bool MemOperand::IsImmediateOffset() const {
507 return (addrmode_ == Offset) && regoffset_.Is(NoReg);
508 }
509
510
511 bool MemOperand::IsRegisterOffset() const {
512 return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
513 }
514
515
516 bool MemOperand::IsPreIndex() const {
517 return addrmode_ == PreIndex;
518 }
519
520
521 bool MemOperand::IsPostIndex() const {
522 return addrmode_ == PostIndex;
523 }
524
525 Operand MemOperand::OffsetAsOperand() const {
526 if (IsImmediateOffset()) {
527 return offset();
528 } else {
529 ASSERT(IsRegisterOffset());
530 if (extend() == NO_EXTEND) {
531 return Operand(regoffset(), shift(), shift_amount());
532 } else {
533 return Operand(regoffset(), extend(), shift_amount());
534 }
535 }
536 }
537
538
539 void Assembler::Unreachable() {
540 #ifdef USE_SIMULATOR
541 debug("UNREACHABLE", __LINE__, BREAK);
542 #else
543 // Crash by branching to 0. lr now points near the fault.
544 Emit(BLR | Rn(xzr));
545 #endif
546 }
547
548
549 Address Assembler::target_pointer_address_at(Address pc) {
550 Instruction* instr = reinterpret_cast<Instruction*>(pc);
551 ASSERT(instr->IsLdrLiteralX());
552 return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
553 }
554
555
556 // Read/Modify the code target address in the branch/call instruction at pc.
557 Address Assembler::target_address_at(Address pc) {
558 return Memory::Address_at(target_pointer_address_at(pc));
559 }
560
561
562 Address Assembler::target_address_from_return_address(Address pc) {
563 // Returns the address of the call target, given the return address that
564 // execution will return to after the call.
565 // Call sequence on A64 is:
566 // ldr ip0, #... @ load from literal pool
567 // blr ip0
568 Address candidate = pc - 2 * kInstructionSize;
569 Instruction* instr = reinterpret_cast<Instruction*>(candidate);
570 USE(instr);
571 ASSERT(instr->IsLdrLiteralX());
572 return candidate;
573 }
574
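Laid out relative to the return address pc (kInstructionSize is 4 bytes on A64):

  pc - 8:  ldr  ip0, [pc, #...]   // candidate = pc - 2 * kInstructionSize
  pc - 4:  blr  ip0
  pc    :  <return address>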
575
576 Address Assembler::return_address_from_call_start(Address pc) {
577 // The call, generated by MacroAssembler::Call, is one of two possible
578 // sequences:
579 //
580 // Without relocation:
581 // movz ip0, #(target & 0x000000000000ffff)
582 // movk ip0, #(target & 0x00000000ffff0000)
583 // movk ip0, #(target & 0x0000ffff00000000)
584 // movk ip0, #(target & 0xffff000000000000)
585 // blr ip0
586 //
587 // With relocation:
588 // ldr ip0, =target
589 // blr ip0
590 //
591 // The return address is immediately after the blr instruction in both cases,
592 // so it can be found by adding the call size to the address at the start of
593 // the call sequence.
594 STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 5 * kInstructionSize);
595 STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);
596
597 Instruction* instr = reinterpret_cast<Instruction*>(pc);
598 if (instr->IsMovz()) {
599 // Verify the instruction sequence.
600 ASSERT(instr->following(1)->IsMovk());
601 ASSERT(instr->following(2)->IsMovk());
602 ASSERT(instr->following(3)->IsMovk());
603 ASSERT(instr->following(4)->IsBranchAndLinkToRegister());
604 return pc + Assembler::kCallSizeWithoutRelocation;
605 } else {
606 // Verify the instruction sequence.
607 ASSERT(instr->IsLdrLiteralX());
608 ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
609 return pc + Assembler::kCallSizeWithRelocation;
610 }
611 }
612
613
614 void Assembler::deserialization_set_special_target_at(
615 Address constant_pool_entry, Address target) {
616 Memory::Address_at(constant_pool_entry) = target;
617 }
618
619
620 void Assembler::set_target_address_at(Address pc, Address target) {
621 Memory::Address_at(target_pointer_address_at(pc)) = target;
622 // Intuitively, we would think it is necessary to always flush the
623 // instruction cache after patching a target address in the code as follows:
624 // CPU::FlushICache(pc, sizeof(target));
625 // However, on ARM, no instruction is actually patched in the case of
626 // embedded constants of the form:
627 // ldr ip, [pc, #...]
628 // Since the instruction accessing this address in the constant pool remains
629 // unchanged, a flush is not required.
630 }
631
632
633 int RelocInfo::target_address_size() {
634 return kPointerSize;
635 }
636
637
638 Address RelocInfo::target_address() {
639 ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
640 return Assembler::target_address_at(pc_);
641 }
642
643
644 Address RelocInfo::target_address_address() {
645 ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
646 || rmode_ == EMBEDDED_OBJECT
647 || rmode_ == EXTERNAL_REFERENCE);
648 return Assembler::target_pointer_address_at(pc_);
649 }
650
651
652 Object* RelocInfo::target_object() {
653 ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
654 return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
655 }
656
657
658 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
659 ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
660 return Handle<Object>(reinterpret_cast<Object**>(
661 Assembler::target_address_at(pc_)));
662 }
663
664
665 void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
666 ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
667 ASSERT(!target->IsConsString());
668 Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
669 if (mode == UPDATE_WRITE_BARRIER &&
670 host() != NULL &&
671 target->IsHeapObject()) {
672 host()->GetHeap()->incremental_marking()->RecordWrite(
673 host(), &Memory::Object_at(pc_), HeapObject::cast(target));
674 }
675 }
676
677
678 Address RelocInfo::target_reference() {
679 ASSERT(rmode_ == EXTERNAL_REFERENCE);
680 return Assembler::target_address_at(pc_);
681 }
682
683
684 Address RelocInfo::target_runtime_entry(Assembler* origin) {
685 ASSERT(IsRuntimeEntry(rmode_));
686 return target_address();
687 }
688
689
690 void RelocInfo::set_target_runtime_entry(Address target,
691 WriteBarrierMode mode) {
692 ASSERT(IsRuntimeEntry(rmode_));
693 if (target_address() != target) set_target_address(target, mode);
694 }
695
696
697 Handle<Cell> RelocInfo::target_cell_handle() {
698 UNIMPLEMENTED();
699 Cell *null_cell = NULL;
700 return Handle<Cell>(null_cell);
701 }
702
703
704 Cell* RelocInfo::target_cell() {
705 ASSERT(rmode_ == RelocInfo::CELL);
706 return Cell::FromValueAddress(Memory::Address_at(pc_));
707 }
708
709
710 void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
711 UNIMPLEMENTED();
712 }
713
714
715 static const int kCodeAgeSequenceSize = 5 * kInstructionSize;
716 static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
717
718
719 Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
720 UNREACHABLE(); // This should never be reached on A64.
721 return Handle<Object>();
722 }
723
724
725 Code* RelocInfo::code_age_stub() {
726 ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
727 ASSERT(!Code::IsYoungSequence(pc_));
728 // Read the stub entry point from the code age sequence.
729 Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
730 return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
731 }
732
733
734 void RelocInfo::set_code_age_stub(Code* stub) {
735 ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
736 ASSERT(!Code::IsYoungSequence(pc_));
737 // Overwrite the stub entry point in the code age sequence. This is loaded as
738 // a literal so there is no need to call FlushICache here.
739 Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
740 Memory::Address_at(stub_entry_address) = stub->instruction_start();
741 }
742
743
744 Address RelocInfo::call_address() {
745 ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
746 (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
747 // For the above sequences, the RelocInfo points to the literal load that
748 // loads the call address.
749 return Assembler::target_address_at(pc_);
750 }
751
752
753 void RelocInfo::set_call_address(Address target) {
754 ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
755 (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
756 Assembler::set_target_address_at(pc_, target);
757 if (host() != NULL) {
758 Object* target_code = Code::GetCodeFromTargetAddress(target);
759 host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
760 host(), this, HeapObject::cast(target_code));
761 }
762 }
763
764
765 void RelocInfo::WipeOut() {
766 ASSERT(IsEmbeddedObject(rmode_) ||
767 IsCodeTarget(rmode_) ||
768 IsRuntimeEntry(rmode_) ||
769 IsExternalReference(rmode_));
770 Assembler::set_target_address_at(pc_, NULL);
771 }
772
773
774 bool RelocInfo::IsPatchedReturnSequence() {
775 // The sequence must be:
776 // ldr ip0, [pc, #offset]
777 // blr ip0
778 // See a64/debug-a64.cc BreakLocationIterator::SetDebugBreakAtReturn().
779 Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
780 Instruction* i2 = i1->following();
781 return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
782 i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
783 }
784
785
786 bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
787 Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
788 return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
789 }
790
791
792 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
793 RelocInfo::Mode mode = rmode();
794 if (mode == RelocInfo::EMBEDDED_OBJECT) {
795 visitor->VisitEmbeddedPointer(this);
796 } else if (RelocInfo::IsCodeTarget(mode)) {
797 visitor->VisitCodeTarget(this);
798 } else if (mode == RelocInfo::CELL) {
799 visitor->VisitCell(this);
800 } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
801 visitor->VisitExternalReference(this);
802 #ifdef ENABLE_DEBUGGER_SUPPORT
803 } else if (((RelocInfo::IsJSReturn(mode) &&
804 IsPatchedReturnSequence()) ||
805 (RelocInfo::IsDebugBreakSlot(mode) &&
806 IsPatchedDebugBreakSlotSequence())) &&
807 isolate->debug()->has_break_points()) {
808 visitor->VisitDebugTarget(this);
809 #endif
810 } else if (RelocInfo::IsRuntimeEntry(mode)) {
811 visitor->VisitRuntimeEntry(this);
812 }
813 }
814
815
816 template<typename StaticVisitor>
817 void RelocInfo::Visit(Heap* heap) {
818 RelocInfo::Mode mode = rmode();
819 if (mode == RelocInfo::EMBEDDED_OBJECT) {
820 StaticVisitor::VisitEmbeddedPointer(heap, this);
821 } else if (RelocInfo::IsCodeTarget(mode)) {
822 StaticVisitor::VisitCodeTarget(heap, this);
823 } else if (mode == RelocInfo::CELL) {
824 StaticVisitor::VisitCell(heap, this);
825 } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
826 StaticVisitor::VisitExternalReference(this);
827 #ifdef ENABLE_DEBUGGER_SUPPORT
828 } else if (heap->isolate()->debug()->has_break_points() &&
829 ((RelocInfo::IsJSReturn(mode) &&
830 IsPatchedReturnSequence()) ||
831 (RelocInfo::IsDebugBreakSlot(mode) &&
832 IsPatchedDebugBreakSlotSequence()))) {
833 StaticVisitor::VisitDebugTarget(heap, this);
834 #endif
835 } else if (RelocInfo::IsRuntimeEntry(mode)) {
836 StaticVisitor::VisitRuntimeEntry(this);
837 }
838 }
839
840
841 LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
842 ASSERT(rt.IsValid());
843 if (rt.IsRegister()) {
844 return rt.Is64Bits() ? LDR_x : LDR_w;
845 } else {
846 ASSERT(rt.IsFPRegister());
847 return rt.Is64Bits() ? LDR_d : LDR_s;
848 }
849 }
850
851
852 LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
853 const CPURegister& rt2) {
854 ASSERT(AreSameSizeAndType(rt, rt2));
855 USE(rt2);
856 if (rt.IsRegister()) {
857 return rt.Is64Bits() ? LDP_x : LDP_w;
858 } else {
859 ASSERT(rt.IsFPRegister());
860 return rt.Is64Bits() ? LDP_d : LDP_s;
861 }
862 }
863
864
865 LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
866 ASSERT(rt.IsValid());
867 if (rt.IsRegister()) {
868 return rt.Is64Bits() ? STR_x : STR_w;
869 } else {
870 ASSERT(rt.IsFPRegister());
871 return rt.Is64Bits() ? STR_d : STR_s;
872 }
873 }
874
875
876 LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
877 const CPURegister& rt2) {
878 ASSERT(AreSameSizeAndType(rt, rt2));
879 USE(rt2);
880 if (rt.IsRegister()) {
881 return rt.Is64Bits() ? STP_x : STP_w;
882 } else {
883 ASSERT(rt.IsFPRegister());
884 return rt.Is64Bits() ? STP_d : STP_s;
885 }
886 }
887
888
889 LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
890 const CPURegister& rt, const CPURegister& rt2) {
891 ASSERT(AreSameSizeAndType(rt, rt2));
892 USE(rt2);
893 if (rt.IsRegister()) {
894 return rt.Is64Bits() ? LDNP_x : LDNP_w;
895 } else {
896 ASSERT(rt.IsFPRegister());
897 return rt.Is64Bits() ? LDNP_d : LDNP_s;
898 }
899 }
900
901
902 LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
903 const CPURegister& rt, const CPURegister& rt2) {
904 ASSERT(AreSameSizeAndType(rt, rt2));
905 USE(rt2);
906 if (rt.IsRegister()) {
907 return rt.Is64Bits() ? STNP_x : STNP_w;
908 } else {
909 ASSERT(rt.IsFPRegister());
910 return rt.Is64Bits() ? STNP_d : STNP_s;
911 }
912 }
913
914
915 int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
916 ASSERT(kStartOfLabelLinkChain == 0);
917 int offset = LinkAndGetByteOffsetTo(label);
918 ASSERT(IsAligned(offset, kInstructionSize));
919 return offset >> kInstructionSizeLog2;
920 }
921
922
923 Instr Assembler::Flags(FlagsUpdate S) {
924 if (S == SetFlags) {
925 return 1 << FlagsUpdate_offset;
926 } else if (S == LeaveFlags) {
927 return 0 << FlagsUpdate_offset;
928 }
929 UNREACHABLE();
930 return 0;
931 }
932
933
934 Instr Assembler::Cond(Condition cond) {
935 return cond << Condition_offset;
936 }
937
938
939 Instr Assembler::ImmPCRelAddress(int imm21) {
940 CHECK(is_int21(imm21));
941 Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
942 Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
943 Instr immlo = imm << ImmPCRelLo_offset;
944 return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
945 }
946
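Sketch of the split performed above, assuming the standard AArch64 ADR/ADRP layout where immlo is the low two bits of the offset and immhi the remaining high bits:

  imm21 bits [1:0]   -> immlo field (shifted to ImmPCRelLo_offset)
  imm21 bits [20:2]  -> immhi field (shifted to ImmPCRelHi_offset)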
947
948 Instr Assembler::ImmUncondBranch(int imm26) {
949 CHECK(is_int26(imm26));
950 return truncate_to_int26(imm26) << ImmUncondBranch_offset;
951 }
952
953
954 Instr Assembler::ImmCondBranch(int imm19) {
955 CHECK(is_int19(imm19));
956 return truncate_to_int19(imm19) << ImmCondBranch_offset;
957 }
958
959
960 Instr Assembler::ImmCmpBranch(int imm19) {
961 CHECK(is_int19(imm19));
962 return truncate_to_int19(imm19) << ImmCmpBranch_offset;
963 }
964
965
966 Instr Assembler::ImmTestBranch(int imm14) {
967 CHECK(is_int14(imm14));
968 return truncate_to_int14(imm14) << ImmTestBranch_offset;
969 }
970
971
972 Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
973 ASSERT(is_uint6(bit_pos));
974 // Subtract five from the shift offset, as we need bit 5 from bit_pos.
975 unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
976 unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
977 b5 &= ImmTestBranchBit5_mask;
978 b40 &= ImmTestBranchBit40_mask;
979 return b5 | b40;
980 }
981
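A worked example for the split above (illustrative): testing bit 37, i.e. bit_pos == 0b100101:

  b5  keeps only bit 5 of bit_pos (here 1) in the ImmTestBranchBit5 field
  b40 keeps bits 4:0 of bit_pos (here 0b00101) in the ImmTestBranchBit40 field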
982
983 Instr Assembler::SF(Register rd) {
984 return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
985 }
986
987
988 Instr Assembler::ImmAddSub(int64_t imm) {
989 ASSERT(IsImmAddSub(imm));
990 if (is_uint12(imm)) { // No shift required.
991 return imm << ImmAddSub_offset;
992 } else {
993 return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
994 }
995 }
996
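Two illustrative values for the encoding above:

  ImmAddSub(0x123)     // fits in 12 bits: 0x123 in the immediate field, shift bit clear
  ImmAddSub(0x123000)  // 0x123 << 12: 0x123 in the immediate field, shift bit set (LSL #12)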
997
998 Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
999 ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
1000 ((reg_size == kWRegSize) && is_uint5(imms)));
1001 USE(reg_size);
1002 return imms << ImmS_offset;
1003 }
1004
1005
1006 Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
1007 ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
1008 ((reg_size == kWRegSize) && is_uint5(immr)));
1009 USE(reg_size);
1010 ASSERT(is_uint6(immr));
1011 return immr << ImmR_offset;
1012 }
1013
1014
1015 Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
1016 ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
1017 ASSERT(is_uint6(imms));
1018 ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
1019 USE(reg_size);
1020 return imms << ImmSetBits_offset;
1021 }
1022
1023
1024 Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
1025 ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
1026 ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
1027 ((reg_size == kWRegSize) && is_uint5(immr)));
1028 USE(reg_size);
1029 return immr << ImmRotate_offset;
1030 }
1031
1032
1033 Instr Assembler::ImmLLiteral(int imm19) {
1034 CHECK(is_int19(imm19));
1035 return truncate_to_int19(imm19) << ImmLLiteral_offset;
1036 }
1037
1038
1039 Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
1040 ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
1041 ASSERT((reg_size == kXRegSize) || (bitn == 0));
1042 USE(reg_size);
1043 return bitn << BitN_offset;
1044 }
1045
1046
1047 Instr Assembler::ShiftDP(Shift shift) {
1048 ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
1049 return shift << ShiftDP_offset;
1050 }
1051
1052
1053 Instr Assembler::ImmDPShift(unsigned amount) {
1054 ASSERT(is_uint6(amount));
1055 return amount << ImmDPShift_offset;
1056 }
1057
1058
1059 Instr Assembler::ExtendMode(Extend extend) {
1060 return extend << ExtendMode_offset;
1061 }
1062
1063
1064 Instr Assembler::ImmExtendShift(unsigned left_shift) {
1065 ASSERT(left_shift <= 4);
1066 return left_shift << ImmExtendShift_offset;
1067 }
1068
1069
1070 Instr Assembler::ImmCondCmp(unsigned imm) {
1071 ASSERT(is_uint5(imm));
1072 return imm << ImmCondCmp_offset;
1073 }
1074
1075
1076 Instr Assembler::Nzcv(StatusFlags nzcv) {
1077 return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
1078 }
1079
1080
1081 Instr Assembler::ImmLSUnsigned(int imm12) {
1082 ASSERT(is_uint12(imm12));
1083 return imm12 << ImmLSUnsigned_offset;
1084 }
1085
1086
1087 Instr Assembler::ImmLS(int imm9) {
1088 ASSERT(is_int9(imm9));
1089 return truncate_to_int9(imm9) << ImmLS_offset;
1090 }
1091
1092
1093 Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
1094 ASSERT(((imm7 >> size) << size) == imm7);
1095 int scaled_imm7 = imm7 >> size;
1096 ASSERT(is_int7(scaled_imm7));
1097 return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
1098 }
1099
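For example (illustrative), with a 64-bit pair access where size == 3 (log2 of the 8-byte access size):

  ImmLSPair(16, size)   // scaled_imm7 = 2  (byte offset 16 / 8)
  ImmLSPair(-8, size)   // scaled_imm7 = -1, stored as a 7-bit two's-complement field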
1100
1101 Instr Assembler::ImmShiftLS(unsigned shift_amount) {
1102 ASSERT(is_uint1(shift_amount));
1103 return shift_amount << ImmShiftLS_offset;
1104 }
1105
1106
1107 Instr Assembler::ImmException(int imm16) {
1108 ASSERT(is_uint16(imm16));
1109 return imm16 << ImmException_offset;
1110 }
1111
1112
1113 Instr Assembler::ImmSystemRegister(int imm15) {
1114 ASSERT(is_uint15(imm15));
1115 return imm15 << ImmSystemRegister_offset;
1116 }
1117
1118
1119 Instr Assembler::ImmHint(int imm7) {
1120 ASSERT(is_uint7(imm7));
1121 return imm7 << ImmHint_offset;
1122 }
1123
1124
1125 Instr Assembler::ImmBarrierDomain(int imm2) {
1126 ASSERT(is_uint2(imm2));
1127 return imm2 << ImmBarrierDomain_offset;
1128 }
1129
1130
1131 Instr Assembler::ImmBarrierType(int imm2) {
1132 ASSERT(is_uint2(imm2));
1133 return imm2 << ImmBarrierType_offset;
1134 }
1135
1136
1137 LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
1138 ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
1139 return static_cast<LSDataSize>(op >> SizeLS_offset);
1140 }
1141
1142
1143 Instr Assembler::ImmMoveWide(uint64_t imm) {
1144 ASSERT(is_uint16(imm));
1145 return imm << ImmMoveWide_offset;
1146 }
1147
1148
1149 Instr Assembler::ShiftMoveWide(int64_t shift) {
1150 ASSERT(is_uint2(shift));
1151 return shift << ShiftMoveWide_offset;
1152 }
1153
1154
1155 Instr Assembler::FPType(FPRegister fd) {
1156 return fd.Is64Bits() ? FP64 : FP32;
1157 }
1158
1159
1160 Instr Assembler::FPScale(unsigned scale) {
1161 ASSERT(is_uint6(scale));
1162 return scale << FPScale_offset;
1163 }
1164
1165
1166 const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
1167 return reg.Is64Bits() ? xzr : wzr;
1168 }
1169
1170
1171 void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
1172 LoadRelocatedValue(rt, operand, LDR_x_lit);
1173 }
1174
1175
1176 inline void Assembler::CheckBuffer() {
1177 ASSERT(pc_ < (buffer_ + buffer_size_));
1178 if (buffer_space() < kGap) {
1179 GrowBuffer();
1180 }
1181 if (pc_offset() >= next_buffer_check_) {
1182 CheckConstPool(false, true);
1183 }
1184 }
1185
1186
1187 TypeFeedbackId Assembler::RecordedAstId() {
1188 ASSERT(!recorded_ast_id_.IsNone());
1189 return recorded_ast_id_;
1190 }
1191
1192
1193 void Assembler::ClearRecordedAstId() {
1194 recorded_ast_id_ = TypeFeedbackId::None();
1195 }
1196
1197
1198 } } // namespace v8::internal
1199
1200 #endif // V8_A64_ASSEMBLER_A64_INL_H_