Chromium Code Reviews
Side by Side Diff: src/a64/assembler-a64-inl.h

Issue 148293020: Merge experimental/a64 to bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove ARM from OWNERS (created 6 years, 10 months ago)
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #ifndef V8_A64_ASSEMBLER_A64_INL_H_
29 #define V8_A64_ASSEMBLER_A64_INL_H_
30
31 #include "a64/assembler-a64.h"
32 #include "cpu.h"
33 #include "debug.h"
34
35
36 namespace v8 {
37 namespace internal {
38
39
40 void RelocInfo::apply(intptr_t delta) {
41 UNIMPLEMENTED();
42 }
43
44
45 void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
46 ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
47 Assembler::set_target_address_at(pc_, target);
48 if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
49 Object* target_code = Code::GetCodeFromTargetAddress(target);
50 host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
51 host(), this, HeapObject::cast(target_code));
52 }
53 }
54
55
56 inline unsigned CPURegister::code() const {
57 ASSERT(IsValid());
58 return reg_code;
59 }
60
61
62 inline CPURegister::RegisterType CPURegister::type() const {
63 ASSERT(IsValidOrNone());
64 return reg_type;
65 }
66
67
68 inline RegList CPURegister::Bit() const {
69 ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
70 return IsValid() ? 1UL << reg_code : 0;
71 }
72
73
74 inline unsigned CPURegister::SizeInBits() const {
75 ASSERT(IsValid());
76 return reg_size;
77 }
78
79
80 inline int CPURegister::SizeInBytes() const {
81 ASSERT(IsValid());
82 ASSERT(SizeInBits() % 8 == 0);
83 return reg_size / 8;
84 }
85
86
87 inline bool CPURegister::Is32Bits() const {
88 ASSERT(IsValid());
89 return reg_size == 32;
90 }
91
92
93 inline bool CPURegister::Is64Bits() const {
94 ASSERT(IsValid());
95 return reg_size == 64;
96 }
97
98
99 inline bool CPURegister::IsValid() const {
100 if (IsValidRegister() || IsValidFPRegister()) {
101 ASSERT(!IsNone());
102 return true;
103 } else {
104 ASSERT(IsNone());
105 return false;
106 }
107 }
108
109
110 inline bool CPURegister::IsValidRegister() const {
111 return IsRegister() &&
112 ((reg_size == kWRegSize) || (reg_size == kXRegSize)) &&
113 ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
114 }
115
116
117 inline bool CPURegister::IsValidFPRegister() const {
118 return IsFPRegister() &&
119 ((reg_size == kSRegSize) || (reg_size == kDRegSize)) &&
120 (reg_code < kNumberOfFPRegisters);
121 }
122
123
124 inline bool CPURegister::IsNone() const {
125 // kNoRegister types should always have size 0 and code 0.
126 ASSERT((reg_type != kNoRegister) || (reg_code == 0));
127 ASSERT((reg_type != kNoRegister) || (reg_size == 0));
128
129 return reg_type == kNoRegister;
130 }
131
132
133 inline bool CPURegister::Is(const CPURegister& other) const {
134 ASSERT(IsValidOrNone() && other.IsValidOrNone());
135 return (reg_code == other.reg_code) && (reg_size == other.reg_size) &&
136 (reg_type == other.reg_type);
137 }
138
139
140 inline bool CPURegister::IsRegister() const {
141 return reg_type == kRegister;
142 }
143
144
145 inline bool CPURegister::IsFPRegister() const {
146 return reg_type == kFPRegister;
147 }
148
149
150 inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
151 return (reg_size == other.reg_size) && (reg_type == other.reg_type);
152 }
153
154
155 inline bool CPURegister::IsValidOrNone() const {
156 return IsValid() || IsNone();
157 }
158
159
160 inline bool CPURegister::IsZero() const {
161 ASSERT(IsValid());
162 return IsRegister() && (reg_code == kZeroRegCode);
163 }
164
165
166 inline bool CPURegister::IsSP() const {
167 ASSERT(IsValid());
168 return IsRegister() && (reg_code == kSPRegInternalCode);
169 }
170
171
172 inline void CPURegList::Combine(const CPURegList& other) {
173 ASSERT(IsValid());
174 ASSERT(other.type() == type_);
175 ASSERT(other.RegisterSizeInBits() == size_);
176 list_ |= other.list();
177 }
178
179
180 inline void CPURegList::Remove(const CPURegList& other) {
181 ASSERT(IsValid());
182 ASSERT(other.type() == type_);
183 ASSERT(other.RegisterSizeInBits() == size_);
184 list_ &= ~other.list();
185 }
186
187
188 inline void CPURegList::Combine(const CPURegister& other) {
189 ASSERT(other.type() == type_);
190 ASSERT(other.SizeInBits() == size_);
191 Combine(other.code());
192 }
193
194
195 inline void CPURegList::Remove(const CPURegister& other) {
196 ASSERT(other.type() == type_);
197 ASSERT(other.SizeInBits() == size_);
198 Remove(other.code());
199 }
200
201
202 inline void CPURegList::Combine(int code) {
203 ASSERT(IsValid());
204 ASSERT(CPURegister::Create(code, size_, type_).IsValid());
205 list_ |= (1UL << code);
206 }
207
208
209 inline void CPURegList::Remove(int code) {
210 ASSERT(IsValid());
211 ASSERT(CPURegister::Create(code, size_, type_).IsValid());
212 list_ &= ~(1UL << code);
213 }
214
215
216 inline Register Register::XRegFromCode(unsigned code) {
217 // This function returns the zero register when code = 31. The stack pointer
218 // cannot be returned.
219 ASSERT(code < kNumberOfRegisters);
220 return Register::Create(code, kXRegSize);
221 }
222
223
224 inline Register Register::WRegFromCode(unsigned code) {
225 ASSERT(code < kNumberOfRegisters);
226 return Register::Create(code, kWRegSize);
227 }
228
229
230 inline FPRegister FPRegister::SRegFromCode(unsigned code) {
231 ASSERT(code < kNumberOfFPRegisters);
232 return FPRegister::Create(code, kSRegSize);
233 }
234
235
236 inline FPRegister FPRegister::DRegFromCode(unsigned code) {
237 ASSERT(code < kNumberOfFPRegisters);
238 return FPRegister::Create(code, kDRegSize);
239 }
240
241
242 inline Register CPURegister::W() const {
243 ASSERT(IsValidRegister());
244 return Register::WRegFromCode(reg_code);
245 }
246
247
248 inline Register CPURegister::X() const {
249 ASSERT(IsValidRegister());
250 return Register::XRegFromCode(reg_code);
251 }
252
253
254 inline FPRegister CPURegister::S() const {
255 ASSERT(IsValidFPRegister());
256 return FPRegister::SRegFromCode(reg_code);
257 }
258
259
260 inline FPRegister CPURegister::D() const {
261 ASSERT(IsValidFPRegister());
262 return FPRegister::DRegFromCode(reg_code);
263 }
264
265
266 // Operand.
267 #define DECLARE_INT_OPERAND_CONSTRUCTOR(type) \
268 Operand::Operand(type immediate, RelocInfo::Mode rmode) \
269 : immediate_(immediate), \
270 reg_(NoReg), \
271 rmode_(rmode) {}
272 DECLARE_INT_OPERAND_CONSTRUCTOR(int64_t)
273 DECLARE_INT_OPERAND_CONSTRUCTOR(uint64_t)
274 DECLARE_INT_OPERAND_CONSTRUCTOR(int32_t) // NOLINT(readability/casting)
275 DECLARE_INT_OPERAND_CONSTRUCTOR(uint32_t)
276 #undef DECLARE_INT_OPERAND_CONSTRUCTOR
277
278 Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
279 : reg_(reg),
280 shift_(shift),
281 extend_(NO_EXTEND),
282 shift_amount_(shift_amount),
283 rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
284 ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
285 ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
286 ASSERT(!reg.IsSP());
287 }
288
289
290 Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
291 : reg_(reg),
292 shift_(NO_SHIFT),
293 extend_(extend),
294 shift_amount_(shift_amount),
295 rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
296 ASSERT(reg.IsValid());
297 ASSERT(shift_amount <= 4);
298 ASSERT(!reg.IsSP());
299
300 // Extend modes SXTX and UXTX require a 64-bit register.
301 ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
302 }
303
304
305 Operand::Operand(Smi* value)
306 : immediate_(reinterpret_cast<intptr_t>(value)),
307 reg_(NoReg),
308 rmode_(RelocInfo::NONE64) {}
309
310
311 bool Operand::IsImmediate() const {
312 return reg_.Is(NoReg);
313 }
314
315
316 bool Operand::IsShiftedRegister() const {
317 return reg_.IsValid() && (shift_ != NO_SHIFT);
318 }
319
320
321 bool Operand::IsExtendedRegister() const {
322 return reg_.IsValid() && (extend_ != NO_EXTEND);
323 }
324
325
326 bool Operand::IsZero() const {
327 if (IsImmediate()) {
328 return immediate() == 0;
329 } else {
330 return reg().IsZero();
331 }
332 }
333
334
335 Operand Operand::ToExtendedRegister() const {
336 ASSERT(IsShiftedRegister());
337 ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
338 return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
339 }
340
341
342 int64_t Operand::immediate() const {
343 ASSERT(IsImmediate());
344 return immediate_;
345 }
346
347
348 Register Operand::reg() const {
349 ASSERT(IsShiftedRegister() || IsExtendedRegister());
350 return reg_;
351 }
352
353
354 Shift Operand::shift() const {
355 ASSERT(IsShiftedRegister());
356 return shift_;
357 }
358
359
360 Extend Operand::extend() const {
361 ASSERT(IsExtendedRegister());
362 return extend_;
363 }
364
365
366 unsigned Operand::shift_amount() const {
367 ASSERT(IsShiftedRegister() || IsExtendedRegister());
368 return shift_amount_;
369 }
370
371
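// The two helpers below build shifted operands that untag a smi and optionally
// scale it in a single ASR/LSL operand. As a rough example, assuming the usual
// 64-bit smi layout where the payload sits in the upper half of the register
// (kSmiShift == 32):
//   UntagSmiAndScale(x0, 3)  ~  Operand(x0, ASR, 29)
// i.e. the untagging shift and the scaling shift are folded together.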
372 Operand Operand::UntagSmi(Register smi) {
373 ASSERT(smi.Is64Bits());
374 return Operand(smi, ASR, kSmiShift);
375 }
376
377
378 Operand Operand::UntagSmiAndScale(Register smi, int scale) {
379 ASSERT(smi.Is64Bits());
380 ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
381 if (scale > kSmiShift) {
382 return Operand(smi, LSL, scale - kSmiShift);
383 } else if (scale < kSmiShift) {
384 return Operand(smi, ASR, kSmiShift - scale);
385 }
386 return Operand(smi);
387 }
388
389
390 MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
391 : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
392 shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
393 ASSERT(base.Is64Bits() && !base.IsZero());
394 }
395
396
397 MemOperand::MemOperand(Register base,
398 Register regoffset,
399 Extend extend,
400 unsigned shift_amount)
401 : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
402 shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
403 ASSERT(base.Is64Bits() && !base.IsZero());
404 ASSERT(!regoffset.IsSP());
405 ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
406
407 // SXTX extend mode requires a 64-bit offset register.
408 ASSERT(regoffset.Is64Bits() || (extend != SXTX));
409 }
410
411
412 MemOperand::MemOperand(Register base,
413 Register regoffset,
414 Shift shift,
415 unsigned shift_amount)
416 : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
417 shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
418 ASSERT(base.Is64Bits() && !base.IsZero());
419 ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
420 ASSERT(shift == LSL);
421 }
422
423
424 MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
425 : base_(base), addrmode_(addrmode) {
426 ASSERT(base.Is64Bits() && !base.IsZero());
427
428 if (offset.IsImmediate()) {
429 offset_ = offset.immediate();
430
431 regoffset_ = NoReg;
432 } else if (offset.IsShiftedRegister()) {
433 ASSERT(addrmode == Offset);
434
435 regoffset_ = offset.reg();
436 shift_ = offset.shift();
437 shift_amount_ = offset.shift_amount();
438
439 extend_ = NO_EXTEND;
440 offset_ = 0;
441
442 // These assertions match those in the shifted-register constructor.
443 ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
444 ASSERT(shift_ == LSL);
445 } else {
446 ASSERT(offset.IsExtendedRegister());
447 ASSERT(addrmode == Offset);
448
449 regoffset_ = offset.reg();
450 extend_ = offset.extend();
451 shift_amount_ = offset.shift_amount();
452
453 shift_ = NO_SHIFT;
454 offset_ = 0;
455
456 // These assertions match those in the extended-register constructor.
457 ASSERT(!regoffset_.IsSP());
458 ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
459 ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
460 }
461 }
462
463 bool MemOperand::IsImmediateOffset() const {
464 return (addrmode_ == Offset) && regoffset_.Is(NoReg);
465 }
466
467
468 bool MemOperand::IsRegisterOffset() const {
469 return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
470 }
471
472
473 bool MemOperand::IsPreIndex() const {
474 return addrmode_ == PreIndex;
475 }
476
477
478 bool MemOperand::IsPostIndex() const {
479 return addrmode_ == PostIndex;
480 }
481
482 Operand MemOperand::OffsetAsOperand() const {
483 if (IsImmediateOffset()) {
484 return offset();
485 } else {
486 ASSERT(IsRegisterOffset());
487 if (extend() == NO_EXTEND) {
488 return Operand(regoffset(), shift(), shift_amount());
489 } else {
490 return Operand(regoffset(), extend(), shift_amount());
491 }
492 }
493 }
494
495
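// On A64, code targets are not embedded in the branch instruction itself.
// Instead the call sequence loads the target from a constant pool entry with a
// pc-relative literal load (see the IsLdrLiteralX check below), so the helpers
// here return or patch the address of that pool entry.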
496 Address Assembler::target_pointer_address_at(Address pc) {
497 Instruction* instr = reinterpret_cast<Instruction*>(pc);
498 ASSERT(instr->IsLdrLiteralX());
499 return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
500 }
501
502
503 // Read/Modify the code target address in the branch/call sequence at pc.
504 Address Assembler::target_address_at(Address pc) {
505 return Memory::Address_at(target_pointer_address_at(pc));
506 }
507
508
509 Address Assembler::target_address_from_return_address(Address pc) {
510 // Returns the address of the call target, given the return address that
511 // execution resumes at after the call.
512 // The call sequence on A64 is:
513 // ldr ip0, #... @ load from literal pool
514 // blr ip0
515 Address candidate = pc - 2 * kInstructionSize;
516 Instruction* instr = reinterpret_cast<Instruction*>(candidate);
517 USE(instr);
518 ASSERT(instr->IsLdrLiteralX());
519 return candidate;
520 }
521
522
523 Address Assembler::return_address_from_call_start(Address pc) {
524 // The call, generated by MacroAssembler::Call, is one of two possible
525 // sequences:
526 //
527 // Without relocation:
528 // movz ip0, #(target & 0x000000000000ffff)
529 // movk ip0, #(target & 0x00000000ffff0000)
530 // movk ip0, #(target & 0x0000ffff00000000)
531 // movk ip0, #(target & 0xffff000000000000)
532 // blr ip0
533 //
534 // With relocation:
535 // ldr ip0, =target
536 // blr ip0
537 //
538 // The return address is immediately after the blr instruction in both cases,
539 // so it can be found by adding the call size to the address at the start of
540 // the call sequence.
541 STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 5 * kInstructionSize);
542 STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);
543
544 Instruction* instr = reinterpret_cast<Instruction*>(pc);
545 if (instr->IsMovz()) {
546 // Verify the instruction sequence.
547 ASSERT(instr->following(1)->IsMovk());
548 ASSERT(instr->following(2)->IsMovk());
549 ASSERT(instr->following(3)->IsMovk());
550 ASSERT(instr->following(4)->IsBranchAndLinkToRegister());
551 return pc + Assembler::kCallSizeWithoutRelocation;
552 } else {
553 // Verify the instruction sequence.
554 ASSERT(instr->IsLdrLiteralX());
555 ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
556 return pc + Assembler::kCallSizeWithRelocation;
557 }
558 }
559
560
561 void Assembler::deserialization_set_special_target_at(
562 Address constant_pool_entry, Address target) {
563 Memory::Address_at(constant_pool_entry) = target;
564 }
565
566
567 void Assembler::set_target_address_at(Address pc, Address target) {
568 Memory::Address_at(target_pointer_address_at(pc)) = target;
569 // Intuitively, we would think it is necessary to always flush the
570 // instruction cache after patching a target address in the code as follows:
571 // CPU::FlushICache(pc, sizeof(target));
572 // However, on ARM, no instruction is actually patched in the case of
573 // embedded constants of the form:
574 // ldr ip, [pc, #...]
575 // since the instruction accessing this address in the constant pool remains
576 // unchanged, a flush is not required.
577 }
578
579
580 int RelocInfo::target_address_size() {
581 return kPointerSize;
582 }
583
584
585 Address RelocInfo::target_address() {
586 ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
587 return Assembler::target_address_at(pc_);
588 }
589
590
591 Address RelocInfo::target_address_address() {
592 ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
593 || rmode_ == EMBEDDED_OBJECT
594 || rmode_ == EXTERNAL_REFERENCE);
595 return Assembler::target_pointer_address_at(pc_);
596 }
597
598
599 Object* RelocInfo::target_object() {
600 ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
601 return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
602 }
603
604
605 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
606 ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
607 return Handle<Object>(reinterpret_cast<Object**>(
608 Assembler::target_address_at(pc_)));
609 }
610
611
612 void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
613 ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
614 ASSERT(!target->IsConsString());
615 Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(target));
616 if (mode == UPDATE_WRITE_BARRIER &&
617 host() != NULL &&
618 target->IsHeapObject()) {
619 host()->GetHeap()->incremental_marking()->RecordWrite(
620 host(), &Memory::Object_at(pc_), HeapObject::cast(target));
621 }
622 }
623
624
625 Address RelocInfo::target_reference() {
626 ASSERT(rmode_ == EXTERNAL_REFERENCE);
627 return Assembler::target_address_at(pc_);
628 }
629
630
631 Address RelocInfo::target_runtime_entry(Assembler* origin) {
632 ASSERT(IsRuntimeEntry(rmode_));
633 return target_address();
634 }
635
636
637 void RelocInfo::set_target_runtime_entry(Address target,
638 WriteBarrierMode mode) {
639 ASSERT(IsRuntimeEntry(rmode_));
640 if (target_address() != target) set_target_address(target, mode);
641 }
642
643
644 Handle<Cell> RelocInfo::target_cell_handle() {
645 UNIMPLEMENTED();
646 Cell *null_cell = NULL;
647 return Handle<Cell>(null_cell);
648 }
649
650
651 Cell* RelocInfo::target_cell() {
652 ASSERT(rmode_ == RelocInfo::CELL);
653 return Cell::FromValueAddress(Memory::Address_at(pc_));
654 }
655
656
657 void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
658 UNIMPLEMENTED();
659 }
660
661
662 static const int kCodeAgeSequenceSize = 5 * kInstructionSize;
663 static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
664
665
666 Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
667 UNREACHABLE(); // This should never be reached on A64.
668 return Handle<Object>();
669 }
670
671
672 Code* RelocInfo::code_age_stub() {
673 ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
674 ASSERT(!Code::IsYoungSequence(pc_));
675 // Read the stub entry point from the code age sequence.
676 Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
677 return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
678 }
679
680
681 void RelocInfo::set_code_age_stub(Code* stub) {
682 ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
683 ASSERT(!Code::IsYoungSequence(pc_));
684 // Overwrite the stub entry point in the code age sequence. This is loaded as
685 // a literal so there is no need to call FlushICache here.
686 Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
687 Memory::Address_at(stub_entry_address) = stub->instruction_start();
688 }
689
690
691 Address RelocInfo::call_address() {
692 ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
693 (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
694 // For the above sequences the RelocInfo points to the literal load that
695 // loads the call address.
696 return Assembler::target_address_at(pc_);
697 }
698
699
700 void RelocInfo::set_call_address(Address target) {
701 ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
702 (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
703 Assembler::set_target_address_at(pc_, target);
704 if (host() != NULL) {
705 Object* target_code = Code::GetCodeFromTargetAddress(target);
706 host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
707 host(), this, HeapObject::cast(target_code));
708 }
709 }
710
711
712 void RelocInfo::WipeOut() {
713 ASSERT(IsEmbeddedObject(rmode_) ||
714 IsCodeTarget(rmode_) ||
715 IsRuntimeEntry(rmode_) ||
716 IsExternalReference(rmode_));
717 Assembler::set_target_address_at(pc_, NULL);
718 }
719
720
721 bool RelocInfo::IsPatchedReturnSequence() {
722 // The sequence must be:
723 // ldr ip0, [pc, #offset]
724 // blr ip0
725 // See a64/debug-a64.cc BreakLocationIterator::SetDebugBreakAtReturn().
726 Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
727 Instruction* i2 = i1->following();
728 return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
729 i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
730 }
731
732
733 bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
734 Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
735 return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
736 }
737
738
739 void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
740 RelocInfo::Mode mode = rmode();
741 if (mode == RelocInfo::EMBEDDED_OBJECT) {
742 visitor->VisitEmbeddedPointer(this);
743 } else if (RelocInfo::IsCodeTarget(mode)) {
744 visitor->VisitCodeTarget(this);
745 } else if (mode == RelocInfo::CELL) {
746 visitor->VisitCell(this);
747 } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
748 visitor->VisitExternalReference(this);
749 #ifdef ENABLE_DEBUGGER_SUPPORT
750 } else if (((RelocInfo::IsJSReturn(mode) &&
751 IsPatchedReturnSequence()) ||
752 (RelocInfo::IsDebugBreakSlot(mode) &&
753 IsPatchedDebugBreakSlotSequence())) &&
754 isolate->debug()->has_break_points()) {
755 visitor->VisitDebugTarget(this);
756 #endif
757 } else if (RelocInfo::IsRuntimeEntry(mode)) {
758 visitor->VisitRuntimeEntry(this);
759 }
760 }
761
762
763 template<typename StaticVisitor>
764 void RelocInfo::Visit(Heap* heap) {
765 RelocInfo::Mode mode = rmode();
766 if (mode == RelocInfo::EMBEDDED_OBJECT) {
767 StaticVisitor::VisitEmbeddedPointer(heap, this);
768 } else if (RelocInfo::IsCodeTarget(mode)) {
769 StaticVisitor::VisitCodeTarget(heap, this);
770 } else if (mode == RelocInfo::CELL) {
771 StaticVisitor::VisitCell(heap, this);
772 } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
773 StaticVisitor::VisitExternalReference(this);
774 #ifdef ENABLE_DEBUGGER_SUPPORT
775 } else if (heap->isolate()->debug()->has_break_points() &&
776 ((RelocInfo::IsJSReturn(mode) &&
777 IsPatchedReturnSequence()) ||
778 (RelocInfo::IsDebugBreakSlot(mode) &&
779 IsPatchedDebugBreakSlotSequence()))) {
780 StaticVisitor::VisitDebugTarget(heap, this);
781 #endif
782 } else if (RelocInfo::IsRuntimeEntry(mode)) {
783 StaticVisitor::VisitRuntimeEntry(this);
784 }
785 }
786
787
788 LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
789 ASSERT(rt.IsValid());
790 if (rt.IsRegister()) {
791 return rt.Is64Bits() ? LDR_x : LDR_w;
792 } else {
793 ASSERT(rt.IsFPRegister());
794 return rt.Is64Bits() ? LDR_d : LDR_s;
795 }
796 }
797
798
799 LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
800 const CPURegister& rt2) {
801 ASSERT(AreSameSizeAndType(rt, rt2));
802 USE(rt2);
803 if (rt.IsRegister()) {
804 return rt.Is64Bits() ? LDP_x : LDP_w;
805 } else {
806 ASSERT(rt.IsFPRegister());
807 return rt.Is64Bits() ? LDP_d : LDP_s;
808 }
809 }
810
811
812 LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
813 ASSERT(rt.IsValid());
814 if (rt.IsRegister()) {
815 return rt.Is64Bits() ? STR_x : STR_w;
816 } else {
817 ASSERT(rt.IsFPRegister());
818 return rt.Is64Bits() ? STR_d : STR_s;
819 }
820 }
821
822
823 LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
824 const CPURegister& rt2) {
825 ASSERT(AreSameSizeAndType(rt, rt2));
826 USE(rt2);
827 if (rt.IsRegister()) {
828 return rt.Is64Bits() ? STP_x : STP_w;
829 } else {
830 ASSERT(rt.IsFPRegister());
831 return rt.Is64Bits() ? STP_d : STP_s;
832 }
833 }
834
835
836 LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
837 const CPURegister& rt, const CPURegister& rt2) {
838 ASSERT(AreSameSizeAndType(rt, rt2));
839 USE(rt2);
840 if (rt.IsRegister()) {
841 return rt.Is64Bits() ? LDNP_x : LDNP_w;
842 } else {
843 ASSERT(rt.IsFPRegister());
844 return rt.Is64Bits() ? LDNP_d : LDNP_s;
845 }
846 }
847
848
849 LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
850 const CPURegister& rt, const CPURegister& rt2) {
851 ASSERT(AreSameSizeAndType(rt, rt2));
852 USE(rt2);
853 if (rt.IsRegister()) {
854 return rt.Is64Bits() ? STNP_x : STNP_w;
855 } else {
856 ASSERT(rt.IsFPRegister());
857 return rt.Is64Bits() ? STNP_d : STNP_s;
858 }
859 }
860
861
862 int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
863 ASSERT(kStartOfLabelLinkChain == 0);
864 int offset = LinkAndGetByteOffsetTo(label);
865 ASSERT(IsAligned(offset, kInstructionSize));
866 return offset >> kInstructionSizeLog2;
867 }
868
869
870 Instr Assembler::Flags(FlagsUpdate S) {
871 if (S == SetFlags) {
872 return 1 << FlagsUpdate_offset;
873 } else if (S == LeaveFlags) {
874 return 0 << FlagsUpdate_offset;
875 }
876 UNREACHABLE();
877 return 0;
878 }
879
880
881 Instr Assembler::Cond(Condition cond) {
882 return cond << Condition_offset;
883 }
884
885
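// ADR (and ADRP) encode their 21-bit signed offset split across two fields:
// the low two bits go into the immlo field and the remaining high bits into
// immhi. The masking below keeps each part within its own field.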
886 Instr Assembler::ImmPCRelAddress(int imm21) {
887 CHECK(is_int21(imm21));
888 Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
889 Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
890 Instr immlo = imm << ImmPCRelLo_offset;
891 return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
892 }
893
894
895 Instr Assembler::ImmUncondBranch(int imm26) {
896 CHECK(is_int26(imm26));
897 return truncate_to_int26(imm26) << ImmUncondBranch_offset;
898 }
899
900
901 Instr Assembler::ImmCondBranch(int imm19) {
902 CHECK(is_int19(imm19));
903 return truncate_to_int19(imm19) << ImmCondBranch_offset;
904 }
905
906
907 Instr Assembler::ImmCmpBranch(int imm19) {
908 CHECK(is_int19(imm19));
909 return truncate_to_int19(imm19) << ImmCmpBranch_offset;
910 }
911
912
913 Instr Assembler::ImmTestBranch(int imm14) {
914 CHECK(is_int14(imm14));
915 return truncate_to_int14(imm14) << ImmTestBranch_offset;
916 }
917
918
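// TBZ/TBNZ can test any of the 64 register bits, but the bit position is split
// across the encoding: bit 5 of bit_pos goes into the b5 field (testing a bit
// above 31 requires an X register) and bits 4:0 form the b40 field.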
919 Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
920 ASSERT(is_uint6(bit_pos));
921 // Shift by (offset - 5) so that bit 5 of bit_pos lands in the b5 field.
922 unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
923 unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
924 b5 &= ImmTestBranchBit5_mask;
925 b40 &= ImmTestBranchBit40_mask;
926 return b5 | b40;
927 }
928
929
930 Instr Assembler::SF(Register rd) {
931 return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
932 }
933
934
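// Add/subtract immediates are 12 bits wide, optionally left-shifted by 12.
// For example, #0x1000 does not fit in 12 bits directly, so it is encoded as
// the 12-bit value 0x1 with the shift bit set.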
935 Instr Assembler::ImmAddSub(int64_t imm) {
936 ASSERT(IsImmAddSub(imm));
937 if (is_uint12(imm)) { // No shift required.
938 return imm << ImmAddSub_offset;
939 } else {
940 return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
941 }
942 }
943
944
945 Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
946 ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
947 ((reg_size == kWRegSize) && is_uint5(imms)));
948 USE(reg_size);
949 return imms << ImmS_offset;
950 }
951
952
953 Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
954 ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
955 ((reg_size == kWRegSize) && is_uint5(immr)));
956 USE(reg_size);
957 ASSERT(is_uint6(immr));
958 return immr << ImmR_offset;
959 }
960
961
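// ImmSetBits, ImmRotate and BitN (further down) together assemble the
// N:immr:imms encoding used by A64 logical immediates and bitfield
// instructions; each helper only places its value in its field, so the caller
// is expected to have produced a valid combination.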
962 Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
963 ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
964 ASSERT(is_uint6(imms));
965 ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
966 USE(reg_size);
967 return imms << ImmSetBits_offset;
968 }
969
970
971 Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
972 ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
973 ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
974 ((reg_size == kWRegSize) && is_uint5(immr)));
975 USE(reg_size);
976 return immr << ImmRotate_offset;
977 }
978
979
980 Instr Assembler::ImmLLiteral(int imm19) {
981 CHECK(is_int19(imm19));
982 return truncate_to_int19(imm19) << ImmLLiteral_offset;
983 }
984
985
986 Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
987 ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
988 ASSERT((reg_size == kXRegSize) || (bitn == 0));
989 USE(reg_size);
990 return bitn << BitN_offset;
991 }
992
993
994 Instr Assembler::ShiftDP(Shift shift) {
995 ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
996 return shift << ShiftDP_offset;
997 }
998
999
1000 Instr Assembler::ImmDPShift(unsigned amount) {
1001 ASSERT(is_uint6(amount));
1002 return amount << ImmDPShift_offset;
1003 }
1004
1005
1006 Instr Assembler::ExtendMode(Extend extend) {
1007 return extend << ExtendMode_offset;
1008 }
1009
1010
1011 Instr Assembler::ImmExtendShift(unsigned left_shift) {
1012 ASSERT(left_shift <= 4);
1013 return left_shift << ImmExtendShift_offset;
1014 }
1015
1016
1017 Instr Assembler::ImmCondCmp(unsigned imm) {
1018 ASSERT(is_uint5(imm));
1019 return imm << ImmCondCmp_offset;
1020 }
1021
1022
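// Extracts the four condition flags (N, Z, C, V) from a StatusFlags value and
// places them in the nzcv field used by the conditional compare instructions.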
1023 Instr Assembler::Nzcv(StatusFlags nzcv) {
1024 return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
1025 }
1026
1027
1028 Instr Assembler::ImmLSUnsigned(int imm12) {
1029 ASSERT(is_uint12(imm12));
1030 return imm12 << ImmLSUnsigned_offset;
1031 }
1032
1033
1034 Instr Assembler::ImmLS(int imm9) {
1035 ASSERT(is_int9(imm9));
1036 return truncate_to_int9(imm9) << ImmLS_offset;
1037 }
1038
1039
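// Load/store pair instructions take a 7-bit signed immediate that is scaled by
// the access size, where size is log2 of the access size in bytes. For
// example, storing a pair of X registers at offset 32 encodes the immediate as
// 32 >> 3 == 4.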
1040 Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
1041 ASSERT(((imm7 >> size) << size) == imm7);
1042 int scaled_imm7 = imm7 >> size;
1043 ASSERT(is_int7(scaled_imm7));
1044 return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
1045 }
1046
1047
1048 Instr Assembler::ImmShiftLS(unsigned shift_amount) {
1049 ASSERT(is_uint1(shift_amount));
1050 return shift_amount << ImmShiftLS_offset;
1051 }
1052
1053
1054 Instr Assembler::ImmException(int imm16) {
1055 ASSERT(is_uint16(imm16));
1056 return imm16 << ImmException_offset;
1057 }
1058
1059
1060 Instr Assembler::ImmSystemRegister(int imm15) {
1061 ASSERT(is_uint15(imm15));
1062 return imm15 << ImmSystemRegister_offset;
1063 }
1064
1065
1066 Instr Assembler::ImmHint(int imm7) {
1067 ASSERT(is_uint7(imm7));
1068 return imm7 << ImmHint_offset;
1069 }
1070
1071
1072 Instr Assembler::ImmBarrierDomain(int imm2) {
1073 ASSERT(is_uint2(imm2));
1074 return imm2 << ImmBarrierDomain_offset;
1075 }
1076
1077
1078 Instr Assembler::ImmBarrierType(int imm2) {
1079 ASSERT(is_uint2(imm2));
1080 return imm2 << ImmBarrierType_offset;
1081 }
1082
1083
1084 LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
1085 ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
1086 return static_cast<LSDataSize>(op >> SizeLS_offset);
1087 }
1088
1089
1090 Instr Assembler::ImmMoveWide(uint64_t imm) {
1091 ASSERT(is_uint16(imm));
1092 return imm << ImmMoveWide_offset;
1093 }
1094
1095
1096 Instr Assembler::ShiftMoveWide(int64_t shift) {
1097 ASSERT(is_uint2(shift));
1098 return shift << ShiftMoveWide_offset;
1099 }
1100
1101
1102 Instr Assembler::FPType(FPRegister fd) {
1103 return fd.Is64Bits() ? FP64 : FP32;
1104 }
1105
1106
1107 Instr Assembler::FPScale(unsigned scale) {
1108 ASSERT(is_uint6(scale));
1109 return scale << FPScale_offset;
1110 }
1111
1112
1113 const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
1114 return reg.Is64Bits() ? xzr : wzr;
1115 }
1116
1117
1118 void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
1119 LoadRelocatedValue(rt, operand, LDR_x_lit);
1120 }
1121
1122
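// Keeps the assembler buffer usable: grows it when fewer than kGap bytes
// remain and, once pc_offset() reaches next_buffer_check_, gives the constant
// pool a chance to be emitted.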
1123 inline void Assembler::CheckBuffer() {
1124 ASSERT(pc_ < (buffer_ + buffer_size_));
1125 if (buffer_space() < kGap) {
1126 GrowBuffer();
1127 }
1128 if (pc_offset() >= next_buffer_check_) {
1129 CheckConstPool(false, true);
1130 }
1131 }
1132
1133
1134 TypeFeedbackId Assembler::RecordedAstId() {
1135 ASSERT(!recorded_ast_id_.IsNone());
1136 return recorded_ast_id_;
1137 }
1138
1139
1140 void Assembler::ClearRecordedAstId() {
1141 recorded_ast_id_ = TypeFeedbackId::None();
1142 }
1143
1144
1145 } } // namespace v8::internal
1146
1147 #endif // V8_A64_ASSEMBLER_A64_INL_H_
