Chromium Code Reviews

Side by Side Diff: src/a64/assembler-a64-inl.h

Issue 144963003: A64: add missing files. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 11 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
28 #ifndef V8_A64_ASSEMBLER_A64_INL_H_
29 #define V8_A64_ASSEMBLER_A64_INL_H_
30
31 #include "a64/assembler-a64.h"
32 #include "cpu.h"
33 #include "debug.h"
34
35
36 namespace v8 {
37 namespace internal {
38
39
40 void RelocInfo::apply(intptr_t delta) {
41 UNIMPLEMENTED();
42 }
43
44
45 void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
46 ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
47 Assembler::set_target_address_at(pc_, target);
48 if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
49 Object* target_code = Code::GetCodeFromTargetAddress(target);
50 host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
51 host(), this, HeapObject::cast(target_code));
52 }
53 }
54
55
56 inline unsigned CPURegister::code() const {
57 ASSERT(IsValid());
58 return code_;
59 }
60
61
62 inline CPURegister::RegisterType CPURegister::type() const {
63 ASSERT(IsValidOrNone());
64 return type_;
65 }
66
67
68 inline RegList CPURegister::Bit() const {
69 ASSERT(code_ < (sizeof(RegList) * kBitsPerByte));
70 return IsValid() ? 1UL << code_ : 0;
71 }
72
73
74 inline unsigned CPURegister::SizeInBits() const {
75 ASSERT(IsValid());
76 return size_;
77 }
78
79
80 inline int CPURegister::SizeInBytes() const {
81 ASSERT(IsValid());
82 ASSERT(SizeInBits() % 8 == 0);
83 return size_ / 8;
84 }
85
86
87 inline bool CPURegister::Is32Bits() const {
88 ASSERT(IsValid());
89 return size_ == 32;
90 }
91
92
93 inline bool CPURegister::Is64Bits() const {
94 ASSERT(IsValid());
95 return size_ == 64;
96 }
97
98
99 inline bool CPURegister::IsValid() const {
100 if (IsValidRegister() || IsValidFPRegister()) {
101 ASSERT(!IsNone());
102 return true;
103 } else {
104 ASSERT(IsNone());
105 return false;
106 }
107 }
108
109
110 inline bool CPURegister::IsValidRegister() const {
111 return IsRegister() &&
112 ((size_ == kWRegSize) || (size_ == kXRegSize)) &&
113 ((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode));
114 }
115
116
117 inline bool CPURegister::IsValidFPRegister() const {
118 return IsFPRegister() &&
119 ((size_ == kSRegSize) || (size_ == kDRegSize)) &&
120 (code_ < kNumberOfFPRegisters);
121 }
122
123
124 inline bool CPURegister::IsNone() const {
125 // kNoRegister types should always have size 0 and code 0.
126 ASSERT((type_ != kNoRegister) || (code_ == 0));
127 ASSERT((type_ != kNoRegister) || (size_ == 0));
128
129 return type_ == kNoRegister;
130 }
131
132
133 inline bool CPURegister::Is(const CPURegister& other) const {
134 ASSERT(IsValidOrNone() && other.IsValidOrNone());
135 return (code_ == other.code_) && (size_ == other.size_) &&
136 (type_ == other.type_);
137 }
138
139
140 inline bool CPURegister::is(const CPURegister& other) const {
141 return Is(other);
142 }
143
144
145 inline bool CPURegister::IsRegister() const {
146 return type_ == kRegister;
147 }
148
149
150 inline bool CPURegister::IsFPRegister() const {
151 return type_ == kFPRegister;
152 }
153
154
155 inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
156 return (size_ == other.size_) && (type_ == other.type_);
157 }
158
159
160 inline bool CPURegister::IsValidOrNone() const {
161 return IsValid() || IsNone();
162 }
163
164
165 inline bool CPURegister::IsZero() const {
166 ASSERT(IsValid());
167 return IsRegister() && (code_ == kZeroRegCode);
168 }
169
170
171 inline bool CPURegister::IsSP() const {
172 ASSERT(IsValid());
173 return IsRegister() && (code_ == kSPRegInternalCode);
174 }
175
176
177 void CPURegList::Combine(const CPURegList& other) {
178 ASSERT(IsValid());
179 ASSERT(other.type() == type_);
180 ASSERT(other.RegisterSizeInBits() == size_);
181 list_ |= other.list();
182 }
183
184
185 void CPURegList::Remove(const CPURegList& other) {
186 ASSERT(IsValid());
187 ASSERT(other.type() == type_);
188 ASSERT(other.RegisterSizeInBits() == size_);
189 list_ &= ~other.list();
190 }
191
192
193 void CPURegList::Combine(const CPURegister& other) {
194 ASSERT(other.type() == type_);
195 ASSERT(other.SizeInBits() == size_);
196 Combine(other.code());
197 }
198
199
200 void CPURegList::Remove(const CPURegister& other) {
201 ASSERT(other.type() == type_);
202 ASSERT(other.SizeInBits() == size_);
203 Remove(other.code());
204 }
205
206
207 void CPURegList::Combine(int code) {
208 ASSERT(IsValid());
209 ASSERT(CPURegister(code, size_, type_).IsValid());
210 list_ |= (1UL << code);
211 }
212
213
214 void CPURegList::Remove(int code) {
215 ASSERT(IsValid());
216 ASSERT(CPURegister(code, size_, type_).IsValid());
217 list_ &= ~(1UL << code);
218 }
219
220
221 inline const Register& Register::XRegFromCode(unsigned code) {
222 // This function returns the zero register when code = 31. The stack pointer
223 // cannot be returned.
224 ASSERT(code < kNumberOfRegisters);
225 return xregisters[code];
226 }
227
228
229 inline const Register& Register::WRegFromCode(unsigned code) {
230 ASSERT(code < kNumberOfRegisters);
231 return wregisters[code];
232 }
233
234
235 inline const FPRegister& FPRegister::SRegFromCode(unsigned code) {
236 ASSERT(code < kNumberOfFPRegisters);
237 return sregisters[code];
238 }
239
240
241 inline const FPRegister& FPRegister::DRegFromCode(unsigned code) {
242 ASSERT(code < kNumberOfFPRegisters);
243 return dregisters[code];
244 }
245
246
247 inline const Register& CPURegister::W() {
248 ASSERT(IsValidRegister());
249 return Register::WRegFromCode(code_);
250 }
251
252
253 inline const Register& CPURegister::X() {
254 ASSERT(IsValidRegister());
255 return Register::XRegFromCode(code_);
256 }
257
258
259 inline const FPRegister& CPURegister::S() {
260 ASSERT(IsValidFPRegister());
261 return FPRegister::SRegFromCode(code_);
262 }
263
264
265 inline const FPRegister& CPURegister::D() {
266 ASSERT(IsValidFPRegister());
267 return FPRegister::DRegFromCode(code_);
268 }
269
270
271 // Operand.
272 #define DECLARE_INT_OPERAND_CONSTRUCTOR(type) \
273 Operand::Operand(type immediate, RelocInfo::Mode rmode) \
274 : immediate_(immediate), \
275 reg_(NoReg), \
276 rmode_(rmode) {}
277 DECLARE_INT_OPERAND_CONSTRUCTOR(int64_t)
278 DECLARE_INT_OPERAND_CONSTRUCTOR(uint64_t)
279 DECLARE_INT_OPERAND_CONSTRUCTOR(int32_t) // NOLINT(readability/casting)
280 DECLARE_INT_OPERAND_CONSTRUCTOR(uint32_t)
281 #undef DECLARE_INT_OPERAND_CONSTRUCTOR
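
The macro above stamps out one identical immediate constructor per common integer width, so call sites can pass any of the four types without overload ambiguity. For illustration, the int32_t expansion produced by the preprocessor is:

  Operand::Operand(int32_t immediate, RelocInfo::Mode rmode)
      : immediate_(immediate),
        reg_(NoReg),
        rmode_(rmode) {}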
282
283 Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
284 : reg_(reg),
285 shift_(shift),
286 extend_(NO_EXTEND),
287 shift_amount_(shift_amount),
288 rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
289 ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
290 ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
291 ASSERT(!reg.IsSP());
292 }
293
294
295 Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
296 : reg_(reg),
297 shift_(NO_SHIFT),
298 extend_(extend),
299 shift_amount_(shift_amount),
300 rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
301 ASSERT(reg.IsValid());
302 ASSERT(shift_amount <= 4);
303 ASSERT(!reg.IsSP());
304
305 // Extend modes SXTX and UXTX require a 64-bit register.
306 ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
307 }
308
309
310 Operand::Operand(Smi* value)
311 : immediate_(reinterpret_cast<intptr_t>(value)),
312 reg_(NoReg),
313 rmode_(RelocInfo::NONE64) {}
314
315
316 bool Operand::IsImmediate() const {
317 return reg_.Is(NoReg);
318 }
319
320
321 bool Operand::IsShiftedRegister() const {
322 return reg_.IsValid() && (shift_ != NO_SHIFT);
323 }
324
325
326 bool Operand::IsExtendedRegister() const {
327 return reg_.IsValid() && (extend_ != NO_EXTEND);
328 }
329
330
331 bool Operand::IsZero() const {
332 if (IsImmediate()) {
333 return immediate() == 0;
334 } else {
335 return reg().IsZero();
336 }
337 }
338
339
340 Operand Operand::ToExtendedRegister() const {
341 ASSERT(IsShiftedRegister());
342 ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
343 return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
344 }
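
This conversion exists because some instruction forms, notably add/sub involving the stack pointer, accept only extended-register operands; an LSL of up to 4 on a plain register is exactly representable as a UXTX (or UXTW) extend with the same left shift. A usage sketch (register name illustrative):

  Operand shifted(x1, LSL, 2);                      // shifted-register form
  Operand extended = shifted.ToExtendedRegister();  // same value as (x1, UXTX, 2)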
345
346
347 int64_t Operand::immediate() const {
348 ASSERT(IsImmediate());
349 return immediate_;
350 }
351
352
353 Register Operand::reg() const {
354 ASSERT(IsShiftedRegister() || IsExtendedRegister());
355 return reg_;
356 }
357
358
359 Shift Operand::shift() const {
360 ASSERT(IsShiftedRegister());
361 return shift_;
362 }
363
364
365 Extend Operand::extend() const {
366 ASSERT(IsExtendedRegister());
367 return extend_;
368 }
369
370
371 unsigned Operand::shift_amount() const {
372 ASSERT(IsShiftedRegister() || IsExtendedRegister());
373 return shift_amount_;
374 }
375
376
377 Operand Operand::UntagSmi(Register smi) {
378 ASSERT(smi.Is64Bits());
379 return Operand(smi, ASR, kSmiShift);
380 }
381
382
383 Operand Operand::UntagSmiAndScale(Register smi, int scale) {
384 ASSERT(smi.Is64Bits());
385 ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
386 if (scale > kSmiShift) {
387 return Operand(smi, LSL, scale - kSmiShift);
388 } else if (scale < kSmiShift) {
389 return Operand(smi, ASR, kSmiShift - scale);
390 }
391 return Operand(smi);
392 }
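
The two branches fold untagging and index scaling into one shift. Assuming V8's 64-bit smi layout (the 32-bit payload held in the upper word, so kSmiShift == 32), a worked example with scale == 3 for 8-byte elements:

  // untagged      = smi >> 32        (UntagSmi: ASR #32)
  // untagged << 3 = smi >> (32 - 3)  (UntagSmiAndScale: ASR #29)
  // The low 32 bits of a smi are zero, so the folded shift loses nothing.
  int64_t UntagAndScaleSketch(int64_t smi, int scale) {  // scale < kSmiShift
    const int kSmiShift = 32;  // assumed layout
    return smi >> (kSmiShift - scale);
  }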
393
394
395 MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
396 : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode) {
397 ASSERT(base.Is64Bits() && !base.IsZero());
398 }
399
400
401 MemOperand::MemOperand(Register base,
402 Register regoffset,
403 Extend extend,
404 unsigned shift_amount)
405 : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
406 shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
407 ASSERT(base.Is64Bits() && !base.IsZero());
408 ASSERT(!regoffset.IsSP());
409 ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
410
411 // SXTX extend mode requires a 64-bit offset register.
412 ASSERT(regoffset.Is64Bits() || (extend != SXTX));
413 }
414
415
416 MemOperand::MemOperand(Register base,
417 Register regoffset,
418 Shift shift,
419 unsigned shift_amount)
420 : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
421 shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
422 ASSERT(base.Is64Bits() && !base.IsZero());
423 ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
424 ASSERT(shift == LSL);
425 }
426
427
428 MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
429 : base_(base), addrmode_(addrmode) {
430 ASSERT(base.Is64Bits() && !base.IsZero());
431
432 if (offset.IsImmediate()) {
433 offset_ = offset.immediate();
434
435 regoffset_ = NoReg;
436 } else if (offset.IsShiftedRegister()) {
437 ASSERT(addrmode == Offset);
438
439 regoffset_ = offset.reg();
441 shift_ = offset.shift();
441 shift_amount_ = offset.shift_amount();
442
443 extend_ = NO_EXTEND;
444 offset_ = 0;
445
446 // These assertions match those in the shifted-register constructor.
447 ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
448 ASSERT(shift_ == LSL);
449 } else {
450 ASSERT(offset.IsExtendedRegister());
451 ASSERT(addrmode == Offset);
452
453 regoffset_ = offset.reg();
454 extend_ = offset.extend();
455 shift_amount_ = offset.shift_amount();
456
457 shift_ = NO_SHIFT;
458 offset_ = 0;
459
460 // These assertions match those in the extended-register constructor.
461 ASSERT(!regoffset_.IsSP());
462 ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
463 ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
464 }
465 }
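
A usage sketch of the three offset shapes handled above, written with the equivalent dedicated constructors (register names illustrative):

  MemOperand imm_off(x0, 16, Offset);      // [x0, #16]
  MemOperand ext_off(x0, w1, SXTW, 2);     // [x0, w1, sxtw #2]
  MemOperand shift_off(x0, x2, LSL, 3);    // [x0, x2, lsl #3]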
466
467 bool MemOperand::IsImmediateOffset() const {
468 return (addrmode_ == Offset) && regoffset_.Is(NoReg);
469 }
470
471
472 bool MemOperand::IsRegisterOffset() const {
473 return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
474 }
475
476
477 bool MemOperand::IsPreIndex() const {
478 return addrmode_ == PreIndex;
479 }
480
481
482 bool MemOperand::IsPostIndex() const {
483 return addrmode_ == PostIndex;
484 }
485
486 Operand MemOperand::OffsetAsOperand() const {
487 if (IsImmediateOffset()) {
488 return offset();
489 } else {
490 ASSERT(IsRegisterOffset());
491 if (extend() == NO_EXTEND) {
492 return Operand(regoffset(), shift(), shift_amount());
493 } else {
494 return Operand(regoffset(), extend(), shift_amount());
495 }
496 }
497 }
498
499
500 Address Assembler::target_pointer_address_at(Address pc) {
501 Instruction* instr = reinterpret_cast<Instruction*>(pc);
502 ASSERT(instr->IsLdrLiteralX());
503 return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
504 }
505
506
507 // Read or modify the code target address referenced by the call sequence at pc.
508 Address Assembler::target_pointer_at(Address pc) {
509 return Memory::Address_at(target_pointer_address_at(pc));
510 }
511
512
513 Address Assembler::target_address_from_return_address(Address pc) {
514 // Returns the address from which the call target can be read (the load
515 // literal at the start of the call sequence), given the return address
516 // that execution resumes at after the call. The A64 call sequence is:
517 // ldr ip0, #... @ load from literal pool
518 // blr ip0
519 Address candidate = pc - 2 * kInstructionSize;
520 Instruction* instr = reinterpret_cast<Instruction*>(candidate);
521 USE(instr);
522 ASSERT(instr->IsLdrLiteralX());
523 return candidate;
524 }
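
The subtraction above relies on the fixed two-instruction shape of the relocated call sequence; laid out explicitly (kInstructionSize == 4):

  //   candidate:      ldr ip0, #...     ; pc - 8: load target from literal pool
  //   candidate + 4:  blr ip0           ; pc - 4
  //   pc:             <return address>  ; execution resumes here after the call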
525
526
527 Address Assembler::return_address_from_call_start(Address pc) {
528 // The call, generated by MacroAssembler::Call, is one of two possible
529 // sequences:
530 //
531 // Without relocation:
532 // movz ip0, #(target & 0x000000000000ffff)
533 // movk ip0, #(target & 0x00000000ffff0000)
534 // movk ip0, #(target & 0x0000ffff00000000)
535 // movk ip0, #(target & 0xffff000000000000)
536 // blr ip0
537 //
538 // With relocation:
539 // ldr ip0, =target
540 // blr ip0
541 //
542 // The return address is immediately after the blr instruction in both cases,
543 // so it can be found by adding the call size to the address at the start of
544 // the call sequence.
545 STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 5 * kInstructionSize);
546 STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);
547
548 Instruction* instr = reinterpret_cast<Instruction*>(pc);
549 if (instr->IsMovz()) {
550 // Verify the instruction sequence.
551 ASSERT(instr->following(1)->IsMovk());
552 ASSERT(instr->following(2)->IsMovk());
553 ASSERT(instr->following(3)->IsMovk());
554 ASSERT(instr->following(4)->IsBranchAndLinkToRegister());
555 return pc + Assembler::kCallSizeWithoutRelocation;
556 } else {
557 // Verify the instruction sequence.
558 ASSERT(instr->IsLdrLiteralX());
559 ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
560 return pc + Assembler::kCallSizeWithRelocation;
561 }
562 }
563
564
565 Address Assembler::target_address_at(Address pc) {
566 return target_pointer_at(pc);
567 }
568
569
570 void Assembler::set_target_address_at(Address pc, Address target) {
571 set_target_pointer_at(pc, target);
572 }
573
574
575 void Assembler::deserialization_set_special_target_at(
576 Address constant_pool_entry, Address target) {
577 Memory::Address_at(constant_pool_entry) = target;
578 }
579
580
581 void Assembler::set_external_target_at(Address constant_pool_entry,
582 Address target) {
583 Memory::Address_at(constant_pool_entry) = target;
584 }
585
586
587 void Assembler::set_target_pointer_at(Address pc, Address target) {
588 Memory::Address_at(target_pointer_address_at(pc)) = target;
589 // Intuitively, we would think it is necessary to always flush the
590 // instruction cache after patching a target address in the code as follows:
591 // CPU::FlushICache(pc, sizeof(target));
592 // However, on A64, no instruction is actually patched in the case of
593 // embedded constants of the form:
594 // ldr ip0, [pc, #...]
595 // Only the constant pool entry is rewritten; the instruction that loads it
596 // remains unchanged, so a flush is not required.
597 }
598
599
600 int RelocInfo::target_address_size() {
601 return kPointerSize;
602 }
603
604
605 Address RelocInfo::target_address() {
606 ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
607 return Assembler::target_address_at(pc_);
608 }
609
610
611 Address RelocInfo::target_address_address() {
612 ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
613 || rmode_ == EMBEDDED_OBJECT
614 || rmode_ == EXTERNAL_REFERENCE);
615 return reinterpret_cast<Address>(Assembler::target_pointer_address_at(pc_));
616 }
617
618
619 Object* RelocInfo::target_object() {
620 ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
621 return reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
622 }
623
624
625 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
626 ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
627 return Handle<Object>(reinterpret_cast<Object**>(
628 Assembler::target_pointer_at(pc_)));
629 }
630
631
632 Object** RelocInfo::target_object_address() {
633 ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
634 reconstructed_obj_ptr_ =
635 reinterpret_cast<Object*>(Assembler::target_pointer_at(pc_));
636 return &reconstructed_obj_ptr_;
637 }
638
639
640 void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
641 ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
642 Assembler::set_target_pointer_at(pc_, reinterpret_cast<Address>(target));
643 if (mode == UPDATE_WRITE_BARRIER &&
644 host() != NULL &&
645 target->IsHeapObject()) {
646 host()->GetHeap()->incremental_marking()->RecordWrite(
647 host(), &Memory::Object_at(pc_), HeapObject::cast(target));
648 }
649 }
650
651
652 Address* RelocInfo::target_reference_address() {
653 ASSERT(rmode_ == EXTERNAL_REFERENCE);
654 reconstructed_adr_ptr_ = Assembler::target_address_at(pc_);
655 return &reconstructed_adr_ptr_;
656 }
657
658
659 Address RelocInfo::target_runtime_entry(Assembler* origin) {
660 ASSERT(IsRuntimeEntry(rmode_));
661 return target_address();
662 }
663
664
665 void RelocInfo::set_target_runtime_entry(Address target,
666 WriteBarrierMode mode) {
667 ASSERT(IsRuntimeEntry(rmode_));
668 if (target_address() != target) set_target_address(target, mode);
669 }
670
671
672 Handle<JSGlobalPropertyCell> RelocInfo::target_cell_handle() {
673 UNIMPLEMENTED();
674 JSGlobalPropertyCell* null_cell = NULL;
675 return Handle<JSGlobalPropertyCell>(null_cell);
676 }
677
678
679 JSGlobalPropertyCell* RelocInfo::target_cell() {
680 ASSERT(rmode_ == RelocInfo::GLOBAL_PROPERTY_CELL);
681 return JSGlobalPropertyCell::FromValueAddress(Memory::Address_at(pc_));
682 }
683
684
685 void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
686 WriteBarrierMode mode) {
687 UNIMPLEMENTED();
688 }
689
690
691 static const int kCodeAgeSequenceSize = 5 * kInstructionSize;
692 static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;
693
694
695 Code* RelocInfo::code_age_stub() {
696 ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
697 ASSERT(!Code::IsYoungSequence(pc_));
698 // Read the stub entry point from the code age sequence.
699 Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
700 return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
701 }
702
703
704 void RelocInfo::set_code_age_stub(Code* stub) {
705 ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
706 ASSERT(!Code::IsYoungSequence(pc_));
707 // Overwrite the stub entry point in the code age sequence. This is loaded as
708 // a literal so there is no need to call FlushICache here.
709 Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
710 Memory::Address_at(stub_entry_address) = stub->instruction_start();
711 }
712
713
714 Address RelocInfo::call_address() {
715 ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
716 (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
717 // For the above sequences the RelocInfo points to the literal load that
718 // loads the call address.
719 return Assembler::target_address_at(pc_);
720 }
721
722
723 void RelocInfo::set_call_address(Address target) {
724 ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
725 (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
726 Assembler::set_target_address_at(pc_, target);
727 if (host() != NULL) {
728 Object* target_code = Code::GetCodeFromTargetAddress(target);
729 host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
730 host(), this, HeapObject::cast(target_code));
731 }
732 }
733
734
735 bool RelocInfo::IsPatchedReturnSequence() {
736 // The sequence must be:
737 // ldr ip0, [pc, #offset]
738 // blr ip0
739 // See a64/debug-a64.cc BreakLocationIterator::SetDebugBreakAtReturn().
740 Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
741 Instruction* i2 = i1->following();
742 return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
743 i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
744 }
745
746
747 bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
748 Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
749 return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
750 }
751
752
753 void RelocInfo::Visit(ObjectVisitor* visitor) {
754 RelocInfo::Mode mode = rmode();
755 if (mode == RelocInfo::EMBEDDED_OBJECT) {
756 visitor->VisitEmbeddedPointer(this);
757 } else if (RelocInfo::IsCodeTarget(mode)) {
758 visitor->VisitCodeTarget(this);
759 } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
760 visitor->VisitGlobalPropertyCell(this);
761 } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
762 visitor->VisitExternalReference(this);
763 #ifdef ENABLE_DEBUGGER_SUPPORT
764 // TODO(isolates): Get a cached isolate below.
765 } else if (((RelocInfo::IsJSReturn(mode) &&
766 IsPatchedReturnSequence()) ||
767 (RelocInfo::IsDebugBreakSlot(mode) &&
768 IsPatchedDebugBreakSlotSequence())) &&
769 Isolate::Current()->debug()->has_break_points()) {
770 visitor->VisitDebugTarget(this);
771 #endif
772 } else if (RelocInfo::IsRuntimeEntry(mode)) {
773 visitor->VisitRuntimeEntry(this);
774 }
775 }
776
777
778 template<typename StaticVisitor>
779 void RelocInfo::Visit(Heap* heap) {
780 RelocInfo::Mode mode = rmode();
781 if (mode == RelocInfo::EMBEDDED_OBJECT) {
782 StaticVisitor::VisitEmbeddedPointer(heap, this);
783 } else if (RelocInfo::IsCodeTarget(mode)) {
784 StaticVisitor::VisitCodeTarget(heap, this);
785 } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
786 StaticVisitor::VisitGlobalPropertyCell(heap, this);
787 } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
788 StaticVisitor::VisitExternalReference(this);
789 #ifdef ENABLE_DEBUGGER_SUPPORT
790 } else if (heap->isolate()->debug()->has_break_points() &&
791 ((RelocInfo::IsJSReturn(mode) &&
792 IsPatchedReturnSequence()) ||
793 (RelocInfo::IsDebugBreakSlot(mode) &&
794 IsPatchedDebugBreakSlotSequence()))) {
795 StaticVisitor::VisitDebugTarget(heap, this);
796 #endif
797 } else if (RelocInfo::IsRuntimeEntry(mode)) {
798 StaticVisitor::VisitRuntimeEntry(this);
799 }
800 }
801
802
803 LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
804 ASSERT(rt.IsValid());
805 if (rt.IsRegister()) {
806 return rt.Is64Bits() ? LDR_x : LDR_w;
807 } else {
808 ASSERT(rt.IsFPRegister());
809 return rt.Is64Bits() ? LDR_d : LDR_s;
810 }
811 }
812
813
814 LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
815 const CPURegister& rt2) {
816 ASSERT(AreSameSizeAndType(rt, rt2));
817 USE(rt2);
818 if (rt.IsRegister()) {
819 return rt.Is64Bits() ? LDP_x : LDP_w;
820 } else {
821 ASSERT(rt.IsFPRegister());
822 return rt.Is64Bits() ? LDP_d : LDP_s;
823 }
824 }
825
826
827 LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
828 ASSERT(rt.IsValid());
829 if (rt.IsRegister()) {
830 return rt.Is64Bits() ? STR_x : STR_w;
831 } else {
832 ASSERT(rt.IsFPRegister());
833 return rt.Is64Bits() ? STR_d : STR_s;
834 }
835 }
836
837
838 LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
839 const CPURegister& rt2) {
840 ASSERT(AreSameSizeAndType(rt, rt2));
841 USE(rt2);
842 if (rt.IsRegister()) {
843 return rt.Is64Bits() ? STP_x : STP_w;
844 } else {
845 ASSERT(rt.IsFPRegister());
846 return rt.Is64Bits() ? STP_d : STP_s;
847 }
848 }
849
850
851 LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
852 const CPURegister& rt, const CPURegister& rt2) {
853 ASSERT(AreSameSizeAndType(rt, rt2));
854 USE(rt2);
855 if (rt.IsRegister()) {
856 return rt.Is64Bits() ? LDNP_x : LDNP_w;
857 } else {
858 ASSERT(rt.IsFPRegister());
859 return rt.Is64Bits() ? LDNP_d : LDNP_s;
860 }
861 }
862
863
864 LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
865 const CPURegister& rt, const CPURegister& rt2) {
866 ASSERT(AreSameSizeAndType(rt, rt2));
867 USE(rt2);
868 if (rt.IsRegister()) {
869 return rt.Is64Bits() ? STNP_x : STNP_w;
870 } else {
871 ASSERT(rt.IsFPRegister());
872 return rt.Is64Bits() ? STNP_d : STNP_s;
873 }
874 }
875
876
877 int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
878 ASSERT(kStartOfLabelLinkChain == 0);
879 int offset = LinkAndGetByteOffsetTo(label);
880 ASSERT(IsAligned(offset, kInstructionSize));
881 return offset >> kInstructionSizeLog2;
882 }
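
Every A64 instruction is four bytes, so the byte offset converts to an instruction offset with an arithmetic right shift (kInstructionSizeLog2 == 2), which also preserves the sign of backward links. For example:

  int byte_offset = -16;                // label four instructions behind
  int instr_offset = byte_offset >> 2;  // == -4; sign preserved by ASR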
883
884
885 Instr Assembler::Flags(FlagsUpdate S) {
886 if (S == SetFlags) {
887 return 1 << FlagsUpdate_offset;
888 } else if (S == LeaveFlags) {
889 return 0 << FlagsUpdate_offset;
890 }
891 UNREACHABLE();
892 return 0;
893 }
894
895
896 Instr Assembler::Cond(Condition cond) {
897 return cond << Condition_offset;
898 }
899
900
901 Instr Assembler::ImmPCRelAddress(int imm21) {
902 ASSERT(is_int21(imm21));
903 Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
904 Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
905 Instr immlo = imm << ImmPCRelLo_offset;
906 return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
907 }
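
This is the ADR-style split of a 21-bit pc-relative immediate: per the ARMv8 encoding, the low two bits form immlo (instruction bits 30:29) and the remaining nineteen form immhi (bits 23:5). The same computation as a standalone sketch, with the assumed field constants written out:

  uint32_t EncodePCRelSketch(int32_t imm21) {                // sketch only
    uint32_t imm = static_cast<uint32_t>(imm21) & 0x1fffff;  // truncate to 21 bits
    uint32_t immlo = (imm & 0x3u) << 29;                     // ImmPCRelLo_offset == 29
    uint32_t immhi = ((imm >> 2) << 5) & (0x7ffffu << 5);    // ImmPCRelHi: bits 23:5
    return immhi | immlo;
  }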
908
909
910 Instr Assembler::ImmUncondBranch(int imm26) {
911 ASSERT(is_int26(imm26));
912 return truncate_to_int26(imm26) << ImmUncondBranch_offset;
913 }
914
915
916 Instr Assembler::ImmCondBranch(int imm19) {
917 ASSERT(is_int19(imm19));
918 return truncate_to_int19(imm19) << ImmCondBranch_offset;
919 }
920
921
922 Instr Assembler::ImmCmpBranch(int imm19) {
923 ASSERT(is_int19(imm19));
924 return truncate_to_int19(imm19) << ImmCmpBranch_offset;
925 }
926
927
928 Instr Assembler::ImmTestBranch(int imm14) {
929 ASSERT(is_int14(imm14));
930 return truncate_to_int14(imm14) << ImmTestBranch_offset;
931 }
932
933
934 Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
935 ASSERT(is_uint6(bit_pos));
936 // Shift by (ImmTestBranchBit5_offset - 5) so that bit 5 of bit_pos lands
937 unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
938 unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
939 b5 &= ImmTestBranchBit5_mask;
940 b40 &= ImmTestBranchBit40_mask;
941 return b5 | b40;
942 }
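
TBZ/TBNZ store the 6-bit tested-bit index in two fields: bit 5 of bit_pos doubles as the operand-size bit in instruction bit 31, and bits 4:0 sit in bits 23:19. A worked example under those assumed field offsets:

  // bit_pos = 37 (0b100101): b5 = 1 -> bit 31 set (64-bit form),
  //                          b40 = 0b00101 -> bits 23:19.
  uint32_t EncodeTestBitSketch(unsigned bit_pos) {  // bit_pos < 64
    uint32_t b5  = ((bit_pos >> 5) & 1u) << 31;     // ImmTestBranchBit5
    uint32_t b40 = (bit_pos & 0x1fu) << 19;         // ImmTestBranchBit40
    return b5 | b40;
  }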
943
944
945 Instr Assembler::SF(Register rd) {
946 return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
947 }
948
949
950 Instr Assembler::ImmAddSub(int64_t imm) {
951 ASSERT(IsImmAddSub(imm));
952 if (is_uint12(imm)) { // No shift required.
953 return imm << ImmAddSub_offset;
954 } else {
955 return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
956 }
957 }
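
ADD/SUB (immediate) takes a 12-bit value with an optional fixed LSL #12, so IsImmAddSub (defined elsewhere) must accept exactly those two shapes. A sketch of that predicate under this reading:

  bool IsImmAddSubSketch(int64_t imm) {     // illustrative, not V8's definition
    return (imm >= 0) &&
           (((imm & 0xfffLL) == imm) ||     // fits the unshifted 12-bit field
            ((imm & 0xfff000LL) == imm));   // fits with the shift bit set
  }

For example, 0x5000 fails the first test but passes the second, so ImmAddSub above emits 0x5 in the immediate field with ShiftAddSub set.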
958
959
960 Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
961 ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
962 ((reg_size == kWRegSize) && is_uint5(imms)));
963 USE(reg_size);
964 return imms << ImmS_offset;
965 }
966
967
968 Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
969 ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
970 ((reg_size == kWRegSize) && is_uint5(immr)));
971 USE(reg_size);
972 ASSERT(is_uint6(immr));
973 return immr << ImmR_offset;
974 }
975
976
977 Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
978 ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
979 ASSERT(is_uint6(imms));
980 ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
981 USE(reg_size);
982 return imms << ImmSetBits_offset;
983 }
984
985
986 Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
987 ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
988 ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
989 ((reg_size == kWRegSize) && is_uint5(immr)));
990 USE(reg_size);
991 return immr << ImmRotate_offset;
992 }
993
994
995 Instr Assembler::ImmLLiteral(int imm19) {
996 ASSERT(is_int19(imm19));
997 return truncate_to_int19(imm19) << ImmLLiteral_offset;
998 }
999
1000
1001 Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
1002 ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
1003 ASSERT((reg_size == kXRegSize) || (bitn == 0));
1004 USE(reg_size);
1005 return bitn << BitN_offset;
1006 }
1007
1008
1009 Instr Assembler::ShiftDP(Shift shift) {
1010 ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
1011 return shift << ShiftDP_offset;
1012 }
1013
1014
1015 Instr Assembler::ImmDPShift(unsigned amount) {
1016 ASSERT(is_uint6(amount));
1017 return amount << ImmDPShift_offset;
1018 }
1019
1020
1021 Instr Assembler::ExtendMode(Extend extend) {
1022 return extend << ExtendMode_offset;
1023 }
1024
1025
1026 Instr Assembler::ImmExtendShift(unsigned left_shift) {
1027 ASSERT(left_shift <= 4);
1028 return left_shift << ImmExtendShift_offset;
1029 }
1030
1031
1032 Instr Assembler::ImmCondCmp(unsigned imm) {
1033 ASSERT(is_uint5(imm));
1034 return imm << ImmCondCmp_offset;
1035 }
1036
1037
1038 Instr Assembler::Nzcv(StatusFlags nzcv) {
1039 return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
1040 }
1041
1042
1043 Instr Assembler::ImmLSUnsigned(int imm12) {
1044 ASSERT(is_uint12(imm12));
1045 return imm12 << ImmLSUnsigned_offset;
1046 }
1047
1048
1049 Instr Assembler::ImmLS(int imm9) {
1050 ASSERT(is_int9(imm9));
1051 return truncate_to_int9(imm9) << ImmLS_offset;
1052 }
1053
1054
1055 Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
1056 ASSERT(((imm7 >> size) << size) == imm7);
1057 int scaled_imm7 = imm7 >> size;
1058 ASSERT(is_int7(scaled_imm7));
1059 return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
1060 }
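
The pair immediate is a signed 7-bit multiple of the access size, so 8-byte pairs (size == 3) reach byte offsets from -512 to +504 in steps of 8. A sketch of the validity check implied by the two asserts:

  bool IsValidLSPairOffsetSketch(int imm7, int size_log2) {
    if ((imm7 & ((1 << size_log2) - 1)) != 0) return false;  // misaligned offset
    int scaled = imm7 >> size_log2;
    return (scaled >= -64) && (scaled <= 63);                // is_int7
  }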
1061
1062
1063 Instr Assembler::ImmShiftLS(unsigned shift_amount) {
1064 ASSERT(is_uint1(shift_amount));
1065 return shift_amount << ImmShiftLS_offset;
1066 }
1067
1068
1069 Instr Assembler::ImmException(int imm16) {
1070 ASSERT(is_uint16(imm16));
1071 return imm16 << ImmException_offset;
1072 }
1073
1074
1075 Instr Assembler::ImmSystemRegister(int imm15) {
1076 ASSERT(is_uint15(imm15));
1077 return imm15 << ImmSystemRegister_offset;
1078 }
1079
1080
1081 Instr Assembler::ImmHint(int imm7) {
1082 ASSERT(is_uint7(imm7));
1083 return imm7 << ImmHint_offset;
1084 }
1085
1086
1087 Instr Assembler::ImmBarrierDomain(int imm2) {
1088 ASSERT(is_uint2(imm2));
1089 return imm2 << ImmBarrierDomain_offset;
1090 }
1091
1092
1093 Instr Assembler::ImmBarrierType(int imm2) {
1094 ASSERT(is_uint2(imm2));
1095 return imm2 << ImmBarrierType_offset;
1096 }
1097
1098
1099 LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
1100 ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
1101 return static_cast<LSDataSize>(op >> SizeLS_offset);
1102 }
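
The assert pins the size field to the topmost bits of the 32-bit opcode, so a bare right shift with no mask extracts it; with SizeLS_width == 2 that puts SizeLS_offset at 30. For example, an x-register load carries 0b11 in bits 31:30:

  unsigned ExtractLSSizeSketch(uint32_t op) {  // assumes SizeLS_offset == 30
    return op >> 30;                           // top two bits; no mask needed
  }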
1103
1104
1105 Instr Assembler::ImmMoveWide(uint64_t imm) {
1106 ASSERT(is_uint16(imm));
1107 return imm << ImmMoveWide_offset;
1108 }
1109
1110
1111 Instr Assembler::ShiftMoveWide(int64_t shift) {
1112 ASSERT(is_uint2(shift));
1113 return shift << ShiftMoveWide_offset;
1114 }
1115
1116
1117 Instr Assembler::FPType(FPRegister fd) {
1118 return fd.Is64Bits() ? FP64 : FP32;
1119 }
1120
1121
1122 Instr Assembler::FPScale(unsigned scale) {
1123 ASSERT(is_uint6(scale));
1124 return scale << FPScale_offset;
1125 }
1126
1127
1128 const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
1129 return reg.Is64Bits() ? xzr : wzr;
1130 }
1131
1132
1133 void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
1134 LoadRelocatedValue(rt, operand, LDR_x_lit);
1135 }
1136
1137
1138 inline void Assembler::CheckBuffer() {
1139 ASSERT(pc_ < (buffer_ + buffer_size_));
1140 if (buffer_space() < kGap) {
1141 GrowBuffer();
1142 }
1143 if (pc_offset() >= next_buffer_check_) {
1144 CheckConstPool(false, true);
1145 }
1146 }
1147
1148
1149 TypeFeedbackId Assembler::RecordedAstId() {
1150 ASSERT(!recorded_ast_id_.IsNone());
1151 return recorded_ast_id_;
1152 }
1153
1154
1155 void Assembler::ClearRecordedAstId() {
1156 recorded_ast_id_ = TypeFeedbackId::None();
1157 }
1158
1159
1160 } } // namespace v8::internal
1161
1162 #endif // V8_A64_ASSEMBLER_A64_INL_H_