// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_A64_ASSEMBLER_A64_INL_H_
#define V8_A64_ASSEMBLER_A64_INL_H_

#include "a64/assembler-a64.h"
#include "cpu.h"
#include "debug.h"


namespace v8 {
namespace internal {


void RelocInfo::apply(intptr_t delta) {
  UNIMPLEMENTED();
}


void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  Assembler::set_target_address_at(pc_, host_, target);
  if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


inline unsigned CPURegister::code() const {
  ASSERT(IsValid());
  return reg_code;
}


inline CPURegister::RegisterType CPURegister::type() const {
  ASSERT(IsValidOrNone());
  return reg_type;
}


inline RegList CPURegister::Bit() const {
  ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
  return IsValid() ? 1UL << reg_code : 0;
}


inline unsigned CPURegister::SizeInBits() const {
  ASSERT(IsValid());
  return reg_size;
}


inline int CPURegister::SizeInBytes() const {
  ASSERT(IsValid());
  ASSERT(SizeInBits() % 8 == 0);
  return reg_size / 8;
}


inline bool CPURegister::Is32Bits() const {
  ASSERT(IsValid());
  return reg_size == 32;
}


inline bool CPURegister::Is64Bits() const {
  ASSERT(IsValid());
  return reg_size == 64;
}


inline bool CPURegister::IsValid() const {
  if (IsValidRegister() || IsValidFPRegister()) {
    ASSERT(!IsNone());
    return true;
  } else {
    ASSERT(IsNone());
    return false;
  }
}


inline bool CPURegister::IsValidRegister() const {
  return IsRegister() &&
         ((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits)) &&
         ((reg_code < kNumberOfRegisters) || (reg_code == kSPRegInternalCode));
}


inline bool CPURegister::IsValidFPRegister() const {
  return IsFPRegister() &&
         ((reg_size == kSRegSizeInBits) || (reg_size == kDRegSizeInBits)) &&
         (reg_code < kNumberOfFPRegisters);
}


inline bool CPURegister::IsNone() const {
  // kNoRegister types should always have size 0 and code 0.
  ASSERT((reg_type != kNoRegister) || (reg_code == 0));
  ASSERT((reg_type != kNoRegister) || (reg_size == 0));

  return reg_type == kNoRegister;
}


inline bool CPURegister::Is(const CPURegister& other) const {
  ASSERT(IsValidOrNone() && other.IsValidOrNone());
  return (reg_code == other.reg_code) && (reg_size == other.reg_size) &&
         (reg_type == other.reg_type);
}


inline bool CPURegister::IsRegister() const {
  return reg_type == kRegister;
}


inline bool CPURegister::IsFPRegister() const {
  return reg_type == kFPRegister;
}


inline bool CPURegister::IsSameSizeAndType(const CPURegister& other) const {
  return (reg_size == other.reg_size) && (reg_type == other.reg_type);
}


inline bool CPURegister::IsValidOrNone() const {
  return IsValid() || IsNone();
}


inline bool CPURegister::IsZero() const {
  ASSERT(IsValid());
  return IsRegister() && (reg_code == kZeroRegCode);
}


inline bool CPURegister::IsSP() const {
  ASSERT(IsValid());
  return IsRegister() && (reg_code == kSPRegInternalCode);
}


inline void CPURegList::Combine(const CPURegList& other) {
  ASSERT(IsValid());
  ASSERT(other.type() == type_);
  ASSERT(other.RegisterSizeInBits() == size_);
  list_ |= other.list();
}


inline void CPURegList::Remove(const CPURegList& other) {
  ASSERT(IsValid());
  if (other.type() == type_) {
    list_ &= ~other.list();
  }
}


inline void CPURegList::Combine(const CPURegister& other) {
  ASSERT(other.type() == type_);
  ASSERT(other.SizeInBits() == size_);
  Combine(other.code());
}


inline void CPURegList::Remove(const CPURegister& other1,
                               const CPURegister& other2,
                               const CPURegister& other3,
                               const CPURegister& other4) {
  if (!other1.IsNone() && (other1.type() == type_)) Remove(other1.code());
  if (!other2.IsNone() && (other2.type() == type_)) Remove(other2.code());
  if (!other3.IsNone() && (other3.type() == type_)) Remove(other3.code());
  if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
}


inline void CPURegList::Combine(int code) {
  ASSERT(IsValid());
  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
  list_ |= (1UL << code);
}


inline void CPURegList::Remove(int code) {
  ASSERT(IsValid());
  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
  list_ &= ~(1UL << code);
}


inline Register Register::XRegFromCode(unsigned code) {
  // This function returns the zero register when code == 31. The stack
  // pointer cannot be returned.
  ASSERT(code < kNumberOfRegisters);
  return Register::Create(code, kXRegSizeInBits);
}


inline Register Register::WRegFromCode(unsigned code) {
  ASSERT(code < kNumberOfRegisters);
  return Register::Create(code, kWRegSizeInBits);
}


inline FPRegister FPRegister::SRegFromCode(unsigned code) {
  ASSERT(code < kNumberOfFPRegisters);
  return FPRegister::Create(code, kSRegSizeInBits);
}


inline FPRegister FPRegister::DRegFromCode(unsigned code) {
  ASSERT(code < kNumberOfFPRegisters);
  return FPRegister::Create(code, kDRegSizeInBits);
}


inline Register CPURegister::W() const {
  ASSERT(IsValidRegister());
  return Register::WRegFromCode(reg_code);
}


inline Register CPURegister::X() const {
  ASSERT(IsValidRegister());
  return Register::XRegFromCode(reg_code);
}


inline FPRegister CPURegister::S() const {
  ASSERT(IsValidFPRegister());
  return FPRegister::SRegFromCode(reg_code);
}


inline FPRegister CPURegister::D() const {
  ASSERT(IsValidFPRegister());
  return FPRegister::DRegFromCode(reg_code);
}


// Operand.
template<typename T>
Operand::Operand(Handle<T> value) : reg_(NoReg) {
  initialize_handle(value);
}


// Default initializer is for int types
template<typename int_t>
struct OperandInitializer {
  static const bool kIsIntType = true;
  static inline RelocInfo::Mode rmode_for(int_t) {
    return sizeof(int_t) == 8 ? RelocInfo::NONE64 : RelocInfo::NONE32;
  }
  static inline int64_t immediate_for(int_t t) {
    STATIC_ASSERT(sizeof(int_t) <= 8);
    return t;
  }
};


template<>
struct OperandInitializer<Smi*> {
  static const bool kIsIntType = false;
  static inline RelocInfo::Mode rmode_for(Smi* t) {
    return RelocInfo::NONE64;
  }
  static inline int64_t immediate_for(Smi* t) {
    return reinterpret_cast<int64_t>(t);
  }
};


template<>
struct OperandInitializer<ExternalReference> {
  static const bool kIsIntType = false;
  static inline RelocInfo::Mode rmode_for(ExternalReference t) {
    return RelocInfo::EXTERNAL_REFERENCE;
  }
  static inline int64_t immediate_for(ExternalReference t) {
    return reinterpret_cast<int64_t>(t.address());
  }
};


template<typename T>
Operand::Operand(T t)
    : immediate_(OperandInitializer<T>::immediate_for(t)),
      reg_(NoReg),
      rmode_(OperandInitializer<T>::rmode_for(t)) {}


template<typename T>
Operand::Operand(T t, RelocInfo::Mode rmode)
    : immediate_(OperandInitializer<T>::immediate_for(t)),
      reg_(NoReg),
      rmode_(rmode) {
  STATIC_ASSERT(OperandInitializer<T>::kIsIntType);
}


Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
    : reg_(reg),
      shift_(shift),
      extend_(NO_EXTEND),
      shift_amount_(shift_amount),
      rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
  ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
  ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
  ASSERT(!reg.IsSP());
}


Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
    : reg_(reg),
      shift_(NO_SHIFT),
      extend_(extend),
      shift_amount_(shift_amount),
      rmode_(reg.Is64Bits() ? RelocInfo::NONE64 : RelocInfo::NONE32) {
  ASSERT(reg.IsValid());
  ASSERT(shift_amount <= 4);
  ASSERT(!reg.IsSP());

  // Extend modes SXTX and UXTX require a 64-bit register.
  ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
}


bool Operand::IsImmediate() const {
  return reg_.Is(NoReg);
}


bool Operand::IsShiftedRegister() const {
  return reg_.IsValid() && (shift_ != NO_SHIFT);
}


bool Operand::IsExtendedRegister() const {
  return reg_.IsValid() && (extend_ != NO_EXTEND);
}


bool Operand::IsZero() const {
  if (IsImmediate()) {
    return immediate() == 0;
  } else {
    return reg().IsZero();
  }
}


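// Rewrite a plain LSL-shifted register operand as the corresponding
// UXTW/UXTX extended-register operand, for instruction forms that only
// accept an extended register.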
Operand Operand::ToExtendedRegister() const {
  ASSERT(IsShiftedRegister());
  ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
  return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
}


int64_t Operand::immediate() const {
  ASSERT(IsImmediate());
  return immediate_;
}


Register Operand::reg() const {
  ASSERT(IsShiftedRegister() || IsExtendedRegister());
  return reg_;
}


Shift Operand::shift() const {
  ASSERT(IsShiftedRegister());
  return shift_;
}


Extend Operand::extend() const {
  ASSERT(IsExtendedRegister());
  return extend_;
}


unsigned Operand::shift_amount() const {
  ASSERT(IsShiftedRegister() || IsExtendedRegister());
  return shift_amount_;
}


Operand Operand::UntagSmi(Register smi) {
  ASSERT(smi.Is64Bits());
  return Operand(smi, ASR, kSmiShift);
}


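// Build an operand that untags a smi and scales the result by 2^scale,
// folding the untag shift and the scaling into a single register shift.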
Operand Operand::UntagSmiAndScale(Register smi, int scale) {
  ASSERT(smi.Is64Bits());
  ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
  if (scale > kSmiShift) {
    return Operand(smi, LSL, scale - kSmiShift);
  } else if (scale < kSmiShift) {
    return Operand(smi, ASR, kSmiShift - scale);
  }
  return Operand(smi);
}


MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
  : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
    shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
  ASSERT(base.Is64Bits() && !base.IsZero());
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Extend extend,
                       unsigned shift_amount)
  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
    shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
  ASSERT(base.Is64Bits() && !base.IsZero());
  ASSERT(!regoffset.IsSP());
  ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));

  // SXTX extend mode requires a 64-bit offset register.
  ASSERT(regoffset.Is64Bits() || (extend != SXTX));
}


MemOperand::MemOperand(Register base,
                       Register regoffset,
                       Shift shift,
                       unsigned shift_amount)
  : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
    shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
  ASSERT(base.Is64Bits() && !base.IsZero());
  ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
  ASSERT(shift == LSL);
}


MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
  : base_(base), addrmode_(addrmode) {
  ASSERT(base.Is64Bits() && !base.IsZero());

  if (offset.IsImmediate()) {
    offset_ = offset.immediate();

    regoffset_ = NoReg;
  } else if (offset.IsShiftedRegister()) {
    ASSERT(addrmode == Offset);

    regoffset_ = offset.reg();
    shift_ = offset.shift();
    shift_amount_ = offset.shift_amount();

    extend_ = NO_EXTEND;
    offset_ = 0;

    // These assertions match those in the shifted-register constructor.
    ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
    ASSERT(shift_ == LSL);
  } else {
    ASSERT(offset.IsExtendedRegister());
    ASSERT(addrmode == Offset);

    regoffset_ = offset.reg();
    extend_ = offset.extend();
    shift_amount_ = offset.shift_amount();

    shift_ = NO_SHIFT;
    offset_ = 0;

    // These assertions match those in the extended-register constructor.
    ASSERT(!regoffset_.IsSP());
    ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
    ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
  }
}


bool MemOperand::IsImmediateOffset() const {
  return (addrmode_ == Offset) && regoffset_.Is(NoReg);
}


bool MemOperand::IsRegisterOffset() const {
  return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
}


bool MemOperand::IsPreIndex() const {
  return addrmode_ == PreIndex;
}


bool MemOperand::IsPostIndex() const {
  return addrmode_ == PostIndex;
}


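// Return the offset component (immediate or register, with any shift or
// extend) repackaged as an Operand.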
Operand MemOperand::OffsetAsOperand() const {
  if (IsImmediateOffset()) {
    return offset();
  } else {
    ASSERT(IsRegisterOffset());
    if (extend() == NO_EXTEND) {
      return Operand(regoffset(), shift(), shift_amount());
    } else {
      return Operand(regoffset(), extend(), shift_amount());
    }
  }
}


void Assembler::Unreachable() {
#ifdef USE_SIMULATOR
  debug("UNREACHABLE", __LINE__, BREAK);
#else
  // Crash by branching to 0. lr now points near the fault.
  Emit(BLR | Rn(xzr));
#endif
}


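// pc must point at an ldr-literal instruction; return the address of the
// constant pool slot that it loads from.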
Address Assembler::target_pointer_address_at(Address pc) {
  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  ASSERT(instr->IsLdrLiteralX());
  return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
}


// Read/Modify the code target address in the branch/call instruction at pc.
Address Assembler::target_address_at(Address pc,
                                     ConstantPoolArray* constant_pool) {
  return Memory::Address_at(target_pointer_address_at(pc));
}


Address Assembler::target_address_at(Address pc, Code* code) {
  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
  return target_address_at(pc, constant_pool);
}


Address Assembler::target_address_from_return_address(Address pc) {
  // Returns the address of the call target from the return address that will
  // be returned to after a call.
  // Call sequence on A64 is:
  //  ldr ip0, #...  @ load from literal pool
  //  blr ip0
  Address candidate = pc - 2 * kInstructionSize;
  Instruction* instr = reinterpret_cast<Instruction*>(candidate);
  USE(instr);
  ASSERT(instr->IsLdrLiteralX());
  return candidate;
}


Address Assembler::return_address_from_call_start(Address pc) {
  // The call, generated by MacroAssembler::Call, is one of two possible
  // sequences:
  //
  // Without relocation:
  //  movz  ip0, #(target & 0x000000000000ffff)
  //  movk  ip0, #(target & 0x00000000ffff0000)
  //  movk  ip0, #(target & 0x0000ffff00000000)
  //  movk  ip0, #(target & 0xffff000000000000)
  //  blr   ip0
  //
  // With relocation:
  //  ldr   ip0, =target
  //  blr   ip0
  //
  // The return address is immediately after the blr instruction in both cases,
  // so it can be found by adding the call size to the address at the start of
  // the call sequence.
  STATIC_ASSERT(Assembler::kCallSizeWithoutRelocation == 5 * kInstructionSize);
  STATIC_ASSERT(Assembler::kCallSizeWithRelocation == 2 * kInstructionSize);

  Instruction* instr = reinterpret_cast<Instruction*>(pc);
  if (instr->IsMovz()) {
    // Verify the instruction sequence.
    ASSERT(instr->following(1)->IsMovk());
    ASSERT(instr->following(2)->IsMovk());
    ASSERT(instr->following(3)->IsMovk());
    ASSERT(instr->following(4)->IsBranchAndLinkToRegister());
    return pc + Assembler::kCallSizeWithoutRelocation;
  } else {
    // Verify the instruction sequence.
    ASSERT(instr->IsLdrLiteralX());
    ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
    return pc + Assembler::kCallSizeWithRelocation;
  }
}


void Assembler::deserialization_set_special_target_at(
    Address constant_pool_entry, Code* code, Address target) {
  Memory::Address_at(constant_pool_entry) = target;
}


void Assembler::set_target_address_at(Address pc,
                                      ConstantPoolArray* constant_pool,
                                      Address target) {
  Memory::Address_at(target_pointer_address_at(pc)) = target;
  // Intuitively, we would think it is necessary to always flush the
  // instruction cache after patching a target address in the code as follows:
  //   CPU::FlushICache(pc, sizeof(target));
  // However, on A64, no instruction is actually patched in the case of
  // embedded constants of the form:
  //   ldr ip, [pc, #...]
  // Since the instruction accessing this address in the constant pool remains
  // unchanged, a flush is not required.
}


void Assembler::set_target_address_at(Address pc,
                                      Code* code,
                                      Address target) {
  ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
  set_target_address_at(pc, constant_pool, target);
}


int RelocInfo::target_address_size() {
  return kPointerSize;
}


Address RelocInfo::target_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_address_address() {
  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
         || rmode_ == EMBEDDED_OBJECT
         || rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_pointer_address_at(pc_);
}


Address RelocInfo::constant_pool_entry_address() {
  ASSERT(IsInConstantPool());
  return Assembler::target_pointer_address_at(pc_);
}


Object* RelocInfo::target_object() {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
}


Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  return Handle<Object>(reinterpret_cast<Object**>(
      Assembler::target_address_at(pc_, host_)));
}


void RelocInfo::set_target_object(Object* target, WriteBarrierMode mode) {
  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
  ASSERT(!target->IsConsString());
  Assembler::set_target_address_at(pc_, host_,
                                   reinterpret_cast<Address>(target));
  if (mode == UPDATE_WRITE_BARRIER &&
      host() != NULL &&
      target->IsHeapObject()) {
    host()->GetHeap()->incremental_marking()->RecordWrite(
        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
  }
}


Address RelocInfo::target_reference() {
  ASSERT(rmode_ == EXTERNAL_REFERENCE);
  return Assembler::target_address_at(pc_, host_);
}


Address RelocInfo::target_runtime_entry(Assembler* origin) {
  ASSERT(IsRuntimeEntry(rmode_));
  return target_address();
}


void RelocInfo::set_target_runtime_entry(Address target,
                                         WriteBarrierMode mode) {
  ASSERT(IsRuntimeEntry(rmode_));
  if (target_address() != target) set_target_address(target, mode);
}


Handle<Cell> RelocInfo::target_cell_handle() {
  UNIMPLEMENTED();
  Cell* null_cell = NULL;
  return Handle<Cell>(null_cell);
}


Cell* RelocInfo::target_cell() {
  ASSERT(rmode_ == RelocInfo::CELL);
  return Cell::FromValueAddress(Memory::Address_at(pc_));
}


void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode mode) {
  UNIMPLEMENTED();
}


static const int kCodeAgeSequenceSize = 5 * kInstructionSize;
static const int kCodeAgeStubEntryOffset = 3 * kInstructionSize;


Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
  UNREACHABLE();  // This should never be reached on A64.
  return Handle<Object>();
}


Code* RelocInfo::code_age_stub() {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  ASSERT(!Code::IsYoungSequence(pc_));
  // Read the stub entry point from the code age sequence.
  Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
  return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
}


void RelocInfo::set_code_age_stub(Code* stub) {
  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
  ASSERT(!Code::IsYoungSequence(pc_));
  // Overwrite the stub entry point in the code age sequence. This is loaded as
  // a literal so there is no need to call FlushICache here.
  Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
  Memory::Address_at(stub_entry_address) = stub->instruction_start();
}


Address RelocInfo::call_address() {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  // For the above sequences the Relocinfo points to the load literal loading
  // the call address.
  return Assembler::target_address_at(pc_, host_);
}


void RelocInfo::set_call_address(Address target) {
  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
  Assembler::set_target_address_at(pc_, host_, target);
  if (host() != NULL) {
    Object* target_code = Code::GetCodeFromTargetAddress(target);
    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
        host(), this, HeapObject::cast(target_code));
  }
}


void RelocInfo::WipeOut() {
  ASSERT(IsEmbeddedObject(rmode_) ||
         IsCodeTarget(rmode_) ||
         IsRuntimeEntry(rmode_) ||
         IsExternalReference(rmode_));
  Assembler::set_target_address_at(pc_, host_, NULL);
}


bool RelocInfo::IsPatchedReturnSequence() {
  // The sequence must be:
  //   ldr ip0, [pc, #offset]
  //   blr ip0
  // See a64/debug-a64.cc BreakLocationIterator::SetDebugBreakAtReturn().
  Instruction* i1 = reinterpret_cast<Instruction*>(pc_);
  Instruction* i2 = i1->following();
  return i1->IsLdrLiteralX() && (i1->Rt() == ip0.code()) &&
         i2->IsBranchAndLinkToRegister() && (i2->Rn() == ip0.code());
}


bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
  Instruction* current_instr = reinterpret_cast<Instruction*>(pc_);
  return !current_instr->IsNop(Assembler::DEBUG_BREAK_NOP);
}


void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    visitor->VisitEmbeddedPointer(this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    visitor->VisitCodeTarget(this);
  } else if (mode == RelocInfo::CELL) {
    visitor->VisitCell(this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    visitor->VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence())) &&
             isolate->debug()->has_break_points()) {
    visitor->VisitDebugTarget(this);
#endif
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    visitor->VisitRuntimeEntry(this);
  }
}


template<typename StaticVisitor>
void RelocInfo::Visit(Heap* heap) {
  RelocInfo::Mode mode = rmode();
  if (mode == RelocInfo::EMBEDDED_OBJECT) {
    StaticVisitor::VisitEmbeddedPointer(heap, this);
  } else if (RelocInfo::IsCodeTarget(mode)) {
    StaticVisitor::VisitCodeTarget(heap, this);
  } else if (mode == RelocInfo::CELL) {
    StaticVisitor::VisitCell(heap, this);
  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
    StaticVisitor::VisitExternalReference(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
  } else if (heap->isolate()->debug()->has_break_points() &&
             ((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
    StaticVisitor::VisitDebugTarget(heap, this);
#endif
  } else if (RelocInfo::IsRuntimeEntry(mode)) {
    StaticVisitor::VisitRuntimeEntry(this);
  }
}


LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
  ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDR_x : LDR_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDR_d : LDR_s;
  }
}


LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
                                         const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDP_x : LDP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDP_d : LDP_s;
  }
}


LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
  ASSERT(rt.IsValid());
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STR_x : STR_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STR_d : STR_s;
  }
}


LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
                                          const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STP_x : STP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STP_d : STP_s;
  }
}


LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? LDNP_x : LDNP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? LDNP_d : LDNP_s;
  }
}


LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
    const CPURegister& rt, const CPURegister& rt2) {
  ASSERT(AreSameSizeAndType(rt, rt2));
  USE(rt2);
  if (rt.IsRegister()) {
    return rt.Is64Bits() ? STNP_x : STNP_w;
  } else {
    ASSERT(rt.IsFPRegister());
    return rt.Is64Bits() ? STNP_d : STNP_s;
  }
}


int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
  ASSERT(kStartOfLabelLinkChain == 0);
  int offset = LinkAndGetByteOffsetTo(label);
  ASSERT(IsAligned(offset, kInstructionSize));
  return offset >> kInstructionSizeLog2;
}


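// Encode the S bit: select the flag-setting form of a data-processing
// instruction (SetFlags) or the form that leaves the flags untouched
// (LeaveFlags).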
Instr Assembler::Flags(FlagsUpdate S) {
  if (S == SetFlags) {
    return 1 << FlagsUpdate_offset;
  } else if (S == LeaveFlags) {
    return 0 << FlagsUpdate_offset;
  }
  UNREACHABLE();
  return 0;
}


Instr Assembler::Cond(Condition cond) {
  return cond << Condition_offset;
}


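// Split a 21-bit PC-relative offset into the immhi:immlo fields of a
// PC-relative addressing instruction.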
Instr Assembler::ImmPCRelAddress(int imm21) {
  CHECK(is_int21(imm21));
  Instr imm = static_cast<Instr>(truncate_to_int21(imm21));
  Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
  Instr immlo = imm << ImmPCRelLo_offset;
  return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
}


Instr Assembler::ImmUncondBranch(int imm26) {
  CHECK(is_int26(imm26));
  return truncate_to_int26(imm26) << ImmUncondBranch_offset;
}


Instr Assembler::ImmCondBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCondBranch_offset;
}


Instr Assembler::ImmCmpBranch(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmCmpBranch_offset;
}


Instr Assembler::ImmTestBranch(int imm14) {
  CHECK(is_int14(imm14));
  return truncate_to_int14(imm14) << ImmTestBranch_offset;
}


Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
  ASSERT(is_uint6(bit_pos));
  // Subtract five from the shift offset, as we need bit 5 from bit_pos.
  unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
  unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
  b5 &= ImmTestBranchBit5_mask;
  b40 &= ImmTestBranchBit40_mask;
  return b5 | b40;
}


Instr Assembler::SF(Register rd) {
  return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
}


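// Encode an add/sub immediate: either a plain 12-bit value, or a 12-bit
// value shifted left by 12 with the shift bit set.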
Instr Assembler::ImmAddSub(int64_t imm) {
  ASSERT(IsImmAddSub(imm));
  if (is_uint12(imm)) {  // No shift required.
    return imm << ImmAddSub_offset;
  } else {
    return ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
  }
}


Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
  USE(reg_size);
  return imms << ImmS_offset;
}


Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  ASSERT(is_uint6(immr));
  return immr << ImmR_offset;
}


Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  ASSERT(is_uint6(imms));
  ASSERT((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
  USE(reg_size);
  return imms << ImmSetBits_offset;
}


Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
         ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
  USE(reg_size);
  return immr << ImmRotate_offset;
}


Instr Assembler::ImmLLiteral(int imm19) {
  CHECK(is_int19(imm19));
  return truncate_to_int19(imm19) << ImmLLiteral_offset;
}


Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
  ASSERT((reg_size == kXRegSizeInBits) || (bitn == 0));
  USE(reg_size);
  return bitn << BitN_offset;
}


Instr Assembler::ShiftDP(Shift shift) {
  ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
  return shift << ShiftDP_offset;
}


Instr Assembler::ImmDPShift(unsigned amount) {
  ASSERT(is_uint6(amount));
  return amount << ImmDPShift_offset;
}


Instr Assembler::ExtendMode(Extend extend) {
  return extend << ExtendMode_offset;
}


Instr Assembler::ImmExtendShift(unsigned left_shift) {
  ASSERT(left_shift <= 4);
  return left_shift << ImmExtendShift_offset;
}


Instr Assembler::ImmCondCmp(unsigned imm) {
  ASSERT(is_uint5(imm));
  return imm << ImmCondCmp_offset;
}


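// Extract the N, Z, C and V flags from a StatusFlags value and place them
// in the nzcv field of a conditional compare instruction.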
Instr Assembler::Nzcv(StatusFlags nzcv) {
  return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
}


Instr Assembler::ImmLSUnsigned(int imm12) {
  ASSERT(is_uint12(imm12));
  return imm12 << ImmLSUnsigned_offset;
}


Instr Assembler::ImmLS(int imm9) {
  ASSERT(is_int9(imm9));
  return truncate_to_int9(imm9) << ImmLS_offset;
}


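// Encode a load/store pair offset: the byte offset must be a multiple of the
// access size and is stored as a scaled, signed 7-bit immediate.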
Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
  ASSERT(((imm7 >> size) << size) == imm7);
  int scaled_imm7 = imm7 >> size;
  ASSERT(is_int7(scaled_imm7));
  return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
}


Instr Assembler::ImmShiftLS(unsigned shift_amount) {
  ASSERT(is_uint1(shift_amount));
  return shift_amount << ImmShiftLS_offset;
}


Instr Assembler::ImmException(int imm16) {
  ASSERT(is_uint16(imm16));
  return imm16 << ImmException_offset;
}


Instr Assembler::ImmSystemRegister(int imm15) {
  ASSERT(is_uint15(imm15));
  return imm15 << ImmSystemRegister_offset;
}


Instr Assembler::ImmHint(int imm7) {
  ASSERT(is_uint7(imm7));
  return imm7 << ImmHint_offset;
}


Instr Assembler::ImmBarrierDomain(int imm2) {
  ASSERT(is_uint2(imm2));
  return imm2 << ImmBarrierDomain_offset;
}


Instr Assembler::ImmBarrierType(int imm2) {
  ASSERT(is_uint2(imm2));
  return imm2 << ImmBarrierType_offset;
}


LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
  ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
  return static_cast<LSDataSize>(op >> SizeLS_offset);
}


Instr Assembler::ImmMoveWide(uint64_t imm) {
  ASSERT(is_uint16(imm));
  return imm << ImmMoveWide_offset;
}


Instr Assembler::ShiftMoveWide(int64_t shift) {
  ASSERT(is_uint2(shift));
  return shift << ShiftMoveWide_offset;
}


Instr Assembler::FPType(FPRegister fd) {
  return fd.Is64Bits() ? FP64 : FP32;
}


Instr Assembler::FPScale(unsigned scale) {
  ASSERT(is_uint6(scale));
  return scale << FPScale_offset;
}


const Register& Assembler::AppropriateZeroRegFor(const CPURegister& reg) const {
  return reg.Is64Bits() ? xzr : wzr;
}


void Assembler::LoadRelocated(const CPURegister& rt, const Operand& operand) {
  LoadRelocatedValue(rt, operand, LDR_x_lit);
}


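// Grow the buffer if it is running out of space, and emit pending veneer and
// constant pools once their check deadlines have been reached.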
inline void Assembler::CheckBuffer() {
  ASSERT(pc_ < (buffer_ + buffer_size_));
  if (buffer_space() < kGap) {
    GrowBuffer();
  }
  if (pc_offset() >= next_veneer_pool_check_) {
    CheckVeneerPool(false, true);
  }
  if (pc_offset() >= next_constant_pool_check_) {
    CheckConstPool(false, true);
  }
}


TypeFeedbackId Assembler::RecordedAstId() {
  ASSERT(!recorded_ast_id_.IsNone());
  return recorded_ast_id_;
}


void Assembler::ClearRecordedAstId() {
  recorded_ast_id_ = TypeFeedbackId::None();
}


} }  // namespace v8::internal

#endif  // V8_A64_ASSEMBLER_A64_INL_H_