Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1)

Side by Side Diff: src/ppc/assembler-ppc.cc

Issue 571173003: PowerPC specific sub-directories (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove IBM copyright, update code to later level Created 6 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
(Empty)
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32
// The original source code covered by the above license has been
34 // modified significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
36
37 #include "src/v8.h"
38
39 #if V8_TARGET_ARCH_PPC
40
41 #include "src/base/bits.h"
42 #include "src/base/cpu.h"
43 #include "src/macro-assembler.h"
44 #include "src/ppc/assembler-ppc-inl.h"
45 #include "src/serialize.h"
46
47 namespace v8 {
48 namespace internal {
49
// Get the CPU features enabled by the build.
// No PPC features are currently implied at compile time, so the
// result is always an empty feature mask.
static unsigned CpuFeaturesImpliedByCompiler() {
  return 0u;
}
55
56
// Probe the runtime CPU and accumulate supported feature bits into
// |supported_|.  The cache line size defaults to 128 bytes unless the
// OS reports a different value.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  cache_line_size_ = 128;  // conservative default

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

// Detect whether frim instruction is supported (POWER5+)
// For now we will just check for processors we know do not
// support it
#ifndef USE_SIMULATOR
  // Probe for additional features at runtime.
  base::CPU cpu;
#if V8_TARGET_ARCH_PPC64
  // Direct FPR<->GPR moves: enabled only on POWER8 here.
  if (cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << FPR_GPR_MOV);
  }
#endif
  // lwsync enabled on POWER6 and later parts.
  if (cpu.part() == base::CPU::PPC_POWER6 ||
      cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << LWSYNC);
  }
#if V8_OS_LINUX
  if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
    // Assume support
    supported_ |= (1u << FPU);
  }
  if (cpu.cache_line_size() != 0) {
    cache_line_size_ = cpu.cache_line_size();
  }
#elif V8_OS_AIX
  // Assume support FP support and default cache line size
  supported_ |= (1u << FPU);
#endif
#else  // Simulator: advertise everything the simulator implements.
  supported_ |= (1u << FPU);
  supported_ |= (1u << LWSYNC);
#if V8_TARGET_ARCH_PPC64
  supported_ |= (1u << FPR_GPR_MOV);
#endif
#endif
}
100
101
// Print the architecture variant this binary was built to target.
void CpuFeatures::PrintTarget() {
  const char* ppc_arch = NULL;

#if V8_TARGET_ARCH_PPC64
  ppc_arch = "ppc64";
#else
  ppc_arch = "ppc";
#endif

  printf("target %s\n", ppc_arch);
}


// Print the dynamically detected feature set (currently just FPU).
void CpuFeatures::PrintFeatures() {
  printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
}
118
119
// Map an architectural register number (0..31) to its Register object.
// In this mapping r1 is sp, r12 is ip and r31 is fp.
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7,
                                 r8, r9, r10, r11, ip, r13, r14, r15,
                                 r16, r17, r18, r19, r20, r21, r22, r23,
                                 r24, r25, r26, r27, r28, r29, r30, fp};
  return kRegisters[num];
}
128
129
// Return the printable name for an allocatable double-register index.
// NOTE(review): the table skips d0, d13 and d14 — presumably reserved
// registers excluded from allocation; confirm against the allocator's
// index mapping in the corresponding header.
const char* DoubleRegister::AllocationIndexToString(int index) {
  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
  const char* const names[] = {
      "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
      "d11", "d12", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
      "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
  return names[index];
}
138
139
// -----------------------------------------------------------------------------
// Implementation of RelocInfo

// Only internal references need patching when code moves.
const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially
  // coded. Being specially coded on PPC means that it is a lis/ori
  // instruction sequence or is an out of line constant pool entry,
  // and these are always the case inside code objects.
  return true;
}


// True when pc_ points at a constant-pool load; only possible when the
// out-of-line constant pool is enabled at build time.
bool RelocInfo::IsInConstantPool() {
#if V8_OOL_CONSTANT_POOL
  return Assembler::IsConstantPoolLoadStart(pc_);
#else
  return false;
#endif
}


// Overwrite |instruction_count| instruction words at pc_ and flush the
// instruction cache for the modified range.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}


// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
// Not implemented for the PPC port.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}
183
184
// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-ppc-inl.h for inlined constructors

// Build an immediate operand from a handle: heap objects are embedded
// via the handle location (with EMBEDDED_OBJECT relocation), other
// values (smis) are embedded directly with no relocation.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = kRelocInfo_NONEPTR;
  }
}


// Register + displacement addressing (D-form memory operand).
MemOperand::MemOperand(Register rn, int32_t offset) {
  ra_ = rn;
  rb_ = no_reg;
  offset_ = offset;
}


// Register + register addressing (X-form memory operand).
MemOperand::MemOperand(Register ra, Register rb) {
  ra_ = ra;
  rb_ = rb;
  offset_ = 0;
}
218
219
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

// Spare buffer.
// NOTE(review): kMinimalBufferSize is not referenced in this chunk;
// presumably used elsewhere in the file — verify before removing.
static const int kMinimalBufferSize = 4 * KB;


// Set up an assembler over |buffer| and initialize trampoline-pool
// bookkeeping.  Relocation info is written backwards from the end of
// the buffer towards pc_.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
#if V8_OOL_CONSTANT_POOL
      constant_pool_builder_(),
#endif
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  // We leave space (kMaxBlockTrampolineSectionSize)
  // for BlockTrampolinePoolScope buffer.
  next_buffer_check_ =
      FLAG_force_long_branches ? kMaxInt : kMaxCondBranchReach -
                                               kMaxBlockTrampolineSectionSize;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;

  // With forced long branches, no trampoline pool is ever emitted.
  trampoline_emitted_ = FLAG_force_long_branches;
  unbound_labels_count_ = 0;

#if V8_OOL_CONSTANT_POOL
  constant_pool_available_ = false;
#endif

  ClearRecordedAstId();
}
255
256
// Fill in the code descriptor: instructions grow up from buffer_,
// relocation info grows down from the end of the buffer.
void Assembler::GetCode(CodeDesc* desc) {
  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->origin = this;
}


// Pad with nops until pc_offset() is a multiple of m (a power of two,
// at least the 4-byte instruction size).
void Assembler::Align(int m) {
#if V8_TARGET_ARCH_PPC64
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo64(m));
#else
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
#endif
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


// Align code targets to an 8-byte boundary.
void Assembler::CodeTargetAlign() { Align(8); }
280
281
282 Condition Assembler::GetCondition(Instr instr) {
283 switch (instr & kCondMask) {
284 case BT:
285 return eq;
286 case BF:
287 return ne;
288 default:
289 UNIMPLEMENTED();
290 }
291 return al;
292 }
293
294
// lis is encoded as addis with RA == r0.
bool Assembler::IsLis(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0);
}


// li is encoded as addi with RA == r0.
bool Assembler::IsLi(Instr instr) {
  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0);
}


bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }


bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }


bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }


// Extract the RA field of an instruction as a Register.
Register Assembler::GetRA(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RAValue(instr);
  return reg;
}


// Extract the RB field of an instruction as a Register.
Register Assembler::GetRB(Instr instr) {
  Register reg;
  reg.code_ = Instruction::RBValue(instr);
  return reg;
}
326
327
#if V8_TARGET_ARCH_PPC64
// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori).
// Only the opcode/register halves are compared for the lis/ori/oris
// parts; the immediate halves vary per call site.
bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
                                   Instr instr4, Instr instr5) {
  // Check the instructions are indeed a five part load (into r12)
  // 3d800000       lis     r12, 0
  // 618c0000       ori     r12, r12, 0
  // 798c07c6       rldicr  r12, r12, 32, 31
  // 658c00c3       oris    r12, r12, 195
  // 618ccd40       ori     r12, r12, 52544
  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
          (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) &&
          ((instr5 >> 16) == 0x618c));
}
#else
// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
  // Check the instruction is indeed a two part load (into r12)
  // 3d802553       lis     r12, 9555
  // 618c5000       ori     r12, r12, 20480
  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
}
#endif
351
352
// cmp (register-register compare) is EXT2 with the CMP extended opcode.
bool Assembler::IsCmpRegister(Instr instr) {
  return (((instr & kOpcodeMask) == EXT2) &&
          ((instr & kExt2OpcodeMask) == CMP));
}


bool Assembler::IsRlwinm(Instr instr) {
  return ((instr & kOpcodeMask) == RLWINMX);
}


#if V8_TARGET_ARCH_PPC64
bool Assembler::IsRldicl(Instr instr) {
  return (((instr & kOpcodeMask) == EXT5) &&
          ((instr & kExt5OpcodeMask) == RLDICL));
}
#endif


bool Assembler::IsCmpImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == CMPI);
}


// crset is recognized via its creqv encoding (EXT1 | CREQV).
bool Assembler::IsCrSet(Instr instr) {
  return (((instr & kOpcodeMask) == EXT1) &&
          ((instr & kExt1OpcodeMask) == CREQV));
}


// For a cmpi instruction, the compared register is the RA field.
Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRA(instr);
}


// For a cmpi instruction, the raw (not sign-extended) 16-bit immediate.
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff16Mask;
}
393
394
// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.


// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;
408
409
// Follow one step of a label's link chain starting at |pos|.  Returns
// the absolute position the instruction at |pos| links to, or
// kEndOfChain when |pos| is the end of the chain.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  // check which type of branch this is 16 or 26 bit offset
  int opcode = instr & kOpcodeMask;
  if (BX == opcode) {
    // Sign-extend the 26-bit field (shift up, then arithmetic shift down).
    int imm26 = ((instr & kImm26Mask) << 6) >> 6;
    imm26 &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
    if (imm26 == 0) return kEndOfChain;  // branch-to-self marks chain end
    return pos + imm26;
  } else if (BCX == opcode) {
    int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
    imm16 &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
    if (imm16 == 0) return kEndOfChain;
    return pos + imm16;
  } else if ((instr & ~kImm26Mask) == 0) {
    // Emitted link to a label, not part of a branch (regexp PushBacktrack).
    if (instr == 0) {
      return kEndOfChain;
    } else {
      int32_t imm26 = SIGN_EXT_IMM26(instr);
      return (imm26 + pos);
    }
  }

  PPCPORT_UNIMPLEMENTED();
  DCHECK(false);
  return -1;
}
438
439
// Resolve the link at |pos| to point at |target_pos|, preserving the
// instruction's AA/LK bits.  Handles bx, bcx, and emitted label
// constants.
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  int opcode = instr & kOpcodeMask;

  // check which type of branch this is 16 or 26 bit offset
  if (BX == opcode) {
    int imm26 = target_pos - pos;
    DCHECK((imm26 & (kAAMask | kLKMask)) == 0);
    instr &= ((~kImm26Mask) | kAAMask | kLKMask);
    DCHECK(is_int26(imm26));
    instr_at_put(pos, instr | (imm26 & kImm26Mask));
    return;
  } else if (BCX == opcode) {
    int imm16 = target_pos - pos;
    DCHECK((imm16 & (kAAMask | kLKMask)) == 0);
    instr &= ((~kImm16Mask) | kAAMask | kLKMask);
    DCHECK(is_int16(imm16));
    instr_at_put(pos, instr | (imm16 & kImm16Mask));
    return;
  } else if ((instr & ~kImm26Mask) == 0) {
    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted link to a label, not part of a branch (regexp PushBacktrack).
    // Load the position of the label relative to the generated code object
    // pointer in a register.

    // Patch the two-instruction slot (the second is expected to be a nop)
    // with a lis/ori pair materializing the code-object-relative offset.
    Register dst = r3;  // we assume r3 for now
    DCHECK(IsNop(instr_at(pos + kInstrSize)));
    uint32_t target = target_pos + (Code::kHeaderSize - kHeapObjectTag);
    CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
                        CodePatcher::DONT_FLUSH);
    int target_hi = static_cast<int>(target) >> 16;
    int target_lo = static_cast<int>(target) & 0XFFFF;

    patcher.masm()->lis(dst, Operand(SIGN_EXT_IMM16(target_hi)));
    patcher.masm()->ori(dst, dst, Operand(target_lo));
    return;
  }

  DCHECK(false);
}
480
481
482 int Assembler::max_reach_from(int pos) {
483 Instr instr = instr_at(pos);
484 int opcode = instr & kOpcodeMask;
485
486 // check which type of branch this is 16 or 26 bit offset
487 if (BX == opcode) {
488 return 26;
489 } else if (BCX == opcode) {
490 return 16;
491 } else if ((instr & ~kImm26Mask) == 0) {
492 // Emitted label constant, not part of a branch (regexp PushBacktrack).
493 return 26;
494 }
495
496 DCHECK(false);
497 return 0;
498 }
499
500
// Bind label L to |pos|: walk the label's link chain and patch every
// linked instruction to refer to |pos|, routing through a shared
// trampoline slot when a branch cannot reach the target directly.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  int32_t trampoline_pos = kInvalidSlotPos;
  if (L->is_linked() && !trampoline_emitted_) {
    // Label resolved: release its reserved trampoline slot.
    unbound_labels_count_--;
    next_buffer_check_ += kTrampolineSlotsSize;
  }

  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
    next(L);  // call next before overwriting link with target at fixup_pos
    if (is_intn(offset, maxReach) == false) {
      // Out of range: branch to a trampoline slot that jumps on to |pos|.
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK(trampoline_pos != kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      }
      target_at_put(fixup_pos, trampoline_pos);
    } else {
      target_at_put(fixup_pos, pos);
    }
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
531
532
// Bind L to the current assembly position.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


// Advance L one step along its link chain; unuse it at the chain's end.
void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}
549
550
551 bool Assembler::is_near(Label* L, Condition cond) {
552 DCHECK(L->is_bound());
553 if (L->is_bound() == false) return false;
554
555 int maxReach = ((cond == al) ? 26 : 16);
556 int offset = L->pos() - pc_offset();
557
558 return is_intn(offset, maxReach);
559 }
560
561
// A-form: FP arithmetic with frt/fra/frb register fields plus RC bit.
void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
                       DoubleRegister frb, RCBit r) {
  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}


// D-form: rt/ra plus a 16-bit immediate; |signed_disp| selects whether
// the value must fit as signed (int16) or unsigned (uint16).
void Assembler::d_form(Instr instr, Register rt, Register ra,
                       const intptr_t val, bool signed_disp) {
  if (signed_disp) {
    if (!is_int16(val)) {
      // Print diagnostics before the DCHECK below fires.
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
    }
    DCHECK(is_int16(val));
  } else {
    if (!is_uint16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    }
    DCHECK(is_uint16(val));
  }
  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}


// X-form: rs/ra/rb register fields plus RC bit.
void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
                       RCBit r) {
  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
}


// XO-form: rt/ra/rb plus overflow-enable (OE) and RC bits.
void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                        OEBit o, RCBit r) {
  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}


// MD-form (64-bit rotate-immediate): 6-bit shift and mask values are
// each split into a 5-bit field and a separate high bit.
void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
                        int maskbit, RCBit r) {
  int sh0_4 = shift & 0x1f;
  int sh5 = (shift >> 5) & 0x1;
  int m0_4 = maskbit & 0x1f;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
       m5 * B5 | sh5 * B1 | r);
}


// MDS-form (64-bit rotate by register): shift comes from rb; the 6-bit
// mask is split as in md_form.
void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
                         int maskbit, RCBit r) {
  int m0_4 = maskbit & 0x1f;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
       m5 * B5 | r);
}
619
620
// Returns the next free trampoline entry, or kInvalidSlotPos when the
// pool is exhausted (which latches internal_trampoline_exception_ so
// all further requests fail).
int32_t Assembler::get_trampoline_entry() {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    trampoline_entry = trampoline_.take_slot();

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}
634
635
// Compute the pc-relative offset to label L, linking the label to the
// current position when it is not yet bound.
void /*see-note*/;  // (intentionally not present — see body below)
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using branch to self to mark the first reference
      // should avoid most instances of branch offset overflow.  See
      // target_at() for where this is converted back to kEndOfChain.
      target_pos = pc_offset();
      if (!trampoline_emitted_) {
        // Reserve a trampoline slot in case this branch ends up far away.
        unbound_labels_count_++;
        next_buffer_check_ -= kTrampolineSlotsSize;
      }
    }
    L->link_to(pc_offset());
  }

  return target_pos - pc_offset();
}
659
660
// Branch instructions.


// bclr: branch conditionally to the link register.
void Assembler::bclr(BOfield bo, LKBit lk) {
  positions_recorder()->WriteRecordedPositions();
  emit(EXT1 | bo | BCLRX | lk);
}


// bcctr: branch conditionally to the count register.
void Assembler::bcctr(BOfield bo, LKBit lk) {
  positions_recorder()->WriteRecordedPositions();
  emit(EXT1 | bo | BCCTRX | lk);
}


// Pseudo op - branch to link register
void Assembler::blr() { bclr(BA, LeaveLK); }


// Pseudo op - branch to count register -- used for "jump"
void Assembler::bctr() { bcctr(BA, LeaveLK); }


// Branch to count register, setting the link register (indirect call).
void Assembler::bctrl() { bcctr(BA, SetLK); }


// bc: conditional branch with a 16-bit signed pc-relative offset.
void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  if (lk == SetLK) {
    positions_recorder()->WriteRecordedPositions();
  }
  DCHECK(is_int16(branch_offset));
  emit(BCX | bo | condition_bit * B16 | (kImm16Mask & branch_offset) | lk);
}


// b: unconditional branch with a 26-bit signed pc-relative offset.
void Assembler::b(int branch_offset, LKBit lk) {
  if (lk == SetLK) {
    positions_recorder()->WriteRecordedPositions();
  }
  DCHECK((branch_offset & 3) == 0);  // branch offsets are word-aligned
  int imm26 = branch_offset;
  DCHECK(is_int26(imm26));
  // todo add AA and LK bits
  emit(BX | (imm26 & kImm26Mask) | lk);
}
706
707
// xori: XOR immediate.
void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.imm_, false);
}


// xoris: XOR immediate shifted (upper 16 bits).
void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.imm_, false);
}


// xor: register XOR (trailing underscore avoids the C++ keyword).
void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | XORX, dst, src1, src2, rc);
}


// cntlzw: count leading zeros, word.
void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZWX, ra, rs, r0, rc);
}


// and: register AND (trailing underscore avoids the C++ keyword).
void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
  x_form(EXT2 | ANDX, ra, rs, rb, rc);
}


// rlwinm: rotate left word immediate then AND with mask mb..me.
void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1f;
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}


// rlwnm: rotate left word by register then AND with mask mb..me.
void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
                      RCBit rc) {
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
       me << 1 | rc);
}
750
751
// rlwimi: rotate left word immediate then mask insert.
void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1f;
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}


// Pseudo op - shift left word immediate (via rlwinm).
void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc);
}


// Pseudo op - logical shift right word immediate (via rlwinm).
void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc);
}


// Pseudo op - clear the low-order val.imm_ bits.
void Assembler::clrrwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 0, 0, 31 - val.imm_, rc);
}


// Pseudo op - clear the high-order val.imm_ bits.
void Assembler::clrlwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 0, val.imm_, 31, rc);
}


// srawi: shift right algebraic word immediate.
void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) {
  emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r);
}
791
792
// srw: logical shift right word by register.
void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRWX, dst, src1, src2, r);
}


// slw: shift left word by register.
void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLWX, dst, src1, src2, r);
}


// sraw: shift right algebraic word by register.
void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAW, ra, rs, rb, r);
}


// Pseudo op - rotate left word by register (full 0..31 mask).
void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}


// Pseudo op - rotate left word immediate.
void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}


// Pseudo op - rotate right word immediate (left rotate by 32 - sh).
void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}


// Pseudo op - subtract immediate (addi of the negated value).
void Assembler::subi(Register dst, Register src, const Operand& imm) {
  addi(dst, src, Operand(-(imm.imm_)));
}
826
// addc: add carrying.
void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
}


// addze: add to zero extended (src1 plus carry).
void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  // a special xo_form
  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}


// sub: dst = src1 - src2 (subf with the operands swapped).
void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}


// subfc: subtract carrying (operands swapped as in sub).
void Assembler::subfc(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}


// subfic: subtract from immediate carrying.
void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.imm_, true);
}


// add: dst = src1 + src2.
void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}
860
861
// Multiply low word
void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}


// Multiply hi word
void Assembler::mulhw(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, o, r);
}


// Divide word
void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
}


// addi: add signed 16-bit immediate.
void Assembler::addi(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use li instead to show intent
  d_form(ADDI, dst, src, imm.imm_, true);
}


// addis: add immediate shifted (upper 16 bits).
void Assembler::addis(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use lis instead to show intent
  d_form(ADDIS, dst, src, imm.imm_, true);
}


// addic: add immediate carrying.
void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.imm_, true);
}
898
899
// andi.: AND immediate (record form).
void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.imm_, false);
}


// andis.: AND immediate shifted (record form).
void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.imm_, false);
}


// nor: NOT-OR.
void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | NORX, dst, src1, src2, r);
}


// Pseudo op - bitwise NOT (nor of src with itself).
void Assembler::notx(Register dst, Register src, RCBit r) {
  x_form(EXT2 | NORX, dst, src, src, r);
}


// ori: OR immediate.
void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.imm_, false);
}


// oris: OR immediate shifted (upper 16 bits).
void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.imm_, false);
}


// or: register OR ('orx' avoids the C++ operator keyword).
void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ORX, dst, src1, src2, rc);
}
933
934
// cmpi: signed compare immediate; the L bit selects a 64-bit compare
// on PPC64 builds.
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}


// cmpli: unsigned (logical) compare immediate.
void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}


// cmp: signed register compare at the native pointer width.
void Assembler::cmp(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}


// cmpl: unsigned register compare at the native pointer width.
void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}


// cmpwi: 32-bit signed compare immediate (L bit forced to 0).
void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
  int L = 0;
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}


// cmplwi: 32-bit unsigned compare immediate.
void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
  int L = 0;
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}


// cmpw: 32-bit signed register compare.
void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
  int L = 0;
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}


// cmplw: 32-bit unsigned register compare.
void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
  int L = 0;
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}
1021
1022
// Pseudo op - load immediate (addi with RA == r0, i.e. literal zero base).
void Assembler::li(Register dst, const Operand& imm) {
  d_form(ADDI, dst, r0, imm.imm_, true);
}


// Pseudo op - load immediate shifted (upper 16 bits).
void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.imm_, true);
}


// Pseudo op - move register
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}
1039
1040
// lbz: load byte and zero-extend (D-form).
void Assembler::lbz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));  // r0 as base means literal zero, not a register
  d_form(LBZ, dst, src.ra(), src.offset(), true);
}


// lbzx: load byte and zero-extend, indexed (X-form).
void Assembler::lbzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// lbzux: load byte and zero-extend with update, indexed.
void Assembler::lbzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// lhz: load halfword and zero-extend (D-form).
void Assembler::lhz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LHZ, dst, src.ra(), src.offset(), true);
}


// lhzx: load halfword and zero-extend, indexed.
void Assembler::lhzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// lhzux: load halfword and zero-extend with update, indexed.
void Assembler::lhzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
1087
1088
1089 void Assembler::lwz(Register dst, const MemOperand& src) {
1090 DCHECK(!src.ra_.is(r0));
1091 d_form(LWZ, dst, src.ra(), src.offset(), true);
1092 }
1093
1094
1095 void Assembler::lwzu(Register dst, const MemOperand& src) {
1096 DCHECK(!src.ra_.is(r0));
1097 d_form(LWZU, dst, src.ra(), src.offset(), true);
1098 }
1099
1100
1101 void Assembler::lwzx(Register rt, const MemOperand& src) {
1102 Register ra = src.ra();
1103 Register rb = src.rb();
1104 DCHECK(!ra.is(r0));
1105 emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1106 LeaveRC);
1107 }
1108
1109
1110 void Assembler::lwzux(Register rt, const MemOperand& src) {
1111 Register ra = src.ra();
1112 Register rb = src.rb();
1113 DCHECK(!ra.is(r0));
1114 emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1115 LeaveRC);
1116 }
1117
1118
1119 void Assembler::lwa(Register dst, const MemOperand& src) {
1120 #if V8_TARGET_ARCH_PPC64
1121 int offset = src.offset();
1122 DCHECK(!src.ra_.is(r0));
1123 DCHECK(!(offset & 3) && is_int16(offset));
1124 offset = kImm16Mask & offset;
1125 emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
1126 #else
1127 lwz(dst, src);
1128 #endif
1129 }
1130
1131
// Integer stores.  Same addressing-form conventions as the loads above:
// D-form uses ra + 16-bit displacement, X-form uses ra + rb, "u"
// variants update ra with the effective address, and r0 is rejected as
// a base register.

// Store byte (D-form).
void Assembler::stb(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STB, dst, src.ra(), src.offset(), true);
}


// Store byte indexed (X-form).
void Assembler::stbx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Store byte with update indexed (X-form; ra <- EA).
void Assembler::stbux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Store half-word (D-form).
void Assembler::sth(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STH, dst, src.ra(), src.offset(), true);
}


// Store half-word indexed (X-form).
void Assembler::sthx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Store half-word with update indexed (X-form; ra <- EA).
void Assembler::sthux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Store word (D-form).
void Assembler::stw(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STW, dst, src.ra(), src.offset(), true);
}


// Store word with update (D-form; ra <- EA).
void Assembler::stwu(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STWU, dst, src.ra(), src.offset(), true);
}


// Store word indexed (X-form).
void Assembler::stwx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Store word with update indexed (X-form; ra <- EA).
void Assembler::stwux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
1208
1209
// Sign-extend low byte.
// NOTE(review): the first parameter is emitted in the B21 field and the
// second in B16; callers appear to pass (dst, src) -- confirm against
// call sites before relying on the parameter names.
void Assembler::extsb(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc);
}


// Sign-extend low half-word (same parameter/field note as extsb).
void Assembler::extsh(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc);
}


// Negate: rt <- -ra, with optional overflow (o) and record (r) bits.
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}


// AND with complement: dst <- src1 & ~src2.
void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ANDCX, dst, src1, src2, rc);
}
1228
1229
#if V8_TARGET_ARCH_PPC64
// 64bit specific instructions
// DS-form doubleword loads/stores: the 16-bit displacement must be
// word-aligned because its two low bits select the instruction variant
// (ld/ldu, std/stdu).

// Load doubleword (DS-form).
void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  DCHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}


// Load doubleword indexed (X-form).
void Assembler::ldx(Register rd, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


// Load doubleword with update (DS-form; "| 1" selects ldu; ra <- EA).
void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  DCHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}


// Load doubleword with update indexed (X-form; ra <- EA).
void Assembler::ldux(Register rd, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


// Store doubleword (DS-form).
void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  DCHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}


// Store doubleword indexed (X-form).
void Assembler::stdx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


// Store doubleword with update (DS-form; "| 1" selects stdu; ra <- EA).
void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  DCHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}


// Store doubleword with update indexed (X-form; ra <- EA).
void Assembler::stdux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
1298
1299
// Rotate-left-doubleword-immediate family (MD/MDS forms), plus the
// conventional shift/clear pseudo-ops built on top of them.

// Rotate left immediate then clear: rotate rs by sh, mask from mb.
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}


// Rotate left immediate then clear left (bits 0..mb-1 cleared).
void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}


// Rotate left (amount in rb) then clear left.
void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}


// Rotate left immediate then clear right (bits me+1..63 cleared).
void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}


// Pseudo op: shift left doubleword immediate == rldicr sh, 63-sh.
void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, val.imm_, 63 - val.imm_, rc);
}


// Pseudo op: shift right doubleword immediate == rldicl 64-sh, sh.
void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 64 - val.imm_, val.imm_, rc);
}


// Pseudo op: clear the low val.imm_ bits (no rotate, clear right).
void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, 0, 63 - val.imm_, rc);
}


// Pseudo op: clear the high val.imm_ bits (no rotate, clear left).
void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 0, val.imm_, rc);
}


// Rotate left immediate then mask insert (insert rotated rs into ra).
void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}
1349
1350
// Shift right algebraic doubleword immediate.  The 6-bit shift amount
// is split across the encoding: low 5 bits at B11, the 6th bit at B1.
void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1f;
  int sh5 = (sh >> 5) & 0x1;

  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
       sh5 * B1 | r);
}


// Shift right doubleword (amount in src2).
void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRDX, dst, src1, src2, r);
}


// Shift left doubleword (amount in src2).
void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLDX, dst, src1, src2, r);
}


// Shift right algebraic doubleword (amount in rb).
void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAD, ra, rs, rb, r);
}


// Pseudo op: rotate left doubleword by register amount (rldcl, mb=0).
void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}


// Pseudo op: rotate left doubleword immediate (rldicl, mb=0).
void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}


// Pseudo op: rotate right by sh == rotate left by 64 - sh.
void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}


// Count leading zeros doubleword.  r0 here merely fills the unused rb
// slot of x_form; it is not an operand of the instruction.
void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
}
1393
1394
// Sign-extend low word to 64 bits (same parameter/field layout as
// extsb/extsh above).
void Assembler::extsw(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
}


// Multiply low doubleword: dst <- low 64 bits of src1 * src2.
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}


// Divide doubleword (signed): dst <- src1 / src2.
void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}
#endif
1411
1412
// Emit a fake (simulator-only) opcode; fopcode selects the behavior.
void Assembler::fake_asm(enum FAKE_OPCODE_T fopcode) {
  DCHECK(fopcode < fLastFaker);
  emit(FAKE_OPCODE | FAKER_SUBOPCODE | fopcode);
}


// Emit a simulator stub-trace marker; a no-op unless the
// trace_sim_stubs flag is on.
void Assembler::marker_asm(int mcode) {
  if (::v8::internal::FLAG_trace_sim_stubs) {
    DCHECK(mcode < F_NEXT_AVAILABLE_STUB_MARKER);
    emit(FAKE_OPCODE | MARKER_SUBOPCODE | mcode);
  }
}
1425
1426
// Function descriptor for AIX.
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
void Assembler::function_descriptor() {
  DCHECK(pc_offset() == 0);  // Descriptor must be the first thing emitted.
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  // Entry-point slot: points just past the 3-pointer descriptor itself.
  emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize);
  emit_ptr(0);  // TOC slot.
  emit_ptr(0);  // Static chain slot.
}
1437
1438
#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
// Fix up an internal reference at pc after the code has moved: either
// shift it by delta (buffer relocation) or rebase it onto code_start
// (deserialization).  Exactly one of delta / code_start is expected.
void Assembler::RelocateInternalReference(Address pc, intptr_t delta,
                                          Address code_start,
                                          ICacheFlushMode icache_flush_mode) {
  DCHECK(delta || code_start);
#if ABI_USES_FUNCTION_DESCRIPTORS
  uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
  // Zero TOC and static-chain slots identify a function descriptor
  // (see function_descriptor() above).
  if (fd[1] == 0 && fd[2] == 0) {
    // Function descriptor
    if (delta) {
      fd[0] += delta;
    } else {
      fd[0] = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize;
    }
    return;
  }
#endif
#if V8_OOL_CONSTANT_POOL
  // mov for LoadConstantPoolPointerRegister
  ConstantPoolArray* constant_pool = NULL;
  if (delta) {
    code_start = target_address_at(pc, constant_pool) + delta;
  }
  set_target_address_at(pc, constant_pool, code_start, icache_flush_mode);
#endif
}


// Disassembler helper: print a function descriptor at pc into buffer.
// Returns the number of bytes consumed (0 if pc is not a descriptor).
int Assembler::DecodeInternalReference(Vector<char> buffer, Address pc) {
#if ABI_USES_FUNCTION_DESCRIPTORS
  uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
  if (fd[1] == 0 && fd[2] == 0) {
    // Function descriptor
    SNPrintF(buffer, "[%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
                     "]"
                     " function descriptor",
             fd[0], fd[1], fd[2]);
    return kPointerSize * 3;
  }
#endif
  return 0;
}
#endif
1482
1483
// Number of instructions the mov() sequence for x will occupy.
// canOptimize is only needed for the constant-pool decision and for the
// debug assertion, hence the preprocessor guard.
int Assembler::instructions_required_for_mov(const Operand& x) const {
#if V8_OOL_CONSTANT_POOL || DEBUG
  bool canOptimize =
      !(x.must_output_reloc_info(this) || is_trampoline_pool_blocked());
#endif
#if V8_OOL_CONSTANT_POOL
  if (use_constant_pool_for_mov(x, canOptimize)) {
    // Current usage guarantees that all constant pool references can
    // use the same sequence.
    return kMovInstructionsConstantPool;
  }
#endif
  // NOTE(review): asserts callers only query this for movs that must use
  // the fixed-length sequence -- confirm against call sites.
  DCHECK(!canOptimize);
  return kMovInstructionsNoConstantPool;
}
1499
1500
#if V8_OOL_CONSTANT_POOL
// Decide whether mov(dst, x) should be materialized via the out-of-line
// constant pool instead of an immediate-load instruction sequence.
bool Assembler::use_constant_pool_for_mov(const Operand& x,
                                          bool canOptimize) const {
  // Without a usable constant pool the mov-immediate sequence is the
  // only option.
  if (!is_constant_pool_available() || is_constant_pool_full()) {
    return false;
  }

  // A 16-bit value loads in a single instruction; prefer that over a
  // pool reference whenever optimization is permitted.
  const intptr_t value = x.immediate();
  return !(canOptimize && is_int16(value));
}


// Grow the buffer if space_needed bytes (plus the safety gap) would not
// fit in the remaining buffer space.
void Assembler::EnsureSpaceFor(int space_needed) {
  const int required = kGap + space_needed;
  if (buffer_space() <= required) {
    GrowBuffer();
  }
}
#endif
1526
1527
1528 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
1529 if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
1530 if (assembler != NULL && assembler->predictable_code_size()) return true;
1531 return assembler->serializer_enabled();
1532 } else if (RelocInfo::IsNone(rmode_)) {
1533 return false;
1534 }
1535 return true;
1536 }
1537
1538
// Primarily used for loading constants
// This should really move to be in macro-assembler as it
// is really a pseudo instruction
// Some usages of this intend for a FIXED_SEQUENCE to be used
// Todo - break this dependency so we can optimize mov() in general
// and only use the generic version when we require a fixed sequence
void Assembler::mov(Register dst, const Operand& src) {
  intptr_t value = src.immediate();
  bool canOptimize;
  RelocInfo rinfo(pc_, src.rmode_, value, NULL);

  if (src.must_output_reloc_info(this)) {
    RecordRelocInfo(rinfo);
  }

  // A shorter-than-maximal sequence is only allowed when no reloc info
  // is attached (the site may be patched later) and the trampoline pool
  // is not blocked.
  canOptimize =
      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());

#if V8_OOL_CONSTANT_POOL
  if (use_constant_pool_for_mov(src, canOptimize)) {
    DCHECK(is_constant_pool_available());
    ConstantPoolAddEntry(rinfo);
#if V8_TARGET_ARCH_PPC64
    BlockTrampolinePoolScope block_trampoline_pool(this);
    // We are forced to use 2 instruction sequence since the constant
    // pool pointer is tagged.
    li(dst, Operand::Zero());
    ldx(dst, MemOperand(kConstantPoolRegister, dst));
#else
    lwz(dst, MemOperand(kConstantPoolRegister, 0));
#endif
    return;
  }
#endif

  if (canOptimize) {
    // Emit the shortest sequence that materializes value: a single li
    // for 16-bit values, otherwise lis/ori pieces (up to 5 instructions
    // for a full 64-bit value), skipping zero half-words.
    if (is_int16(value)) {
      li(dst, Operand(value));
    } else {
      uint16_t u16;
#if V8_TARGET_ARCH_PPC64
      if (is_int32(value)) {
#endif
        lis(dst, Operand(value >> 16));
#if V8_TARGET_ARCH_PPC64
      } else {
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
        } else {
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xffff);
          if (u16) {
            ori(dst, dst, Operand(u16));
          }
        }
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xffff);
        if (u16) {
          oris(dst, dst, Operand(u16));
        }
      }
#endif
      u16 = (value & 0xffff);
      if (u16) {
        ori(dst, dst, Operand(u16));
      }
    }
    return;
  }

  DCHECK(!canOptimize);

  {
    // Fixed-length sequence (see FIXED_SEQUENCE note above): always
    // emits every piece so the site can be patched in place.
    BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
    int32_t hi_32 = static_cast<int32_t>(value >> 32);
    int32_t lo_32 = static_cast<int32_t>(value);
    int hi_word = static_cast<int>(hi_32 >> 16);
    int lo_word = static_cast<int>(hi_32 & 0xffff);
    lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
    ori(dst, dst, Operand(lo_word));
    sldi(dst, dst, Operand(32));
    hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
    lo_word = static_cast<int>(lo_32 & 0xffff);
    oris(dst, dst, Operand(hi_word));
    ori(dst, dst, Operand(lo_word));
#else
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xffff);
    lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
    ori(dst, dst, Operand(lo_word));
#endif
  }
}
1633
1634
// Load dst with the offset of label within the code object (i.e. its
// position plus the code header), deferring via the label link chain if
// the label is not yet bound.
void Assembler::mov_label_offset(Register dst, Label* label) {
  if (label->is_bound()) {
    int target = label->pos();
    mov(dst, Operand(target + Code::kHeaderSize - kHeapObjectTag));
  } else {
    bool is_linked = label->is_linked();
    // Emit the link to the label in the code stream followed by extra
    // nop instructions.
    DCHECK(dst.is(r3));  // target_at_put assumes r3 for now
    int link = is_linked ? label->pos() - pc_offset() : 0;
    label->link_to(pc_offset());

    if (!is_linked && !trampoline_emitted_) {
      // First reference to an unbound label: reserve trampoline budget.
      unbound_labels_count_++;
      next_buffer_check_ -= kTrampolineSlotsSize;
    }

    // When the label is bound, these instructions will be patched
    // with a 2 instruction mov sequence that will load the
    // destination register with the position of the label from the
    // beginning of the code.
    //
    // When the label gets bound: target_at extracts the link and
    // target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(link);
    nop();
  }
}
1664
1665
// Special register instructions

// Condition-register bit XOR: CR[bt] <- CR[ba] ^ CR[bb].
void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
}


// Condition-register bit equivalence: CR[bt] <- ~(CR[ba] ^ CR[bb]).
void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
}


// Move from link register (SPR field uses the split-halves encoding).
void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);  // Ignore RC bit
}


// Move to link register.
void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);  // Ignore RC bit
}


// Move to count register.
void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);  // Ignore RC bit
}


// Move to fixed-point exception register (XER).
void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
}


// Move FPSCR field bfa into condition-register field bf.
void Assembler::mcrfs(int bf, int bfa) {
  emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}


// Move the whole condition register into dst.
void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
1703
1704
#if V8_TARGET_ARCH_PPC64
// Direct GPR <-> FPR moves (VSX mfvsr*/mtvsr* encodings).

// Move doubleword from FP register to GPR.
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}


// Move word (zero-extended) from FP register to GPR.
void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}


// Move doubleword from GPR to FP register.
void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}


// Move word (zero-extended) from GPR to FP register.
void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}


// Move word (sign-extended, "algebraic") from GPR to FP register.
void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
#endif
1730
1731
// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-ppc.h .
// NOTE(review): msg, code and cr (beyond the conditional branch) are
// currently unused by the emitted code.
void Assembler::stop(const char* msg, Condition cond, int32_t code,
                     CRegister cr) {
  if (cond != al) {
    // Conditional stop: branch around the breakpoint when the condition
    // does not hold.
    Label skip;
    b(NegateCondition(cond), &skip, cr);
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
}


// Breakpoint.  imm16 is ignored; always emits the fixed trap encoding
// 0x7d821008.
void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }
1749
1750
// Simulator-only informational marker: emits a marker word followed by
// the message pointer (split into two words on 64-bit).  A no-op unless
// the trace_sim_stubs flag is on.  cond, code and cr are unused.
void Assembler::info(const char* msg, Condition cond, int32_t code,
                     CRegister cr) {
  if (::v8::internal::FLAG_trace_sim_stubs) {
    emit(0x7d9ff808);
#if V8_TARGET_ARCH_PPC64
    uint64_t value = reinterpret_cast<uint64_t>(msg);
    emit(static_cast<uint32_t>(value >> 32));
    emit(static_cast<uint32_t>(value & 0xFFFFFFFF));
#else
    emit(reinterpret_cast<Instr>(msg));
#endif
  }
}
1764
1765
// Data cache block flush at address ra + rb.
void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}


// Heavyweight memory barrier (sync).
void Assembler::sync() { emit(EXT2 | SYNC); }


// Lightweight memory barrier (lwsync: sync with L=1).
void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }


// Instruction cache block invalidate at address ra + rb.
void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}


// Instruction synchronize (discard prefetched instructions).
void Assembler::isync() { emit(EXT1 | ISYNC); }
1783
1784
1785 // Floating point support
1786
1787 void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
1788 int offset = src.offset();
1789 Register ra = src.ra();
1790 DCHECK(is_int16(offset));
1791 int imm16 = offset & kImm16Mask;
1792 // could be x_form instruction with some casting magic
1793 emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
1794 }
1795
1796
1797 void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
1798 int offset = src.offset();
1799 Register ra = src.ra();
1800 DCHECK(is_int16(offset));
1801 int imm16 = offset & kImm16Mask;
1802 // could be x_form instruction with some casting magic
1803 emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
1804 }
1805
1806
// Load floating-point double indexed (X-form: frt <- MEM(ra + rb, 8)).
void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Load floating-point double with update indexed (ra <- EA).
void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Load floating-point single (D-form; converted to double in frt).
void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
}


// Load floating-point single with update (D-form; ra <- EA).
void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
}


// Load floating-point single indexed (X-form).
void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Load floating-point single with update indexed (ra <- EA).
void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
1863
1864
// Floating-point stores; same D-form / X-form / update conventions as
// the FP loads above.

// Store floating-point double (D-form).
void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
}


// Store floating-point double with update (D-form; ra <- EA).
void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
}


// Store floating-point double indexed (X-form).
void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Store floating-point double with update indexed (ra <- EA).
void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Store floating-point single (D-form; double in frs rounded to single).
void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
}


// Store floating-point single with update (D-form; ra <- EA).
void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
}


// Store floating-point single indexed (X-form).
void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Store floating-point single with update indexed (ra <- EA).
void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
1943
1944
// Floating-point arithmetic, compare, conversion and FPSCR access.
// A-form operand slots: fra at B16, frb at B11, frc at B6.

// frt <- fra - frb.
void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}


// frt <- fra + frb.
void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}


// frt <- fra * frc (note: the multiplicand uses the frc slot at B6).
void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, RCBit rc) {
  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
       rc);
}


// frt <- fra / frb.
void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}


// Unordered compare of fra with frb into condition field cr.
void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
                      CRegister cr) {
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
}


// FP register move: frt <- frb.
void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
                    RCBit rc) {
  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
}


// Convert to integer word, round toward zero.
void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}


// Convert to integer word using current rounding mode.
void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}


// Round to integer toward minus infinity (floor).
void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11);
}


// Round double to single precision.
void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
}


// Convert from signed 64-bit integer to double.
void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}


// Convert to 64-bit integer using current rounding mode.
void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
}


// Convert to 64-bit integer, round toward zero.
void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
}


// FP select: frt <- (fra >= 0) ? frc : frb.
void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}


// FP negate: frt <- -frb.
void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
}


// Move immediate into FPSCR field bf.
void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}


// Move FPSCR contents into frt.
void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
  emit(EXT4 | MFFS | frt.code() * B21 | rc);
}


// Move frb into the FPSCR fields selected by the FLM mask (L and W are
// the extended-field control bits of the mtfsf encoding).
void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
                      RCBit rc) {
  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
}


// FP square root: frt <- sqrt(frb).
void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
}


// FP absolute value: frt <- |frb|.
void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
}


// Fused multiply-add: frt <- fra * frc + frb.
void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}


// Fused multiply-subtract: frt <- fra * frc - frb.
void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
2078
2079
2080 // Pseudo instructions.
2081 void Assembler::nop(int type) {
2082 Register reg = r0;
2083 switch (type) {
2084 case NON_MARKING_NOP:
2085 reg = r0;
2086 break;
2087 case GROUP_ENDING_NOP:
2088 reg = r2;
2089 break;
2090 case DEBUG_BREAK_NOP:
2091 reg = r3;
2092 break;
2093 default:
2094 UNIMPLEMENTED();
2095 }
2096
2097 ori(reg, reg, Operand::Zero());
2098 }
2099
2100
2101 bool Assembler::IsNop(Instr instr, int type) {
2102 int reg = 0;
2103 switch (type) {
2104 case NON_MARKING_NOP:
2105 reg = 0;
2106 break;
2107 case GROUP_ENDING_NOP:
2108 reg = 2;
2109 break;
2110 case DEBUG_BREAK_NOP:
2111 reg = 3;
2112 break;
2113 default:
2114 UNIMPLEMENTED();
2115 }
2116 return instr == (ORI | reg * B21 | reg * B16);
2117 }
2118
2119
// Debugging.

// Record a JS return site in the reloc info.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


// Record a patchable debug-break slot in the reloc info.
void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


// Attach a code comment at the current pc (only when --code-comments).
void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}
2141
2142
// Grow the code buffer: allocate a larger one, copy instructions (which
// grow up from the start) and reloc info (which grows down from the
// end), then fix up every pointer into the old buffer.
void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: at least 4KB, doubling up to 1MB, then
  // growing linearly by 1MB.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.  Instructions keep their offset from the start;
  // reloc info keeps its offset from the end, hence two deltas.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta =
      (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
  // Relocate runtime entries.
  for (RelocIterator it(desc); !it.done(); it.next()) {
    RelocInfo::Mode rmode = it.rinfo()->rmode();
    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
      RelocateInternalReference(it.rinfo()->pc(), pc_delta, 0);
    }
  }
#if V8_OOL_CONSTANT_POOL
  constant_pool_builder_.Relocate(pc_delta);
#endif
#endif
}
2196
2197
2198 void Assembler::db(uint8_t data) {
2199 CheckBuffer();
2200 *reinterpret_cast<uint8_t*>(pc_) = data;
2201 pc_ += sizeof(uint8_t);
2202 }
2203
2204
2205 void Assembler::dd(uint32_t data) {
2206 CheckBuffer();
2207 *reinterpret_cast<uint32_t*>(pc_) = data;
2208 pc_ += sizeof(uint32_t);
2209 }
2210
2211
2212 void Assembler::emit_ptr(uintptr_t data) {
2213 CheckBuffer();
2214 *reinterpret_cast<uintptr_t*>(pc_) = data;
2215 pc_ += sizeof(uintptr_t);
2216 }
2217
2218
2219 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2220 RelocInfo rinfo(pc_, rmode, data, NULL);
2221 RecordRelocInfo(rinfo);
2222 }
2223
2224
2225 void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
2226 if (rinfo.rmode() >= RelocInfo::JS_RETURN &&
2227 rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) {
2228 // Adjust code for new modes.
2229 DCHECK(RelocInfo::IsDebugBreakSlot(rinfo.rmode()) ||
2230 RelocInfo::IsJSReturn(rinfo.rmode()) ||
2231 RelocInfo::IsComment(rinfo.rmode()) ||
2232 RelocInfo::IsPosition(rinfo.rmode()));
2233 }
2234 if (!RelocInfo::IsNone(rinfo.rmode())) {
2235 // Don't record external references unless the heap will be serialized.
2236 if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
2237 if (!serializer_enabled() && !emit_debug_code()) {
2238 return;
2239 }
2240 }
2241 DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
2242 if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
2243 RelocInfo reloc_info_with_ast_id(rinfo.pc(), rinfo.rmode(),
2244 RecordedAstId().ToInt(), NULL);
2245 ClearRecordedAstId();
2246 reloc_info_writer.Write(&reloc_info_with_ast_id);
2247 } else {
2248 reloc_info_writer.Write(&rinfo);
2249 }
2250 }
2251 }
2252
2253
2254 void Assembler::BlockTrampolinePoolFor(int instructions) {
2255 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
2256 }
2257
2258
// Emits the trampoline pool (at most once per Assembler) unless emission
// is currently blocked, and schedules the next check accordingly.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if ((trampoline_pool_blocked_nesting_ > 0) ||
      (pc_offset() < no_trampoline_pool_before_)) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    if (trampoline_pool_blocked_nesting_ > 0) {
      next_buffer_check_ = pc_offset() + kInstrSize;
    } else {
      next_buffer_check_ = no_trampoline_pool_before_;
    }
    return;
  }

  // The pool is emitted at most once (see next_buffer_check_ = kMaxInt
  // below), so reaching this point twice would be a bug.
  DCHECK(!trampoline_emitted_);
  DCHECK(unbound_labels_count_ >= 0);
  if (unbound_labels_count_ > 0) {
    // First we emit jump, then we emit trampoline pool.
    {
      BlockTrampolinePoolScope block_trampoline_pool(this);
      Label after_pool;
      // Branch over the pool so fall-through execution skips it.
      b(&after_pool);

      // Reserve one branch slot per currently-unbound label; the
      // Trampoline object hands these slots out later.
      int pool_start = pc_offset();
      for (int i = 0; i < unbound_labels_count_; i++) {
        b(&after_pool);
      }
      bind(&after_pool);
      trampoline_ = Trampoline(pool_start, unbound_labels_count_);

      trampoline_emitted_ = true;
      // As we are only going to emit trampoline once, we need to prevent any
      // further emission.
      next_buffer_check_ = kMaxInt;
    }
  } else {
    // Number of branches to unbound label at this point is zero, so we can
    // move next buffer check to maximum.
    next_buffer_check_ =
        pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
  }
  return;
}
2306
2307
// Allocates the ConstantPoolArray backing the generated code; returns the
// canonical empty array when out-of-line constant pools are disabled.
Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
#if V8_OOL_CONSTANT_POOL
  return constant_pool_builder_.New(isolate);
#else
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
  return isolate->factory()->empty_constant_pool_array();
#endif
}
2317
2318
// Fills the allocated constant pool with the collected entries; a no-op
// when out-of-line constant pools are disabled.
void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
#if V8_OOL_CONSTANT_POOL
  constant_pool_builder_.Populate(this, constant_pool);
#else
  // No out-of-line constant pool support.
  DCHECK(!FLAG_enable_ool_constant_pool);
#endif
}
2327
2328
2329 #if V8_OOL_CONSTANT_POOL
// Starts with an empty pool; entries accumulate in the small section
// until it overflows into the extended section (see AddEntry).
ConstantPoolBuilder::ConstantPoolBuilder()
    : size_(0),
      entries_(),
      current_section_(ConstantPoolArray::SMALL_SECTION) {}
2334
2335
2336 bool ConstantPoolBuilder::IsEmpty() { return entries_.size() == 0; }
2337
2338
// Maps a relocation mode to the constant pool slot type it requires.
// NOTE: the braces of the first if/else-if arms deliberately straddle the
// preprocessor branches below; on PPC64 everything non-GC is INT64, on
// PPC32 NONE64 is INT64 and other non-GC modes are INT32.
ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
    RelocInfo::Mode rmode) {
#if V8_TARGET_ARCH_PPC64
  // We don't support 32-bit entries at this time.
  if (!RelocInfo::IsGCRelocMode(rmode)) {
    return ConstantPoolArray::INT64;
#else
  if (rmode == RelocInfo::NONE64) {
    return ConstantPoolArray::INT64;
  } else if (!RelocInfo::IsGCRelocMode(rmode)) {
    return ConstantPoolArray::INT32;
#endif
  } else if (RelocInfo::IsCodeTarget(rmode)) {
    return ConstantPoolArray::CODE_PTR;
  } else {
    // GC-visible, non-code-target entries hold heap object pointers.
    DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
    return ConstantPoolArray::HEAP_PTR;
  }
}
2358
2359
// Records a constant pool load at rinfo.pc() and returns the layout
// section (small or extended) the entry was assigned to, merging
// duplicate unpatched entries where possible.
ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
    Assembler* assm, const RelocInfo& rinfo) {
  RelocInfo::Mode rmode = rinfo.rmode();
  // These modes never go through the constant pool.
  DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
         rmode != RelocInfo::STATEMENT_POSITION &&
         rmode != RelocInfo::CONST_POOL);

  // Try to merge entries which won't be patched.
  int merged_index = -1;
  ConstantPoolArray::LayoutSection entry_section = current_section_;
  if (RelocInfo::IsNone(rmode) ||
      (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
    // Linear scan for an identical existing entry.
    size_t i;
    std::vector<ConstantPoolEntry>::const_iterator it;
    for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
      if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
        // Merge with found entry.
        merged_index = i;
        // A merged entry inherits the section of the entry it merged
        // with, which may be an earlier (smaller) section.
        entry_section = entries_[i].section_;
        break;
      }
    }
  }
  DCHECK(entry_section <= current_section_);
  entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));

  if (merged_index == -1) {
    // Not merged, so update the appropriate count.
    number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
  }

  // Check if we still have room for another entry in the small section
  // given the limitations of the header's layout fields.
  if (current_section_ == ConstantPoolArray::SMALL_SECTION) {
    size_ = ConstantPoolArray::SizeFor(*small_entries());
    if (!is_uint12(size_)) {
      // Small section is full; route all subsequent entries to the
      // extended section.
      current_section_ = ConstantPoolArray::EXTENDED_SECTION;
    }
  } else {
    size_ = ConstantPoolArray::SizeForExtended(*small_entries(),
                                               *extended_entries());
  }

  return entry_section;
}
2405
2406
2407 void ConstantPoolBuilder::Relocate(intptr_t pc_delta) {
2408 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
2409 entry != entries_.end(); entry++) {
2410 DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
2411 entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
2412 }
2413 }
2414
2415
2416 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
2417 if (IsEmpty()) {
2418 return isolate->factory()->empty_constant_pool_array();
2419 } else if (extended_entries()->is_empty()) {
2420 return isolate->factory()->NewConstantPoolArray(*small_entries());
2421 } else {
2422 DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
2423 return isolate->factory()->NewExtendedConstantPoolArray(
2424 *small_entries(), *extended_entries());
2425 }
2426 }
2427
2428
// Writes the collected constants into the allocated constant pool array
// and patches each recorded load instruction with its final offset.
void ConstantPoolBuilder::Populate(Assembler* assm,
                                   ConstantPoolArray* constant_pool) {
  // The allocated array's layout must match what was accumulated.
  DCHECK_EQ(extended_entries()->is_empty(),
            !constant_pool->is_extended_layout());
  DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
      constant_pool, ConstantPoolArray::SMALL_SECTION)));
  if (constant_pool->is_extended_layout()) {
    DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
        constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
  }

  // Set up initial offsets.
  int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
             [ConstantPoolArray::NUMBER_OF_TYPES];
  for (int section = 0; section <= constant_pool->final_section(); section++) {
    // Extended-section elements are laid out after all small-section ones.
    int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
                            ? small_entries()->total_count()
                            : 0;
    for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
      ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
      if (number_of_entries_[section].count_of(type) != 0) {
        offsets[section][type] = constant_pool->OffsetOfElementAt(
            number_of_entries_[section].base_of(type) + section_start);
      }
    }
  }

  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
       entry != entries_.end(); entry++) {
    RelocInfo rinfo = entry->rinfo_;
    RelocInfo::Mode rmode = entry->rinfo_.rmode();
    ConstantPoolArray::Type type = GetConstantPoolType(rmode);

    // Update constant pool if necessary and get the entry's offset.
    int offset;
    if (entry->merged_index_ == -1) {
      offset = offsets[entry->section_][type];
      offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
      // NOTE: the INT64/INT32 branches straddle the #if arms below; on
      // PPC64 only INT64 exists, on PPC32 both widths are handled.
      if (type == ConstantPoolArray::INT64) {
#if V8_TARGET_ARCH_PPC64
        constant_pool->set_at_offset(offset, rinfo.data());
#else
        constant_pool->set_at_offset(offset, rinfo.data64());
      } else if (type == ConstantPoolArray::INT32) {
        constant_pool->set_at_offset(offset,
                                     static_cast<int32_t>(rinfo.data()));
#endif
      } else if (type == ConstantPoolArray::CODE_PTR) {
        constant_pool->set_at_offset(offset,
                                     reinterpret_cast<Address>(rinfo.data()));
      } else {
        DCHECK(type == ConstantPoolArray::HEAP_PTR);
        constant_pool->set_at_offset(offset,
                                     reinterpret_cast<Object*>(rinfo.data()));
      }
      // Offsets are biased by kHeapObjectTag -- presumably so loads can
      // use the tagged pool pointer as base; confirm against the load
      // patching in SetConstantPoolOffset.
      offset -= kHeapObjectTag;
      entry->merged_index_ = offset;  // Stash offset for merged entries.
    } else {
      // Merged entry: reuse the offset stashed by the earlier entry,
      // which must precede this one in the vector.
      DCHECK(entry->merged_index_ < (entry - entries_.begin()));
      offset = entries_[entry->merged_index_].merged_index_;
    }

    // Patch load instruction with correct offset.
    Assembler::SetConstantPoolOffset(rinfo.pc(), offset);
  }
}
2495 #endif
2496 }
2497 } // namespace v8::internal
2498
2499 #endif // V8_TARGET_ARCH_PPC
OLDNEW
« no previous file with comments | « src/ppc/assembler-ppc.h ('k') | src/ppc/assembler-ppc-inl.h » ('j') | src/ppc/code-stubs-ppc.cc » ('J')

Powered by Google App Engine
This is Rietveld 408576698