Chromium Code Reviews

Side by Side Diff: src/ppc/assembler-ppc.cc

Issue 422063005: Contribution of PowerPC port. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: re-upload - catch up to 8/19 level (created 6 years, 3 months ago)
1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32
33 // The original source code covered by the above license has been
34 // modified significantly by Google Inc.
35 // Copyright 2012 the V8 project authors. All rights reserved.
36
37 //
38 // Copyright IBM Corp. 2012, 2013. All rights reserved.
39 //
40
41 #include "src/v8.h"
42
43 #if V8_TARGET_ARCH_PPC
44
45 #include "src/base/cpu.h"
46 #include "src/ppc/assembler-ppc-inl.h"
47
48 #include "src/macro-assembler.h"
49 #include "src/serialize.h"
50
51 namespace v8 {
52 namespace internal {
53
54 // Get the CPU features enabled by the build.
55 static unsigned CpuFeaturesImpliedByCompiler() {
56 unsigned answer = 0;
57 return answer;
58 }
59
60
61 void CpuFeatures::ProbeImpl(bool cross_compile) {
62 supported_ |= CpuFeaturesImpliedByCompiler();
63 cache_line_size_ = 128;
64
65 // Only use statically determined features for cross compile (snapshot).
66 if (cross_compile) return;
67
68 // Detect whether the frim instruction is supported (POWER5+).
69 // For now we just check for processors we know do not
70 // support it.
71 #ifndef USE_SIMULATOR
72 // Probe for additional features at runtime.
73 base::CPU cpu;
74 #if V8_TARGET_ARCH_PPC64
75 if (cpu.part() == base::CPU::PPC_POWER8) {
76 supported_ |= (1u << FPR_GPR_MOV);
77 }
78 #endif
79 #if V8_OS_LINUX
80 if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
81 // Assume support
82 supported_ |= (1u << FPU);
83 }
84 if (cpu.cache_line_size() != 0) {
85 cache_line_size_ = cpu.cache_line_size();
86 }
87 #elif V8_OS_AIX
88 // Assume FP support and default cache line size.
89 supported_ |= (1u << FPU);
90 #endif
91 #else // Simulator
92 supported_ |= (1u << FPU);
93 #if V8_TARGET_ARCH_PPC64
94 supported_ |= (1u << FPR_GPR_MOV);
95 #endif
96 #endif
97 }
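// Usage note (sketch, not part of this patch): generated code gates
// instruction selection on the probed bits; e.g. the FPR<->GPR move
// instructions further below are only valid when
// CpuFeatures::IsSupported(FPR_GPR_MOV) holds (POWER8 and newer, per the
// probe above).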
98
99
100 void CpuFeatures::PrintTarget() {
101 const char* ppc_arch = NULL;
102
103 #if V8_TARGET_ARCH_PPC64
104 ppc_arch = "ppc64";
105 #else
106 ppc_arch = "ppc";
107 #endif
108
109 printf("target %s\n", ppc_arch);
110 }
111
112
113 void CpuFeatures::PrintFeatures() {
114 printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
115 }
116
117
118 Register ToRegister(int num) {
119 DCHECK(num >= 0 && num < kNumRegisters);
120 const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7,
121 r8, r9, r10, r11, ip, r13, r14, r15,
122 r16, r17, r18, r19, r20, r21, r22, r23,
123 r24, r25, r26, r27, r28, r29, r30, fp};
124 return kRegisters[num];
125 }
126
127
128 const char* DoubleRegister::AllocationIndexToString(int index) {
129 DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
130 const char* const names[] = {
131 "d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8", "d9", "d10",
132 "d11", "d12", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
133 "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
134 return names[index];
135 }
136
137
138 // -----------------------------------------------------------------------------
139 // Implementation of RelocInfo
140
141 const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
142
143
144 bool RelocInfo::IsCodedSpecially() {
145 // The deserializer needs to know whether a pointer is specially
146 // coded. Being specially coded on PPC means that it is a lis/ori
147 // instruction sequence or is an out of line constant pool entry,
148 // and these are always the case inside code objects.
149 return true;
150 }
151
152
153 bool RelocInfo::IsInConstantPool() {
154 #if V8_OOL_CONSTANT_POOL
155 return Assembler::IsConstantPoolLoadStart(pc_);
156 #else
157 return false;
158 #endif
159 }
160
161
162 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
163 // Patch the code at the current address with the supplied instructions.
164 Instr* pc = reinterpret_cast<Instr*>(pc_);
165 Instr* instr = reinterpret_cast<Instr*>(instructions);
166 for (int i = 0; i < instruction_count; i++) {
167 *(pc + i) = *(instr + i);
168 }
169
170 // Indicate that code has changed.
171 CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
172 }
173
174
175 // Patch the code at the current PC with a call to the target address.
176 // Additional guard instructions can be added if required.
177 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
178 // Patch the code at the current address with a call to the target.
179 UNIMPLEMENTED();
180 }
181
182
183 // -----------------------------------------------------------------------------
184 // Implementation of Operand and MemOperand
185 // See assembler-ppc-inl.h for inlined constructors
186
187 Operand::Operand(Handle<Object> handle) {
188 AllowDeferredHandleDereference using_raw_address;
189 rm_ = no_reg;
190 // Verify all Objects referred to by code are NOT in new space.
191 Object* obj = *handle;
192 if (obj->IsHeapObject()) {
193 DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
194 imm_ = reinterpret_cast<intptr_t>(handle.location());
195 rmode_ = RelocInfo::EMBEDDED_OBJECT;
196 } else {
197 // no relocation needed
198 imm_ = reinterpret_cast<intptr_t>(obj);
199 rmode_ = kRelocInfo_NONEPTR;
200 }
201 }
202
203
204 MemOperand::MemOperand(Register rn, int32_t offset) {
205 ra_ = rn;
206 rb_ = no_reg;
207 offset_ = offset;
208 }
209
210
211 MemOperand::MemOperand(Register ra, Register rb) {
212 ra_ = ra;
213 rb_ = rb;
214 offset_ = 0;
215 }
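// Illustrative usage (sketch, not part of this patch): the two
// constructors select between D-form (immediate offset) and X-form
// (register-indexed) operands, and the emitters below dispatch
// accordingly:
//
//   lwz(r4, MemOperand(r3, 8));    // D-form:  lwz  r4, 8(r3)
//   lwzx(r4, MemOperand(r3, r5));  // X-form:  lwzx r4, r3, r5
//
// Offsets must fit a signed 16-bit field (see d_form below), and ra must
// not be r0, which PPC treats as literal zero in address computation.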
216
217
218 // -----------------------------------------------------------------------------
219 // Specific instructions, constants, and masks.
220
221 // Spare buffer.
222 static const int kMinimalBufferSize = 4 * KB;
223
224
225 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
226 : AssemblerBase(isolate, buffer, buffer_size),
227 recorded_ast_id_(TypeFeedbackId::None()),
228 #if V8_OOL_CONSTANT_POOL
229 constant_pool_builder_(),
230 #endif
231 positions_recorder_(this) {
232 reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
233
234 no_trampoline_pool_before_ = 0;
235 trampoline_pool_blocked_nesting_ = 0;
236 // We leave space (kMaxBlockTrampolineSectionSize)
237 // for BlockTrampolinePoolScope buffer.
238 next_buffer_check_ =
239 FLAG_force_long_branches ? kMaxInt : kMaxCondBranchReach -
240 kMaxBlockTrampolineSectionSize;
241 internal_trampoline_exception_ = false;
242 last_bound_pos_ = 0;
243
244 trampoline_emitted_ = FLAG_force_long_branches;
245 unbound_labels_count_ = 0;
246
247 #if V8_OOL_CONSTANT_POOL
248 constant_pool_available_ = false;
249 #endif
250
251 ClearRecordedAstId();
252 }
253
254
255 void Assembler::GetCode(CodeDesc* desc) {
256 // Set up code descriptor.
257 desc->buffer = buffer_;
258 desc->buffer_size = buffer_size_;
259 desc->instr_size = pc_offset();
260 desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
261 desc->origin = this;
262 }
263
264
265 void Assembler::Align(int m) {
266 DCHECK(m >= 4 && IsPowerOf2(m));
267 while ((pc_offset() & (m - 1)) != 0) {
268 nop();
269 }
270 }
271
272
273 void Assembler::CodeTargetAlign() { Align(8); }
274
275
276 Condition Assembler::GetCondition(Instr instr) {
277 switch (instr & kCondMask) {
278 case BT:
279 return eq;
280 case BF:
281 return ne;
282 default:
283 UNIMPLEMENTED();
284 }
285 return al;
286 }
287
288
289 bool Assembler::IsLis(Instr instr) {
290 return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0);
291 }
292
293
294 bool Assembler::IsLi(Instr instr) {
295 return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0);
296 }
297
298
299 bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }
300
301
302 bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }
303
304
305 bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }
306
307
308 Register Assembler::GetRA(Instr instr) {
309 Register reg;
310 reg.code_ = Instruction::RAValue(instr);
311 return reg;
312 }
313
314
315 Register Assembler::GetRB(Instr instr) {
316 Register reg;
317 reg.code_ = Instruction::RBValue(instr);
318 return reg;
319 }
320
321
322 #if V8_TARGET_ARCH_PPC64
323 // This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
324 bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
325 Instr instr4, Instr instr5) {
326 // Check the instructions are indeed a five part load (into r12)
327 // 3d800000 lis r12, 0
328 // 618c0000 ori r12, r12, 0
329 // 798c07c6 rldicr r12, r12, 32, 31
330 // 658c00c3 oris r12, r12, 195
331 // 618ccd40 ori r12, r12, 52544
332 return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
333 (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) &&
334 ((instr5 >> 16) == 0x618c));
335 }
336 #else
337 // This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
338 bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
339 // Check the instruction is indeed a two part load (into r12)
340 // 3d802553 lis r12, 9555
341 // 618c5000 ori r12, r12, 20480
342 return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
343 }
344 #endif
345
346
347 bool Assembler::IsCmpRegister(Instr instr) {
348 return (((instr & kOpcodeMask) == EXT2) &&
349 ((instr & kExt2OpcodeMask) == CMP));
350 }
351
352
353 bool Assembler::IsRlwinm(Instr instr) {
354 return ((instr & kOpcodeMask) == RLWINMX);
355 }
356
357
358 #if V8_TARGET_ARCH_PPC64
359 bool Assembler::IsRldicl(Instr instr) {
360 return (((instr & kOpcodeMask) == EXT5) &&
361 ((instr & kExt5OpcodeMask) == RLDICL));
362 }
363 #endif
364
365
366 bool Assembler::IsCmpImmediate(Instr instr) {
367 return ((instr & kOpcodeMask) == CMPI);
368 }
369
370
371 bool Assembler::IsCrSet(Instr instr) {
372 return (((instr & kOpcodeMask) == EXT1) &&
373 ((instr & kExt1OpcodeMask) == CREQV));
374 }
375
376
377 Register Assembler::GetCmpImmediateRegister(Instr instr) {
378 DCHECK(IsCmpImmediate(instr));
379 return GetRA(instr);
380 }
381
382
383 int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
384 DCHECK(IsCmpImmediate(instr));
385 return instr & kOff16Mask;
386 }
387
388
389 // Labels refer to positions in the (to be) generated code.
390 // There are bound, linked, and unused labels.
391 //
392 // Bound labels refer to known positions in the already
393 // generated code. pos() is the position the label refers to.
394 //
395 // Linked labels refer to unknown positions in the code
396 // to be generated; pos() is the position of the last
397 // instruction using the label.
398
399
400 // The link chain is terminated by a negative code position (must be aligned)
401 const int kEndOfChain = -4;
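// Worked example (sketch): three forward references to the same unbound
// label L, emitted at code offsets 0, 8 and 16.  Each instruction stores
// the delta back to the previous reference, so the chain can be walked:
//
//   pc = 0:  b  <imm26 = 0>    first use: branch-to-self, decoded by
//                              target_at() as kEndOfChain
//   pc = 8:  b  <imm26 = -8>   links back to pc = 0
//   pc = 16: b  <imm26 = -8>   links back to pc = 8
//
// bind_to() walks 16 -> 8 -> 0, patching each instruction with the real
// target via target_at_put().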
402
403
404 int Assembler::target_at(int pos) {
405 Instr instr = instr_at(pos);
406 // Check which type of branch this is: 16- or 26-bit offset.
407 int opcode = instr & kOpcodeMask;
408 if (BX == opcode) {
409 int imm26 = ((instr & kImm26Mask) << 6) >> 6;
410 imm26 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
411 if (imm26 == 0) return kEndOfChain;
412 return pos + imm26;
413 } else if (BCX == opcode) {
414 int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
415 imm16 &= ~(kAAMask | kLKMask); // discard AA|LK bits if present
416 if (imm16 == 0) return kEndOfChain;
417 return pos + imm16;
418 } else if ((instr & ~kImm26Mask) == 0) {
419 // Emitted link to a label, not part of a branch (regexp PushBacktrack).
420 if (instr == 0) {
421 return kEndOfChain;
422 } else {
423 int32_t imm26 = SIGN_EXT_IMM26(instr);
424 return (imm26 + pos);
425 }
426 }
427
428 DCHECK(false);
429 return -1;
430 }
431
432
433 void Assembler::target_at_put(int pos, int target_pos) {
434 Instr instr = instr_at(pos);
435 int opcode = instr & kOpcodeMask;
436
437 // Check which type of branch this is: 16- or 26-bit offset.
438 if (BX == opcode) {
439 int imm26 = target_pos - pos;
440 DCHECK((imm26 & (kAAMask | kLKMask)) == 0);
441 instr &= ((~kImm26Mask) | kAAMask | kLKMask);
442 DCHECK(is_int26(imm26));
443 instr_at_put(pos, instr | (imm26 & kImm26Mask));
444 return;
445 } else if (BCX == opcode) {
446 int imm16 = target_pos - pos;
447 DCHECK((imm16 & (kAAMask | kLKMask)) == 0);
448 instr &= ((~kImm16Mask) | kAAMask | kLKMask);
449 DCHECK(is_int16(imm16));
450 instr_at_put(pos, instr | (imm16 & kImm16Mask));
451 return;
452 } else if ((instr & ~kImm26Mask) == 0) {
453 DCHECK(target_pos == kEndOfChain || target_pos >= 0);
454 // Emitted link to a label, not part of a branch (regexp PushBacktrack).
455 // Load the position of the label relative to the generated code object
456 // pointer in a register.
457
458 Register dst = r3; // we assume r3 for now
459 DCHECK(IsNop(instr_at(pos + kInstrSize)));
460 uint32_t target = target_pos + (Code::kHeaderSize - kHeapObjectTag);
461 CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
462 CodePatcher::DONT_FLUSH);
463 int target_hi = static_cast<int>(target) >> 16;
464 int target_lo = static_cast<int>(target) & 0xffff;
465
466 patcher.masm()->lis(dst, Operand(SIGN_EXT_IMM16(target_hi)));
467 patcher.masm()->ori(dst, dst, Operand(target_lo));
468 return;
469 }
470
471 DCHECK(false);
472 }
473
474
475 int Assembler::max_reach_from(int pos) {
476 Instr instr = instr_at(pos);
477 int opcode = instr & kOpcodeMask;
478
479 // Check which type of branch this is: 16- or 26-bit offset.
480 if (BX == opcode) {
481 return 26;
482 } else if (BCX == opcode) {
483 return 16;
484 } else if ((instr & ~kImm26Mask) == 0) {
485 // Emitted label constant, not part of a branch (regexp PushBacktrack).
486 return 26;
487 }
488
489 DCHECK(false);
490 return 0;
491 }
492
493
494 void Assembler::bind_to(Label* L, int pos) {
495 DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position
496 int32_t trampoline_pos = kInvalidSlotPos;
497 if (L->is_linked() && !trampoline_emitted_) {
498 unbound_labels_count_--;
499 next_buffer_check_ += kTrampolineSlotsSize;
500 }
501
502 while (L->is_linked()) {
503 int fixup_pos = L->pos();
504 int32_t offset = pos - fixup_pos;
505 int maxReach = max_reach_from(fixup_pos);
506 next(L); // call next before overwriting link with target at fixup_pos
507 if (!is_intn(offset, maxReach)) {
508 if (trampoline_pos == kInvalidSlotPos) {
509 trampoline_pos = get_trampoline_entry();
510 CHECK(trampoline_pos != kInvalidSlotPos);
511 target_at_put(trampoline_pos, pos);
512 }
513 target_at_put(fixup_pos, trampoline_pos);
514 } else {
515 target_at_put(fixup_pos, pos);
516 }
517 }
518 L->bind_to(pos);
519
520 // Keep track of the last bound label so we don't eliminate any instructions
521 // before a bound label.
522 if (pos > last_bound_pos_) last_bound_pos_ = pos;
523 }
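// Example (sketch): a conditional branch linked at fixup_pos has only a
// 16-bit (+/-32KB) reach.  If the label is bound out of range, bind_to()
// claims one trampoline slot -- an unconditional branch with 26-bit reach
// that was emitted within range -- and splits the branch in two:
//
//   fixup_pos:      bc -> trampoline_pos   (fits in 16 bits)
//   trampoline_pos: b  -> pos              (fits in 26 bits)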
524
525
526 void Assembler::bind(Label* L) {
527 DCHECK(!L->is_bound()); // label can only be bound once
528 bind_to(L, pc_offset());
529 }
530
531
532 void Assembler::next(Label* L) {
533 DCHECK(L->is_linked());
534 int link = target_at(L->pos());
535 if (link == kEndOfChain) {
536 L->Unuse();
537 } else {
538 DCHECK(link >= 0);
539 L->link_to(link);
540 }
541 }
542
543
544 bool Assembler::is_near(Label* L, Condition cond) {
545 DCHECK(L->is_bound());
546 if (!L->is_bound()) return false;
547
548 int maxReach = ((cond == al) ? 26 : 16);
549 int offset = L->pos() - pc_offset();
550
551 return is_intn(offset, maxReach);
552 }
553
554
555 void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
556 DoubleRegister frb, RCBit r) {
557 emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
558 }
559
560
561 void Assembler::d_form(Instr instr, Register rt, Register ra,
562 const intptr_t val, bool signed_disp) {
563 if (signed_disp) {
564 if (!is_int16(val)) {
565 PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
566 }
567 DCHECK(is_int16(val));
568 } else {
569 if (!is_uint16(val)) {
570 PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
571 ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
572 val, val, is_uint16(val), kImm16Mask);
573 }
574 DCHECK(is_uint16(val));
575 }
576 emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
577 }
578
579
580 void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
581 RCBit r) {
582 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
583 }
584
585
586 void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
587 OEBit o, RCBit r) {
588 emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
589 }
590
591
592 void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
593 int maskbit, RCBit r) {
594 int sh0_4 = shift & 0x1f;
595 int sh5 = (shift >> 5) & 0x1;
596 int m0_4 = maskbit & 0x1f;
597 int m5 = (maskbit >> 5) & 0x1;
598
599 emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
600 m5 * B5 | sh5 * B1 | r);
601 }
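// Worked encoding example (sketch): MD-form 64-bit rotates carry 6-bit
// SH and MB/ME fields, each split into 5 low bits plus 1 high bit.  For
// rldicl(ra, rs, 34, 3):
//
//   sh = 34  ->  sh0_4 = 34 & 0x1f = 2,  sh5 = (34 >> 5) & 1 = 1
//   mb = 3   ->  m0_4  = 3,              m5  = 0
//
// which md_form() packs as sh0_4 * B11 | m0_4 * B6 | m5 * B5 | sh5 * B1.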
602
603
604 void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
605 int maskbit, RCBit r) {
606 int m0_4 = maskbit & 0x1f;
607 int m5 = (maskbit >> 5) & 0x1;
608
609 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
610 m5 * B5 | r);
611 }
612
613
614 // Returns the next free trampoline entry.
615 int32_t Assembler::get_trampoline_entry() {
616 int32_t trampoline_entry = kInvalidSlotPos;
617
618 if (!internal_trampoline_exception_) {
619 trampoline_entry = trampoline_.take_slot();
620
621 if (kInvalidSlotPos == trampoline_entry) {
622 internal_trampoline_exception_ = true;
623 }
624 }
625 return trampoline_entry;
626 }
627
628
629 int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
630 int target_pos;
631 if (L->is_bound()) {
632 target_pos = L->pos();
633 } else {
634 if (L->is_linked()) {
635 target_pos = L->pos(); // L's link
636 } else {
637 // was: target_pos = kEndOfChain;
638 // However, using branch to self to mark the first reference
639 // should avoid most instances of branch offset overflow. See
640 // target_at() for where this is converted back to kEndOfChain.
641 target_pos = pc_offset();
642 if (!trampoline_emitted_) {
643 unbound_labels_count_++;
644 next_buffer_check_ -= kTrampolineSlotsSize;
645 }
646 }
647 L->link_to(pc_offset());
648 }
649
650 return target_pos - pc_offset();
651 }
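// Usage sketch (assumption: the Label-taking branch overloads in
// assembler-ppc.h are thin wrappers over this):
//
//   b(branch_offset(&L, false), LeaveLK);
//
// For an unbound L the first reference yields offset 0 -- a
// branch-to-self that target_at() decodes back to kEndOfChain -- while
// later references yield the delta to the previous link in L's chain.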
652
653
654 // Branch instructions.
655
656
657 void Assembler::bclr(BOfield bo, LKBit lk) {
658 positions_recorder()->WriteRecordedPositions();
659 emit(EXT1 | bo | BCLRX | lk);
660 }
661
662
663 void Assembler::bcctr(BOfield bo, LKBit lk) {
664 positions_recorder()->WriteRecordedPositions();
665 emit(EXT1 | bo | BCCTRX | lk);
666 }
667
668
669 // Pseudo op - branch to link register
670 void Assembler::blr() { bclr(BA, LeaveLK); }
671
672
673 // Pseudo op - branch to count register -- used for "jump"
674 void Assembler::bctr() { bcctr(BA, LeaveLK); }
675
676
677 void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
678 if (lk == SetLK) {
679 positions_recorder()->WriteRecordedPositions();
680 }
681 DCHECK(is_int16(branch_offset));
682 emit(BCX | bo | condition_bit * B16 | (kImm16Mask & branch_offset) | lk);
683 }
684
685
686 void Assembler::b(int branch_offset, LKBit lk) {
687 if (lk == SetLK) {
688 positions_recorder()->WriteRecordedPositions();
689 }
690 DCHECK((branch_offset & 3) == 0);
691 int imm26 = branch_offset;
692 DCHECK(is_int26(imm26));
693 // TODO: add AA and LK bits.
694 emit(BX | (imm26 & kImm26Mask) | lk);
695 }
696
697
698 void Assembler::xori(Register dst, Register src, const Operand& imm) {
699 d_form(XORI, src, dst, imm.imm_, false);
700 }
701
702
703 void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
704 d_form(XORIS, rs, ra, imm.imm_, false);
705 }
706
707
708 void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) {
709 x_form(EXT2 | XORX, dst, src1, src2, rc);
710 }
711
712
713 void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
714 x_form(EXT2 | CNTLZWX, ra, rs, r0, rc);
715 }
716
717
718 void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
719 x_form(EXT2 | ANDX, ra, rs, rb, rc);
720 }
721
722
723 void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
724 RCBit rc) {
725 sh &= 0x1f;
726 mb &= 0x1f;
727 me &= 0x1f;
728 emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
729 me << 1 | rc);
730 }
731
732
733 void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
734 RCBit rc) {
735 mb &= 0x1f;
736 me &= 0x1f;
737 emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
738 me << 1 | rc);
739 }
740
741
742 void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
743 RCBit rc) {
744 sh &= 0x1f;
745 mb &= 0x1f;
746 me &= 0x1f;
747 emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
748 me << 1 | rc);
749 }
750
751
752 void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
753 DCHECK((32 > val.imm_) && (val.imm_ >= 0));
754 rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc);
755 }
756
757
758 void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
759 DCHECK((32 > val.imm_) && (val.imm_ >= 0));
760 rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc);
761 }
762
763
764 void Assembler::clrrwi(Register dst, Register src, const Operand& val,
765 RCBit rc) {
766 DCHECK((32 > val.imm_) && (val.imm_ >= 0));
767 rlwinm(dst, src, 0, 0, 31 - val.imm_, rc);
768 }
769
770
771 void Assembler::clrlwi(Register dst, Register src, const Operand& val,
772 RCBit rc) {
773 DCHECK((32 > val.imm_) && (val.imm_ >= 0));
774 rlwinm(dst, src, 0, val.imm_, 31, rc);
775 }
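// Worked examples (sketch): for n = 8 the four pseudo-ops above reduce
// to rlwinm with the following rotate/mask parameters:
//
//   slwi(d, s, Operand(8))    ->  rlwinm(d, s, 8, 0, 23)    shift left 8
//   srwi(d, s, Operand(8))    ->  rlwinm(d, s, 24, 8, 31)   shift right 8
//   clrrwi(d, s, Operand(8))  ->  rlwinm(d, s, 0, 0, 23)    clear low 8 bits
//   clrlwi(d, s, Operand(8))  ->  rlwinm(d, s, 0, 8, 31)    clear high 8 bits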
776
777
778 void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) {
779 emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r);
780 }
781
782
783 void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) {
784 x_form(EXT2 | SRWX, dst, src1, src2, r);
785 }
786
787
788 void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) {
789 x_form(EXT2 | SLWX, dst, src1, src2, r);
790 }
791
792
793 void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) {
794 x_form(EXT2 | SRAW, ra, rs, rb, r);
795 }
796
797
798 void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
799 rlwnm(ra, rs, rb, 0, 31, r);
800 }
801
802
803 void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
804 rlwinm(ra, rs, sh, 0, 31, r);
805 }
806
807
808 void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
809 rlwinm(ra, rs, 32 - sh, 0, 31, r);
810 }
811
812
813 void Assembler::subi(Register dst, Register src, const Operand& imm) {
814 addi(dst, src, Operand(-(imm.imm_)));
815 }
816
817 void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
818 RCBit r) {
819 xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
820 }
821
822
823 void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
824 // a special xo_form
825 emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
826 }
827
828
829 void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
830 RCBit r) {
831 xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
832 }
833
834
835 void Assembler::subfc(Register dst, Register src1, Register src2, OEBit o,
836 RCBit r) {
837 xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
838 }
839
840
841 void Assembler::subfic(Register dst, Register src, const Operand& imm) {
842 d_form(SUBFIC, dst, src, imm.imm_, true);
843 }
844
845
846 void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
847 RCBit r) {
848 xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
849 }
850
851
852 // Multiply low word
853 void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
854 RCBit r) {
855 xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
856 }
857
858
859 // Multiply hi word
860 void Assembler::mulhw(Register dst, Register src1, Register src2, OEBit o,
861 RCBit r) {
862 xo_form(EXT2 | MULHWX, dst, src1, src2, o, r);
863 }
864
865
866 // Divide word
867 void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
868 RCBit r) {
869 xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
870 }
871
872
873 void Assembler::addi(Register dst, Register src, const Operand& imm) {
874 DCHECK(!src.is(r0)); // use li instead to show intent
875 d_form(ADDI, dst, src, imm.imm_, true);
876 }
877
878
879 void Assembler::addis(Register dst, Register src, const Operand& imm) {
880 DCHECK(!src.is(r0)); // use lis instead to show intent
881 d_form(ADDIS, dst, src, imm.imm_, true);
882 }
883
884
885 void Assembler::addic(Register dst, Register src, const Operand& imm) {
886 d_form(ADDIC, dst, src, imm.imm_, true);
887 }
888
889
890 void Assembler::andi(Register ra, Register rs, const Operand& imm) {
891 d_form(ANDIx, rs, ra, imm.imm_, false);
892 }
893
894
895 void Assembler::andis(Register ra, Register rs, const Operand& imm) {
896 d_form(ANDISx, rs, ra, imm.imm_, false);
897 }
898
899
900 void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) {
901 x_form(EXT2 | NORX, dst, src1, src2, r);
902 }
903
904
905 void Assembler::notx(Register dst, Register src, RCBit r) {
906 x_form(EXT2 | NORX, dst, src, src, r);
907 }
908
909
910 void Assembler::ori(Register ra, Register rs, const Operand& imm) {
911 d_form(ORI, rs, ra, imm.imm_, false);
912 }
913
914
915 void Assembler::oris(Register dst, Register src, const Operand& imm) {
916 d_form(ORIS, src, dst, imm.imm_, false);
917 }
918
919
920 void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
921 x_form(EXT2 | ORX, dst, src1, src2, rc);
922 }
923
924
925 void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
926 intptr_t imm16 = src2.imm_;
927 #if V8_TARGET_ARCH_PPC64
928 int L = 1;
929 #else
930 int L = 0;
931 #endif
932 DCHECK(is_int16(imm16));
933 DCHECK(cr.code() >= 0 && cr.code() <= 7);
934 imm16 &= kImm16Mask;
935 emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
936 }
937
938
939 void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
940 uintptr_t uimm16 = src2.imm_;
941 #if V8_TARGET_ARCH_PPC64
942 int L = 1;
943 #else
944 int L = 0;
945 #endif
946 DCHECK(is_uint16(uimm16));
947 DCHECK(cr.code() >= 0 && cr.code() <= 7);
948 uimm16 &= kImm16Mask;
949 emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
950 }
951
952
953 void Assembler::cmp(Register src1, Register src2, CRegister cr) {
954 #if V8_TARGET_ARCH_PPC64
955 int L = 1;
956 #else
957 int L = 0;
958 #endif
959 DCHECK(cr.code() >= 0 && cr.code() <= 7);
960 emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
961 src2.code() * B11);
962 }
963
964
965 void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
966 #if V8_TARGET_ARCH_PPC64
967 int L = 1;
968 #else
969 int L = 0;
970 #endif
971 DCHECK(cr.code() >= 0 && cr.code() <= 7);
972 emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
973 src2.code() * B11);
974 }
975
976
977 void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
978 intptr_t imm16 = src2.imm_;
979 int L = 0;
980 DCHECK(is_int16(imm16));
981 DCHECK(cr.code() >= 0 && cr.code() <= 7);
982 imm16 &= kImm16Mask;
983 emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
984 }
985
986
987 void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
988 uintptr_t uimm16 = src2.imm_;
989 int L = 0;
990 DCHECK(is_uint16(uimm16));
991 DCHECK(cr.code() >= 0 && cr.code() <= 7);
992 uimm16 &= kImm16Mask;
993 emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
994 }
995
996
997 void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
998 int L = 0;
999 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1000 emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
1001 src2.code() * B11);
1002 }
1003
1004
1005 void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
1006 int L = 0;
1007 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1008 emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
1009 src2.code() * B11);
1010 }
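// Summary (sketch): cmpi/cmpli/cmp/cmpl compare at full register width
// (the L field is 1 on PPC64), while the w-suffixed variants pin L to 0
// for a 32-bit compare on either target:
//
//   cmp(r3, r4);   // pointer-width signed compare
//   cmpw(r3, r4);  // 32-bit signed compare, even on PPC64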
1011
1012
1013 // Pseudo op - load immediate
1014 void Assembler::li(Register dst, const Operand& imm) {
1015 d_form(ADDI, dst, r0, imm.imm_, true);
1016 }
1017
1018
1019 void Assembler::lis(Register dst, const Operand& imm) {
1020 d_form(ADDIS, dst, r0, imm.imm_, true);
1021 }
1022
1023
1024 // Pseudo op - move register
1025 void Assembler::mr(Register dst, Register src) {
1026 // actually or(dst, src, src)
1027 orx(dst, src, src);
1028 }
1029
1030
1031 void Assembler::lbz(Register dst, const MemOperand& src) {
1032 DCHECK(!src.ra_.is(r0));
1033 d_form(LBZ, dst, src.ra(), src.offset(), true);
1034 }
1035
1036
1037 void Assembler::lbzx(Register rt, const MemOperand& src) {
1038 Register ra = src.ra();
1039 Register rb = src.rb();
1040 DCHECK(!ra.is(r0));
1041 emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1042 LeaveRC);
1043 }
1044
1045
1046 void Assembler::lbzux(Register rt, const MemOperand& src) {
1047 Register ra = src.ra();
1048 Register rb = src.rb();
1049 DCHECK(!ra.is(r0));
1050 emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1051 LeaveRC);
1052 }
1053
1054
1055 void Assembler::lhz(Register dst, const MemOperand& src) {
1056 DCHECK(!src.ra_.is(r0));
1057 d_form(LHZ, dst, src.ra(), src.offset(), true);
1058 }
1059
1060
1061 void Assembler::lhzx(Register rt, const MemOperand& src) {
1062 Register ra = src.ra();
1063 Register rb = src.rb();
1064 DCHECK(!ra.is(r0));
1065 emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1066 LeaveRC);
1067 }
1068
1069
1070 void Assembler::lhzux(Register rt, const MemOperand& src) {
1071 Register ra = src.ra();
1072 Register rb = src.rb();
1073 DCHECK(!ra.is(r0));
1074 emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1075 LeaveRC);
1076 }
1077
1078
1079 void Assembler::lwz(Register dst, const MemOperand& src) {
1080 DCHECK(!src.ra_.is(r0));
1081 d_form(LWZ, dst, src.ra(), src.offset(), true);
1082 }
1083
1084
1085 void Assembler::lwzu(Register dst, const MemOperand& src) {
1086 DCHECK(!src.ra_.is(r0));
1087 d_form(LWZU, dst, src.ra(), src.offset(), true);
1088 }
1089
1090
1091 void Assembler::lwzx(Register rt, const MemOperand& src) {
1092 Register ra = src.ra();
1093 Register rb = src.rb();
1094 DCHECK(!ra.is(r0));
1095 emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1096 LeaveRC);
1097 }
1098
1099
1100 void Assembler::lwzux(Register rt, const MemOperand& src) {
1101 Register ra = src.ra();
1102 Register rb = src.rb();
1103 DCHECK(!ra.is(r0));
1104 emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1105 LeaveRC);
1106 }
1107
1108
1109 void Assembler::lwa(Register dst, const MemOperand& src) {
1110 #if V8_TARGET_ARCH_PPC64
1111 int offset = src.offset();
1112 DCHECK(!src.ra_.is(r0));
1113 DCHECK(!(offset & 3) && is_int16(offset));
1114 offset = kImm16Mask & offset;
1115 emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
1116 #else
1117 lwz(dst, src);
1118 #endif
1119 }
1120
1121
1122 void Assembler::stb(Register dst, const MemOperand& src) {
1123 DCHECK(!src.ra_.is(r0));
1124 d_form(STB, dst, src.ra(), src.offset(), true);
1125 }
1126
1127
1128 void Assembler::stbx(Register rs, const MemOperand& src) {
1129 Register ra = src.ra();
1130 Register rb = src.rb();
1131 DCHECK(!ra.is(r0));
1132 emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1133 LeaveRC);
1134 }
1135
1136
1137 void Assembler::stbux(Register rs, const MemOperand& src) {
1138 Register ra = src.ra();
1139 Register rb = src.rb();
1140 DCHECK(!ra.is(r0));
1141 emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1142 LeaveRC);
1143 }
1144
1145
1146 void Assembler::sth(Register dst, const MemOperand& src) {
1147 DCHECK(!src.ra_.is(r0));
1148 d_form(STH, dst, src.ra(), src.offset(), true);
1149 }
1150
1151
1152 void Assembler::sthx(Register rs, const MemOperand& src) {
1153 Register ra = src.ra();
1154 Register rb = src.rb();
1155 DCHECK(!ra.is(r0));
1156 emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1157 LeaveRC);
1158 }
1159
1160
1161 void Assembler::sthux(Register rs, const MemOperand& src) {
1162 Register ra = src.ra();
1163 Register rb = src.rb();
1164 DCHECK(!ra.is(r0));
1165 emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1166 LeaveRC);
1167 }
1168
1169
1170 void Assembler::stw(Register dst, const MemOperand& src) {
1171 DCHECK(!src.ra_.is(r0));
1172 d_form(STW, dst, src.ra(), src.offset(), true);
1173 }
1174
1175
1176 void Assembler::stwu(Register dst, const MemOperand& src) {
1177 DCHECK(!src.ra_.is(r0));
1178 d_form(STWU, dst, src.ra(), src.offset(), true);
1179 }
1180
1181
1182 void Assembler::stwx(Register rs, const MemOperand& src) {
1183 Register ra = src.ra();
1184 Register rb = src.rb();
1185 DCHECK(!ra.is(r0));
1186 emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1187 LeaveRC);
1188 }
1189
1190
1191 void Assembler::stwux(Register rs, const MemOperand& src) {
1192 Register ra = src.ra();
1193 Register rb = src.rb();
1194 DCHECK(!ra.is(r0));
1195 emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1196 LeaveRC);
1197 }
1198
1199
1200 void Assembler::extsb(Register rs, Register ra, RCBit rc) {
1201 emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc);
1202 }
1203
1204
1205 void Assembler::extsh(Register rs, Register ra, RCBit rc) {
1206 emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc);
1207 }
1208
1209
1210 void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
1211 emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
1212 }
1213
1214
1215 void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
1216 x_form(EXT2 | ANDCX, dst, src1, src2, rc);
1217 }
1218
1219
1220 #if V8_TARGET_ARCH_PPC64
1221 // 64bit specific instructions
1222 void Assembler::ld(Register rd, const MemOperand& src) {
1223 int offset = src.offset();
1224 DCHECK(!src.ra_.is(r0));
1225 DCHECK(!(offset & 3) && is_int16(offset));
1226 offset = kImm16Mask & offset;
1227 emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
1228 }
1229
1230
1231 void Assembler::ldx(Register rd, const MemOperand& src) {
1232 Register ra = src.ra();
1233 Register rb = src.rb();
1234 DCHECK(!ra.is(r0));
1235 emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
1236 }
1237
1238
1239 void Assembler::ldu(Register rd, const MemOperand& src) {
1240 int offset = src.offset();
1241 DCHECK(!src.ra_.is(r0));
1242 DCHECK(!(offset & 3) && is_int16(offset));
1243 offset = kImm16Mask & offset;
1244 emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
1245 }
1246
1247
1248 void Assembler::ldux(Register rd, const MemOperand& src) {
1249 Register ra = src.ra();
1250 Register rb = src.rb();
1251 DCHECK(!ra.is(r0));
1252 emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
1253 }
1254
1255
1256 void Assembler::std(Register rs, const MemOperand& src) {
1257 int offset = src.offset();
1258 DCHECK(!src.ra_.is(r0));
1259 DCHECK(!(offset & 3) && is_int16(offset));
1260 offset = kImm16Mask & offset;
1261 emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
1262 }
1263
1264
1265 void Assembler::stdx(Register rs, const MemOperand& src) {
1266 Register ra = src.ra();
1267 Register rb = src.rb();
1268 DCHECK(!ra.is(r0));
1269 emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
1270 }
1271
1272
1273 void Assembler::stdu(Register rs, const MemOperand& src) {
1274 int offset = src.offset();
1275 DCHECK(!src.ra_.is(r0));
1276 DCHECK(!(offset & 3) && is_int16(offset));
1277 offset = kImm16Mask & offset;
1278 emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
1279 }
1280
1281
1282 void Assembler::stdux(Register rs, const MemOperand& src) {
1283 Register ra = src.ra();
1284 Register rb = src.rb();
1285 DCHECK(!ra.is(r0));
1286 emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
1287 }
1288
1289
1290 void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
1291 md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
1292 }
1293
1294
1295 void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
1296 md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
1297 }
1298
1299
1300 void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
1301 mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
1302 }
1303
1304
1305 void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
1306 md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
1307 }
1308
1309
1310 void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
1311 DCHECK((64 > val.imm_) && (val.imm_ >= 0));
1312 rldicr(dst, src, val.imm_, 63 - val.imm_, rc);
1313 }
1314
1315
1316 void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
1317 DCHECK((64 > val.imm_) && (val.imm_ >= 0));
1318 rldicl(dst, src, 64 - val.imm_, val.imm_, rc);
1319 }
1320
1321
1322 void Assembler::clrrdi(Register dst, Register src, const Operand& val,
1323 RCBit rc) {
1324 DCHECK((64 > val.imm_) && (val.imm_ >= 0));
1325 rldicr(dst, src, 0, 63 - val.imm_, rc);
1326 }
1327
1328
1329 void Assembler::clrldi(Register dst, Register src, const Operand& val,
1330 RCBit rc) {
1331 DCHECK((64 > val.imm_) && (val.imm_ >= 0));
1332 rldicl(dst, src, 0, val.imm_, rc);
1333 }
1334
1335
1336 void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
1337 md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
1338 }
1339
1340
1341 void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
1342 int sh0_4 = sh & 0x1f;
1343 int sh5 = (sh >> 5) & 0x1;
1344
1345 emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
1346 sh5 * B1 | r);
1347 }
1348
1349
1350 void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
1351 x_form(EXT2 | SRDX, dst, src1, src2, r);
1352 }
1353
1354
1355 void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
1356 x_form(EXT2 | SLDX, dst, src1, src2, r);
1357 }
1358
1359
1360 void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
1361 x_form(EXT2 | SRAD, ra, rs, rb, r);
1362 }
1363
1364
1365 void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
1366 rldcl(ra, rs, rb, 0, r);
1367 }
1368
1369
1370 void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
1371 rldicl(ra, rs, sh, 0, r);
1372 }
1373
1374
1375 void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
1376 rldicl(ra, rs, 64 - sh, 0, r);
1377 }
1378
1379
1380 void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
1381 x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
1382 }
1383
1384
1385 void Assembler::extsw(Register rs, Register ra, RCBit rc) {
1386 emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
1387 }
1388
1389
1390 void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
1391 RCBit r) {
1392 xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
1393 }
1394
1395
1396 void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
1397 RCBit r) {
1398 xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
1399 }
1400 #endif
1401
1402
1403 void Assembler::fake_asm(enum FAKE_OPCODE_T fopcode) {
1404 DCHECK(fopcode < fLastFaker);
1405 emit(FAKE_OPCODE | FAKER_SUBOPCODE | fopcode);
1406 }
1407
1408
1409 // Function descriptor for AIX.
1410 // Code address skips the function descriptor "header".
1411 // TOC and static chain are ignored and set to 0.
1412 void Assembler::function_descriptor() {
1413 DCHECK(pc_offset() == 0);
1414 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
1415 emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize);
1416 emit_ptr(0);
1417 emit_ptr(0);
1418 }
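// Resulting in-memory layout (from the three emit_ptr() calls above):
//
//   fd[0]  entry address = code start + 3 * kPointerSize
//   fd[1]  TOC pointer   = 0 (unused here)
//   fd[2]  environment   = 0 (unused here)
//
// RelocateInternalReference() below identifies a descriptor by fd[1] and
// fd[2] both being zero.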
1419
1420
1421 #if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
1422 void Assembler::RelocateInternalReference(Address pc, intptr_t delta,
1423 Address code_start,
1424 ICacheFlushMode icache_flush_mode) {
1425 DCHECK(delta || code_start);
1426 #if ABI_USES_FUNCTION_DESCRIPTORS
1427 uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
1428 if (fd[1] == 0 && fd[2] == 0) {
1429 // Function descriptor
1430 if (delta) {
1431 fd[0] += delta;
1432 } else {
1433 fd[0] = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize;
1434 }
1435 return;
1436 }
1437 #endif
1438 #if V8_OOL_CONSTANT_POOL
1439 // mov for LoadConstantPoolPointerRegister
1440 ConstantPoolArray* constant_pool = NULL;
1441 if (delta) {
1442 code_start = target_address_at(pc, constant_pool) + delta;
1443 }
1444 set_target_address_at(pc, constant_pool, code_start, icache_flush_mode);
1445 #endif
1446 }
1447
1448
1449 int Assembler::DecodeInternalReference(Vector<char> buffer, Address pc) {
1450 #if ABI_USES_FUNCTION_DESCRIPTORS
1451 uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
1452 if (fd[1] == 0 && fd[2] == 0) {
1453 // Function descriptor
1454 SNPrintF(buffer, "[%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
1455 "]"
1456 " function descriptor",
1457 fd[0], fd[1], fd[2]);
1458 return kPointerSize * 3;
1459 }
1460 #endif
1461 return 0;
1462 }
1463 #endif
1464
1465
1466 int Assembler::instructions_required_for_mov(const Operand& x) const {
1467 #if V8_OOL_CONSTANT_POOL || DEBUG
1468 bool canOptimize =
1469 !(x.must_output_reloc_info(this) || is_trampoline_pool_blocked());
1470 #endif
1471 #if V8_OOL_CONSTANT_POOL
1472 if (use_constant_pool_for_mov(x, canOptimize)) {
1473 // Current usage guarantees that all constant pool references can
1474 // use the same sequence.
1475 return kMovInstructionsConstantPool;
1476 }
1477 #endif
1478 DCHECK(!canOptimize);
1479 return kMovInstructionsNoConstantPool;
1480 }
1481
1482
1483 #if V8_OOL_CONSTANT_POOL
1484 bool Assembler::use_constant_pool_for_mov(const Operand& x,
1485 bool canOptimize) const {
1486 if (!is_constant_pool_available() || is_constant_pool_full()) {
1487 // If there is no constant pool available, we must use a mov
1488 // immediate sequence.
1489 return false;
1490 }
1491
1492 intptr_t value = x.immediate();
1493 if (canOptimize && is_int16(value)) {
1494 // Prefer a single-instruction load-immediate.
1495 return false;
1496 }
1497
1498 return true;
1499 }
1500
1501
1502 void Assembler::EnsureSpaceFor(int space_needed) {
1503 if (buffer_space() <= (kGap + space_needed)) {
1504 GrowBuffer();
1505 }
1506 }
1507 #endif
1508
1509
1510 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
1511 if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
1512 if (assembler != NULL && assembler->predictable_code_size()) return true;
1513 return assembler->serializer_enabled();
1514 } else if (RelocInfo::IsNone(rmode_)) {
1515 return false;
1516 }
1517 return true;
1518 }
1519
1520
1521 // Primarily used for loading constants.
1522 // This should really move to the macro-assembler, as it is
1523 // really a pseudo instruction.
1524 // Some usages of this intend for a FIXED_SEQUENCE to be used.
1525 // TODO: break this dependency so we can optimize mov() in general
1526 // and only use the generic version when we require a fixed sequence.
1527 void Assembler::mov(Register dst, const Operand& src) {
1528 intptr_t value = src.immediate();
1529 bool canOptimize;
1530 RelocInfo rinfo(pc_, src.rmode_, value, NULL);
1531
1532 if (src.must_output_reloc_info(this)) {
1533 RecordRelocInfo(rinfo);
1534 }
1535
1536 canOptimize =
1537 !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
1538
1539 #if V8_OOL_CONSTANT_POOL
1540 if (use_constant_pool_for_mov(src, canOptimize)) {
1541 DCHECK(is_constant_pool_available());
1542 ConstantPoolAddEntry(rinfo);
1543 #if V8_TARGET_ARCH_PPC64
1544 BlockTrampolinePoolScope block_trampoline_pool(this);
1545 // We are forced to use 2 instruction sequence since the constant
1546 // pool pointer is tagged.
1547 li(dst, Operand::Zero());
1548 ldx(dst, MemOperand(kConstantPoolRegister, dst));
1549 #else
1550 lwz(dst, MemOperand(kConstantPoolRegister, 0));
1551 #endif
1552 return;
1553 }
1554 #endif
1555
1556 if (canOptimize) {
1557 if (is_int16(value)) {
1558 li(dst, Operand(value));
1559 } else {
1560 uint16_t u16;
1561 #if V8_TARGET_ARCH_PPC64
1562 if (is_int32(value)) {
1563 #endif
1564 lis(dst, Operand(value >> 16));
1565 #if V8_TARGET_ARCH_PPC64
1566 } else {
1567 if (is_int48(value)) {
1568 li(dst, Operand(value >> 32));
1569 } else {
1570 lis(dst, Operand(value >> 48));
1571 u16 = ((value >> 32) & 0xffff);
1572 if (u16) {
1573 ori(dst, dst, Operand(u16));
1574 }
1575 }
1576 sldi(dst, dst, Operand(32));
1577 u16 = ((value >> 16) & 0xffff);
1578 if (u16) {
1579 oris(dst, dst, Operand(u16));
1580 }
1581 }
1582 #endif
1583 u16 = (value & 0xffff);
1584 if (u16) {
1585 ori(dst, dst, Operand(u16));
1586 }
1587 }
1588 return;
1589 }
1590
1591 DCHECK(!canOptimize);
1592
1593 {
1594 BlockTrampolinePoolScope block_trampoline_pool(this);
1595 #if V8_TARGET_ARCH_PPC64
1596 int32_t hi_32 = static_cast<int32_t>(value >> 32);
1597 int32_t lo_32 = static_cast<int32_t>(value);
1598 int hi_word = static_cast<int>(hi_32 >> 16);
1599 int lo_word = static_cast<int>(hi_32 & 0xffff);
1600 lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
1601 ori(dst, dst, Operand(lo_word));
1602 sldi(dst, dst, Operand(32));
1603 hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
1604 lo_word = static_cast<int>(lo_32 & 0xffff);
1605 oris(dst, dst, Operand(hi_word));
1606 ori(dst, dst, Operand(lo_word));
1607 #else
1608 int hi_word = static_cast<int>(value >> 16);
1609 int lo_word = static_cast<int>(value & 0xffff);
1610 lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
1611 ori(dst, dst, Operand(lo_word));
1612 #endif
1613 }
1614 }
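// Worked examples (sketch) of the optimized paths above:
//
//   mov(r3, Operand(-2));          // li  r3, -2
//   mov(r3, Operand(0x12345678));  // lis r3, 0x1234
//                                  // ori r3, r3, 0x5678
//
// When a fixed sequence is required (!canOptimize), a 64-bit value is
// always emitted as the five-instruction lis/ori/sldi/oris/ori pattern
// that Is64BitLoadIntoR12() above recognizes.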
1615
1616
1617 void Assembler::mov_label_offset(Register dst, Label* label) {
1618 if (label->is_bound()) {
1619 int target = label->pos();
1620 mov(dst, Operand(target + Code::kHeaderSize - kHeapObjectTag));
1621 } else {
1622 bool is_linked = label->is_linked();
1623 // Emit the link to the label in the code stream followed by extra
1624 // nop instructions.
1625 DCHECK(dst.is(r3)); // target_at_put assumes r3 for now
1626 int link = is_linked ? label->pos() - pc_offset() : 0;
1627 label->link_to(pc_offset());
1628
1629 if (!is_linked && !trampoline_emitted_) {
1630 unbound_labels_count_++;
1631 next_buffer_check_ -= kTrampolineSlotsSize;
1632 }
1633
1634 // When the label is bound, these instructions will be patched
1635 // with a 2 instruction mov sequence that will load the
1636 // destination register with the position of the label from the
1637 // beginning of the code.
1638 //
1639 // When the label gets bound: target_at extracts the link and
1640 // target_at_put patches the instructions.
1641 BlockTrampolinePoolScope block_trampoline_pool(this);
1642 emit(link);
1643 nop();
1644 }
1645 }
1646
1647
1648 // Special register instructions
1649 void Assembler::crxor(int bt, int ba, int bb) {
1650 emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
1651 }
1652
1653
1654 void Assembler::creqv(int bt, int ba, int bb) {
1655 emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
1656 }
1657
1658
1659 void Assembler::mflr(Register dst) {
1660 emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11); // Ignore RC bit
1661 }
1662
1663
1664 void Assembler::mtlr(Register src) {
1665 emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11); // Ignore RC bit
1666 }
1667
1668
1669 void Assembler::mtctr(Register src) {
1670 emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11); // Ignore RC bit
1671 }
1672
1673
1674 void Assembler::mtxer(Register src) {
1675 emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
1676 }
1677
1678
1679 void Assembler::mcrfs(int bf, int bfa) {
1680 emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
1681 }
1682
1683
1684 void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
1685
1686
1687 #if V8_TARGET_ARCH_PPC64
1688 void Assembler::mffprd(Register dst, DoubleRegister src) {
1689 emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
1690 }
1691
1692
1693 void Assembler::mffprwz(Register dst, DoubleRegister src) {
1694 emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
1695 }
1696
1697
1698 void Assembler::mtfprd(DoubleRegister dst, Register src) {
1699 emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
1700 }
1701
1702
1703 void Assembler::mtfprwz(DoubleRegister dst, Register src) {
1704 emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
1705 }
1706
1707
1708 void Assembler::mtfprwa(DoubleRegister dst, Register src) {
1709 emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
1710 }
1711 #endif
1712
1713
1714 // Exception-generating instructions and debugging support.
1715 // Stops with a non-negative code less than kNumOfWatchedStops support
1716 // enabling/disabling and a counter feature. See simulator-ppc.h.
1717 void Assembler::stop(const char* msg, Condition cond, int32_t code,
1718 CRegister cr) {
1719 if (cond != al) {
1720 Label skip;
1721 b(NegateCondition(cond), &skip, cr);
1722 bkpt(0);
1723 bind(&skip);
1724 } else {
1725 bkpt(0);
1726 }
1727 }
1728
1729
1730 void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }
1731
1732
1733 void Assembler::dcbf(Register ra, Register rb) {
1734 emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
1735 }
1736
1737
1738 void Assembler::sync() { emit(EXT2 | SYNC); }
1739
1740
1741 void Assembler::icbi(Register ra, Register rb) {
1742 emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
1743 }
1744
1745
1746 void Assembler::isync() { emit(EXT1 | ISYNC); }
1747
1748
1749 // Floating point support
1750
1751 void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
1752 int offset = src.offset();
1753 Register ra = src.ra();
1754 DCHECK(is_int16(offset));
1755 int imm16 = offset & kImm16Mask;
1756 // could be x_form instruction with some casting magic
1757 emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
1758 }
1759
1760
1761 void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
1762 int offset = src.offset();
1763 Register ra = src.ra();
1764 DCHECK(is_int16(offset));
1765 int imm16 = offset & kImm16Mask;
1766 // could be x_form instruction with some casting magic
1767 emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
1768 }
1769
1770
1771 void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) {
1772 Register ra = src.ra();
1773 Register rb = src.rb();
1774 DCHECK(!ra.is(r0));
1775 emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1776 LeaveRC);
1777 }
1778
1779
1780 void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) {
1781 Register ra = src.ra();
1782 Register rb = src.rb();
1783 DCHECK(!ra.is(r0));
1784 emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1785 LeaveRC);
1786 }
1787
1788
1789 void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
1790 int offset = src.offset();
1791 Register ra = src.ra();
1792 DCHECK(is_int16(offset));
1793 DCHECK(!ra.is(r0));
1794 int imm16 = offset & kImm16Mask;
1795 // could be x_form instruction with some casting magic
1796 emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
1797 }
1798
1799
1800 void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
1801 int offset = src.offset();
1802 Register ra = src.ra();
1803 DCHECK(is_int16(offset));
1804 DCHECK(!ra.is(r0));
1805 int imm16 = offset & kImm16Mask;
1806 // could be x_form instruction with some casting magic
1807 emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
1808 }
1809
1810
1811 void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) {
1812 Register ra = src.ra();
1813 Register rb = src.rb();
1814 DCHECK(!ra.is(r0));
1815 emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1816 LeaveRC);
1817 }
1818
1819
1820 void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) {
1821 Register ra = src.ra();
1822 Register rb = src.rb();
1823 DCHECK(!ra.is(r0));
1824 emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1825 LeaveRC);
1826 }
1827
1828
1829 void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
1830 int offset = src.offset();
1831 Register ra = src.ra();
1832 DCHECK(is_int16(offset));
1833 DCHECK(!ra.is(r0));
1834 int imm16 = offset & kImm16Mask;
1835 // could be x_form instruction with some casting magic
1836 emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
1837 }
1838
1839
1840 void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
1841 int offset = src.offset();
1842 Register ra = src.ra();
1843 DCHECK(is_int16(offset));
1844 DCHECK(!ra.is(r0));
1845 int imm16 = offset & kImm16Mask;
1846 // could be x_form instruction with some casting magic
1847 emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
1848 }
1849
1850
1851 void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) {
1852 Register ra = src.ra();
1853 Register rb = src.rb();
1854 DCHECK(!ra.is(r0));
1855 emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1856 LeaveRC);
1857 }
1858
1859
1860 void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) {
1861 Register ra = src.ra();
1862 Register rb = src.rb();
1863 DCHECK(!ra.is(r0));
1864 emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1865 LeaveRC);
1866 }
1867
1868
1869 void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
1870 int offset = src.offset();
1871 Register ra = src.ra();
1872 DCHECK(is_int16(offset));
1873 DCHECK(!ra.is(r0));
1874 int imm16 = offset & kImm16Mask;
1875 // could be x_form instruction with some casting magic
1876 emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
1877 }
1878
1879
1880 void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
1881 int offset = src.offset();
1882 Register ra = src.ra();
1883 DCHECK(is_int16(offset));
1884 DCHECK(!ra.is(r0));
1885 int imm16 = offset & kImm16Mask;
1886 // could be x_form instruction with some casting magic
1887 emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
1888 }
1889
1890
1891 void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) {
1892 Register ra = src.ra();
1893 Register rb = src.rb();
1894 DCHECK(!ra.is(r0));
1895 emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1896 LeaveRC);
1897 }
1898
1899
1900 void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) {
1901 Register ra = src.ra();
1902 Register rb = src.rb();
1903 DCHECK(!ra.is(r0));
1904 emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
1905 LeaveRC);
1906 }
1907
1908
1909 void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
1910 const DoubleRegister frb, RCBit rc) {
1911 a_form(EXT4 | FSUB, frt, fra, frb, rc);
1912 }
1913
1914
1915 void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
1916 const DoubleRegister frb, RCBit rc) {
1917 a_form(EXT4 | FADD, frt, fra, frb, rc);
1918 }
1919
1920
1921 void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
1922 const DoubleRegister frc, RCBit rc) {
1923 emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
1924 rc);
1925 }
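// Note: unlike fsub/fadd/fdiv, fmul is emitted directly rather than via the
// a_form() helper, presumably because a_form() places its third operand in
// the FRB field (B11) while fmul's multiplier belongs in FRC (B6); the
// A-form layout is
//   | opcode(6) | FRT(5) | FRA(5) | FRB(5) | FRC(5) | XO(5) | Rc(1) |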
1926
1927
1928 void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
1929 const DoubleRegister frb, RCBit rc) {
1930 a_form(EXT4 | FDIV, frt, fra, frb, rc);
1931 }
1932
1933
1934 void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
1935 CRegister cr) {
1936 DCHECK(cr.code() >= 0 && cr.code() <= 7);
1937 emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
1938 }
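// fcmpu is an unordered floating-point compare of fra and frb; the four-way
// result (FL, FG, FE, FU) lands in condition register field cr, whose 3-bit
// BF field the cr.code() * B23 term places at bits 25..23.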
1939
1940
1941 void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
1942 RCBit rc) {
1943 emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
1944 }
1945
1946
1947 void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
1948 emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
1949 }
1950
1951
1952 void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
1953 emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
1954 }
1955
1956
1957 void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb) {
1958 emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11);
1959 }
1960
1961
1962 void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
1963 RCBit rc) {
1964 emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
1965 }
1966
1967
1968 void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
1969 RCBit rc) {
1970 emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
1971 }
1972
1973
1974 void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
1975 RCBit rc) {
1976 emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
1977 }
1978
1979
1980 void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
1981 RCBit rc) {
1982 emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
1983 }
1984
1985
1986 void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
1987 const DoubleRegister frc, const DoubleRegister frb,
1988 RCBit rc) {
1989 emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
1990 frc.code() * B6 | rc);
1991 }
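// fsel is a branchless select: frt = (fra >= 0.0) ? frc : frb, with a NaN
// in fra selecting frb. It is not fully IEEE-aware (e.g. -0.0 compares as
// >= 0.0), so callers need care with signed zeros and NaNs.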
1992
1993
1994 void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
1995 RCBit rc) {
1996 emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
1997 }
1998
1999
2000 void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
2001 emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
2002 }
2003
2004
2005 void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
2006 emit(EXT4 | MFFS | frt.code() * B21 | rc);
2007 }
2008
2009
2010 void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
2011 RCBit rc) {
2012 emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
2013 }
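// Sketch of the mtfsf fields (from the Power ISA; not asserted here): FLM
// is an 8-bit mask selecting which 4-bit FPSCR fields are updated from frb;
// with L set, the whole FPSCR is updated and FLM is ignored; W selects the
// upper word of the 64-bit FPSCR on implementations that have one.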
2014
2015
2016 void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
2017 RCBit rc) {
2018 emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
2019 }
2020
2021
2022 void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
2023 RCBit rc) {
2024 emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
2025 }
2026
2027
2028 void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
2029 const DoubleRegister frc, const DoubleRegister frb,
2030 RCBit rc) {
2031 emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
2032 frc.code() * B6 | rc);
2033 }
2034
2035
2036 void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
2037 const DoubleRegister frc, const DoubleRegister frb,
2038 RCBit rc) {
2039 emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
2040 frc.code() * B6 | rc);
2041 }
2042
2043
2044 // Pseudo instructions.
2045 void Assembler::nop(int type) {
2046 switch (type) {
2047 case 0:
2048 ori(r0, r0, Operand::Zero());
2049 break;
2050 case DEBUG_BREAK_NOP:
2051 ori(r3, r3, Operand::Zero());
2052 break;
2053 default:
2054 UNIMPLEMENTED();
2055 }
2056 }
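// ori r0, r0, 0 is the canonical PowerPC no-op, while ori r3, r3, 0 is a
// distinguishable nop used to mark a debug-break slot; IsNop() below keys
// off the register number to tell the two apart.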
2057
2058
2059 bool Assembler::IsNop(Instr instr, int type) {
2060 DCHECK((0 == type) || (DEBUG_BREAK_NOP == type));
2061 int reg = 0;
2062 if (DEBUG_BREAK_NOP == type) {
2063 reg = 3;
2064 }
2065 return instr == (ORI | reg * B21 | reg * B16);
2066 }
2067
2068
2069 // Debugging.
2070 void Assembler::RecordJSReturn() {
2071 positions_recorder()->WriteRecordedPositions();
2072 CheckBuffer();
2073 RecordRelocInfo(RelocInfo::JS_RETURN);
2074 }
2075
2076
2077 void Assembler::RecordDebugBreakSlot() {
2078 positions_recorder()->WriteRecordedPositions();
2079 CheckBuffer();
2080 RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
2081 }
2082
2083
2084 void Assembler::RecordComment(const char* msg) {
2085 if (FLAG_code_comments) {
2086 CheckBuffer();
2087 RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
2088 }
2089 }
2090
2091
2092 void Assembler::GrowBuffer() {
2093 if (!own_buffer_) FATAL("external code buffer is too small");
2094
2095 // Compute new buffer size.
2096 CodeDesc desc; // the new buffer
2097 if (buffer_size_ < 4 * KB) {
2098 desc.buffer_size = 4 * KB;
2099 } else if (buffer_size_ < 1 * MB) {
2100 desc.buffer_size = 2 * buffer_size_;
2101 } else {
2102 desc.buffer_size = buffer_size_ + 1 * MB;
2103 }
2104 CHECK_GT(desc.buffer_size, 0); // no overflow
2105
2106 // Set up new buffer.
2107 desc.buffer = NewArray<byte>(desc.buffer_size);
2108
2109 desc.instr_size = pc_offset();
2110 desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
2111
2112 // Copy the data.
2113 intptr_t pc_delta = desc.buffer - buffer_;
2114 intptr_t rc_delta =
2115 (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
2116 memmove(desc.buffer, buffer_, desc.instr_size);
2117 memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
2118 desc.reloc_size);
2119
2120 // Switch buffers.
2121 DeleteArray(buffer_);
2122 buffer_ = desc.buffer;
2123 buffer_size_ = desc.buffer_size;
2124 pc_ += pc_delta;
2125 reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
2126 reloc_info_writer.last_pc() + pc_delta);
2127
2128 // None of our relocation types are PC-relative references to targets outside
2129 // the code buffer, nor absolute references to targets inside it, so no
2130 // emitted relocation entries need to be relocated.
2131
2132 #if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
2133 // Relocate runtime entries.
2134 for (RelocIterator it(desc); !it.done(); it.next()) {
2135 RelocInfo::Mode rmode = it.rinfo()->rmode();
2136 if (rmode == RelocInfo::INTERNAL_REFERENCE) {
2137 RelocateInternalReference(it.rinfo()->pc(), pc_delta, 0);
2138 }
2139 }
2140 #if V8_OOL_CONSTANT_POOL
2141 constant_pool_builder_.Relocate(pc_delta);
2142 #endif
2143 #endif
2144 }
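// Growth policy illustration: from the 4 KB minimum the buffer doubles
// (4 KB -> 8 KB -> ... -> 1 MB) and then grows linearly in 1 MB steps
// (2 MB -> 3 MB -> ...), bounding both small-buffer churn and large-buffer
// overshoot.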
2145
2146
2147 void Assembler::db(uint8_t data) {
2148 CheckBuffer();
2149 *reinterpret_cast<uint8_t*>(pc_) = data;
2150 pc_ += sizeof(uint8_t);
2151 }
2152
2153
2154 void Assembler::dd(uint32_t data) {
2155 CheckBuffer();
2156 *reinterpret_cast<uint32_t*>(pc_) = data;
2157 pc_ += sizeof(uint32_t);
2158 }
2159
2160
2161 void Assembler::emit_ptr(uintptr_t data) {
2162 CheckBuffer();
2163 *reinterpret_cast<uintptr_t*>(pc_) = data;
2164 pc_ += sizeof(uintptr_t);
2165 }
2166
2167
2168 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
2169 RelocInfo rinfo(pc_, rmode, data, NULL);
2170 RecordRelocInfo(rinfo);
2171 }
2172
2173
2174 void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
2175 if (rinfo.rmode() >= RelocInfo::JS_RETURN &&
2176 rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) {
2177 // Adjust code for new modes.
2178 DCHECK(RelocInfo::IsDebugBreakSlot(rinfo.rmode()) ||
2179 RelocInfo::IsJSReturn(rinfo.rmode()) ||
2180 RelocInfo::IsComment(rinfo.rmode()) ||
2181 RelocInfo::IsPosition(rinfo.rmode()));
2182 }
2183 if (!RelocInfo::IsNone(rinfo.rmode())) {
2184 // Don't record external references unless the heap will be serialized.
2185 if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
2186 if (!serializer_enabled() && !emit_debug_code()) {
2187 return;
2188 }
2189 }
2190 DCHECK(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
2191 if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
2192 RelocInfo reloc_info_with_ast_id(rinfo.pc(), rinfo.rmode(),
2193 RecordedAstId().ToInt(), NULL);
2194 ClearRecordedAstId();
2195 reloc_info_writer.Write(&reloc_info_with_ast_id);
2196 } else {
2197 reloc_info_writer.Write(&rinfo);
2198 }
2199 }
2200 }
2201
2202
2203 void Assembler::BlockTrampolinePoolFor(int instructions) {
2204 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
2205 }
2206
2207
2208 void Assembler::CheckTrampolinePool() {
2209 // Some small sequences of instructions must not be broken up by the
2210 // insertion of a trampoline pool; such sequences are protected by setting
2211 // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
2212 // which are both checked here. Also, recursive calls to CheckTrampolinePool
2213 // are blocked by trampoline_pool_blocked_nesting_.
2214 if ((trampoline_pool_blocked_nesting_ > 0) ||
2215 (pc_offset() < no_trampoline_pool_before_)) {
2216 // Emission is currently blocked; make sure we try again as soon as
2217 // possible.
2218 if (trampoline_pool_blocked_nesting_ > 0) {
2219 next_buffer_check_ = pc_offset() + kInstrSize;
2220 } else {
2221 next_buffer_check_ = no_trampoline_pool_before_;
2222 }
2223 return;
2224 }
2225
2226 DCHECK(!trampoline_emitted_);
2227 DCHECK(unbound_labels_count_ >= 0);
2228 if (unbound_labels_count_ > 0) {
2229 // First emit a jump over the pool, then emit the trampoline pool itself.
2230 {
2231 BlockTrampolinePoolScope block_trampoline_pool(this);
2232 Label after_pool;
2233 b(&after_pool);
2234
2235 int pool_start = pc_offset();
2236 for (int i = 0; i < unbound_labels_count_; i++) {
2237 b(&after_pool);
2238 }
2239 bind(&after_pool);
2240 trampoline_ = Trampoline(pool_start, unbound_labels_count_);
2241
2242 trampoline_emitted_ = true;
2243 // As the trampoline pool is only emitted once, prevent any further
2244 // emission by pushing the next check out of reach.
2245 next_buffer_check_ = kMaxInt;
2246 }
2247 } else {
2248 // There are currently no branches to unbound labels, so the next buffer
2249 // check can be pushed out as far as the branch reach allows.
2250 next_buffer_check_ =
2251 pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
2252 }
2253 return;
2254 }
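// Rationale: PPC conditional branches have limited reach (+/-32 KB,
// kMaxCondBranchReach), so branches to labels that might end up out of
// range are routed through the pool emitted above, which reserves one
// patchable unconditional branch slot per currently unbound label.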
2255
2256
2257 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
2258 #if V8_OOL_CONSTANT_POOL
2259 return constant_pool_builder_.New(isolate);
2260 #else
2261 // No out-of-line constant pool support.
2262 DCHECK(!FLAG_enable_ool_constant_pool);
2263 return isolate->factory()->empty_constant_pool_array();
2264 #endif
2265 }
2266
2267
2268 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
2269 #if V8_OOL_CONSTANT_POOL
2270 constant_pool_builder_.Populate(this, constant_pool);
2271 #else
2272 // No out-of-line constant pool support.
2273 DCHECK(!FLAG_enable_ool_constant_pool);
2274 #endif
2275 }
2276
2277
2278 #if V8_OOL_CONSTANT_POOL
2279 ConstantPoolBuilder::ConstantPoolBuilder()
2280 : size_(0),
2281 entries_(),
2282 current_section_(ConstantPoolArray::SMALL_SECTION) {}
2283
2284
2285 bool ConstantPoolBuilder::IsEmpty() { return entries_.empty(); }
2286
2287
2288 ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
2289 RelocInfo::Mode rmode) {
2290 #if V8_TARGET_ARCH_PPC64
2291 // We don't support 32-bit entries at this time.
2292 if (!RelocInfo::IsGCRelocMode(rmode)) {
2293 return ConstantPoolArray::INT64;
2294 #else
2295 if (rmode == RelocInfo::NONE64) {
2296 return ConstantPoolArray::INT64;
2297 } else if (!RelocInfo::IsGCRelocMode(rmode)) {
2298 return ConstantPoolArray::INT32;
2299 #endif
2300 } else if (RelocInfo::IsCodeTarget(rmode)) {
2301 return ConstantPoolArray::CODE_PTR;
2302 } else {
2303 DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
2304 return ConstantPoolArray::HEAP_PTR;
2305 }
2306 }
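// Readability note: the #if/#else above deliberately interleaves the
// preprocessor with the if/else-if chain; each configuration still compiles
// a brace-balanced chain, with the trailing } else if / else clauses shared
// by the PPC64 and 32-bit variants. The same pattern recurs in Populate()
// below.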
2307
2308
2309 ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
2310 Assembler* assm, const RelocInfo& rinfo) {
2311 RelocInfo::Mode rmode = rinfo.rmode();
2312 DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
2313 rmode != RelocInfo::STATEMENT_POSITION &&
2314 rmode != RelocInfo::CONST_POOL);
2315
2316 // Try to merge entries which won't be patched.
2317 int merged_index = -1;
2318 ConstantPoolArray::LayoutSection entry_section = current_section_;
2319 if (RelocInfo::IsNone(rmode) ||
2320 (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
2321 size_t i;
2322 std::vector<ConstantPoolEntry>::const_iterator it;
2323 for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
2324 if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
2325 // Merge with found entry.
2326 merged_index = i;
2327 entry_section = entries_[i].section_;
2328 break;
2329 }
2330 }
2331 }
2332 DCHECK(entry_section <= current_section_);
2333 entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
2334
2335 if (merged_index == -1) {
2336 // Not merged, so update the appropriate count.
2337 number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
2338 }
2339
2340 // Check if we still have room for another entry in the small section
2341 // given the limitations of the header's layout fields.
2342 if (current_section_ == ConstantPoolArray::SMALL_SECTION) {
2343 size_ = ConstantPoolArray::SizeFor(*small_entries());
2344 if (!is_uint12(size_)) {
2345 current_section_ = ConstantPoolArray::EXTENDED_SECTION;
2346 }
2347 } else {
2348 size_ = ConstantPoolArray::SizeForExtended(*small_entries(),
2349 *extended_entries());
2350 }
2351
2352 return entry_section;
2353 }
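// Merging recap: entries that will never be patched (rmode NONE, or CELL
// and above when not serializing) are deduplicated against earlier
// identical entries; a merged entry records its twin's index in
// merged_index_ and does not bump the per-type counts, so it consumes no
// extra pool slot.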
2354
2355
2356 void ConstantPoolBuilder::Relocate(intptr_t pc_delta) {
2357 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
2358 entry != entries_.end(); entry++) {
2359 DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
2360 entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
2361 }
2362 }
2363
2364
2365 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
2366 if (IsEmpty()) {
2367 return isolate->factory()->empty_constant_pool_array();
2368 } else if (extended_entries()->is_empty()) {
2369 return isolate->factory()->NewConstantPoolArray(*small_entries());
2370 } else {
2371 DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
2372 return isolate->factory()->NewExtendedConstantPoolArray(
2373 *small_entries(), *extended_entries());
2374 }
2375 }
2376
2377
2378 void ConstantPoolBuilder::Populate(Assembler* assm,
2379 ConstantPoolArray* constant_pool) {
2380 DCHECK_EQ(extended_entries()->is_empty(),
2381 !constant_pool->is_extended_layout());
2382 DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
2383 constant_pool, ConstantPoolArray::SMALL_SECTION)));
2384 if (constant_pool->is_extended_layout()) {
2385 DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
2386 constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
2387 }
2388
2389 // Set up initial offsets.
2390 int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
2391 [ConstantPoolArray::NUMBER_OF_TYPES];
2392 for (int section = 0; section <= constant_pool->final_section(); section++) {
2393 int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
2394 ? small_entries()->total_count()
2395 : 0;
2396 for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
2397 ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
2398 if (number_of_entries_[section].count_of(type) != 0) {
2399 offsets[section][type] = constant_pool->OffsetOfElementAt(
2400 number_of_entries_[section].base_of(type) + section_start);
2401 }
2402 }
2403 }
2404
2405 for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
2406 entry != entries_.end(); entry++) {
2407 RelocInfo rinfo = entry->rinfo_;
2408 RelocInfo::Mode rmode = entry->rinfo_.rmode();
2409 ConstantPoolArray::Type type = GetConstantPoolType(rmode);
2410
2411 // Update constant pool if necessary and get the entry's offset.
2412 int offset;
2413 if (entry->merged_index_ == -1) {
2414 offset = offsets[entry->section_][type];
2415 offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
2416 if (type == ConstantPoolArray::INT64) {
2417 #if V8_TARGET_ARCH_PPC64
2418 constant_pool->set_at_offset(offset, rinfo.data());
2419 #else
2420 constant_pool->set_at_offset(offset, rinfo.data64());
2421 } else if (type == ConstantPoolArray::INT32) {
2422 constant_pool->set_at_offset(offset,
2423 static_cast<int32_t>(rinfo.data()));
2424 #endif
2425 } else if (type == ConstantPoolArray::CODE_PTR) {
2426 constant_pool->set_at_offset(offset,
2427 reinterpret_cast<Address>(rinfo.data()));
2428 } else {
2429 DCHECK(type == ConstantPoolArray::HEAP_PTR);
2430 constant_pool->set_at_offset(offset,
2431 reinterpret_cast<Object*>(rinfo.data()));
2432 }
2433 offset -= kHeapObjectTag;
2434 entry->merged_index_ = offset; // Stash offset for merged entries.
2435 } else {
2436 DCHECK(entry->merged_index_ < (entry - entries_.begin()));
2437 offset = entries_[entry->merged_index_].merged_index_;
2438 }
2439
2440 // Patch load instruction with correct offset.
2441 Assembler::SetConstantPoolOffset(rinfo.pc(), offset);
2442 }
2443 }
2444 #endif
2445 }
2446 } // namespace v8::internal
2447
2448 #endif // V8_TARGET_ARCH_PPC