Chromium Code Reviews

Side by Side Diff: src/compiler/ppc/code-generator-ppc.cc

Issue 901083004: Contribution of PowerPC port (continuation of 422063005) - PPC dir update (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Contribution of PowerPC port (continuation of 422063005) - PPC dir update - comments and rebase
Created 5 years, 10 months ago
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/compiler/code-generator.h"
6
7 #include "src/compiler/code-generator-impl.h"
8 #include "src/compiler/gap-resolver.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/ppc/macro-assembler-ppc.h"
11 #include "src/scopes.h"
12
13 namespace v8 {
14 namespace internal {
15 namespace compiler {
16
17 #define __ masm()->
18
19
20 #define kScratchReg r11
21
22
23 // Adds PPC-specific methods to convert InstructionOperands.
24 class PPCOperandConverter FINAL : public InstructionOperandConverter {
25 public:
26 PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
27 : InstructionOperandConverter(gen, instr) {}
28
29 RCBit OutputRCBit() const {
30 switch (instr_->flags_mode()) {
31 case kFlags_branch:
32 case kFlags_set:
33 return SetRC;
34 case kFlags_none:
35 return LeaveRC;
36 }
37 UNREACHABLE();
38 return LeaveRC;
39 }
40
41 bool CompareLogical() const {
42 switch (instr_->flags_condition()) {
43 case kUnsignedLessThan:
44 case kUnsignedGreaterThanOrEqual:
45 case kUnsignedLessThanOrEqual:
46 case kUnsignedGreaterThan:
47 return true;
48 default:
49 return false;
50 }
51 UNREACHABLE();
52 return false;
53 }
54
55 Operand InputImmediate(int index) {
56 Constant constant = ToConstant(instr_->InputAt(index));
57 switch (constant.type()) {
58 case Constant::kInt32:
59 return Operand(constant.ToInt32());
60 case Constant::kFloat32:
61 return Operand(
62 isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
63 case Constant::kFloat64:
64 return Operand(
65 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
66 case Constant::kInt64:
67 #if V8_TARGET_ARCH_PPC64
68 return Operand(constant.ToInt64());
69 #endif
70 case Constant::kExternalReference:
71 case Constant::kHeapObject:
72 case Constant::kRpoNumber:
73 break;
74 }
75 UNREACHABLE();
76 return Operand::Zero();
77 }
78
79 MemOperand MemoryOperand(AddressingMode* mode, int* first_index) {
80 const int index = *first_index;
81 *mode = AddressingModeField::decode(instr_->opcode());
82 switch (*mode) {
83 case kMode_None:
84 break;
85 case kMode_MRI:
86 *first_index += 2;
87 return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
88 case kMode_MRR:
89 *first_index += 2;
90 return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
91 }
92 UNREACHABLE();
93 return MemOperand(r0);
94 }
95
96 MemOperand MemoryOperand(AddressingMode* mode, int first_index = 0) {
97 return MemoryOperand(mode, &first_index);
98 }
99
100 MemOperand ToMemOperand(InstructionOperand* op) const {
101 DCHECK(op != NULL);
102 DCHECK(!op->IsRegister());
103 DCHECK(!op->IsDoubleRegister());
104 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
105 // The linkage computes where all spill slots are located.
106 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
107 return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
108 }
109 };
110
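Every opcode handler in AssembleArchInstruction below follows the same shape: wrap the instruction in this converter, then pull typed operands out by position. For orientation, here is the kPPC_Add32 register path from further down, annotated:

    PPCOperandConverter i(this, instr);     // typed access to instr's operands
    __ add(i.OutputRegister(),              // destination register
           i.InputRegister(0),              // left input
           i.InputRegister(1),              // right input
           LeaveOE,                         // don't record overflow in XER
           i.OutputRCBit());                // SetRC only when a flags user follows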
111
112 static inline bool HasRegisterInput(Instruction* instr, int index) {
113 return instr->InputAt(index)->IsRegister();
114 }
115
116
117 namespace {
118
119 class OutOfLineLoadNAN32 FINAL : public OutOfLineCode {
120 public:
121 OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
122 : OutOfLineCode(gen), result_(result) {}
123
124 void Generate() FINAL {
125 __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
126 kScratchReg);
127 }
128
129 private:
130 DoubleRegister const result_;
131 };
132
133
134 class OutOfLineLoadNAN64 FINAL : public OutOfLineCode {
135 public:
136 OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
137 : OutOfLineCode(gen), result_(result) {}
138
139 void Generate() FINAL {
140 __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
141 kScratchReg);
142 }
143
144 private:
145 DoubleRegister const result_;
146 };
147
148
149 class OutOfLineLoadZero FINAL : public OutOfLineCode {
150 public:
151 OutOfLineLoadZero(CodeGenerator* gen, Register result)
152 : OutOfLineCode(gen), result_(result) {}
153
154 void Generate() FINAL { __ li(result_, Operand::Zero()); }
155
156 private:
157 Register const result_;
158 };
159
160
161 Condition FlagsConditionToCondition(FlagsCondition condition) {
162 switch (condition) {
163 case kEqual:
164 return eq;
165 case kNotEqual:
166 return ne;
167 case kSignedLessThan:
168 case kUnsignedLessThan:
169 return lt;
170 case kSignedGreaterThanOrEqual:
171 case kUnsignedGreaterThanOrEqual:
172 return ge;
173 case kSignedLessThanOrEqual:
174 case kUnsignedLessThanOrEqual:
175 return le;
176 case kSignedGreaterThan:
177 case kUnsignedGreaterThan:
178 return gt;
179 case kOverflow:
180 #if V8_TARGET_ARCH_PPC64
181 return ne;
182 #else
183 return lt;
184 #endif
185 case kNotOverflow:
186 #if V8_TARGET_ARCH_PPC64
187 return eq;
188 #else
189 return ge;
190 #endif
191 case kUnorderedEqual:
192 case kUnorderedNotEqual:
193 break;
194 }
195 UNREACHABLE();
196 return kNoCondition;
197 }
198
199 } // namespace
200
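Note that the signed and unsigned variants above collapse onto the same PPC condition. The distinction is carried by the compare instruction instead: CompareLogical() selects the logical compares (cmpl/cmplw) for the kUnsigned* conditions and the arithmetic ones (cmp/cmpw) otherwise, after which lt/le/gt/ge read the same CR bits. A minimal C++ illustration of why the split matters:

    #include <cstdint>
    // Both compile to "compare; branch if lt"; only the compare differs.
    bool signed_lt(int32_t a, int32_t b) { return a < b; }      // cmpw  + lt
    bool unsigned_lt(uint32_t a, uint32_t b) { return a < b; }  // cmplw + lt
    // For a = 0xFFFFFFFF, b = 1: signed_lt sees -1 < 1 (true), while
    // unsigned_lt sees 4294967295 < 1 (false).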
201 #define ASSEMBLE_FLOAT_UNOP_RC(asm_instr) \
202 do { \
203 __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
204 i.OutputRCBit()); \
205 } while (0)
206
207
208 #define ASSEMBLE_FLOAT_BINOP_RC(asm_instr) \
209 do { \
210 __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
211 i.InputDoubleRegister(1), i.OutputRCBit()); \
212 } while (0)
213
214
215 #define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm) \
216 do { \
217 if (HasRegisterInput(instr, 1)) { \
218 __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
219 i.InputRegister(1)); \
220 } else { \
221 __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
222 i.InputImmediate(1)); \
223 } \
224 } while (0)
225
226
227 #define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm) \
228 do { \
229 if (HasRegisterInput(instr, 1)) { \
230 __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
231 i.InputRegister(1), i.OutputRCBit()); \
232 } else { \
233 __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
234 i.InputImmediate(1), i.OutputRCBit()); \
235 } \
236 } while (0)
237
238
239 #define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm) \
240 do { \
241 if (HasRegisterInput(instr, 1)) { \
242 __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
243 i.InputRegister(1), i.OutputRCBit()); \
244 } else { \
245 __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
246 i.InputInt32(1), i.OutputRCBit()); \
247 } \
248 } while (0)
249
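To make the macro family concrete: ASSEMBLE_BINOP_RC(slw, slwi), used later for kPPC_ShiftLeft32, expands to roughly the following, picking the register form when operand 1 was allocated a register and the immediate form otherwise:

    if (HasRegisterInput(instr, 1)) {
      __ slw(i.OutputRegister(), i.InputRegister(0),
             i.InputRegister(1), i.OutputRCBit());    // variable shift amount
    } else {
      __ slwi(i.OutputRegister(), i.InputRegister(0),
              i.InputImmediate(1), i.OutputRCBit());  // constant shift amount
    }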
250
251 #if V8_TARGET_ARCH_PPC64
252 #define ASSEMBLE_ADD_WITH_OVERFLOW() \
253 do { \
254 ASSEMBLE_BINOP(add, addi); \
255 __ TestIfInt32(i.OutputRegister(), r0, cr0); \
256 } while (0)
257 #else
258 #define ASSEMBLE_ADD_WITH_OVERFLOW() \
259 do { \
260 if (HasRegisterInput(instr, 1)) { \
261 __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
262 i.InputRegister(1), kScratchReg, r0); \
263 } else { \
264 __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
265 i.InputInt32(1), kScratchReg, r0); \
266 } \
267 } while (0)
268 #endif
269
270
271 #if V8_TARGET_ARCH_PPC64
272 #define ASSEMBLE_SUB_WITH_OVERFLOW() \
273 do { \
274 ASSEMBLE_BINOP(sub, subi); \
275 __ TestIfInt32(i.OutputRegister(), r0, cr0); \
276 } while (0)
277 #else
278 #define ASSEMBLE_SUB_WITH_OVERFLOW() \
279 do { \
280 if (HasRegisterInput(instr, 1)) { \
281 __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
282 i.InputRegister(1), kScratchReg, r0); \
283 } else { \
284 __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
285 -i.InputInt32(1), kScratchReg, r0); \
286 } \
287 } while (0)
288 #endif
289
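The two overflow strategies differ by word size. A C++ sketch of the arithmetic each side is expected to perform, assuming AddAndCheckForOverflow and TestIfInt32 behave as their names and the kOverflow mapping in FlagsConditionToCondition suggest:

    #include <cstdint>
    // 32-bit build: signed overflow occurred iff the result's sign differs
    // from the sign of both inputs. The sign of this mask lands in a CR
    // field, matching kOverflow -> lt and kNotOverflow -> ge above.
    int32_t add_overflow_mask(int32_t a, int32_t b) {
      int32_t r = static_cast<int32_t>(static_cast<uint32_t>(a) +
                                       static_cast<uint32_t>(b));
      return (r ^ a) & (r ^ b);  // negative iff signed overflow
    }
    // 64-bit build: do the add in 64 bits, then check whether the result
    // still sign-extends from bit 31, matching kOverflow -> ne and
    // kNotOverflow -> eq above.
    bool fits_int32(int64_t r) { return r == static_cast<int32_t>(r); }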
290
291 #define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr) \
292 do { \
293 const CRegister cr = cr0; \
294 if (HasRegisterInput(instr, 1)) { \
295 if (i.CompareLogical()) { \
296 __ cmpl_instr(i.InputRegister(0), i.InputRegister(1), cr); \
297 } else { \
298 __ cmp_instr(i.InputRegister(0), i.InputRegister(1), cr); \
299 } \
300 } else { \
301 if (i.CompareLogical()) { \
302 __ cmpl_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
303 } else { \
304 __ cmp_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
305 } \
306 } \
307 DCHECK_EQ(SetRC, i.OutputRCBit()); \
308 } while (0)
309
310
311 #define ASSEMBLE_FLOAT_COMPARE(cmp_instr) \
312 do { \
313 const CRegister cr = cr0; \
314 __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1), cr); \
315 DCHECK_EQ(SetRC, i.OutputRCBit()); \
316 } while (0)
317
318
319 #define ASSEMBLE_MODULO(div_instr, mul_instr) \
320 do { \
321 const Register scratch = kScratchReg; \
322 __ div_instr(scratch, i.InputRegister(0), i.InputRegister(1)); \
323 __ mul_instr(scratch, scratch, i.InputRegister(1)); \
324 __ sub(i.OutputRegister(), i.InputRegister(0), scratch, LeaveOE, \
325 i.OutputRCBit()); \
326 } while (0)
327
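PPC has no integer remainder instruction (the modsw family only arrived with Power ISA 3.0), so the macro above synthesizes one from a divide, a multiply, and a subtract. In C++ terms:

    #include <cstdint>
    int32_t mod_via_div(int32_t a, int32_t b) {
      int32_t q = a / b;  // div_instr: truncating divide
      int32_t t = q * b;  // mul_instr: scale the quotient back up
      return a - t;       // sub: the remainder, same sign as the dividend
    }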
328
329 #define ASSEMBLE_FLOAT_MODULO() \
330 do { \
331 FrameScope scope(masm(), StackFrame::MANUAL); \
332 __ PrepareCallCFunction(0, 2, kScratchReg); \
333 __ MovToFloatParameters(i.InputDoubleRegister(0), \
334 i.InputDoubleRegister(1)); \
335 __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
336 0, 2); \
337 __ MovFromFloatResult(i.OutputDoubleRegister()); \
338 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
339 } while (0)
340
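Float64 modulo is likewise not a single PPC instruction, so the macro above makes a C call into the runtime. The callee behind mod_two_doubles_operation behaves like C's fmod; a sketch of what the call computes:

    #include <cmath>
    double mod_two_doubles(double a, double b) {
      return std::fmod(a, b);  // truncated-division remainder, sign of 'a'
    }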
341
342 #define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx) \
343 do { \
344 DoubleRegister result = i.OutputDoubleRegister(); \
345 AddressingMode mode = kMode_None; \
346 MemOperand operand = i.MemoryOperand(&mode); \
347 if (mode == kMode_MRI) { \
348 __ asm_instr(result, operand); \
349 } else { \
350 __ asm_instrx(result, operand); \
351 } \
352 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
353 } while (0)
354
355
356 #define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
357 do { \
358 Register result = i.OutputRegister(); \
359 AddressingMode mode = kMode_None; \
360 MemOperand operand = i.MemoryOperand(&mode); \
361 if (mode == kMode_MRI) { \
362 __ asm_instr(result, operand); \
363 } else { \
364 __ asm_instrx(result, operand); \
365 } \
366 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
367 } while (0)
368
369
370 #define ASSEMBLE_STORE_FLOAT(asm_instr, asm_instrx) \
371 do { \
372 int index = 0; \
373 AddressingMode mode = kMode_None; \
374 MemOperand operand = i.MemoryOperand(&mode, &index); \
375 DoubleRegister value = i.InputDoubleRegister(index); \
376 if (mode == kMode_MRI) { \
377 __ asm_instr(value, operand); \
378 } else { \
379 __ asm_instrx(value, operand); \
380 } \
381 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
382 } while (0)
383
384
385 #define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx) \
386 do { \
387 int index = 0; \
388 AddressingMode mode = kMode_None; \
389 MemOperand operand = i.MemoryOperand(&mode, &index); \
390 Register value = i.InputRegister(index); \
391 if (mode == kMode_MRI) { \
392 __ asm_instr(value, operand); \
393 } else { \
394 __ asm_instrx(value, operand); \
395 } \
396 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
397 } while (0)
398
399
400 // TODO(mbrandy): fix paths that produce garbage in offset's upper 32 bits.
401 #define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width) \
402 do { \
403 DoubleRegister result = i.OutputDoubleRegister(); \
404 AddressingMode mode = kMode_None; \
405 MemOperand operand = i.MemoryOperand(&mode, 0); \
406 DCHECK_EQ(kMode_MRR, mode); \
407 Register offset = operand.rb(); \
408 __ extsw(offset, offset); \
409 if (HasRegisterInput(instr, 2)) { \
410 __ cmplw(offset, i.InputRegister(2)); \
411 } else { \
412 __ cmplwi(offset, i.InputImmediate(2)); \
413 } \
414 auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
415 __ bge(ool->entry()); \
416 if (mode == kMode_MRI) { \
417 __ asm_instr(result, operand); \
418 } else { \
419 __ asm_instrx(result, operand); \
420 } \
421 __ bind(ool->exit()); \
422 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
423 } while (0)
424
425
426 // TODO(mbrandy): fix paths that produce garbage in offset's upper 32 bits.
427 #define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
428 do { \
429 Register result = i.OutputRegister(); \
430 AddressingMode mode = kMode_None; \
431 MemOperand operand = i.MemoryOperand(&mode, 0); \
432 DCHECK_EQ(kMode_MRR, mode); \
433 Register offset = operand.rb(); \
434 __ extsw(offset, offset); \
435 if (HasRegisterInput(instr, 2)) { \
436 __ cmplw(offset, i.InputRegister(2)); \
437 } else { \
438 __ cmplwi(offset, i.InputImmediate(2)); \
439 } \
440 auto ool = new (zone()) OutOfLineLoadZero(this, result); \
441 __ bge(ool->entry()); \
442 if (mode == kMode_MRI) { \
443 __ asm_instr(result, operand); \
444 } else { \
445 __ asm_instrx(result, operand); \
446 } \
447 __ bind(ool->exit()); \
448 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
449 } while (0)
450
451
452 // TODO(mbrandy): fix paths that produce garbage in offset's upper 32 bits.
453 #define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr, asm_instrx) \
454 do { \
455 Label done; \
456 AddressingMode mode = kMode_None; \
457 MemOperand operand = i.MemoryOperand(&mode, 0); \
458 DCHECK_EQ(kMode_MRR, mode); \
459 Register offset = operand.rb(); \
460 __ extsw(offset, offset); \
461 if (HasRegisterInput(instr, 2)) { \
462 __ cmplw(offset, i.InputRegister(2)); \
463 } else { \
464 __ cmplwi(offset, i.InputImmediate(2)); \
465 } \
466 __ bge(&done); \
467 DoubleRegister value = i.InputDoubleRegister(3); \
468 if (mode == kMode_MRI) { \
469 __ asm_instr(value, operand); \
470 } else { \
471 __ asm_instrx(value, operand); \
472 } \
473 __ bind(&done); \
474 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
475 } while (0)
476
477
478 // TODO(mbrandy): fix paths that produce garbage in offset's upper 32 bits.
479 #define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
480 do { \
481 Label done; \
482 AddressingMode mode = kMode_None; \
483 MemOperand operand = i.MemoryOperand(&mode, 0); \
484 DCHECK_EQ(kMode_MRR, mode); \
485 Register offset = operand.rb(); \
486 __ extsw(offset, offset); \
487 if (HasRegisterInput(instr, 2)) { \
488 __ cmplw(offset, i.InputRegister(2)); \
489 } else { \
490 __ cmplwi(offset, i.InputImmediate(2)); \
491 } \
492 __ bge(&done); \
493 Register value = i.InputRegister(3); \
494 if (mode == kMode_MRI) { \
495 __ asm_instr(value, operand); \
496 } else { \
497 __ asm_instrx(value, operand); \
498 } \
499 __ bind(&done); \
500 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
501 } while (0)
502
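All four checked-access macros share one bounds-check pattern: sign-extend the offset (extsw), compare it unsigned against the length operand, and branch around the access when out of range. A C++ model of the integer load case (names are illustrative):

    #include <cstdint>
    uint32_t checked_load_u32(const uint32_t* base, int32_t offset,
                              uint32_t length) {
      // The compare is unsigned (cmplw/cmplwi), so a negative offset wraps
      // to a huge unsigned value and fails the check as well.
      if (static_cast<uint32_t>(offset) >= length) {
        return 0;  // OutOfLineLoadZero; the float variants materialize NaN
      }
      return base[offset];
    }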
503
504 #define ASSEMBLE_STORE_WRITE_BARRIER() \
505 do { \
506 Register object = i.InputRegister(0); \
507 Register index = i.InputRegister(1); \
508 Register value = i.InputRegister(2); \
509 __ add(index, object, index); \
510 __ StoreP(value, MemOperand(index)); \
511 SaveFPRegsMode mode = \
512 frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs; \
513 LinkRegisterStatus lr_status = kLRHasNotBeenSaved; \
514 __ RecordWrite(object, index, value, lr_status, mode); \
515 DCHECK_EQ(LeaveRC, i.OutputRCBit()); \
516 } while (0)
517
518
519 // Assembles an instruction after register allocation, producing machine code.
520 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
521 PPCOperandConverter i(this, instr);
522 ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
523
524 switch (opcode) {
525 case kArchCallCodeObject: {
526 EnsureSpaceForLazyDeopt();
527 if (HasRegisterInput(instr, 0)) {
528 __ addi(ip, i.InputRegister(0),
529 Operand(Code::kHeaderSize - kHeapObjectTag));
530 __ Call(ip);
531 } else {
532 __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
533 RelocInfo::CODE_TARGET);
534 }
535 AddSafepointAndDeopt(instr);
536 DCHECK_EQ(LeaveRC, i.OutputRCBit());
537 break;
538 }
539 case kArchCallJSFunction: {
540 EnsureSpaceForLazyDeopt();
541 Register func = i.InputRegister(0);
542 if (FLAG_debug_code) {
543 // Check the function's context matches the context argument.
544 __ LoadP(kScratchReg,
545 FieldMemOperand(func, JSFunction::kContextOffset));
546 __ cmp(cp, kScratchReg);
547 __ Assert(eq, kWrongFunctionContext);
548 }
549 __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
550 __ Call(ip);
551 AddSafepointAndDeopt(instr);
552 DCHECK_EQ(LeaveRC, i.OutputRCBit());
553 break;
554 }
555 case kArchJmp:
556 AssembleArchJump(i.InputRpo(0));
557 DCHECK_EQ(LeaveRC, i.OutputRCBit());
558 break;
559 case kArchNop:
560 // don't emit code for nops.
561 DCHECK_EQ(LeaveRC, i.OutputRCBit());
562 break;
563 case kArchRet:
564 AssembleReturn();
565 DCHECK_EQ(LeaveRC, i.OutputRCBit());
566 break;
567 case kArchStackPointer:
568 __ mr(i.OutputRegister(), sp);
569 DCHECK_EQ(LeaveRC, i.OutputRCBit());
570 break;
571 case kArchTruncateDoubleToI:
572 // TODO(mbrandy): move slow call to stub out of line.
573 __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
574 DCHECK_EQ(LeaveRC, i.OutputRCBit());
575 break;
576 case kPPC_And32:
577 case kPPC_And64:
578 if (HasRegisterInput(instr, 1)) {
579 __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
580 i.OutputRCBit());
581 } else {
582 __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
583 }
584 break;
585 case kPPC_AndComplement32:
586 case kPPC_AndComplement64:
587 __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
588 i.OutputRCBit());
589 break;
590 case kPPC_Or32:
591 case kPPC_Or64:
592 if (HasRegisterInput(instr, 1)) {
593 __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
594 i.OutputRCBit());
595 } else {
596 __ ori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
597 DCHECK_EQ(LeaveRC, i.OutputRCBit());
598 }
599 break;
600 case kPPC_OrComplement32:
601 case kPPC_OrComplement64:
602 __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
603 i.OutputRCBit());
604 break;
605 case kPPC_Xor32:
606 case kPPC_Xor64:
607 if (HasRegisterInput(instr, 1)) {
608 __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
609 i.OutputRCBit());
610 } else {
611 __ xori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
612 DCHECK_EQ(LeaveRC, i.OutputRCBit());
613 }
614 break;
615 case kPPC_ShiftLeft32:
616 ASSEMBLE_BINOP_RC(slw, slwi);
617 break;
618 #if V8_TARGET_ARCH_PPC64
619 case kPPC_ShiftLeft64:
620 ASSEMBLE_BINOP_RC(sld, sldi);
621 break;
622 #endif
623 case kPPC_ShiftRight32:
624 ASSEMBLE_BINOP_RC(srw, srwi);
625 break;
626 #if V8_TARGET_ARCH_PPC64
627 case kPPC_ShiftRight64:
628 ASSEMBLE_BINOP_RC(srd, srdi);
629 break;
630 #endif
631 case kPPC_ShiftRightAlg32:
632 ASSEMBLE_BINOP_INT_RC(sraw, srawi);
633 break;
634 #if V8_TARGET_ARCH_PPC64
635 case kPPC_ShiftRightAlg64:
636 ASSEMBLE_BINOP_INT_RC(srad, sradi);
637 break;
638 #endif
639 case kPPC_RotRight32:
640 if (HasRegisterInput(instr, 1)) {
641 __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
642 __ rotlw(i.OutputRegister(), i.InputRegister(0), kScratchReg,
643 i.OutputRCBit());
644 } else {
645 int sh = i.InputInt32(1);
646 __ rotrwi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
647 }
648 break;
649 #if V8_TARGET_ARCH_PPC64
650 case kPPC_RotRight64:
651 if (HasRegisterInput(instr, 1)) {
652 __ subfic(kScratchReg, i.InputRegister(1), Operand(64));
653 __ rotld(i.OutputRegister(), i.InputRegister(0), kScratchReg,
654 i.OutputRCBit());
655 } else {
656 int sh = i.InputInt32(1);
657 __ rotrdi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
658 }
659 break;
660 #endif
661 case kPPC_Not32:
662 case kPPC_Not64:
663 __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
664 break;
665 case kPPC_RotLeftAndMask32:
666 __ rlwinm(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
667 31 - i.InputInt32(2), 31 - i.InputInt32(3), i.OutputRCBit());
668 break;
669 #if V8_TARGET_ARCH_PPC64
670 case kPPC_RotLeftAndClear64:
671 __ rldic(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
672 63 - i.InputInt32(2), i.OutputRCBit());
673 break;
674 case kPPC_RotLeftAndClearLeft64:
675 __ rldicl(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
676 63 - i.InputInt32(2), i.OutputRCBit());
677 break;
678 case kPPC_RotLeftAndClearRight64:
679 __ rldicr(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
680 63 - i.InputInt32(2), i.OutputRCBit());
681 break;
682 #endif
683 case kPPC_Add32:
684 case kPPC_Add64:
685 if (HasRegisterInput(instr, 1)) {
686 __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
687 LeaveOE, i.OutputRCBit());
688 } else {
689 __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
690 DCHECK_EQ(LeaveRC, i.OutputRCBit());
691 }
692 break;
693 case kPPC_AddWithOverflow32:
694 ASSEMBLE_ADD_WITH_OVERFLOW();
695 break;
696 case kPPC_AddFloat64:
697 ASSEMBLE_FLOAT_BINOP_RC(fadd);
698 break;
699 case kPPC_Sub32:
700 case kPPC_Sub64:
701 if (HasRegisterInput(instr, 1)) {
702 __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
703 LeaveOE, i.OutputRCBit());
704 } else {
705 __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
706 DCHECK_EQ(LeaveRC, i.OutputRCBit());
707 }
708 break;
709 case kPPC_SubWithOverflow32:
710 ASSEMBLE_SUB_WITH_OVERFLOW();
711 break;
712 case kPPC_SubFloat64:
713 ASSEMBLE_FLOAT_BINOP_RC(fsub);
714 break;
715 case kPPC_Mul32:
716 __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
717 LeaveOE, i.OutputRCBit());
718 break;
719 #if V8_TARGET_ARCH_PPC64
720 case kPPC_Mul64:
721 __ mulld(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
722 LeaveOE, i.OutputRCBit());
723 break;
724 #endif
725 case kPPC_MulHigh32:
726 __ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
727 i.OutputRCBit());
728 break;
729 case kPPC_MulHighU32:
730 __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
731 i.OutputRCBit());
732 break;
733 case kPPC_MulFloat64:
734 ASSEMBLE_FLOAT_BINOP_RC(fmul);
735 break;
736 case kPPC_Div32:
737 __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
738 DCHECK_EQ(LeaveRC, i.OutputRCBit());
739 break;
740 #if V8_TARGET_ARCH_PPC64
741 case kPPC_Div64:
742 __ divd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
743 DCHECK_EQ(LeaveRC, i.OutputRCBit());
744 break;
745 #endif
746 case kPPC_DivU32:
747 __ divwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
748 DCHECK_EQ(LeaveRC, i.OutputRCBit());
749 break;
750 #if V8_TARGET_ARCH_PPC64
751 case kPPC_DivU64:
752 __ divdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
753 DCHECK_EQ(LeaveRC, i.OutputRCBit());
754 break;
755 #endif
756 case kPPC_DivFloat64:
757 ASSEMBLE_FLOAT_BINOP_RC(fdiv);
758 break;
759 case kPPC_Mod32:
760 ASSEMBLE_MODULO(divw, mullw);
761 break;
762 #if V8_TARGET_ARCH_PPC64
763 case kPPC_Mod64:
764 ASSEMBLE_MODULO(divd, mulld);
765 break;
766 #endif
767 case kPPC_ModU32:
768 ASSEMBLE_MODULO(divwu, mullw);
769 break;
770 #if V8_TARGET_ARCH_PPC64
771 case kPPC_ModU64:
772 ASSEMBLE_MODULO(divdu, mulld);
773 break;
774 #endif
775 case kPPC_ModFloat64:
776 // TODO(bmeurer): We should really get rid of this special instruction,
777 // and generate a CallAddress instruction instead.
778 ASSEMBLE_FLOAT_MODULO();
779 break;
780 case kPPC_Neg32:
781 case kPPC_Neg64:
782 __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
783 break;
784 case kPPC_SqrtFloat64:
785 ASSEMBLE_FLOAT_UNOP_RC(fsqrt);
786 break;
787 case kPPC_FloorFloat64:
788 ASSEMBLE_FLOAT_UNOP_RC(frim);
789 break;
790 case kPPC_CeilFloat64:
791 ASSEMBLE_FLOAT_UNOP_RC(frip);
792 break;
793 case kPPC_TruncateFloat64:
794 ASSEMBLE_FLOAT_UNOP_RC(friz);
795 break;
796 case kPPC_RoundFloat64:
797 ASSEMBLE_FLOAT_UNOP_RC(frin);
798 break;
799 case kPPC_NegFloat64:
800 ASSEMBLE_FLOAT_UNOP_RC(fneg);
801 break;
802 case kPPC_Cmp32:
803 ASSEMBLE_COMPARE(cmpw, cmplw);
804 break;
805 #if V8_TARGET_ARCH_PPC64
806 case kPPC_Cmp64:
807 ASSEMBLE_COMPARE(cmp, cmpl);
808 break;
809 #endif
810 case kPPC_CmpFloat64:
811 ASSEMBLE_FLOAT_COMPARE(fcmpu);
812 break;
813 case kPPC_Tst32:
814 if (HasRegisterInput(instr, 1)) {
815 __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
816 } else {
817 __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
818 }
819 #if V8_TARGET_ARCH_PPC64
820 __ extsw(r0, r0, i.OutputRCBit());
821 #endif
822 DCHECK_EQ(SetRC, i.OutputRCBit());
823 break;
824 #if V8_TARGET_ARCH_PPC64
825 case kPPC_Tst64:
826 if (HasRegisterInput(instr, 1)) {
827 __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
828 } else {
829 __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
830 }
831 DCHECK_EQ(SetRC, i.OutputRCBit());
832 break;
833 #endif
834 case kPPC_Push:
835 __ Push(i.InputRegister(0));
836 DCHECK_EQ(LeaveRC, i.OutputRCBit());
837 break;
838 case kPPC_ExtendSignWord8:
839 __ extsb(i.OutputRegister(), i.InputRegister(0));
840 DCHECK_EQ(LeaveRC, i.OutputRCBit());
841 break;
842 case kPPC_ExtendSignWord16:
843 __ extsh(i.OutputRegister(), i.InputRegister(0));
844 DCHECK_EQ(LeaveRC, i.OutputRCBit());
845 break;
846 #if V8_TARGET_ARCH_PPC64
847 case kPPC_ExtendSignWord32:
848 __ extsw(i.OutputRegister(), i.InputRegister(0));
849 DCHECK_EQ(LeaveRC, i.OutputRCBit());
850 break;
851 case kPPC_Uint32ToUint64:
852 // Zero extend
853 __ clrldi(i.OutputRegister(), i.InputRegister(0), Operand(32));
854 DCHECK_EQ(LeaveRC, i.OutputRCBit());
855 break;
856 case kPPC_Int64ToInt32:
857 // TODO(mbrandy): sign extend?
858 __ Move(i.OutputRegister(), i.InputRegister(0));
859 DCHECK_EQ(LeaveRC, i.OutputRCBit());
860 break;
861 #endif
862 case kPPC_Int32ToFloat64:
863 __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
864 DCHECK_EQ(LeaveRC, i.OutputRCBit());
865 break;
866 case kPPC_Uint32ToFloat64:
867 __ ConvertUnsignedIntToDouble(i.InputRegister(0),
868 i.OutputDoubleRegister());
869 DCHECK_EQ(LeaveRC, i.OutputRCBit());
870 break;
871 case kPPC_Float64ToInt32:
872 case kPPC_Float64ToUint32:
873 __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
874 #if !V8_TARGET_ARCH_PPC64
875 kScratchReg,
876 #endif
877 i.OutputRegister(), kScratchDoubleReg);
878 DCHECK_EQ(LeaveRC, i.OutputRCBit());
879 break;
880 case kPPC_Float64ToFloat32:
881 ASSEMBLE_FLOAT_UNOP_RC(frsp);
882 break;
883 case kPPC_Float32ToFloat64:
884 // No conversion needed: float32 values are already held in double format.
885 __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
886 DCHECK_EQ(LeaveRC, i.OutputRCBit());
887 break;
888 case kPPC_LoadWordU8:
889 ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
890 break;
891 case kPPC_LoadWordS8:
892 ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
893 __ extsb(i.OutputRegister(), i.OutputRegister());
894 break;
895 case kPPC_LoadWordU16:
896 ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
897 break;
898 case kPPC_LoadWordS16:
899 ASSEMBLE_LOAD_INTEGER(lha, lhax);
900 break;
901 case kPPC_LoadWordS32:
902 ASSEMBLE_LOAD_INTEGER(lwa, lwax);
903 break;
904 #if V8_TARGET_ARCH_PPC64
905 case kPPC_LoadWord64:
906 ASSEMBLE_LOAD_INTEGER(ld, ldx);
907 break;
908 #endif
909 case kPPC_LoadFloat32:
910 ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
911 break;
912 case kPPC_LoadFloat64:
913 ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
914 break;
915 case kPPC_StoreWord8:
916 ASSEMBLE_STORE_INTEGER(stb, stbx);
917 break;
918 case kPPC_StoreWord16:
919 ASSEMBLE_STORE_INTEGER(sth, sthx);
920 break;
921 case kPPC_StoreWord32:
922 ASSEMBLE_STORE_INTEGER(stw, stwx);
923 break;
924 #if V8_TARGET_ARCH_PPC64
925 case kPPC_StoreWord64:
926 ASSEMBLE_STORE_INTEGER(std, stdx);
927 break;
928 #endif
929 case kPPC_StoreFloat32:
930 ASSEMBLE_STORE_FLOAT(stfs, stfsx);
931 break;
932 case kPPC_StoreFloat64:
933 ASSEMBLE_STORE_FLOAT(stfd, stfdx);
934 break;
935 case kPPC_StoreWriteBarrier:
936 ASSEMBLE_STORE_WRITE_BARRIER();
937 break;
938 case kCheckedLoadInt8:
939 ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
940 __ extsb(i.OutputRegister(), i.OutputRegister());
941 break;
942 case kCheckedLoadUint8:
943 ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
944 break;
945 case kCheckedLoadInt16:
946 ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax);
947 break;
948 case kCheckedLoadUint16:
949 ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
950 break;
951 case kCheckedLoadWord32:
952 ASSEMBLE_CHECKED_LOAD_INTEGER(lwa, lwax);
953 break;
954 case kCheckedLoadFloat32:
955 ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
956 break;
957 case kCheckedLoadFloat64:
958 ASSEMBLE_CHECKED_LOAD_FLOAT(lfd, lfdx, 64);
959 break;
960 case kCheckedStoreWord8:
961 ASSEMBLE_CHECKED_STORE_INTEGER(stb, stbx);
962 break;
963 case kCheckedStoreWord16:
964 ASSEMBLE_CHECKED_STORE_INTEGER(sth, sthx);
965 break;
966 case kCheckedStoreWord32:
967 ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
968 break;
969 case kCheckedStoreFloat32:
970 ASSEMBLE_CHECKED_STORE_FLOAT(stfs, stfsx);
971 break;
972 case kCheckedStoreFloat64:
973 ASSEMBLE_CHECKED_STORE_FLOAT(stfd, stfdx);
974 break;
975 default:
976 UNREACHABLE();
977 break;
978 }
979 }
980
981
982 // Assembles branches after an instruction.
983 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
984 PPCOperandConverter i(this, instr);
985 Label* tlabel = branch->true_label;
986 Label* flabel = branch->false_label;
987 ArchOpcode op = instr->arch_opcode();
988 FlagsCondition condition = branch->condition;
989 CRegister cr = cr0;
990
991 // Overflow checked for add/sub only.
992 DCHECK((condition != kOverflow && condition != kNotOverflow) ||
993 (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));
994
995 Condition cond = FlagsConditionToCondition(condition);
996 if (op == kPPC_CmpFloat64) {
998 // Check for unordered if necessary.
998 if (cond == le) {
999 __ bunordered(flabel, cr);
1001 // Unnecessary for eq/lt since only the FU bit will be set.
1001 } else if (cond == gt) {
1002 __ bunordered(tlabel, cr);
1004 // Unnecessary for ne/ge since only the FU bit will be set.
1004 }
1005 }
1006 __ b(cond, tlabel, cr);
1007 if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
1008 }
1009
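Background for the unordered handling above: fcmpu sets exactly one of four bits in the CR field (FL for less, FG for greater, FE for equal, FU for unordered, i.e. a NaN input). Branches that test a bit being clear (le tests FG clear, ge tests FL clear, ne tests FE clear) are therefore also taken on unordered results, while lt, gt, and eq are not. The explicit bunordered branches handle the two cases where that default disagrees with the intended IEEE semantics:

    // a <= b must be false on NaN, but a plain 'le' branch (FG clear) would
    // be taken, so NaN is routed to the false label first.
    bool le_ieee(double a, double b) { return a <= b; }         // NaN: false
    // 'gt' here stands for the negated form !(a <= b), which must be true
    // on NaN, so NaN is routed to the true label.
    bool not_le_ieee(double a, double b) { return !(a <= b); }  // NaN: true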
1010
1011 void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
1012 if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
1013 }
1014
1015
1016 // Assembles boolean materializations after an instruction.
1017 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
1018 FlagsCondition condition) {
1019 PPCOperandConverter i(this, instr);
1020 Label done;
1021 ArchOpcode op = instr->arch_opcode();
1022 bool check_unordered = (op == kPPC_CmpFloat64);
1023 CRegister cr = cr0;
1024
1025 // Overflow checked for add/sub only.
1026 DCHECK((condition != kOverflow && condition != kNotOverflow) ||
1027 (op == kPPC_AddWithOverflow32 || op == kPPC_SubWithOverflow32));
1028
1029 // Materialize a full 32-bit 1 or 0 value. The result register is always the
1030 // last output of the instruction.
1031 DCHECK_NE(0u, instr->OutputCount());
1032 Register reg = i.OutputRegister(instr->OutputCount() - 1);
1033
1034 Condition cond = FlagsConditionToCondition(condition);
1035 switch (cond) {
1036 case eq:
1037 case lt:
1038 __ li(reg, Operand::Zero());
1039 __ li(kScratchReg, Operand(1));
1040 __ isel(cond, reg, kScratchReg, reg, cr);
1041 break;
1042 case ne:
1043 case ge:
1044 __ li(reg, Operand(1));
1045 __ isel(NegateCondition(cond), reg, r0, reg, cr);
1046 break;
1047 case gt:
1048 if (check_unordered) {
1049 __ li(reg, Operand(1));
1050 __ li(kScratchReg, Operand::Zero());
1051 __ bunordered(&done, cr);
1052 __ isel(cond, reg, reg, kScratchReg, cr);
1053 } else {
1054 __ li(reg, Operand::Zero());
1055 __ li(kScratchReg, Operand(1));
1056 __ isel(cond, reg, kScratchReg, reg, cr);
1057 }
1058 break;
1059 case le:
1060 if (check_unordered) {
1061 __ li(reg, Operand::Zero());
1062 __ li(kScratchReg, Operand(1));
1063 __ bunordered(&done, cr);
1064 __ isel(NegateCondition(cond), reg, r0, kScratchReg, cr);
1065 } else {
1066 __ li(reg, Operand(1));
1067 __ isel(NegateCondition(cond), reg, r0, reg, cr);
1068 }
1069 break;
1070 default:
1071 UNREACHABLE();
1072 break;
1073 }
1074 __ bind(&done);
1075 }
1076
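The materializations above are branchless thanks to isel, PPC's conditional register select: isel(cond, dst, a, b) performs dst = cond ? a : b, with the quirk that r0 as the first source reads as the constant zero in this instruction. The eq/lt arm, modeled in C++:

    int materialize(bool cond) {
      int reg = 0;                  // li reg, 0
      int scratch = 1;              // li kScratchReg, 1
      return cond ? scratch : reg;  // isel(cond, reg, kScratchReg, reg, cr)
    }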
1077
1078 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
1079 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
1080 isolate(), deoptimization_id, Deoptimizer::LAZY);
1081 __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
1082 }
1083
1084
1085 void CodeGenerator::AssemblePrologue() {
1086 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
1087 if (descriptor->kind() == CallDescriptor::kCallAddress) {
1088 #if ABI_USES_FUNCTION_DESCRIPTORS
1089 __ function_descriptor();
1090 #endif
1091 int register_save_area_size = 0;
1092 RegList frame_saves = fp.bit();
1093 __ mflr(r0);
1094 #if V8_OOL_CONSTANT_POOL
1095 __ Push(r0, fp, kConstantPoolRegister);
1096 // Adjust FP to point to saved FP.
1097 __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
1098 register_save_area_size += kPointerSize;
1099 frame_saves |= kConstantPoolRegister.bit();
1100 #else
1101 __ Push(r0, fp);
1102 __ mr(fp, sp);
1103 #endif
1104 // Save callee-saved registers.
1105 const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
1106 for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
1107 if (!((1 << i) & saves)) continue;
1108 register_save_area_size += kPointerSize;
1109 }
1110 frame()->SetRegisterSaveAreaSize(register_save_area_size);
1111 __ MultiPush(saves);
1112 } else if (descriptor->IsJSFunctionCall()) {
1113 CompilationInfo* info = this->info();
1114 __ Prologue(info->IsCodePreAgingActive());
1115 frame()->SetRegisterSaveAreaSize(
1116 StandardFrameConstants::kFixedFrameSizeFromFp);
1117 } else {
1118 __ StubPrologue();
1119 frame()->SetRegisterSaveAreaSize(
1120 StandardFrameConstants::kFixedFrameSizeFromFp);
1121 }
1122 int stack_slots = frame()->GetSpillSlotCount();
1123
1124 if (info()->is_osr()) {
1125 // TurboFan OSR-compiled functions cannot be entered directly.
1126 __ Abort(kShouldNotDirectlyEnterOsrFunction);
1127
1128 // Unoptimized code jumps directly to this entrypoint while the unoptimized
1129 // frame is still on the stack. Optimized code uses OSR values directly from
1130 // the unoptimized frame. Thus, all that needs to be done is to allocate the
1131 // remaining stack slots.
1132 if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
1133 osr_pc_offset_ = __ pc_offset();
1134 DCHECK(stack_slots >= frame()->GetOsrStackSlotCount());
1135 stack_slots -= frame()->GetOsrStackSlotCount();
1136 }
1137
1138 if (stack_slots > 0) {
1139 __ Add(sp, sp, -stack_slots * kPointerSize, r0);
1140 }
1141 }
1142
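For orientation, the kCallAddress path above lays out the frame as follows (a sketch; the constant-pool slot exists only in V8_OOL_CONSTANT_POOL builds):

    // higher addresses
    //   saved LR                 (mflr r0; Push)
    //   saved FP                 (fp points at this slot after the prologue)
    //   [kConstantPoolRegister]  (V8_OOL_CONSTANT_POOL builds only)
    //   callee-saved registers   (MultiPush(saves))
    //   spill slots              (sp -= stack_slots * kPointerSize)
    // lower addresses (sp)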
1143
1144 void CodeGenerator::AssembleReturn() {
1145 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
1146 if (descriptor->kind() == CallDescriptor::kCallAddress) {
1147 if (frame()->GetRegisterSaveAreaSize() > 0) {
1148 // Remove this frame's spill slots first.
1149 int stack_slots = frame()->GetSpillSlotCount();
1150 if (stack_slots > 0) {
1151 __ Add(sp, sp, stack_slots * kPointerSize, r0);
1152 }
1153 // Restore registers.
1154 RegList frame_saves = fp.bit();
1155 #if V8_OOL_CONSTANT_POOL
1156 frame_saves |= kConstantPoolRegister.bit();
1157 #endif
1158 const RegList saves = descriptor->CalleeSavedRegisters() & ~frame_saves;
1159 if (saves != 0) {
1160 __ MultiPop(saves);
1161 }
1162 }
1163 __ LeaveFrame(StackFrame::MANUAL);
1164 __ Ret();
1165 } else {
1166 int pop_count = descriptor->IsJSFunctionCall()
1167 ? static_cast<int>(descriptor->JSParameterCount())
1168 : 0;
1169 __ LeaveFrame(StackFrame::MANUAL, pop_count * kPointerSize);
1170 __ Ret();
1171 }
1172 }
1173
1174
1175 void CodeGenerator::AssembleMove(InstructionOperand* source,
1176 InstructionOperand* destination) {
1177 PPCOperandConverter g(this, NULL);
1178 // Dispatch on the source and destination operand kinds. Not all
1179 // combinations are possible.
1180 if (source->IsRegister()) {
1181 DCHECK(destination->IsRegister() || destination->IsStackSlot());
1182 Register src = g.ToRegister(source);
1183 if (destination->IsRegister()) {
1184 __ Move(g.ToRegister(destination), src);
1185 } else {
1186 __ StoreP(src, g.ToMemOperand(destination), r0);
1187 }
1188 } else if (source->IsStackSlot()) {
1189 DCHECK(destination->IsRegister() || destination->IsStackSlot());
1190 MemOperand src = g.ToMemOperand(source);
1191 if (destination->IsRegister()) {
1192 __ LoadP(g.ToRegister(destination), src, r0);
1193 } else {
1194 Register temp = kScratchReg;
1195 __ LoadP(temp, src, r0);
1196 __ StoreP(temp, g.ToMemOperand(destination), r0);
1197 }
1198 } else if (source->IsConstant()) {
1199 Constant src = g.ToConstant(source);
1200 if (destination->IsRegister() || destination->IsStackSlot()) {
1201 Register dst =
1202 destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
1203 switch (src.type()) {
1204 case Constant::kInt32:
1205 __ mov(dst, Operand(src.ToInt32()));
1206 break;
1207 case Constant::kInt64:
1208 __ mov(dst, Operand(src.ToInt64()));
1209 break;
1210 case Constant::kFloat32:
1211 __ Move(dst,
1212 isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
1213 break;
1214 case Constant::kFloat64:
1215 __ Move(dst,
1216 isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
1217 break;
1218 case Constant::kExternalReference:
1219 __ mov(dst, Operand(src.ToExternalReference()));
1220 break;
1221 case Constant::kHeapObject:
1222 __ Move(dst, src.ToHeapObject());
1223 break;
1224 case Constant::kRpoNumber:
1225 UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
1226 break;
1227 }
1228 if (destination->IsStackSlot()) {
1229 __ StoreP(dst, g.ToMemOperand(destination), r0);
1230 }
1231 } else {
1232 DoubleRegister dst = destination->IsDoubleRegister()
1233 ? g.ToDoubleRegister(destination)
1234 : kScratchDoubleReg;
1235 double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
1236 : src.ToFloat64();
1237 __ LoadDoubleLiteral(dst, value, kScratchReg);
1238 if (destination->IsDoubleStackSlot()) {
1239 __ StoreDouble(dst, g.ToMemOperand(destination), r0);
1240 }
1241 }
1242 } else if (source->IsDoubleRegister()) {
1243 DoubleRegister src = g.ToDoubleRegister(source);
1244 if (destination->IsDoubleRegister()) {
1245 DoubleRegister dst = g.ToDoubleRegister(destination);
1246 __ Move(dst, src);
1247 } else {
1248 DCHECK(destination->IsDoubleStackSlot());
1249 __ StoreDouble(src, g.ToMemOperand(destination), r0);
1250 }
1251 } else if (source->IsDoubleStackSlot()) {
1252 DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
1253 MemOperand src = g.ToMemOperand(source);
1254 if (destination->IsDoubleRegister()) {
1255 __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
1256 } else {
1257 DoubleRegister temp = kScratchDoubleReg;
1258 __ LoadDouble(temp, src, r0);
1259 __ StoreDouble(temp, g.ToMemOperand(destination), r0);
1260 }
1261 } else {
1262 UNREACHABLE();
1263 }
1264 }
1265
1266
1267 void CodeGenerator::AssembleSwap(InstructionOperand* source,
1268 InstructionOperand* destination) {
1269 PPCOperandConverter g(this, NULL);
1270 // Dispatch on the source and destination operand kinds. Not all
1271 // combinations are possible.
1272 if (source->IsRegister()) {
1273 // Register-register.
1274 Register temp = kScratchReg;
1275 Register src = g.ToRegister(source);
1276 if (destination->IsRegister()) {
1277 Register dst = g.ToRegister(destination);
1278 __ mr(temp, src);
1279 __ mr(src, dst);
1280 __ mr(dst, temp);
1281 } else {
1282 DCHECK(destination->IsStackSlot());
1283 MemOperand dst = g.ToMemOperand(destination);
1284 __ mr(temp, src);
1285 __ LoadP(src, dst);
1286 __ StoreP(temp, dst);
1287 }
1288 #if V8_TARGET_ARCH_PPC64
1289 } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
1290 #else
1291 } else if (source->IsStackSlot()) {
1292 #endif
1293 DCHECK(destination->IsStackSlot());
1294 Register temp_0 = kScratchReg;
1295 Register temp_1 = r0;
1296 MemOperand src = g.ToMemOperand(source);
1297 MemOperand dst = g.ToMemOperand(destination);
1298 __ LoadP(temp_0, src);
1299 __ LoadP(temp_1, dst);
1300 __ StoreP(temp_0, dst);
1301 __ StoreP(temp_1, src);
1302 } else if (source->IsDoubleRegister()) {
1303 DoubleRegister temp = kScratchDoubleReg;
1304 DoubleRegister src = g.ToDoubleRegister(source);
1305 if (destination->IsDoubleRegister()) {
1306 DoubleRegister dst = g.ToDoubleRegister(destination);
1307 __ fmr(temp, src);
1308 __ fmr(src, dst);
1309 __ fmr(dst, temp);
1310 } else {
1311 DCHECK(destination->IsDoubleStackSlot());
1312 MemOperand dst = g.ToMemOperand(destination);
1313 __ fmr(temp, src);
1314 __ lfd(src, dst);
1315 __ stfd(temp, dst);
1316 }
1317 #if !V8_TARGET_ARCH_PPC64
1318 } else if (source->IsDoubleStackSlot()) {
1319 DCHECK(destination->IsDoubleStackSlot());
1320 DoubleRegister temp_0 = kScratchDoubleReg;
1321 DoubleRegister temp_1 = d0;
1322 MemOperand src = g.ToMemOperand(source);
1323 MemOperand dst = g.ToMemOperand(destination);
1324 __ lfd(temp_0, src);
1325 __ lfd(temp_1, dst);
1326 __ stfd(temp_0, dst);
1327 __ stfd(temp_1, src);
1328 #endif
1329 } else {
1330 // No other combinations are possible.
1331 UNREACHABLE();
1332 }
1333 }
1334
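Every swap case reduces to the classic three-move rotation through a scratch register (or a scratch pair for the memory-memory cases, which load both sides before storing either). The register-register case in C++ terms:

    void swap(int& src, int& dst) {
      int temp = src;  // mr temp, src
      src = dst;       // mr src, dst
      dst = temp;      // mr dst, temp
    }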
1335
1336 void CodeGenerator::AddNopForSmiCodeInlining() {
1337 // We do not insert nops for inlined Smi code.
1338 }
1339
1340
1341 void CodeGenerator::EnsureSpaceForLazyDeopt() {
1342 int space_needed = Deoptimizer::patch_size();
1343 if (!info()->IsStub()) {
1344 // Ensure that we have enough space after the previous lazy-bailout
1345 // instruction for patching the code here.
1346 int current_pc = masm()->pc_offset();
1347 if (current_pc < last_lazy_deopt_pc_ + space_needed) {
1348 int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
1349 DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
1350 while (padding_size > 0) {
1351 __ nop();
1352 padding_size -= v8::internal::Assembler::kInstrSize;
1353 }
1354 }
1355 }
1356 MarkLazyDeoptSite();
1357 }
1358
1359 #undef __
1360
1361 } // namespace compiler
1362 } // namespace internal
1363 } // namespace v8