Chromium Code Reviews

Side by Side Diff: src/compiler/arm/code-generator-arm.cc

Issue 426233002: Land the Fan (disabled) (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Review feedback, rebase and "git cl format" (created 6 years, 4 months ago)
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm/macro-assembler-arm.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/node-properties-inl.h"
#include "src/scopes.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


#define kScratchReg r9


// Adds Arm-specific methods to convert InstructionOperands.
class ArmOperandConverter : public InstructionOperandConverter {
 public:
  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  SBit OutputSBit() const {
    switch (instr_->flags_mode()) {
      case kFlags_branch:
      case kFlags_set:
        return SetCC;
      case kFlags_none:
        return LeaveCC;
    }
    UNREACHABLE();
    return LeaveCC;
  }
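
  // Illustrative sketch (editor's addition, not part of this CL): with
  // kFlags_set the emitted ALU instruction updates the condition flags, e.g.
  //   adds r0, r1, r2   ; SetCC, N/Z/C/V available to a later branch
  // while kFlags_none leaves them alone:
  //   add r0, r1, r2    ; LeaveCC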

  Operand InputImmediate(int index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
      case Constant::kExternalReference:
      case Constant::kHeapObject:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }

  Operand InputOperand2(int first_index) {
    const int index = first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Offset_RI:
      case kMode_Offset_RR:
        break;
      case kMode_Operand2_I:
        return InputImmediate(index + 0);
      case kMode_Operand2_R:
        return Operand(InputRegister(index + 0));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_R:
        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSL_R:
        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_R:
        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
    }
    UNREACHABLE();
    return Operand::Zero();
  }
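
  // Illustrative sketch (editor's addition): for kMode_Operand2_R_LSL_I the
  // instruction carries a register input followed by a shift-amount input,
  // so inputs (r1, 2) combined with kArmAdd would assemble to something like
  //   add r0, r3, r1, LSL #2   ; flexible shifted-register second operand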

  MemOperand InputOffset(int* first_index) {
    const int index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_I:
      case kMode_Operand2_R:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ASR_R:
      case kMode_Operand2_R_LSL_I:
      case kMode_Operand2_R_LSL_R:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_LSR_R:
        break;
      case kMode_Offset_RI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_Offset_RR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }
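
  // Illustrative sketch (editor's addition): for kMode_Offset_RI the first
  // two inputs are a base register and an immediate, so inputs (r1, 8) yield
  // the operand [r1, #8]. *first_index is advanced past the consumed inputs,
  // which lets the store cases below find the value register after them.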

  MemOperand InputOffset() {
    int index = 0;
    return InputOffset(&index);
  }

  MemOperand ToMemOperand(InstructionOperand* op) const {
    ASSERT(op != NULL);
    ASSERT(!op->IsRegister());
    ASSERT(!op->IsDoubleRegister());
    ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
    // The linkage computes where all spill slots are located.
    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};
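
// Illustrative sketch (editor's addition): ToMemOperand turns a spill slot
// into a frame-relative address. For a slot that the linkage places 8 bytes
// below the frame pointer (offset assumed purely for illustration), the
// result is MemOperand(fp, -8), i.e. the ARM operand [fp, #-8].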


// Assembles an instruction after register allocation, producing machine code.
void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
  ArmOperandConverter i(this, instr);

  switch (ArchOpcodeField::decode(instr->opcode())) {
    case kArchJmp:
      __ b(code_->GetLabel(i.InputBlock(0)));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchNop:
      // Don't emit code for nops.
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchRet:
      AssembleReturn();
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArchDeoptimize: {
      int deoptimization_id = MiscField::decode(instr->opcode());
      BuildTranslation(instr, deoptimization_id);

      Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
          isolate(), deoptimization_id, Deoptimizer::LAZY);
      __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmAdd:
      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmAnd:
      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
              i.OutputSBit());
      break;
    case kArmBic:
      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmMul:
      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.OutputSBit());
      break;
    case kArmMla:
      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2), i.OutputSBit());
      break;
    case kArmMls: {
      CpuFeatureScope scope(masm(), MLS);
      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
             i.InputRegister(2));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
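    // Editor's note (assumed semantics, not stated in this CL): mls above is
    // a multiply-subtract, dst = input2 - input0 * input1, e.g.
    //   mls r0, r1, r2, r3   ; r0 = r3 - r1 * r2
    // The CpuFeatureScope guard exists because MLS is missing on older cores.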
    case kArmSdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUdiv: {
      CpuFeatureScope scope(masm(), SUDIV);
      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmMov:
      __ Move(i.OutputRegister(), i.InputOperand2(0));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmMvn:
      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
      break;
    case kArmOrr:
      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmEor:
      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmSub:
      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmRsb:
      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
             i.OutputSBit());
      break;
    case kArmBfc: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmUbfx: {
      CpuFeatureScope scope(masm(), ARMv7);
      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
              i.InputInt8(2));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmCallCodeObject: {
      if (instr->InputAt(0)->IsImmediate()) {
        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
        __ Call(code, RelocInfo::CODE_TARGET);
        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                        Safepoint::kNoLazyDeopt);
      } else {
        Register reg = i.InputRegister(0);
        int entry = Code::kHeaderSize - kHeapObjectTag;
        __ ldr(reg, MemOperand(reg, entry));
        __ Call(reg);
        RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                        Safepoint::kNoLazyDeopt);
      }
      bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
      if (lazy_deopt) {
        RecordLazyDeoptimizationEntry(instr);
      }
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmCallJSFunction: {
      Register func = i.InputRegister(0);

      // TODO(jarin) The load of the context should be separated from the call.
      __ ldr(cp, FieldMemOperand(func, JSFunction::kContextOffset));
      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(ip);

      RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
                      Safepoint::kNoLazyDeopt);
      RecordLazyDeoptimizationEntry(instr);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmCallAddress: {
      DirectCEntryStub stub(isolate());
      stub.GenerateCall(masm(), i.InputRegister(0));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmPush:
      __ Push(i.InputRegister(0));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmDrop: {
      int words = MiscField::decode(instr->opcode());
      __ Drop(words);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmCmp:
      __ cmp(i.InputRegister(0), i.InputOperand2(1));
      ASSERT_EQ(SetCC, i.OutputSBit());
      break;
    case kArmCmn:
      __ cmn(i.InputRegister(0), i.InputOperand2(1));
      ASSERT_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTst:
      __ tst(i.InputRegister(0), i.InputOperand2(1));
      ASSERT_EQ(SetCC, i.OutputSBit());
      break;
    case kArmTeq:
      __ teq(i.InputRegister(0), i.InputOperand2(1));
      ASSERT_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVcmpF64:
      __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
                               i.InputDoubleRegister(1));
      ASSERT_EQ(SetCC, i.OutputSBit());
      break;
    case kArmVaddF64:
      __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVsubF64:
      __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmulF64:
      __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlaF64:
      __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
              i.InputDoubleRegister(2));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmlsF64:
      __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
              i.InputDoubleRegister(2));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVdivF64:
      __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
              i.InputDoubleRegister(1));
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmVmodF64: {
      // TODO(bmeurer): We should really get rid of this special instruction,
      // and generate a CallAddress instruction instead.
      FrameScope scope(masm(), StackFrame::MANUAL);
      __ PrepareCallCFunction(0, 2, kScratchReg);
      __ MovToFloatParameters(i.InputDoubleRegister(0),
                              i.InputDoubleRegister(1));
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(i.OutputDoubleRegister());
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
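    // Editor's note (sketch): conceptually the kArmVmodF64 sequence above is
    // an out-of-line call computing the C semantics of
    //   result = fmod(input0, input1);
    // with the operands passed in the float parameter registers and the
    // result read back from the float result register.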
    case kArmVnegF64:
      __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArmVcvtF64S32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtF64U32: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vmov(scratch, i.InputRegister(0));
      __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtS32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
      __ vmov(i.OutputRegister(), scratch);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmVcvtU32F64: {
      SwVfpRegister scratch = kScratchDoubleReg.low();
      __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
      __ vmov(i.OutputRegister(), scratch);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLoadWord8:
      __ ldrb(i.OutputRegister(), i.InputOffset());
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmStoreWord8: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strb(i.InputRegister(index), operand);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLoadWord16:
      __ ldrh(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStoreWord16: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ strh(i.InputRegister(index), operand);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmLoadWord32:
      __ ldr(i.OutputRegister(), i.InputOffset());
      break;
    case kArmStoreWord32: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ str(i.InputRegister(index), operand);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmFloat64Load:
      __ vldr(i.OutputDoubleRegister(), i.InputOffset());
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    case kArmFloat64Store: {
      int index = 0;
      MemOperand operand = i.InputOffset(&index);
      __ vstr(i.InputDoubleRegister(index), operand);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
    case kArmStoreWriteBarrier: {
      Register object = i.InputRegister(0);
      Register index = i.InputRegister(1);
      Register value = i.InputRegister(2);
      __ add(index, object, index);
      __ str(value, MemOperand(index));
      SaveFPRegsMode mode =
          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
      __ RecordWrite(object, index, value, lr_status, mode);
      ASSERT_EQ(LeaveCC, i.OutputSBit());
      break;
    }
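    // Editor's note (sketch): kArmStoreWriteBarrier above first forms the
    // field address and performs the store,
    //   add index, object, index   ; index now holds the field address
    //   str value, [index]
    // then calls RecordWrite so the GC's remembered set sees the new pointer.
    // lr has not been saved at this point, so RecordWrite must preserve it.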
  }
}


// Assembles branches after an instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr,
                                       FlagsCondition condition) {
  ArmOperandConverter i(this, instr);
  Label done;

  // Emit a branch. The true and false targets are always the last two inputs
  // to the instruction.
  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
  bool fallthru = IsNextInAssemblyOrder(fblock);
  Label* tlabel = code()->GetLabel(tblock);
  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
  switch (condition) {
    case kUnorderedEqual:
      __ b(vs, flabel);
    // Fall through.
    case kEqual:
      __ b(eq, tlabel);
      break;
    case kUnorderedNotEqual:
      __ b(vs, tlabel);
    // Fall through.
    case kNotEqual:
      __ b(ne, tlabel);
      break;
    case kSignedLessThan:
      __ b(lt, tlabel);
      break;
    case kSignedGreaterThanOrEqual:
      __ b(ge, tlabel);
      break;
    case kSignedLessThanOrEqual:
      __ b(le, tlabel);
      break;
    case kSignedGreaterThan:
      __ b(gt, tlabel);
      break;
    case kUnorderedLessThan:
      __ b(vs, flabel);
    // Fall through.
    case kUnsignedLessThan:
      __ b(lo, tlabel);
      break;
    case kUnorderedGreaterThanOrEqual:
      __ b(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      __ b(hs, tlabel);
      break;
    case kUnorderedLessThanOrEqual:
      __ b(vs, flabel);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      __ b(ls, tlabel);
      break;
    case kUnorderedGreaterThan:
      __ b(vs, tlabel);
    // Fall through.
    case kUnsignedGreaterThan:
      __ b(hi, tlabel);
      break;
  }
  if (!fallthru) __ b(flabel);  // no fallthru to flabel.
  __ bind(&done);
}
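
// Editor's note (illustrative sketch): the kUnordered* cases assume a
// preceding VFP compare set the V flag when an operand was NaN. For
// kUnorderedLessThan the emitted sequence is therefore roughly
//   bvs flabel   ; NaN operand: the comparison is false
//   blo tlabel   ; otherwise branch on ordinary unsigned-less-than
// followed by an unconditional branch to flabel unless the false block is
// next in assembly order.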


// Assembles boolean materializations after an instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  ArmOperandConverter i(this, instr);
  Label done;

  // Materialize a full 32-bit 1 or 0 value.
  Label check;
  Register reg = i.OutputRegister();
  Condition cc = kNoCondition;
  switch (condition) {
    case kUnorderedEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
    // Fall through.
    case kEqual:
      cc = eq;
      break;
    case kUnorderedNotEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
    // Fall through.
    case kNotEqual:
      cc = ne;
      break;
    case kSignedLessThan:
      cc = lt;
      break;
    case kSignedGreaterThanOrEqual:
      cc = ge;
      break;
    case kSignedLessThanOrEqual:
      cc = le;
      break;
    case kSignedGreaterThan:
      cc = gt;
      break;
    case kUnorderedLessThan:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
    // Fall through.
    case kUnsignedLessThan:
      cc = lo;
      break;
    case kUnorderedGreaterThanOrEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
    // Fall through.
    case kUnsignedGreaterThanOrEqual:
      cc = hs;
      break;
    case kUnorderedLessThanOrEqual:
      __ b(vc, &check);
      __ mov(reg, Operand(0));
      __ b(&done);
    // Fall through.
    case kUnsignedLessThanOrEqual:
      cc = ls;
      break;
    case kUnorderedGreaterThan:
      __ b(vc, &check);
      __ mov(reg, Operand(1));
      __ b(&done);
    // Fall through.
    case kUnsignedGreaterThan:
      cc = hi;
      break;
  }
  __ bind(&check);
  __ mov(reg, Operand(0));
  __ mov(reg, Operand(1), LeaveCC, cc);
  __ bind(&done);
}
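
// Editor's note (illustrative sketch): for an ordered condition such as
// kSignedLessThan the materialization above boils down to
//   mov r0, #0
//   movlt r0, #1   ; conditionally executed only when lt holds
// The unordered cases first branch on the V flag to produce the fixed NaN
// answer before reaching the same two-instruction tail.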


void CodeGenerator::AssemblePrologue() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    __ Push(lr, fp);
    __ mov(fp, sp);
    const RegList saves = descriptor->CalleeSavedRegisters();
    if (saves != 0) {  // Save callee-saved registers.
      int register_save_area_size = 0;
      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
        if (!((1 << i) & saves)) continue;
        register_save_area_size += kPointerSize;
      }
      frame()->SetRegisterSaveAreaSize(register_save_area_size);
      __ stm(db_w, sp, saves);
    }
  } else if (descriptor->IsJSFunctionCall()) {
    CompilationInfo* info = linkage()->info();
    __ Prologue(info->IsCodePreAgingActive());
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);

    // Sloppy mode functions and builtins need to replace the receiver with the
    // global proxy when called as functions (without an explicit receiver
    // object).
    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
    if (info->strict_mode() == SLOPPY && !info->is_native()) {
      Label ok;
      // +2 for return address and saved frame pointer.
      int receiver_slot = info->scope()->num_parameters() + 2;
      __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
      __ b(ne, &ok);
      __ ldr(r2, GlobalObjectOperand());
      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
      __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
      __ bind(&ok);
    }

  } else {
    __ StubPrologue();
    frame()->SetRegisterSaveAreaSize(
        StandardFrameConstants::kFixedFrameSizeFromFp);
  }
  int stack_slots = frame()->GetSpillSlotCount();
  if (stack_slots > 0) {
    __ sub(sp, sp, Operand(stack_slots * kPointerSize));
  }
}
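
// Editor's note (sketch of the resulting kCallAddress frame, assuming two
// callee-saved registers r4/r5 and one spill slot, purely for illustration):
//   [fp, #4]   saved lr
//   [fp, #0]   saved fp        <- fp
//   [fp, #-4]  callee-saved r5 (stored by stm db_w)
//   [fp, #-8]  callee-saved r4
//   [fp, #-12] spill slot 0    <- sp after the final sub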


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->kind() == CallDescriptor::kCallAddress) {
    if (frame()->GetRegisterSaveAreaSize() > 0) {
      // Remove this frame's spill slots first.
      int stack_slots = frame()->GetSpillSlotCount();
      if (stack_slots > 0) {
        __ add(sp, sp, Operand(stack_slots * kPointerSize));
      }
      // Restore registers.
      const RegList saves = descriptor->CalleeSavedRegisters();
      if (saves != 0) {
        __ ldm(ia_w, sp, saves);
      }
    }
    __ mov(sp, fp);
    __ ldm(ia_w, sp, fp.bit() | lr.bit());
    __ Ret();
  } else {
    __ mov(sp, fp);
    __ ldm(ia_w, sp, fp.bit() | lr.bit());
    int pop_count =
        descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
    __ Drop(pop_count);
    __ Ret();
  }
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ mov(g.ToRegister(destination), src);
    } else {
      __ str(src, g.ToMemOperand(destination));
    }
  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ ldr(g.ToRegister(destination), src);
    } else {
      Register temp = kScratchReg;
      __ ldr(temp, src);
      __ str(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsConstant()) {
    if (destination->IsRegister() || destination->IsStackSlot()) {
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      Constant src = g.ToConstant(source);
      switch (src.type()) {
        case Constant::kInt32:
          __ mov(dst, Operand(src.ToInt32()));
          break;
        case Constant::kInt64:
          UNREACHABLE();
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject:
          __ Move(dst, src.ToHeapObject());
          break;
      }
      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
    } else if (destination->IsDoubleRegister()) {
      DwVfpRegister result = g.ToDoubleRegister(destination);
      __ vmov(result, g.ToDouble(source));
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      DwVfpRegister temp = kScratchDoubleReg;
      __ vmov(temp, g.ToDouble(source));
      __ vstr(temp, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      __ vstr(src, g.ToMemOperand(destination));
    }
  } else if (source->IsDoubleStackSlot()) {
    ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsDoubleRegister()) {
      __ vldr(g.ToDoubleRegister(destination), src);
    } else {
      DwVfpRegister temp = kScratchDoubleReg;
      __ vldr(temp, src);
      __ vstr(temp, g.ToMemOperand(destination));
    }
  } else {
    UNREACHABLE();
  }
}
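
// Editor's note (illustrative sketch): a constant moved to a stack slot goes
// through kScratchReg, e.g. for the 32-bit constant 42 and an assumed slot
// at [fp, #-8]:
//   mov r9, #42
//   str r9, [fp, #-8]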


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  ArmOperandConverter g(this, NULL);
  // Dispatch on the source and destination operand kinds. Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      ASSERT(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mov(temp, src);
      __ ldr(src, dst);
      __ str(temp, dst);
    }
  } else if (source->IsStackSlot()) {
    ASSERT(destination->IsStackSlot());
    Register temp_0 = kScratchReg;
    SwVfpRegister temp_1 = kScratchDoubleReg.low();
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ ldr(temp_0, src);
    __ vldr(temp_1, dst);
    __ str(temp_0, dst);
    __ vstr(temp_1, src);
  } else if (source->IsDoubleRegister()) {
    DwVfpRegister temp = kScratchDoubleReg;
    DwVfpRegister src = g.ToDoubleRegister(source);
    if (destination->IsDoubleRegister()) {
      DwVfpRegister dst = g.ToDoubleRegister(destination);
      __ Move(temp, src);
      __ Move(src, dst);
      __ Move(dst, temp);
    } else {
      ASSERT(destination->IsDoubleStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ Move(temp, src);
      __ vldr(src, dst);
      __ vstr(temp, dst);
    }
  } else if (source->IsDoubleStackSlot()) {
    ASSERT(destination->IsDoubleStackSlot());
    Register temp_0 = kScratchReg;
    DwVfpRegister temp_1 = kScratchDoubleReg;
    MemOperand src0 = g.ToMemOperand(source);
    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
    MemOperand dst0 = g.ToMemOperand(destination);
    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
    __ vldr(temp_1, dst0);  // Save destination in temp_1.
    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
    __ str(temp_0, dst0);
    __ ldr(temp_0, src1);
    __ str(temp_0, dst1);
    __ vstr(temp_1, src0);
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
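
// Editor's note (illustrative sketch): the word-sized slot-to-slot swap above
// avoids needing a second core scratch register by parking one value in the
// low half of the scratch VFP register (concrete register names assumed):
//   ldr r9, [src]
//   vldr s30, [dst]
//   str r9, [dst]
//   vstr s30, [src]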


void CodeGenerator::AddNopForSmiCodeInlining() {
  // On 32-bit ARM we do not insert nops for inlined Smi code.
  UNREACHABLE();
}

#ifdef DEBUG

// Checks whether the code between start_pc and end_pc is a no-op.
bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
                                            int end_pc) {
  return false;
}

#endif  // DEBUG

#undef __
}
}
}  // namespace v8::internal::compiler