Chromium Code Reviews

Side by Side Diff: src/compiler/ia32/code-generator-ia32.cc

Issue 426233002: Land the Fan (disabled) (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Review feedback, rebase and "git cl format" Created 6 years, 4 months ago
1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/compiler/code-generator.h"
6
7 #include "src/compiler/code-generator-impl.h"
8 #include "src/compiler/gap-resolver.h"
9 #include "src/compiler/node-matchers.h"
10 #include "src/compiler/node-properties-inl.h"
11 #include "src/ia32/assembler-ia32.h"
12 #include "src/ia32/macro-assembler-ia32.h"
13 #include "src/scopes.h"
14
15 namespace v8 {
16 namespace internal {
17 namespace compiler {
18
19 #define __ masm()->
20
21
22 // Adds IA-32 specific methods for decoding operands.
23 class IA32OperandConverter : public InstructionOperandConverter {
24 public:
25 IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
26 : InstructionOperandConverter(gen, instr) {}
27
28 Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
29
30 Immediate InputImmediate(int index) {
31 return ToImmediate(instr_->InputAt(index));
32 }
33
34 Operand OutputOperand() { return ToOperand(instr_->Output()); }
35
36 Operand TempOperand(int index) { return ToOperand(instr_->TempAt(index)); }
37
38 Operand ToOperand(InstructionOperand* op, int extra = 0) {
39 if (op->IsRegister()) {
40 ASSERT(extra == 0);
41 return Operand(ToRegister(op));
42 } else if (op->IsDoubleRegister()) {
43 ASSERT(extra == 0);
44 return Operand(ToDoubleRegister(op));
45 }
46 ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
47 // The linkage computes where all spill slots are located.
48 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
49 return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
50 }
51
52 Operand HighOperand(InstructionOperand* op) {
53 ASSERT(op->IsDoubleStackSlot());
54 return ToOperand(op, kPointerSize);
55 }
56
57 Immediate ToImmediate(InstructionOperand* operand) {
58 Constant constant = ToConstant(operand);
59 switch (constant.type()) {
60 case Constant::kInt32:
61 return Immediate(constant.ToInt32());
62 case Constant::kFloat64:
63 return Immediate(
64 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
65 case Constant::kExternalReference:
66 return Immediate(constant.ToExternalReference());
67 case Constant::kHeapObject:
68 return Immediate(constant.ToHeapObject());
69 case Constant::kInt64:
70 break;
71 }
72 UNREACHABLE();
73 return Immediate(-1);
74 }
75
76 Operand MemoryOperand(int* first_input) {
77 const int offset = *first_input;
78 switch (AddressingModeField::decode(instr_->opcode())) {
79 case kMode_MR1I:
80 *first_input += 2;
81 return Operand(InputRegister(offset + 0), InputRegister(offset + 1),
82 times_1,
83 0); // TODO(dcarney): K != 0
84 case kMode_MRI:
85 *first_input += 2;
86 return Operand::ForRegisterPlusImmediate(InputRegister(offset + 0),
87 InputImmediate(offset + 1));
88 case kMode_MI:
89 *first_input += 1;
90 return Operand(InputImmediate(offset + 0));
91 default:
92 UNREACHABLE();
93 return Operand(no_reg);
94 }
95 }
96
97 Operand MemoryOperand() {
98 int first_input = 0;
99 return MemoryOperand(&first_input);
100 }
101 };
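
MemoryOperand() above dispatches on AddressingModeField::decode(instr_->opcode()), and the code below uses ArchOpcodeField and MiscField the same way: the instruction opcode is a single integer with several bit-packed fields. A minimal standalone sketch of that encode/decode pattern (the field layout, widths, and names here are illustrative, not the real v8 definitions):

#include <cassert>
#include <cstdint>

// Toy bit-field helper: a value of type T stored at bits [kShift, kShift + kSize).
template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

enum ArchOpcodeSketch { kArchNopSketch, kIA32AddSketch, kSSELoadSketch };
enum AddressingModeSketch { kMode_None_S, kMode_MI_S, kMode_MRI_S, kMode_MR1I_S };

typedef BitFieldSketch<ArchOpcodeSketch, 0, 8> ArchOpcodeFieldSketch;
typedef BitFieldSketch<AddressingModeSketch, 8, 5> AddressingModeFieldSketch;

int main() {
  uint32_t opcode = ArchOpcodeFieldSketch::encode(kSSELoadSketch) |
                    AddressingModeFieldSketch::encode(kMode_MRI_S);
  assert(ArchOpcodeFieldSketch::decode(opcode) == kSSELoadSketch);
  assert(AddressingModeFieldSketch::decode(opcode) == kMode_MRI_S);
  return 0;
}
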
102
103
104 static bool HasImmediateInput(Instruction* instr, int index) {
105 return instr->InputAt(index)->IsImmediate();
106 }
107
108
109 // Assembles an instruction after register allocation, producing machine code.
110 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
111 IA32OperandConverter i(this, instr);
112
113 switch (ArchOpcodeField::decode(instr->opcode())) {
114 case kArchJmp:
115 __ jmp(code()->GetLabel(i.InputBlock(0)));
116 break;
117 case kArchNop:
118 // Don't emit code for nops.
119 break;
120 case kArchRet:
121 AssembleReturn();
122 break;
123 case kArchDeoptimize: {
124 int deoptimization_id = MiscField::decode(instr->opcode());
125 BuildTranslation(instr, deoptimization_id);
126
127 Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
128 isolate(), deoptimization_id, Deoptimizer::LAZY);
129 __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
130 break;
131 }
132 case kIA32Add:
133 if (HasImmediateInput(instr, 1)) {
134 __ add(i.InputOperand(0), i.InputImmediate(1));
135 } else {
136 __ add(i.InputRegister(0), i.InputOperand(1));
137 }
138 break;
139 case kIA32And:
140 if (HasImmediateInput(instr, 1)) {
141 __ and_(i.InputOperand(0), i.InputImmediate(1));
142 } else {
143 __ and_(i.InputRegister(0), i.InputOperand(1));
144 }
145 break;
146 case kIA32Cmp:
147 if (HasImmediateInput(instr, 1)) {
148 __ cmp(i.InputOperand(0), i.InputImmediate(1));
149 } else {
150 __ cmp(i.InputRegister(0), i.InputOperand(1));
151 }
152 break;
153 case kIA32Test:
154 if (HasImmediateInput(instr, 1)) {
155 __ test(i.InputOperand(0), i.InputImmediate(1));
156 } else {
157 __ test(i.InputRegister(0), i.InputOperand(1));
158 }
159 break;
160 case kIA32Imul:
161 if (HasImmediateInput(instr, 1)) {
162 __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
163 } else {
164 __ imul(i.OutputRegister(), i.InputOperand(1));
165 }
166 break;
167 case kIA32Idiv:
168 __ cdq();
169 __ idiv(i.InputOperand(1));
170 break;
171 case kIA32Udiv:
172 __ xor_(edx, edx);
173 __ div(i.InputOperand(1));
174 break;
175 case kIA32Not:
176 __ not_(i.OutputOperand());
177 break;
178 case kIA32Neg:
179 __ neg(i.OutputOperand());
180 break;
181 case kIA32Or:
182 if (HasImmediateInput(instr, 1)) {
183 __ or_(i.InputOperand(0), i.InputImmediate(1));
184 } else {
185 __ or_(i.InputRegister(0), i.InputOperand(1));
186 }
187 break;
188 case kIA32Xor:
189 if (HasImmediateInput(instr, 1)) {
190 __ xor_(i.InputOperand(0), i.InputImmediate(1));
191 } else {
192 __ xor_(i.InputRegister(0), i.InputOperand(1));
193 }
194 break;
195 case kIA32Sub:
196 if (HasImmediateInput(instr, 1)) {
197 __ sub(i.InputOperand(0), i.InputImmediate(1));
198 } else {
199 __ sub(i.InputRegister(0), i.InputOperand(1));
200 }
201 break;
202 case kIA32Shl:
203 if (HasImmediateInput(instr, 1)) {
204 __ shl(i.OutputRegister(), i.InputInt5(1));
205 } else {
206 __ shl_cl(i.OutputRegister());
207 }
208 break;
209 case kIA32Shr:
210 if (HasImmediateInput(instr, 1)) {
211 __ shr(i.OutputRegister(), i.InputInt5(1));
212 } else {
213 __ shr_cl(i.OutputRegister());
214 }
215 break;
216 case kIA32Sar:
217 if (HasImmediateInput(instr, 1)) {
218 __ sar(i.OutputRegister(), i.InputInt5(1));
219 } else {
220 __ sar_cl(i.OutputRegister());
221 }
222 break;
223 case kIA32Push:
224 if (HasImmediateInput(instr, 0)) {
225 __ push(i.InputImmediate(0));
226 } else {
227 __ push(i.InputOperand(0));
228 }
229 break;
230 case kIA32CallCodeObject: {
231 if (HasImmediateInput(instr, 0)) {
232 Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
233 __ call(code, RelocInfo::CODE_TARGET);
234 } else {
235 Register reg = i.InputRegister(0);
236 int entry = Code::kHeaderSize - kHeapObjectTag;
237 __ call(Operand(reg, entry));
238 }
239 RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
240 Safepoint::kNoLazyDeopt);
241
242 bool lazy_deopt = (MiscField::decode(instr->opcode()) == 1);
243 if (lazy_deopt) {
244 RecordLazyDeoptimizationEntry(instr);
245 }
246 AddNopForSmiCodeInlining();
247 break;
248 }
249 case kIA32CallAddress:
250 if (HasImmediateInput(instr, 0)) {
251 // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY.
252 __ call(reinterpret_cast<byte*>(i.InputInt32(0)),
253 RelocInfo::RUNTIME_ENTRY);
254 } else {
255 __ call(i.InputRegister(0));
256 }
257 break;
258 case kPopStack: {
259 int words = MiscField::decode(instr->opcode());
260 __ add(esp, Immediate(kPointerSize * words));
261 break;
262 }
263 case kIA32CallJSFunction: {
264 Register func = i.InputRegister(0);
265
266 // TODO(jarin) The load of the context should be separated from the call.
267 __ mov(esi, FieldOperand(func, JSFunction::kContextOffset));
268 __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
269
270 RecordSafepoint(instr->pointer_map(), Safepoint::kSimple, 0,
271 Safepoint::kNoLazyDeopt);
272 RecordLazyDeoptimizationEntry(instr);
273 break;
274 }
275 case kSSEFloat64Cmp:
276 __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
277 break;
278 case kSSEFloat64Add:
279 __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
280 break;
281 case kSSEFloat64Sub:
282 __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
283 break;
284 case kSSEFloat64Mul:
285 __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
286 break;
287 case kSSEFloat64Div:
288 __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
289 break;
290 case kSSEFloat64Mod: {
291 // TODO(dcarney): alignment is wrong.
292 __ sub(esp, Immediate(kDoubleSize));
293 // Move values to st(0) and st(1).
294 __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
295 __ fld_d(Operand(esp, 0));
296 __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
297 __ fld_d(Operand(esp, 0));
298 // Loop while fprem isn't done.
299 Label mod_loop;
300 __ bind(&mod_loop);
301 // This instruction traps on all kinds of inputs, but we are assuming the
302 // floating-point control word is set to ignore them all.
303 __ fprem();
304 // The following two instructions implicitly use eax.
305 __ fnstsw_ax();
306 __ sahf();
307 __ j(parity_even, &mod_loop);
308 // Move output to stack and clean up.
309 __ fstp(1);
310 __ fstp_d(Operand(esp, 0));
311 __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
312 __ add(esp, Immediate(kDoubleSize));
313 break;
314 }
315 case kSSEFloat64ToInt32:
316 __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
317 break;
318 case kSSEInt32ToFloat64:
319 __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
320 break;
321 case kSSELoad:
322 __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
323 break;
324 case kSSEStore: {
325 int index = 0;
326 Operand operand = i.MemoryOperand(&index);
327 __ movsd(operand, i.InputDoubleRegister(index));
328 break;
329 }
330 case kIA32LoadWord8:
331 __ movzx_b(i.OutputRegister(), i.MemoryOperand());
332 break;
333 case kIA32StoreWord8: {
334 int index = 0;
335 Operand operand = i.MemoryOperand(&index);
336 __ mov_b(operand, i.InputRegister(index));
337 break;
338 }
339 case kIA32StoreWord8I: {
340 int index = 0;
341 Operand operand = i.MemoryOperand(&index);
342 __ mov_b(operand, i.InputInt8(index));
343 break;
344 }
345 case kIA32LoadWord16:
346 __ movzx_w(i.OutputRegister(), i.MemoryOperand());
347 break;
348 case kIA32StoreWord16: {
349 int index = 0;
350 Operand operand = i.MemoryOperand(&index);
351 __ mov_w(operand, i.InputRegister(index));
352 break;
353 }
354 case kIA32StoreWord16I: {
355 int index = 0;
356 Operand operand = i.MemoryOperand(&index);
357 __ mov_w(operand, i.InputInt16(index));
358 break;
359 }
360 case kIA32LoadWord32:
361 __ mov(i.OutputRegister(), i.MemoryOperand());
362 break;
363 case kIA32StoreWord32: {
364 int index = 0;
365 Operand operand = i.MemoryOperand(&index);
366 __ mov(operand, i.InputRegister(index));
367 break;
368 }
369 case kIA32StoreWord32I: {
370 int index = 0;
371 Operand operand = i.MemoryOperand(&index);
372 __ mov(operand, i.InputImmediate(index));
373 break;
374 }
375 case kIA32StoreWriteBarrier: {
376 Register object = i.InputRegister(0);
377 Register index = i.InputRegister(1);
378 Register value = i.InputRegister(2);
379 __ mov(Operand(object, index, times_1, 0), value);
380 __ lea(index, Operand(object, index, times_1, 0));
381 SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
382 ? kSaveFPRegs
383 : kDontSaveFPRegs;
384 __ RecordWrite(object, index, value, mode);
385 break;
386 }
387 }
388 }
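
The kSSEFloat64Mod case above loops on fprem until the x87 C2 flag clears (fnstsw/sahf map C2 onto the parity flag). The result is the partial remainder with a truncated quotient, i.e. C's fmod semantics rather than IEEE remainder; a tiny sketch of the distinction:

#include <cassert>
#include <cmath>

int main() {
  // fprem-style remainder: quotient truncated toward zero (5.5 / 2.0 -> 2).
  assert(std::fmod(5.5, 2.0) == 1.5);
  // IEEE remainder (fprem1) rounds the quotient to nearest (-> 3) instead.
  assert(std::remainder(5.5, 2.0) == -0.5);
  return 0;
}
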
389
390
391 // Assembles branches after an instruction.
392 void CodeGenerator::AssembleArchBranch(Instruction* instr,
393 FlagsCondition condition) {
394 IA32OperandConverter i(this, instr);
395 Label done;
396
397 // Emit a branch. The true and false targets are always the last two inputs
398 // to the instruction.
399 BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
400 BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
401 bool fallthru = IsNextInAssemblyOrder(fblock);
402 Label* tlabel = code()->GetLabel(tblock);
403 Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
404 Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
405 switch (condition) {
406 case kUnorderedEqual:
407 __ j(parity_even, flabel, flabel_distance);
408 // Fall through.
409 case kEqual:
410 __ j(equal, tlabel);
411 break;
412 case kUnorderedNotEqual:
413 __ j(parity_even, tlabel);
414 // Fall through.
415 case kNotEqual:
416 __ j(not_equal, tlabel);
417 break;
418 case kSignedLessThan:
419 __ j(less, tlabel);
420 break;
421 case kSignedGreaterThanOrEqual:
422 __ j(greater_equal, tlabel);
423 break;
424 case kSignedLessThanOrEqual:
425 __ j(less_equal, tlabel);
426 break;
427 case kSignedGreaterThan:
428 __ j(greater, tlabel);
429 break;
430 case kUnorderedLessThan:
431 __ j(parity_even, flabel, flabel_distance);
432 // Fall through.
433 case kUnsignedLessThan:
434 __ j(below, tlabel);
435 break;
436 case kUnorderedGreaterThanOrEqual:
437 __ j(parity_even, tlabel);
438 // Fall through.
439 case kUnsignedGreaterThanOrEqual:
440 __ j(above_equal, tlabel);
441 break;
442 case kUnorderedLessThanOrEqual:
443 __ j(parity_even, flabel, flabel_distance);
444 // Fall through.
445 case kUnsignedLessThanOrEqual:
446 __ j(below_equal, tlabel);
447 break;
448 case kUnorderedGreaterThan:
449 __ j(parity_even, tlabel);
450 // Fall through.
451 case kUnsignedGreaterThan:
452 __ j(above, tlabel);
453 break;
454 }
455 if (!fallthru) __ jmp(flabel, flabel_distance); // no fallthru to flabel.
456 __ bind(&done);
457 }
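
The kUnordered* cases above exist because ucomisd reports a NaN operand as "unordered" through the parity flag, so NaN must be routed to the proper target before the ordinary condition is tested. The truth table being implemented, sketched with plain doubles:

#include <cassert>
#include <cmath>

int main() {
  double nan = std::nan("");
  assert(!(nan == nan));                  // kUnorderedEqual: NaN goes false
  assert(nan != nan);                     // kUnorderedNotEqual: NaN goes true
  assert(!(nan < 1.0) && !(nan >= 1.0));  // every ordered test fails on NaN
  return 0;
}
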
458
459
460 // Assembles boolean materializations after an instruction.
461 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
462 FlagsCondition condition) {
463 IA32OperandConverter i(this, instr);
464 Label done;
465
466 // Materialize a full 32-bit 1 or 0 value.
467 Label check;
468 Register reg = i.OutputRegister();
469 Condition cc = no_condition;
470 switch (condition) {
471 case kUnorderedEqual:
472 __ j(parity_odd, &check, Label::kNear);
473 __ mov(reg, Immediate(0));
474 __ jmp(&done, Label::kNear);
475 // Fall through.
476 case kEqual:
477 cc = equal;
478 break;
479 case kUnorderedNotEqual:
480 __ j(parity_odd, &check, Label::kNear);
481 __ mov(reg, Immediate(1));
482 __ jmp(&done, Label::kNear);
483 // Fall through.
484 case kNotEqual:
485 cc = not_equal;
486 break;
487 case kSignedLessThan:
488 cc = less;
489 break;
490 case kSignedGreaterThanOrEqual:
491 cc = greater_equal;
492 break;
493 case kSignedLessThanOrEqual:
494 cc = less_equal;
495 break;
496 case kSignedGreaterThan:
497 cc = greater;
498 break;
499 case kUnorderedLessThan:
500 __ j(parity_odd, &check, Label::kNear);
501 __ mov(reg, Immediate(0));
502 __ jmp(&done, Label::kNear);
503 // Fall through.
504 case kUnsignedLessThan:
505 cc = below;
506 break;
507 case kUnorderedGreaterThanOrEqual:
508 __ j(parity_odd, &check, Label::kNear);
509 __ mov(reg, Immediate(1));
510 __ jmp(&done, Label::kNear);
511 // Fall through.
512 case kUnsignedGreaterThanOrEqual:
513 cc = above_equal;
514 break;
515 case kUnorderedLessThanOrEqual:
516 __ j(parity_odd, &check, Label::kNear);
517 __ mov(reg, Immediate(0));
518 __ jmp(&done, Label::kNear);
519 // Fall through.
520 case kUnsignedLessThanOrEqual:
521 cc = below_equal;
522 break;
523 case kUnorderedGreaterThan:
524 __ j(parity_odd, &check, Label::kNear);
525 __ mov(reg, Immediate(1));
526 __ jmp(&done, Label::kNear);
527 // Fall through.
528 case kUnsignedGreaterThan:
529 cc = above;
530 break;
531 }
532 __ bind(&check);
533 if (reg.is_byte_register()) {
534 // setcc for byte registers (al, bl, cl, dl).
535 __ setcc(cc, reg);
536 __ movzx_b(reg, reg);
537 } else {
538 // Emit a branch to set a register to either 1 or 0.
539 Label set;
540 __ j(cc, &set, Label::kNear);
541 __ mov(reg, Immediate(0));
542 __ jmp(&done, Label::kNear);
543 __ bind(&set);
544 __ mov(reg, Immediate(1));
545 }
546 __ bind(&done);
547 }
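
The is_byte_register() split above reflects an instruction-set limit: setcc writes an 8-bit destination, and in 32-bit mode only eax, ebx, ecx, and edx have an addressable low byte, so other allocations fall back to the branch-and-move sequence. A hedged sketch of the predicate (register codes follow the conventional IA-32 encoding; this is not the real v8 Register class):

#include <cassert>

// eax = 0, ecx = 1, edx = 2, ebx = 3; esp/ebp/esi/edi (codes 4..7) have no
// directly addressable low byte in 32-bit mode.
bool IsByteRegisterSketch(int code) { return code >= 0 && code <= 3; }

int main() {
  assert(IsByteRegisterSketch(0));   // eax -> al, so setcc works directly
  assert(!IsByteRegisterSketch(7));  // edi -> needs the branch-and-move path
  return 0;
}
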
548
549
550 // The calling convention for JSFunctions on IA32 passes arguments on the
551 // stack and the JSFunction and context in EDI and ESI, respectively. The
552 // steps of the call look as follows:
553
554 // --{ before the call instruction }--------------------------------------------
555 // | caller frame |
556 // ^ esp ^ ebp
557
558 // --{ push arguments and setup ESI, EDI }--------------------------------------
559 // | args + receiver | caller frame |
560 // ^ esp ^ ebp
561 // [edi = JSFunction, esi = context]
562
563 // --{ call [edi + kCodeEntryOffset] }------------------------------------------
564 // | RET | args + receiver | caller frame |
565 // ^ esp ^ ebp
566
567 // =={ prologue of called function }============================================
568 // --{ push ebp }---------------------------------------------------------------
569 // | FP | RET | args + receiver | caller frame |
570 // ^ esp ^ ebp
571
572 // --{ mov ebp, esp }-----------------------------------------------------------
573 // | FP | RET | args + receiver | caller frame |
574 // ^ ebp,esp
575
576 // --{ push esi }---------------------------------------------------------------
577 // | CTX | FP | RET | args + receiver | caller frame |
578 // ^esp ^ ebp
579
580 // --{ push edi }---------------------------------------------------------------
581 // | FNC | CTX | FP | RET | args + receiver | caller frame |
582 // ^esp ^ ebp
583
584 // --{ subi esp, #N }-----------------------------------------------------------
585 // | callee frame | FNC | CTX | FP | RET | args + receiver | caller frame |
586 // ^esp ^ ebp
587
588 // =={ body of called function }================================================
589
590 // =={ epilogue of called function }============================================
591 // --{ mov esp, ebp }-----------------------------------------------------------
592 // | FP | RET | args + receiver | caller frame |
593 // ^ esp,ebp
594
595 // --{ pop ebp }-----------------------------------------------------------
596 // | | RET | args + receiver | caller frame |
597 // ^ esp ^ ebp
598
599 // --{ ret #A+1 }-----------------------------------------------------------
600 // | | caller frame |
601 // ^ esp ^ ebp
602
603
604 // Runtime function calls are accomplished by doing a stub call to the
605 // CEntryStub (a real code object). On IA32, arguments are passed on the
606 // stack, the number of arguments in EAX, the address of the runtime function
607 // in EBX, and the context in ESI.
608
609 // --{ before the call instruction }--------------------------------------------
610 // | caller frame |
611 // ^ esp ^ ebp
612
613 // --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
614 // | args + receiver | caller frame |
615 // ^ esp ^ ebp
616 // [eax = #args, ebx = runtime function, esi = context]
617
618 // --{ call #CEntryStub }-------------------------------------------------------
619 // | RET | args + receiver | caller frame |
620 // ^ esp ^ ebp
621
622 // =={ body of runtime function }===============================================
623
624 // --{ runtime returns }--------------------------------------------------------
625 // | caller frame |
626 // ^ esp ^ ebp
627
628 // Other custom linkages (e.g. for calling directly into and out of C++) may
629 // need to save callee-saved registers on the stack, which is done in the
630 // function prologue of generated code.
631
632 // --{ before the call instruction }--------------------------------------------
633 // | caller frame |
634 // ^ esp ^ ebp
635
636 // --{ set up arguments in registers and on the stack }-------------------------
637 // | args | caller frame |
638 // ^ esp ^ ebp
639 // [r0 = arg0, r1 = arg1, ...]
640
641 // --{ call code }--------------------------------------------------------------
642 // | RET | args | caller frame |
643 // ^ esp ^ ebp
644
645 // =={ prologue of called function }============================================
646 // --{ push ebp }---------------------------------------------------------------
647 // | FP | RET | args | caller frame |
648 // ^ esp ^ ebp
649
650 // --{ mov ebp, esp }-----------------------------------------------------------
651 // | FP | RET | args | caller frame |
652 // ^ ebp,esp
653
654 // --{ save registers }---------------------------------------------------------
655 // | regs | FP | RET | args | caller frame |
656 // ^ esp ^ ebp
657
658 // --{ subi esp, #N }-----------------------------------------------------------
659 // | callee frame | regs | FP | RET | args | caller frame |
660 // ^esp ^ ebp
661
662 // =={ body of called function }================================================
663
664 // =={ epilogue of called function }============================================
665 // --{ restore registers }------------------------------------------------------
666 // | regs | FP | RET | args | caller frame |
667 // ^ esp ^ ebp
668
669 // --{ mov esp, ebp }-----------------------------------------------------------
670 // | FP | RET | args | caller frame |
671 // ^ esp,ebp
672
673 // --{ pop ebp }----------------------------------------------------------------
674 // | RET | args | caller frame |
675 // ^ esp ^ ebp
676
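
The layouts above pin down the ebp-relative offsets the code generator relies on: the saved frame pointer sits at ebp, the return address one word above it, and stack parameter k two words further up, at ebp + (k + 2) * kPointerSize. A minimal sketch of that arithmetic, matching the "+2 for return address and saved frame pointer" in AssemblePrologue below:

#include <cassert>

int main() {
  const int kPointerSize = 4;  // IA-32 word size
  int num_parameters = 2;
  int receiver_slot = num_parameters + 2;  // same computation as the prologue
  assert(receiver_slot * kPointerSize == 16);  // receiver lives at ebp + 16
  return 0;
}
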
677
678 void CodeGenerator::AssemblePrologue() {
679 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
680 Frame* frame = code_->frame();
681 int stack_slots = frame->GetSpillSlotCount();
682 if (descriptor->kind() == CallDescriptor::kCallAddress) {
683 // Assemble a prologue similar to the cdecl calling convention.
684 __ push(ebp);
685 __ mov(ebp, esp);
686 const RegList saves = descriptor->CalleeSavedRegisters();
687 if (saves != 0) { // Save callee-saved registers.
688 int register_save_area_size = 0;
689 for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
690 if (!((1 << i) & saves)) continue;
691 __ push(Register::from_code(i));
692 register_save_area_size += kPointerSize;
693 }
694 frame->SetRegisterSaveAreaSize(register_save_area_size);
695 }
696 } else if (descriptor->IsJSFunctionCall()) {
697 CompilationInfo* info = linkage()->info();
698 __ Prologue(info->IsCodePreAgingActive());
699 frame->SetRegisterSaveAreaSize(
700 StandardFrameConstants::kFixedFrameSizeFromFp);
701
702 // Sloppy mode functions and builtins need to replace the receiver with the
703 // global proxy when called as functions (without an explicit receiver
704 // object).
705 // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
706 if (info->strict_mode() == SLOPPY && !info->is_native()) {
707 Label ok;
708 // +2 for return address and saved frame pointer.
709 int receiver_slot = info->scope()->num_parameters() + 2;
710 __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize));
711 __ cmp(ecx, isolate()->factory()->undefined_value());
712 __ j(not_equal, &ok, Label::kNear);
713 __ mov(ecx, GlobalObjectOperand());
714 __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
715 __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx);
716 __ bind(&ok);
717 }
718
719 } else {
720 __ StubPrologue();
721 frame->SetRegisterSaveAreaSize(
722 StandardFrameConstants::kFixedFrameSizeFromFp);
723 }
724 if (stack_slots > 0) {
725 __ sub(esp, Immediate(stack_slots * kPointerSize));
726 }
727 }
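
The callee-save loop above walks the RegList bitmask from the highest register code downward, so the matching pops in AssembleReturn (which walk upward) restore the registers in reverse order of saving. A self-contained sketch of that bookkeeping (the mask and register codes are illustrative):

#include <cassert>
#include <cstdint>

int main() {
  const int kNumRegisters = 8;
  const int kPointerSize = 4;
  uint32_t saves = (1u << 3) | (1u << 6) | (1u << 7);  // say ebx, esi, edi
  int register_save_area_size = 0;
  for (int i = kNumRegisters - 1; i >= 0; i--) {
    if (!((1u << i) & saves)) continue;
    register_save_area_size += kPointerSize;  // one push per set bit
  }
  assert(register_save_area_size == 12);  // three registers saved
  return 0;
}
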
728
729
730 void CodeGenerator::AssembleReturn() {
731 CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
732 if (descriptor->kind() == CallDescriptor::kCallAddress) {
733 const RegList saves = descriptor->CalleeSavedRegisters();
734 if (frame()->GetRegisterSaveAreaSize() > 0) {
735 // Remove this frame's spill slots first.
736 int stack_slots = frame()->GetSpillSlotCount();
737 if (stack_slots > 0) {
738 __ add(esp, Immediate(stack_slots * kPointerSize));
739 }
740 // Restore registers.
741 if (saves != 0) {
742 for (int i = 0; i < Register::kNumRegisters; i++) {
743 if (!((1 << i) & saves)) continue;
744 __ pop(Register::from_code(i));
745 }
746 }
747 __ pop(ebp); // Pop caller's frame pointer.
748 __ ret(0);
749 } else {
750 // No saved registers.
751 __ mov(esp, ebp); // Move stack pointer back to frame pointer.
752 __ pop(ebp); // Pop caller's frame pointer.
753 __ ret(0);
754 }
755 } else {
756 __ mov(esp, ebp); // Move stack pointer back to frame pointer.
757 __ pop(ebp); // Pop caller's frame pointer.
758 int pop_count =
759 descriptor->IsJSFunctionCall() ? descriptor->ParameterCount() : 0;
760 __ ret(pop_count * kPointerSize);
761 }
762 }
763
764
765 void CodeGenerator::AssembleMove(InstructionOperand* source,
766 InstructionOperand* destination) {
767 IA32OperandConverter g(this, NULL);
768 // Dispatch on the source and destination operand kinds. Not all
769 // combinations are possible.
770 if (source->IsRegister()) {
771 ASSERT(destination->IsRegister() || destination->IsStackSlot());
772 Register src = g.ToRegister(source);
773 Operand dst = g.ToOperand(destination);
774 __ mov(dst, src);
775 } else if (source->IsStackSlot()) {
776 ASSERT(destination->IsRegister() || destination->IsStackSlot());
777 Operand src = g.ToOperand(source);
778 if (destination->IsRegister()) {
779 Register dst = g.ToRegister(destination);
780 __ mov(dst, src);
781 } else {
782 Operand dst = g.ToOperand(destination);
783 __ push(src);
784 __ pop(dst);
785 }
786 } else if (source->IsConstant()) {
787 Constant src_constant = g.ToConstant(source);
788 if (src_constant.type() == Constant::kHeapObject) {
789 Handle<HeapObject> src = src_constant.ToHeapObject();
790 if (destination->IsRegister()) {
791 Register dst = g.ToRegister(destination);
792 __ LoadHeapObject(dst, src);
793 } else {
794 ASSERT(destination->IsStackSlot());
795 Operand dst = g.ToOperand(destination);
796 AllowDeferredHandleDereference embedding_raw_address;
797 if (isolate()->heap()->InNewSpace(*src)) {
798 __ PushHeapObject(src);
799 __ pop(dst);
800 } else {
801 __ mov(dst, src);
802 }
803 }
804 } else if (destination->IsRegister()) {
805 Register dst = g.ToRegister(destination);
806 __ mov(dst, g.ToImmediate(source));
807 } else if (destination->IsStackSlot()) {
808 Operand dst = g.ToOperand(destination);
809 __ mov(dst, g.ToImmediate(source));
810 } else {
811 double v = g.ToDouble(source);
812 uint64_t int_val = BitCast<uint64_t, double>(v);
813 int32_t lower = static_cast<int32_t>(int_val);
814 int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
815 if (destination->IsDoubleRegister()) {
816 XMMRegister dst = g.ToDoubleRegister(destination);
817 __ Move(dst, v);
818 } else {
819 ASSERT(destination->IsDoubleStackSlot());
820 Operand dst0 = g.ToOperand(destination);
821 Operand dst1 = g.HighOperand(destination);
822 __ mov(dst0, Immediate(lower));
823 __ mov(dst1, Immediate(upper));
824 }
825 }
826 } else if (source->IsDoubleRegister()) {
827 XMMRegister src = g.ToDoubleRegister(source);
828 if (destination->IsDoubleRegister()) {
829 XMMRegister dst = g.ToDoubleRegister(destination);
830 __ movaps(dst, src);
831 } else {
832 ASSERT(destination->IsDoubleStackSlot());
833 Operand dst = g.ToOperand(destination);
834 __ movsd(dst, src);
835 }
836 } else if (source->IsDoubleStackSlot()) {
837 ASSERT(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
838 Operand src = g.ToOperand(source);
839 if (destination->IsDoubleRegister()) {
840 XMMRegister dst = g.ToDoubleRegister(destination);
841 __ movsd(dst, src);
842 } else {
843 // We rely on having xmm0 available as a fixed scratch register.
844 Operand dst = g.ToOperand(destination);
845 __ movsd(xmm0, src);
846 __ movsd(dst, xmm0);
847 }
848 } else {
849 UNREACHABLE();
850 }
851 }
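
For a double constant headed to a stack slot, AssembleMove above splits the 64-bit pattern into two 32-bit immediates, storing the low word at the lower address per IA-32's little-endian layout. The same split, sketched with memcpy standing in for BitCast:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  double v = 1.0;  // IEEE-754 bit pattern 0x3FF0000000000000
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof bits);
  uint32_t lower = static_cast<uint32_t>(bits);
  uint32_t upper = static_cast<uint32_t>(bits >> 32);
  assert(lower == 0u && upper == 0x3FF00000u);
  return 0;
}
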
852
853
854 void CodeGenerator::AssembleSwap(InstructionOperand* source,
855 InstructionOperand* destination) {
856 IA32OperandConverter g(this, NULL);
857 // Dispatch on the source and destination operand kinds. Not all
858 // combinations are possible.
859 if (source->IsRegister() && destination->IsRegister()) {
860 // Register-register.
861 Register src = g.ToRegister(source);
862 Register dst = g.ToRegister(destination);
863 __ xchg(dst, src);
864 } else if (source->IsRegister() && destination->IsStackSlot()) {
865 // Register-memory.
866 __ xchg(g.ToRegister(source), g.ToOperand(destination));
867 } else if (source->IsStackSlot() && destination->IsStackSlot()) {
868 // Memory-memory.
869 Operand src = g.ToOperand(source);
870 Operand dst = g.ToOperand(destination);
871 __ push(dst);
872 __ push(src);
873 __ pop(dst);
874 __ pop(src);
875 } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
876 // XMM register-register swap. We rely on having xmm0
877 // available as a fixed scratch register.
878 XMMRegister src = g.ToDoubleRegister(source);
879 XMMRegister dst = g.ToDoubleRegister(destination);
880 __ movaps(xmm0, src);
881 __ movaps(src, dst);
882 __ movaps(dst, xmm0);
883 } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
884 // XMM register-memory swap. We rely on having xmm0
885 // available as a fixed scratch register.
886 XMMRegister reg = g.ToDoubleRegister(source);
887 Operand other = g.ToOperand(destination);
888 __ movsd(xmm0, other);
889 __ movsd(other, reg);
890 __ movaps(reg, xmm0);
891 } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
892 // Double-width memory-to-memory.
893 Operand src0 = g.ToOperand(source);
894 Operand src1 = g.HighOperand(source);
895 Operand dst0 = g.ToOperand(destination);
896 Operand dst1 = g.HighOperand(destination);
897 __ movsd(xmm0, dst0); // Save destination in xmm0.
898 __ push(src0); // Then use stack to copy source to destination.
899 __ pop(dst0);
900 __ push(src1);
901 __ pop(dst1);
902 __ movsd(src0, xmm0);
903 } else {
904 // No other combinations are possible.
905 UNREACHABLE();
906 }
907 }
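
The stack-slot/stack-slot case above swaps two words without a free general register by routing both through the machine stack: push dst, push src, pop dst, pop src. The same ordering in portable form, with an explicit stack standing in for esp:

#include <cassert>
#include <stack>

int main() {
  int src = 1, dst = 2;
  std::stack<int> s;
  s.push(dst);             // push dst (old value 2)
  s.push(src);             // push src (old value 1)
  dst = s.top(); s.pop();  // pop dst -> receives old src
  src = s.top(); s.pop();  // pop src -> receives old dst
  assert(src == 2 && dst == 1);
  return 0;
}
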
908
909
910 void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
911
912 #undef __
913
914 #ifdef DEBUG
915
916 // Checks whether the code between start_pc and end_pc is a no-op.
917 bool CodeGenerator::IsNopForSmiCodeInlining(Handle<Code> code, int start_pc,
918 int end_pc) {
919 if (start_pc + 1 != end_pc) {
920 return false;
921 }
922 return *(code->instruction_start() + start_pc) ==
923 v8::internal::Assembler::kNopByte;
924 }
925
926 #endif // DEBUG
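
IsNopForSmiCodeInlining accepts only a one-byte gap equal to Assembler::kNopByte; on IA-32 the canonical single-byte nop encoding is 0x90 (xchg eax, eax). A standalone sketch of the same check:

#include <cassert>
#include <cstdint>

bool IsNopSketch(const uint8_t* code, int start_pc, int end_pc) {
  return end_pc - start_pc == 1 && code[start_pc] == 0x90;  // one-byte nop
}

int main() {
  const uint8_t buf[] = {0x90};
  assert(IsNopSketch(buf, 0, 1));
  return 0;
}
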
927 }
928 }
929 } // namespace v8::internal::compiler