Chromium Code Reviews

Side by Side Diff: src/compiler/x64/instruction-selector-x64.cc

Issue 426233002: Land the Fan (disabled) (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Review feedback, rebase and "git cl format" Created 6 years, 4 months ago
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/instruction-selector-impl.h"
#include "src/compiler/node-matchers.h"

namespace v8 {
namespace internal {
namespace compiler {

// Adds X64-specific methods for generating operands.
class X64OperandGenerator V8_FINAL : public OperandGenerator {
 public:
  explicit X64OperandGenerator(InstructionSelector* selector)
      : OperandGenerator(selector) {}

  InstructionOperand* TempRegister(Register reg) {
    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
                                           Register::ToAllocationIndex(reg));
  }

  InstructionOperand* UseByteRegister(Node* node) {
    // TODO(dcarney): relax constraint.
    return UseFixed(node, rdx);
  }

  InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); }

  bool CanBeImmediate(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
        return true;
      default:
        return false;
    }
  }

  bool CanBeImmediate64(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kInt32Constant:
        return true;
      case IrOpcode::kNumberConstant:
        return true;
      case IrOpcode::kHeapConstant: {
        // Constants in new space cannot be used as immediates in V8 because
        // the GC does not scan code objects when collecting the new generation.
        Handle<HeapObject> value = ValueOf<Handle<HeapObject> >(node->op());
        return !isolate()->heap()->InNewSpace(*value);
      }
      default:
        return false;
    }
  }
};


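For illustration, a minimal standalone sketch (hypothetical names, not V8's API) of the rule CanBeImmediate64 implements: integer and number constants can always be baked into the instruction stream, but a heap constant qualifies only when it lives outside the young generation, because the scavenger does not visit code objects and an embedded new-space pointer would go stale once the object moves.

enum class ConstantKind { kInt32, kNumber, kHeapObject };

// Hypothetical model of the eligibility check: heap pointers may be
// embedded in code only if a scavenge can never move the object.
bool CanEmbedAsImmediate64(ConstantKind kind, bool in_new_space) {
  switch (kind) {
    case ConstantKind::kInt32:
    case ConstantKind::kNumber:
      return true;
    case ConstantKind::kHeapObject:
      return !in_new_space;  // old-space objects survive scavenges in place
  }
  return false;
}
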
void InstructionSelector::VisitLoad(Node* node) {
  MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);

  InstructionOperand* output = rep == kMachineFloat64
                                   ? g.DefineAsDoubleRegister(node)
                                   : g.DefineAsRegister(node);
  ArchOpcode opcode;
  switch (rep) {
    case kMachineFloat64:
      opcode = kSSELoad;
      break;
    case kMachineWord8:
      opcode = kX64LoadWord8;
      break;
    case kMachineWord16:
      opcode = kX64LoadWord16;
      break;
    case kMachineWord32:
      opcode = kX64LoadWord32;
      break;
    case kMachineTagged:  // Fall through.
    case kMachineWord64:
      opcode = kX64LoadWord64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(base)) {
    // load [#base + %index]
    Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
         g.UseRegister(index), g.UseImmediate(base));
  } else if (g.CanBeImmediate(index)) {  // load [%base + #index]
    Emit(opcode | AddressingModeField::encode(kMode_MRI), output,
         g.UseRegister(base), g.UseImmediate(index));
  } else {  // load [%base + %index + K]
    Emit(opcode | AddressingModeField::encode(kMode_MR1I), output,
         g.UseRegister(base), g.UseRegister(index));
  }
  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
}


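A compilable sketch (simplified, illustrative names only; not part of the patch) of the addressing-mode choice VisitLoad and VisitStore share: an immediate on either side selects the register-plus-immediate mode (kMode_MRI), with operands ordered so the immediate comes last, and otherwise both operands stay in registers (kMode_MR1I).

enum SimpleAddressingMode { kModeMRI, kModeMR1I };

// Returns the mode, and whether base/index should be swapped so that the
// immediate operand, if any, is emitted second.
SimpleAddressingMode ChooseMode(bool base_is_immediate,
                                bool index_is_immediate,
                                bool* swap_operands) {
  *swap_operands = base_is_immediate;  // load [#base + %index]
  return (base_is_immediate || index_is_immediate) ? kModeMRI : kModeMR1I;
}
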
void InstructionSelector::VisitStore(Node* node) {
  X64OperandGenerator g(this);
  Node* base = node->InputAt(0);
  Node* index = node->InputAt(1);
  Node* value = node->InputAt(2);

  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
  MachineRepresentation rep = store_rep.rep;
  if (store_rep.write_barrier_kind == kFullWriteBarrier) {
    ASSERT(rep == kMachineTagged);
    // TODO(dcarney): refactor RecordWrite function to take temp registers
    // and pass them here instead of using fixed regs
    // TODO(dcarney): handle immediate indices.
    InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
    Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
         g.UseFixed(index, rcx), g.UseFixed(value, rdx), ARRAY_SIZE(temps),
         temps);
    return;
  }
  ASSERT_EQ(kNoWriteBarrier, store_rep.write_barrier_kind);
  bool is_immediate = false;
  InstructionOperand* val;
  if (rep == kMachineFloat64) {
    val = g.UseDoubleRegister(value);
  } else {
    is_immediate = g.CanBeImmediate(value);
    if (is_immediate) {
      val = g.UseImmediate(value);
    } else if (rep == kMachineWord8) {
      val = g.UseByteRegister(value);
    } else {
      val = g.UseRegister(value);
    }
  }
  ArchOpcode opcode;
  switch (rep) {
    case kMachineFloat64:
      opcode = kSSEStore;
      break;
    case kMachineWord8:
      opcode = is_immediate ? kX64StoreWord8I : kX64StoreWord8;
      break;
    case kMachineWord16:
      opcode = is_immediate ? kX64StoreWord16I : kX64StoreWord16;
      break;
    case kMachineWord32:
      opcode = is_immediate ? kX64StoreWord32I : kX64StoreWord32;
      break;
    case kMachineTagged:  // Fall through.
    case kMachineWord64:
      opcode = is_immediate ? kX64StoreWord64I : kX64StoreWord64;
      break;
    default:
      UNREACHABLE();
      return;
  }
  if (g.CanBeImmediate(base)) {
    // store [#base + %index], %|#value
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(index), g.UseImmediate(base), val);
  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
         g.UseRegister(base), g.UseImmediate(index), val);
  } else {  // store [%base + %index], %|#value
    Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
         g.UseRegister(base), g.UseRegister(index), val);
  }
  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
}


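A small sketch (illustrative names, not part of the patch) of how the non-barrier path picks the operand form for the stored value: float64 values go through an XMM register, values that fit an immediate are emitted inline (selecting the *I opcode variants), and byte stores route through the byte-register constraint, which the TODO above notes is currently pinned to rdx and could be relaxed.

enum StoreValueUse { kAsDouble, kAsImmediate, kAsByteRegister, kAsRegister };

// Simplified model of the value-selection logic in VisitStore.
StoreValueUse ChooseStoreValueUse(bool is_float64, bool fits_immediate,
                                  bool is_word8) {
  if (is_float64) return kAsDouble;          // always via an XMM register
  if (fits_immediate) return kAsImmediate;   // pairs with the *I opcodes
  return is_word8 ? kAsByteRegister : kAsRegister;
}
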
// Shared routine for multiple binary operations.
static void VisitBinop(InstructionSelector* selector, Node* node,
                       ArchOpcode opcode, bool commutative) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  // TODO(turbofan): match complex addressing modes.
  // TODO(turbofan): if commutative, pick the non-live-in operand as the left as
  // this might be the last use and therefore its register can be reused.
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
                   g.UseImmediate(right));
  } else if (commutative && g.CanBeImmediate(left)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(right),
                   g.UseImmediate(left));
  } else {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.Use(right));
  }
}


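Two notes on the routine above. First, x64 ALU instructions are two-address (the destination is also the left source), which is why every case defines the result same-as-first. Second, commutativity is used purely to get a constant into the immediate slot; subtraction cannot do that because the operand order matters. A quick compilable check of the property being relied on:

#include <cassert>

int main() {
  int x = 42;
  assert(7 + x == x + 7);  // safe to treat either add operand as immediate
  assert(7 - x != x - 7);  // not safe for sub (holds here since x != 7)
  return 0;
}
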
void InstructionSelector::VisitWord32And(Node* node) {
  VisitBinop(this, node, kX64And32, true);
}


void InstructionSelector::VisitWord64And(Node* node) {
  VisitBinop(this, node, kX64And, true);
}


void InstructionSelector::VisitWord32Or(Node* node) {
  VisitBinop(this, node, kX64Or32, true);
}


void InstructionSelector::VisitWord64Or(Node* node) {
  VisitBinop(this, node, kX64Or, true);
}


template <typename T>
static void VisitXor(InstructionSelector* selector, Node* node,
                     ArchOpcode xor_opcode, ArchOpcode not_opcode) {
  X64OperandGenerator g(selector);
  BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
  if (m.right().Is(-1)) {
    selector->Emit(not_opcode, g.DefineSameAsFirst(node),
                   g.Use(m.left().node()));
  } else {
    VisitBinop(selector, node, xor_opcode, true);
  }
}


void InstructionSelector::VisitWord32Xor(Node* node) {
  VisitXor<int32_t>(this, node, kX64Xor32, kX64Not32);
}


void InstructionSelector::VisitWord64Xor(Node* node) {
  VisitXor<int64_t>(this, node, kX64Xor, kX64Not);
}


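The special case in VisitXor rests on the bitwise identity x ^ -1 == ~x, so an xor against an all-ones constant can be emitted as a single one-operand not instruction. A quick compilable check of the identity:

#include <cassert>
#include <cstdint>

int main() {
  const int32_t values[] = {0, 1, -1, 0x12345678, INT32_MIN};
  for (int32_t x : values) {
    assert((x ^ -1) == ~x);  // xor with all ones is bitwise NOT
  }
  return 0;
}
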
// Shared routine for multiple 32-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
static void VisitWord32Shift(InstructionSelector* selector, Node* node,
                             ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // TODO(turbofan): assembler only supports some addressing modes for shifts.
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    Int32BinopMatcher m(node);
    if (m.right().IsWord32And()) {
      Int32BinopMatcher mright(right);
      if (mright.right().Is(0x1F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


// Shared routine for multiple 64-bit shift operations.
// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
static void VisitWord64Shift(InstructionSelector* selector, Node* node,
                             ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // TODO(turbofan): assembler only supports some addressing modes for shifts.
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseImmediate(right));
  } else {
    Int64BinopMatcher m(node);
    if (m.right().IsWord64And()) {
      Int64BinopMatcher mright(right);
      if (mright.right().Is(0x3F)) {
        right = mright.left().node();
      }
    }
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.UseFixed(right, rcx));
  }
}


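The mask-stripping above is sound because x86 shift instructions with a variable count in cl already reduce the count modulo the operand width: the low 5 bits are used for 32-bit shifts and the low 6 bits for 64-bit shifts. A graph node computing x << (y & 0x1F) can therefore hand y to the hardware unmasked. A compilable model (the explicit mask stands in for what the CPU does, since shifting by >= width is undefined in C++):

#include <cstdint>

// Semantics of SHL r/m32, CL: the count is taken modulo 32.
uint32_t MachineShl32(uint32_t value, uint32_t count) {
  return value << (count & 0x1F);
}

// Semantics of SHL r/m64, CL: the count is taken modulo 64.
uint64_t MachineShl64(uint64_t value, uint64_t count) {
  return value << (count & 0x3F);
}
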
void InstructionSelector::VisitWord32Shl(Node* node) {
  VisitWord32Shift(this, node, kX64Shl32);
}


void InstructionSelector::VisitWord64Shl(Node* node) {
  VisitWord64Shift(this, node, kX64Shl);
}


void InstructionSelector::VisitWord32Shr(Node* node) {
  VisitWord32Shift(this, node, kX64Shr32);
}


void InstructionSelector::VisitWord64Shr(Node* node) {
  VisitWord64Shift(this, node, kX64Shr);
}


void InstructionSelector::VisitWord32Sar(Node* node) {
  VisitWord32Shift(this, node, kX64Sar32);
}


void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitWord64Shift(this, node, kX64Sar);
}


void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop(this, node, kX64Add32, true);
}


void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kX64Add, true);
}


template <typename T>
static void VisitSub(InstructionSelector* selector, Node* node,
                     ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
  X64OperandGenerator g(selector);
  BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
  if (m.left().Is(0)) {
    selector->Emit(neg_opcode, g.DefineSameAsFirst(node),
                   g.Use(m.right().node()));
  } else {
    VisitBinop(selector, node, sub_opcode, false);
  }
}


void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitSub<int32_t>(this, node, kX64Sub32, kX64Neg32);
}


void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitSub<int64_t>(this, node, kX64Sub, kX64Neg);
}


static void VisitMul(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  if (g.CanBeImmediate(right)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                   g.UseImmediate(right));
  } else if (g.CanBeImmediate(left)) {
    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(right),
                   g.UseImmediate(left));
  } else {
    // TODO(turbofan): select better left operand.
    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                   g.Use(right));
  }
}


void InstructionSelector::VisitInt32Mul(Node* node) {
  VisitMul(this, node, kX64Imul32);
}


void InstructionSelector::VisitInt64Mul(Node* node) {
  VisitMul(this, node, kX64Imul);
}


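Unlike VisitBinop, the immediate cases in VisitMul define the result as a fresh register rather than same-as-first. That matches the x64 instruction set: imul has a three-operand immediate form (imul dst, src, imm) whose destination is independent of the source, while the register-register form is two-address, hence the same-as-first constraint in the fallback. Both immediate cases also lean on multiplication being commutative, checked here:

#include <cassert>

int main() {
  int x = 42;
  assert(17 * x == x * 17);  // either side may become the immediate
  return 0;
}
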
static void VisitDiv(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), ARRAY_SIZE(temps), temps);
}


void InstructionSelector::VisitInt32Div(Node* node) {
  VisitDiv(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Div(Node* node) {
  VisitDiv(this, node, kX64Idiv);
}


void InstructionSelector::VisitInt32UDiv(Node* node) {
  VisitDiv(this, node, kX64Udiv32);
}


void InstructionSelector::VisitInt64UDiv(Node* node) {
  VisitDiv(this, node, kX64Udiv);
}


static void VisitMod(InstructionSelector* selector, Node* node,
                     ArchOpcode opcode) {
  X64OperandGenerator g(selector);
  InstructionOperand* temps[] = {g.TempRegister(rax), g.TempRegister(rdx)};
  selector->Emit(
      opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
      g.UseUniqueRegister(node->InputAt(1)), ARRAY_SIZE(temps), temps);
}


void InstructionSelector::VisitInt32Mod(Node* node) {
  VisitMod(this, node, kX64Idiv32);
}


void InstructionSelector::VisitInt64Mod(Node* node) {
  VisitMod(this, node, kX64Idiv);
}


void InstructionSelector::VisitInt32UMod(Node* node) {
  VisitMod(this, node, kX64Udiv32);
}


void InstructionSelector::VisitInt64UMod(Node* node) {
  VisitMod(this, node, kX64Udiv);
}


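The fixed-register choreography above follows directly from x64 idiv/div semantics: the dividend is taken from rdx:rax, the quotient lands in rax, and the remainder in rdx, with both registers clobbered. Hence division defines its result as rax with rdx as a temp, modulus defines its result as rdx with rax as a temp, and the divisor is a unique register so it cannot alias either. A compilable model of the register contract (names illustrative only):

#include <cassert>

struct IdivOutputs {
  int rax;  // quotient
  int rdx;  // remainder
};

// Models what the emitted idiv leaves in rax/rdx (both clobbered).
IdivOutputs Idiv(int dividend, int divisor) {
  return {dividend / divisor, dividend % divisor};
}

int main() {
  IdivOutputs out = Idiv(17, 5);
  assert(out.rax == 3 && out.rdx == 2);
  return 0;
}
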
void InstructionSelector::VisitConvertInt32ToFloat64(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEInt32ToFloat64, g.DefineAsDoubleRegister(node),
       g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitConvertFloat64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
}


void InstructionSelector::VisitFloat64Add(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
       g.UseDoubleRegister(node->InputAt(0)),
       g.UseDoubleRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Sub(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
       g.UseDoubleRegister(node->InputAt(0)),
       g.UseDoubleRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Mul(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
       g.UseDoubleRegister(node->InputAt(0)),
       g.UseDoubleRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Div(Node* node) {
  X64OperandGenerator g(this);
  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
       g.UseDoubleRegister(node->InputAt(0)),
       g.UseDoubleRegister(node->InputAt(1)));
}


void InstructionSelector::VisitFloat64Mod(Node* node) {
  X64OperandGenerator g(this);
  InstructionOperand* temps[] = {g.TempRegister(rax)};
  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
       g.UseDoubleRegister(node->InputAt(0)),
       g.UseDoubleRegister(node->InputAt(1)), 1, temps);
}


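Float64Mod is the only floating-point operation here that reserves a general-purpose temp (rax); presumably the code generator's modulus sequence needs a scratch GPR, for example to inspect FPU status while iterating an fprem-style remainder loop (an assumption, not something this file states). Semantically the operation matches C's fmod, which is also what JavaScript's % requires for doubles: the result takes the dividend's sign. A quick compilable check:

#include <cassert>
#include <cmath>

int main() {
  assert(std::fmod(5.5, 2.0) == 1.5);    // result follows the dividend...
  assert(std::fmod(-5.5, 2.0) == -1.5);  // ...including its sign
  return 0;
}
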
void InstructionSelector::VisitConvertInt64ToInt32(Node* node) {
  X64OperandGenerator g(this);
  // TODO(dcarney): other modes
  Emit(kX64Int64ToInt32, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
  X64OperandGenerator g(this);
  // TODO(dcarney): other modes
  Emit(kX64Int32ToInt64, g.DefineAsRegister(node),
       g.UseRegister(node->InputAt(0)));
}


// Shared routine for multiple compare operations.
static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                         InstructionOperand* left, InstructionOperand* right,
                         FlagsContinuation* cont) {
  X64OperandGenerator g(selector);
  opcode = cont->Encode(opcode);
  if (cont->IsBranch()) {
    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
                   g.Label(cont->false_block()))->MarkAsControl();
  } else {
    ASSERT(cont->IsSet());
    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
  }
}


// Shared routine for multiple word compare operations.
static void VisitWordCompare(InstructionSelector* selector, Node* node,
                             InstructionCode opcode, FlagsContinuation* cont,
                             bool commutative) {
  X64OperandGenerator g(selector);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);

  // Match immediates on left or right side of comparison.
  if (g.CanBeImmediate(right)) {
    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
  } else if (g.CanBeImmediate(left)) {
    if (!commutative) cont->Commute();
    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
  } else {
    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
  }
}


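When only the left operand is an immediate, VisitWordCompare swaps the operands; for a non-commutative comparison the continuation's condition must then be commuted as well, e.g. 5 < x holds exactly when x > 5. A compilable check of that exchange:

#include <cassert>

int main() {
  for (int x = 0; x < 12; ++x) {
    assert((5 < x) == (x > 5));    // swapped operands, commuted condition
    assert((5 == x) == (x == 5));  // equality needs no commute
  }
  return 0;
}
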
void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt32Sub:
      return VisitWordCompare(this, node, kX64Cmp32, cont, false);
    case IrOpcode::kWord32And:
      return VisitWordCompare(this, node, kX64Test32, cont, true);
    default:
      break;
  }

  X64OperandGenerator g(this);
  VisitCompare(this, kX64Test32, g.Use(node), g.TempImmediate(-1), cont);
}


void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
  switch (node->opcode()) {
    case IrOpcode::kInt64Sub:
      return VisitWordCompare(this, node, kX64Cmp, cont, false);
    case IrOpcode::kWord64And:
      return VisitWordCompare(this, node, kX64Test, cont, true);
    default:
      break;
  }

  X64OperandGenerator g(this);
  VisitCompare(this, kX64Test, g.Use(node), g.TempImmediate(-1), cont);
}


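The fallback in both test visitors emits test value, -1: ANDing with all ones leaves the value unchanged, so the zero flag is set exactly when the value itself is zero, which is the condition being branched on. A compilable check:

#include <cassert>
#include <cstdint>

int main() {
  const int32_t values[] = {-7, -1, 0, 1, 100};
  for (int32_t x : values) {
    assert(((x & -1) == 0) == (x == 0));  // test x, -1 sets ZF iff x == 0
  }
  return 0;
}
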
void InstructionSelector::VisitWord32Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kX64Cmp32, cont, false);
}


void InstructionSelector::VisitWord64Compare(Node* node,
                                             FlagsContinuation* cont) {
  VisitWordCompare(this, node, kX64Cmp, cont, false);
}


void InstructionSelector::VisitFloat64Compare(Node* node,
                                              FlagsContinuation* cont) {
  X64OperandGenerator g(this);
  Node* left = node->InputAt(0);
  Node* right = node->InputAt(1);
  VisitCompare(this, kSSEFloat64Cmp, g.UseDoubleRegister(left), g.Use(right),
               cont);
}


void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
                                    BasicBlock* deoptimization) {
  X64OperandGenerator g(this);
  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
  CallBuffer buffer(zone(), descriptor);  // TODO(turbofan): temp zone here?

  // Compute InstructionOperands for inputs and outputs.
  InitializeCallBuffer(call, &buffer, true, true, continuation, deoptimization);

  // TODO(dcarney): stack alignment for c calls.
  // TODO(dcarney): shadow space on Windows for c calls.
  // Push any stack arguments.
  for (int i = buffer.pushed_count - 1; i >= 0; --i) {
    Node* input = buffer.pushed_nodes[i];
    // TODO(titzer): handle pushing double parameters.
    if (g.CanBeImmediate(input)) {
      Emit(kX64PushI, NULL, g.UseImmediate(input));
    } else {
      Emit(kX64Push, NULL, g.Use(input));
    }
  }

  // Select the appropriate opcode based on the call type.
  InstructionCode opcode;
  switch (descriptor->kind()) {
    case CallDescriptor::kCallCodeObject: {
      bool lazy_deopt = descriptor->CanLazilyDeoptimize();
      opcode = kX64CallCodeObject | MiscField::encode(lazy_deopt ? 1 : 0);
      break;
    }
    case CallDescriptor::kCallAddress:
      opcode = kX64CallAddress;
      break;
    case CallDescriptor::kCallJSFunction:
      opcode = kX64CallJSFunction;
      break;
    default:
      UNREACHABLE();
      return;
  }

  // Emit the call instruction.
  Instruction* call_instr =
      Emit(opcode, buffer.output_count, buffer.outputs,
           buffer.fixed_and_control_count(), buffer.fixed_and_control_args);

  call_instr->MarkAsCall();
  if (deoptimization != NULL) {
    ASSERT(continuation != NULL);
    call_instr->MarkAsControl();
  }

  // Caller cleanup of stack for C-style calls.
  if (descriptor->kind() == CallDescriptor::kCallAddress &&
      buffer.pushed_count > 0) {
    ASSERT(deoptimization == NULL && continuation == NULL);
    Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL);
  }
}

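The closing kPopStack reflects the convention for kCallAddress (C) calls: arguments pushed before the call remain on the stack across it, and the caller reclaims them afterwards, so the selector encodes the number of pushed slots in the instruction's MiscField. A sketch of the bookkeeping (the 8-byte slot size is an x64 assumption, not something this file spells out):

// Bytes the caller must pop after a C call that pushed 'pushed_count'
// stack arguments, assuming one 8-byte slot per argument on x64.
int CallerStackAdjustmentBytes(int pushed_count) {
  const int kSlotSizeBytes = 8;  // assumed x64 stack slot size
  return pushed_count * kSlotSizeBytes;
}
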
}  // namespace compiler
}  // namespace internal
}  // namespace v8
