OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/compiler/code-generator.h" | 5 #include "src/compiler/code-generator.h" |
6 | 6 |
7 #include "src/compiler/code-generator-impl.h" | 7 #include "src/compiler/code-generator-impl.h" |
8 #include "src/compiler/gap-resolver.h" | 8 #include "src/compiler/gap-resolver.h" |
9 #include "src/compiler/node-matchers.h" | 9 #include "src/compiler/node-matchers.h" |
10 #include "src/compiler/node-properties-inl.h" | 10 #include "src/compiler/node-properties-inl.h" |
11 #include "src/scopes.h" | 11 #include "src/scopes.h" |
12 #include "src/x64/assembler-x64.h" | 12 #include "src/x64/assembler-x64.h" |
13 #include "src/x64/macro-assembler-x64.h" | 13 #include "src/x64/macro-assembler-x64.h" |
14 | 14 |
15 namespace v8 { | 15 namespace v8 { |
16 namespace internal { | 16 namespace internal { |
17 namespace compiler { | 17 namespace compiler { |
18 | 18 |
19 #define __ masm()-> | 19 #define __ masm()-> |
20 | 20 |
21 | 21 |
22 // TODO(turbofan): Cleanup these hacks. | |
23 enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference }; | |
24 | |
25 | |
26 struct Immediate64 { | |
27 uint64_t value; | |
28 Handle<Object> handle; | |
29 ExternalReference reference; | |
30 Immediate64Type type; | |
31 }; | |
32 | |
33 | |
34 enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand }; | |
35 | |
36 | |
37 struct RegisterOrOperand { | |
38 RegisterOrOperand() : operand(no_reg, 0) {} | |
39 Register reg; | |
40 DoubleRegister double_reg; | |
41 Operand operand; | |
42 RegisterOrOperandType type; | |
43 }; | |
44 | |
45 | |
46 // Adds X64 specific methods for decoding operands. | 22 // Adds X64 specific methods for decoding operands. |
47 class X64OperandConverter : public InstructionOperandConverter { | 23 class X64OperandConverter : public InstructionOperandConverter { |
48 public: | 24 public: |
49 X64OperandConverter(CodeGenerator* gen, Instruction* instr) | 25 X64OperandConverter(CodeGenerator* gen, Instruction* instr) |
50 : InstructionOperandConverter(gen, instr) {} | 26 : InstructionOperandConverter(gen, instr) {} |
51 | 27 |
52 RegisterOrOperand InputRegisterOrOperand(int index) { | |
53 return ToRegisterOrOperand(instr_->InputAt(index)); | |
54 } | |
55 | |
56 Immediate InputImmediate(int index) { | 28 Immediate InputImmediate(int index) { |
57 return ToImmediate(instr_->InputAt(index)); | 29 return ToImmediate(instr_->InputAt(index)); |
58 } | 30 } |
59 | 31 |
60 RegisterOrOperand OutputRegisterOrOperand() { | 32 Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); } |
61 return ToRegisterOrOperand(instr_->Output()); | |
62 } | |
63 | 33 |
64 Immediate64 InputImmediate64(int index) { | 34 Operand OutputOperand() { return ToOperand(instr_->Output()); } |
65 return ToImmediate64(instr_->InputAt(index)); | |
66 } | |
67 | |
68 Immediate64 ToImmediate64(InstructionOperand* operand) { | |
69 Constant constant = ToConstant(operand); | |
70 Immediate64 immediate; | |
71 immediate.value = 0xbeefdeaddeefbeed; | |
72 immediate.type = kImm64Value; | |
73 switch (constant.type()) { | |
74 case Constant::kInt32: | |
75 case Constant::kInt64: | |
76 immediate.value = constant.ToInt64(); | |
77 return immediate; | |
78 case Constant::kFloat32: | |
79 immediate.type = kImm64Handle; | |
80 immediate.handle = | |
81 isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED); | |
82 return immediate; | |
83 case Constant::kFloat64: | |
84 immediate.type = kImm64Handle; | |
85 immediate.handle = | |
86 isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED); | |
87 return immediate; | |
88 case Constant::kExternalReference: | |
89 immediate.type = kImm64Reference; | |
90 immediate.reference = constant.ToExternalReference(); | |
91 return immediate; | |
92 case Constant::kHeapObject: | |
93 immediate.type = kImm64Handle; | |
94 immediate.handle = constant.ToHeapObject(); | |
95 return immediate; | |
96 } | |
97 UNREACHABLE(); | |
98 return immediate; | |
99 } | |
100 | 35 |
101 Immediate ToImmediate(InstructionOperand* operand) { | 36 Immediate ToImmediate(InstructionOperand* operand) { |
102 Constant constant = ToConstant(operand); | 37 Constant constant = ToConstant(operand); |
103 switch (constant.type()) { | 38 if (constant.type() == Constant::kInt32) { |
104 case Constant::kInt32: | 39 return Immediate(constant.ToInt32()); |
105 return Immediate(constant.ToInt32()); | |
106 case Constant::kInt64: | |
107 case Constant::kFloat32: | |
108 case Constant::kFloat64: | |
109 case Constant::kExternalReference: | |
110 case Constant::kHeapObject: | |
111 break; | |
112 } | 40 } |
113 UNREACHABLE(); | 41 UNREACHABLE(); |
114 return Immediate(-1); | 42 return Immediate(-1); |
115 } | 43 } |
116 | 44 |
117 Operand ToOperand(InstructionOperand* op, int extra = 0) { | 45 Operand ToOperand(InstructionOperand* op, int extra = 0) { |
118 RegisterOrOperand result = ToRegisterOrOperand(op, extra); | |
119 DCHECK_EQ(kOperand, result.type); | |
120 return result.operand; | |
121 } | |
122 | |
123 RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op, int extra = 0) { | |
124 RegisterOrOperand result; | |
125 if (op->IsRegister()) { | |
126 DCHECK(extra == 0); | |
127 result.type = kRegister; | |
128 result.reg = ToRegister(op); | |
129 return result; | |
130 } else if (op->IsDoubleRegister()) { | |
131 DCHECK(extra == 0); | |
132 DCHECK(extra == 0); | |
133 result.type = kDoubleRegister; | |
134 result.double_reg = ToDoubleRegister(op); | |
135 return result; | |
136 } | |
137 | |
138 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); | 46 DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot()); |
139 | |
140 result.type = kOperand; | |
141 // The linkage computes where all spill slots are located. | 47 // The linkage computes where all spill slots are located. |
142 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra); | 48 FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra); |
143 result.operand = | 49 return Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset()); |
144 Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset()); | |
145 return result; | |
146 } | 50 } |
147 | 51 |
148 static int NextOffset(int* offset) { | 52 static int NextOffset(int* offset) { |
149 int i = *offset; | 53 int i = *offset; |
150 (*offset)++; | 54 (*offset)++; |
151 return i; | 55 return i; |
152 } | 56 } |
153 | 57 |
154 static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) { | 58 static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) { |
155 STATIC_ASSERT(0 == static_cast<int>(times_1)); | 59 STATIC_ASSERT(0 == static_cast<int>(times_1)); |
(...skipping 69 matching lines...) |
225 return MemoryOperand(&first_input); | 129 return MemoryOperand(&first_input); |
226 } | 130 } |
227 }; | 131 }; |
228 | 132 |
229 | 133 |
230 static bool HasImmediateInput(Instruction* instr, int index) { | 134 static bool HasImmediateInput(Instruction* instr, int index) { |
231 return instr->InputAt(index)->IsImmediate(); | 135 return instr->InputAt(index)->IsImmediate(); |
232 } | 136 } |
233 | 137 |
234 | 138 |
235 #define ASSEMBLE_BINOP(asm_instr) \ | 139 #define ASSEMBLE_UNOP(asm_instr) \ |
236 do { \ | 140 do { \ |
237 if (HasImmediateInput(instr, 1)) { \ | 141 if (instr->Output()->IsRegister()) { \ |
238 RegisterOrOperand input = i.InputRegisterOrOperand(0); \ | 142 __ asm_instr(i.OutputRegister()); \ |
239 if (input.type == kRegister) { \ | 143 } else { \ |
240 __ asm_instr(input.reg, i.InputImmediate(1)); \ | 144 __ asm_instr(i.OutputOperand()); \ |
241 } else { \ | 145 } \ |
242 __ asm_instr(input.operand, i.InputImmediate(1)); \ | |
243 } \ | |
244 } else { \ | |
245 RegisterOrOperand input = i.InputRegisterOrOperand(1); \ | |
246 if (input.type == kRegister) { \ | |
247 __ asm_instr(i.InputRegister(0), input.reg); \ | |
248 } else { \ | |
249 __ asm_instr(i.InputRegister(0), input.operand); \ | |
250 } \ | |
251 } \ | |
252 } while (0) | 146 } while (0) |
253 | 147 |
254 | 148 |
| 149 #define ASSEMBLE_BINOP(asm_instr) \ |
| 150 do { \ |
| 151 if (HasImmediateInput(instr, 1)) { \ |
| 152 if (instr->InputAt(0)->IsRegister()) { \ |
| 153 __ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \ |
| 154 } else { \ |
| 155 __ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \ |
| 156 } \ |
| 157 } else { \ |
| 158 if (instr->InputAt(1)->IsRegister()) { \ |
| 159 __ asm_instr(i.InputRegister(0), i.InputRegister(1)); \ |
| 160 } else { \ |
| 161 __ asm_instr(i.InputRegister(0), i.InputOperand(1)); \ |
| 162 } \ |
| 163 } \ |
| 164 } while (0) |
| 165 |
| 166 |
| 167 #define ASSEMBLE_MULT(asm_instr) \ |
| 168 do { \ |
| 169 if (HasImmediateInput(instr, 1)) { \ |
| 170 if (instr->InputAt(0)->IsRegister()) { \ |
| 171 __ asm_instr(i.OutputRegister(), i.InputRegister(0), \ |
| 172 i.InputImmediate(1)); \ |
| 173 } else { \ |
| 174 __ asm_instr(i.OutputRegister(), i.InputOperand(0), \ |
| 175 i.InputImmediate(1)); \ |
| 176 } \ |
| 177 } else { \ |
| 178 if (instr->InputAt(1)->IsRegister()) { \ |
| 179 __ asm_instr(i.OutputRegister(), i.InputRegister(1)); \ |
| 180 } else { \ |
| 181 __ asm_instr(i.OutputRegister(), i.InputOperand(1)); \ |
| 182 } \ |
| 183 } \ |
| 184 } while (0) |
| 185 |
| 186 |
255 #define ASSEMBLE_SHIFT(asm_instr, width) \ | 187 #define ASSEMBLE_SHIFT(asm_instr, width) \ |
256 do { \ | 188 do { \ |
257 if (HasImmediateInput(instr, 1)) { \ | 189 if (HasImmediateInput(instr, 1)) { \ |
258 __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \ | 190 __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \ |
259 } else { \ | 191 } else { \ |
260 __ asm_instr##_cl(i.OutputRegister()); \ | 192 __ asm_instr##_cl(i.OutputRegister()); \ |
261 } \ | 193 } \ |
262 } while (0) | 194 } while (0) |
263 | 195 |
264 | 196 |
(...skipping 63 matching lines...) |
328 case kX64Cmp: | 260 case kX64Cmp: |
329 ASSEMBLE_BINOP(cmpq); | 261 ASSEMBLE_BINOP(cmpq); |
330 break; | 262 break; |
331 case kX64Test32: | 263 case kX64Test32: |
332 ASSEMBLE_BINOP(testl); | 264 ASSEMBLE_BINOP(testl); |
333 break; | 265 break; |
334 case kX64Test: | 266 case kX64Test: |
335 ASSEMBLE_BINOP(testq); | 267 ASSEMBLE_BINOP(testq); |
336 break; | 268 break; |
337 case kX64Imul32: | 269 case kX64Imul32: |
338 if (HasImmediateInput(instr, 1)) { | 270 ASSEMBLE_MULT(imull); |
339 RegisterOrOperand input = i.InputRegisterOrOperand(0); | |
340 if (input.type == kRegister) { | |
341 __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1)); | |
342 } else { | |
343 __ imull(i.OutputRegister(), input.operand, i.InputImmediate(1)); | |
344 } | |
345 } else { | |
346 RegisterOrOperand input = i.InputRegisterOrOperand(1); | |
347 if (input.type == kRegister) { | |
348 __ imull(i.OutputRegister(), input.reg); | |
349 } else { | |
350 __ imull(i.OutputRegister(), input.operand); | |
351 } | |
352 } | |
353 break; | 271 break; |
354 case kX64Imul: | 272 case kX64Imul: |
355 if (HasImmediateInput(instr, 1)) { | 273 ASSEMBLE_MULT(imulq); |
356 RegisterOrOperand input = i.InputRegisterOrOperand(0); | |
357 if (input.type == kRegister) { | |
358 __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1)); | |
359 } else { | |
360 __ imulq(i.OutputRegister(), input.operand, i.InputImmediate(1)); | |
361 } | |
362 } else { | |
363 RegisterOrOperand input = i.InputRegisterOrOperand(1); | |
364 if (input.type == kRegister) { | |
365 __ imulq(i.OutputRegister(), input.reg); | |
366 } else { | |
367 __ imulq(i.OutputRegister(), input.operand); | |
368 } | |
369 } | |
370 break; | 274 break; |
371 case kX64Idiv32: | 275 case kX64Idiv32: |
372 __ cdq(); | 276 __ cdq(); |
373 __ idivl(i.InputRegister(1)); | 277 __ idivl(i.InputRegister(1)); |
374 break; | 278 break; |
375 case kX64Idiv: | 279 case kX64Idiv: |
376 __ cqo(); | 280 __ cqo(); |
377 __ idivq(i.InputRegister(1)); | 281 __ idivq(i.InputRegister(1)); |
378 break; | 282 break; |
379 case kX64Udiv32: | 283 case kX64Udiv32: |
380 __ xorl(rdx, rdx); | 284 __ xorl(rdx, rdx); |
381 __ divl(i.InputRegister(1)); | 285 __ divl(i.InputRegister(1)); |
382 break; | 286 break; |
383 case kX64Udiv: | 287 case kX64Udiv: |
384 __ xorq(rdx, rdx); | 288 __ xorq(rdx, rdx); |
385 __ divq(i.InputRegister(1)); | 289 __ divq(i.InputRegister(1)); |
386 break; | 290 break; |
387 case kX64Not: { | 291 case kX64Not: |
388 RegisterOrOperand output = i.OutputRegisterOrOperand(); | 292 ASSEMBLE_UNOP(notq); |
389 if (output.type == kRegister) { | |
390 __ notq(output.reg); | |
391 } else { | |
392 __ notq(output.operand); | |
393 } | |
394 break; | 293 break; |
395 } | 294 case kX64Not32: |
396 case kX64Not32: { | 295 ASSEMBLE_UNOP(notl); |
397 RegisterOrOperand output = i.OutputRegisterOrOperand(); | |
398 if (output.type == kRegister) { | |
399 __ notl(output.reg); | |
400 } else { | |
401 __ notl(output.operand); | |
402 } | |
403 break; | 296 break; |
404 } | 297 case kX64Neg: |
405 case kX64Neg: { | 298 ASSEMBLE_UNOP(negq); |
406 RegisterOrOperand output = i.OutputRegisterOrOperand(); | |
407 if (output.type == kRegister) { | |
408 __ negq(output.reg); | |
409 } else { | |
410 __ negq(output.operand); | |
411 } | |
412 break; | 299 break; |
413 } | 300 case kX64Neg32: |
414 case kX64Neg32: { | 301 ASSEMBLE_UNOP(negl); |
415 RegisterOrOperand output = i.OutputRegisterOrOperand(); | |
416 if (output.type == kRegister) { | |
417 __ negl(output.reg); | |
418 } else { | |
419 __ negl(output.operand); | |
420 } | |
421 break; | 302 break; |
422 } | |
423 case kX64Or32: | 303 case kX64Or32: |
424 ASSEMBLE_BINOP(orl); | 304 ASSEMBLE_BINOP(orl); |
425 break; | 305 break; |
426 case kX64Or: | 306 case kX64Or: |
427 ASSEMBLE_BINOP(orq); | 307 ASSEMBLE_BINOP(orq); |
428 break; | 308 break; |
429 case kX64Xor32: | 309 case kX64Xor32: |
430 ASSEMBLE_BINOP(xorl); | 310 ASSEMBLE_BINOP(xorl); |
431 break; | 311 break; |
432 case kX64Xor: | 312 case kX64Xor: |
(...skipping 16 matching lines...) |
449 break; | 329 break; |
450 case kX64Sar: | 330 case kX64Sar: |
451 ASSEMBLE_SHIFT(sarq, 6); | 331 ASSEMBLE_SHIFT(sarq, 6); |
452 break; | 332 break; |
453 case kX64Ror32: | 333 case kX64Ror32: |
454 ASSEMBLE_SHIFT(rorl, 5); | 334 ASSEMBLE_SHIFT(rorl, 5); |
455 break; | 335 break; |
456 case kX64Ror: | 336 case kX64Ror: |
457 ASSEMBLE_SHIFT(rorq, 6); | 337 ASSEMBLE_SHIFT(rorq, 6); |
458 break; | 338 break; |
459 case kSSEFloat64Cmp: { | 339 case kSSEFloat64Cmp: |
460 RegisterOrOperand input = i.InputRegisterOrOperand(1); | 340 if (instr->InputAt(1)->IsDoubleRegister()) { |
461 if (input.type == kDoubleRegister) { | 341 __ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); |
462 __ ucomisd(i.InputDoubleRegister(0), input.double_reg); | |
463 } else { | 342 } else { |
464 __ ucomisd(i.InputDoubleRegister(0), input.operand); | 343 __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1)); |
465 } | 344 } |
466 break; | 345 break; |
467 } | |
468 case kSSEFloat64Add: | 346 case kSSEFloat64Add: |
469 __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); | 347 __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); |
470 break; | 348 break; |
471 case kSSEFloat64Sub: | 349 case kSSEFloat64Sub: |
472 __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); | 350 __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); |
473 break; | 351 break; |
474 case kSSEFloat64Mul: | 352 case kSSEFloat64Mul: |
475 __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); | 353 __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1)); |
476 break; | 354 break; |
477 case kSSEFloat64Div: | 355 case kSSEFloat64Div: |
(...skipping 23 matching lines...) |
501 __ popfq(); | 379 __ popfq(); |
502 } | 380 } |
503 __ j(parity_even, &mod_loop); | 381 __ j(parity_even, &mod_loop); |
504 // Move output to stack and clean up. | 382 // Move output to stack and clean up. |
505 __ fstp(1); | 383 __ fstp(1); |
506 __ fstp_d(Operand(rsp, 0)); | 384 __ fstp_d(Operand(rsp, 0)); |
507 __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0)); | 385 __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0)); |
508 __ addq(rsp, Immediate(kDoubleSize)); | 386 __ addq(rsp, Immediate(kDoubleSize)); |
509 break; | 387 break; |
510 } | 388 } |
511 case kSSEFloat64Sqrt: { | 389 case kSSEFloat64Sqrt: |
512 RegisterOrOperand input = i.InputRegisterOrOperand(0); | 390 if (instr->InputAt(0)->IsDoubleRegister()) { |
513 if (input.type == kDoubleRegister) { | 391 __ sqrtsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
514 __ sqrtsd(i.OutputDoubleRegister(), input.double_reg); | |
515 } else { | 392 } else { |
516 __ sqrtsd(i.OutputDoubleRegister(), input.operand); | 393 __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0)); |
517 } | 394 } |
518 break; | 395 break; |
519 } | |
520 case kSSECvtss2sd: | 396 case kSSECvtss2sd: |
521 __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); | 397 __ cvtss2sd(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
522 break; | 398 break; |
523 case kSSECvtsd2ss: | 399 case kSSECvtsd2ss: |
524 __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); | 400 __ cvtsd2ss(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); |
525 break; | 401 break; |
526 case kSSEFloat64ToInt32: { | 402 case kSSEFloat64ToInt32: |
527 RegisterOrOperand input = i.InputRegisterOrOperand(0); | 403 if (instr->InputAt(0)->IsDoubleRegister()) { |
528 if (input.type == kDoubleRegister) { | 404 __ cvttsd2si(i.OutputRegister(), i.InputDoubleRegister(0)); |
529 __ cvttsd2si(i.OutputRegister(), input.double_reg); | |
530 } else { | 405 } else { |
531 __ cvttsd2si(i.OutputRegister(), input.operand); | 406 __ cvttsd2si(i.OutputRegister(), i.InputOperand(0)); |
532 } | 407 } |
533 break; | 408 break; |
534 } | 409 case kSSEFloat64ToUint32: |
535 case kSSEFloat64ToUint32: { | 410 if (instr->InputAt(0)->IsDoubleRegister()) { |
536 RegisterOrOperand input = i.InputRegisterOrOperand(0); | 411 __ cvttsd2siq(i.OutputRegister(), i.InputDoubleRegister(0)); |
537 if (input.type == kDoubleRegister) { | |
538 __ cvttsd2siq(i.OutputRegister(), input.double_reg); | |
539 } else { | 412 } else { |
540 __ cvttsd2siq(i.OutputRegister(), input.operand); | 413 __ cvttsd2siq(i.OutputRegister(), i.InputOperand(0)); |
541 } | 414 } |
542 __ andl(i.OutputRegister(), i.OutputRegister()); // clear upper bits. | 415 __ andl(i.OutputRegister(), i.OutputRegister()); // clear upper bits. |
543 // TODO(turbofan): generated code should not look at the upper 32 bits | 416 // TODO(turbofan): generated code should not look at the upper 32 bits |
544 // of the result, but those bits could escape to the outside world. | 417 // of the result, but those bits could escape to the outside world. |
545 break; | 418 break; |
546 } | 419 case kSSEInt32ToFloat64: |
547 case kSSEInt32ToFloat64: { | 420 if (instr->InputAt(0)->IsRegister()) { |
548 RegisterOrOperand input = i.InputRegisterOrOperand(0); | 421 __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputRegister(0)); |
549 if (input.type == kRegister) { | |
550 __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg); | |
551 } else { | 422 } else { |
552 __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand); | 423 __ cvtlsi2sd(i.OutputDoubleRegister(), i.InputOperand(0)); |
553 } | 424 } |
554 break; | 425 break; |
555 } | 426 case kSSEUint32ToFloat64: |
556 case kSSEUint32ToFloat64: { | |
557 // TODO(turbofan): X64 SSE cvtqsi2sd should support operands. | 427 // TODO(turbofan): X64 SSE cvtqsi2sd should support operands. |
558 __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0)); | 428 __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0)); |
559 break; | 429 break; |
560 } | |
561 case kX64Movsxbl: | 430 case kX64Movsxbl: |
562 __ movsxbl(i.OutputRegister(), i.MemoryOperand()); | 431 __ movsxbl(i.OutputRegister(), i.MemoryOperand()); |
563 break; | 432 break; |
564 case kX64Movzxbl: | 433 case kX64Movzxbl: |
565 __ movzxbl(i.OutputRegister(), i.MemoryOperand()); | 434 __ movzxbl(i.OutputRegister(), i.MemoryOperand()); |
566 break; | 435 break; |
567 case kX64Movb: { | 436 case kX64Movb: { |
568 int index = 0; | 437 int index = 0; |
569 Operand operand = i.MemoryOperand(&index); | 438 Operand operand = i.MemoryOperand(&index); |
570 if (HasImmediateInput(instr, index)) { | 439 if (HasImmediateInput(instr, index)) { |
(...skipping 15 matching lines...) |
586 if (HasImmediateInput(instr, index)) { | 455 if (HasImmediateInput(instr, index)) { |
587 __ movw(operand, Immediate(i.InputInt16(index))); | 456 __ movw(operand, Immediate(i.InputInt16(index))); |
588 } else { | 457 } else { |
589 __ movw(operand, i.InputRegister(index)); | 458 __ movw(operand, i.InputRegister(index)); |
590 } | 459 } |
591 break; | 460 break; |
592 } | 461 } |
593 case kX64Movl: | 462 case kX64Movl: |
594 if (instr->HasOutput()) { | 463 if (instr->HasOutput()) { |
595 if (instr->addressing_mode() == kMode_None) { | 464 if (instr->addressing_mode() == kMode_None) { |
596 RegisterOrOperand input = i.InputRegisterOrOperand(0); | 465 if (instr->InputAt(0)->IsRegister()) { |
597 if (input.type == kRegister) { | 466 __ movl(i.OutputRegister(), i.InputRegister(0)); |
598 __ movl(i.OutputRegister(), input.reg); | |
599 } else { | 467 } else { |
600 __ movl(i.OutputRegister(), input.operand); | 468 __ movl(i.OutputRegister(), i.InputOperand(0)); |
601 } | 469 } |
602 } else { | 470 } else { |
603 __ movl(i.OutputRegister(), i.MemoryOperand()); | 471 __ movl(i.OutputRegister(), i.MemoryOperand()); |
604 } | 472 } |
605 } else { | 473 } else { |
606 int index = 0; | 474 int index = 0; |
607 Operand operand = i.MemoryOperand(&index); | 475 Operand operand = i.MemoryOperand(&index); |
608 if (HasImmediateInput(instr, index)) { | 476 if (HasImmediateInput(instr, index)) { |
609 __ movl(operand, i.InputImmediate(index)); | 477 __ movl(operand, i.InputImmediate(index)); |
610 } else { | 478 } else { |
611 __ movl(operand, i.InputRegister(index)); | 479 __ movl(operand, i.InputRegister(index)); |
612 } | 480 } |
613 } | 481 } |
614 break; | 482 break; |
615 case kX64Movsxlq: { | 483 case kX64Movsxlq: { |
616 RegisterOrOperand input = i.InputRegisterOrOperand(0); | 484 if (instr->InputAt(0)->IsRegister()) { |
617 if (input.type == kRegister) { | 485 __ movsxlq(i.OutputRegister(), i.InputRegister(0)); |
618 __ movsxlq(i.OutputRegister(), input.reg); | |
619 } else { | 486 } else { |
620 __ movsxlq(i.OutputRegister(), input.operand); | 487 __ movsxlq(i.OutputRegister(), i.InputOperand(0)); |
621 } | 488 } |
622 break; | 489 break; |
623 } | 490 } |
624 case kX64Movq: | 491 case kX64Movq: |
625 if (instr->HasOutput()) { | 492 if (instr->HasOutput()) { |
626 __ movq(i.OutputRegister(), i.MemoryOperand()); | 493 __ movq(i.OutputRegister(), i.MemoryOperand()); |
627 } else { | 494 } else { |
628 int index = 0; | 495 int index = 0; |
629 Operand operand = i.MemoryOperand(&index); | 496 Operand operand = i.MemoryOperand(&index); |
630 if (HasImmediateInput(instr, index)) { | 497 if (HasImmediateInput(instr, index)) { |
(...skipping 18 matching lines...) |
649 } else { | 516 } else { |
650 int index = 0; | 517 int index = 0; |
651 Operand operand = i.MemoryOperand(&index); | 518 Operand operand = i.MemoryOperand(&index); |
652 __ movsd(operand, i.InputDoubleRegister(index)); | 519 __ movsd(operand, i.InputDoubleRegister(index)); |
653 } | 520 } |
654 break; | 521 break; |
655 case kX64Push: | 522 case kX64Push: |
656 if (HasImmediateInput(instr, 0)) { | 523 if (HasImmediateInput(instr, 0)) { |
657 __ pushq(i.InputImmediate(0)); | 524 __ pushq(i.InputImmediate(0)); |
658 } else { | 525 } else { |
659 RegisterOrOperand input = i.InputRegisterOrOperand(0); | 526 if (instr->InputAt(0)->IsRegister()) { |
660 if (input.type == kRegister) { | 527 __ pushq(i.InputRegister(0)); |
661 __ pushq(input.reg); | |
662 } else { | 528 } else { |
663 __ pushq(input.operand); | 529 __ pushq(i.InputOperand(0)); |
664 } | 530 } |
665 } | 531 } |
666 break; | 532 break; |
667 case kX64StoreWriteBarrier: { | 533 case kX64StoreWriteBarrier: { |
668 Register object = i.InputRegister(0); | 534 Register object = i.InputRegister(0); |
669 Register index = i.InputRegister(1); | 535 Register index = i.InputRegister(1); |
670 Register value = i.InputRegister(2); | 536 Register value = i.InputRegister(2); |
671 __ movsxlq(index, index); | 537 __ movsxlq(index, index); |
672 __ movq(Operand(object, index, times_1, 0), value); | 538 __ movq(Operand(object, index, times_1, 0), value); |
673 __ leaq(index, Operand(object, index, times_1, 0)); | 539 __ leaq(index, Operand(object, index, times_1, 0)); |
(...skipping 287 matching lines...) |
961 Operand dst = g.ToOperand(destination); | 827 Operand dst = g.ToOperand(destination); |
962 __ movq(tmp, src); | 828 __ movq(tmp, src); |
963 __ movq(dst, tmp); | 829 __ movq(dst, tmp); |
964 } | 830 } |
965 } else if (source->IsConstant()) { | 831 } else if (source->IsConstant()) { |
966 ConstantOperand* constant_source = ConstantOperand::cast(source); | 832 ConstantOperand* constant_source = ConstantOperand::cast(source); |
967 Constant src = g.ToConstant(constant_source); | 833 Constant src = g.ToConstant(constant_source); |
968 if (destination->IsRegister() || destination->IsStackSlot()) { | 834 if (destination->IsRegister() || destination->IsStackSlot()) { |
969 Register dst = destination->IsRegister() ? g.ToRegister(destination) | 835 Register dst = destination->IsRegister() ? g.ToRegister(destination) |
970 : kScratchRegister; | 836 : kScratchRegister; |
971 Immediate64 imm = g.ToImmediate64(constant_source); | 837 switch (src.type()) { |
972 switch (imm.type) { | 838 case Constant::kInt32: |
973 case kImm64Value: | 839 // TODO(dcarney): don't need scratch in this case. |
974 __ Set(dst, imm.value); | 840 __ movq(dst, Immediate(src.ToInt32())); |
975 break; | 841 break; |
976 case kImm64Reference: | 842 case Constant::kInt64: |
977 __ Move(dst, imm.reference); | 843 __ Set(dst, src.ToInt64()); |
978 break; | 844 break; |
979 case kImm64Handle: | 845 case Constant::kFloat32: |
980 __ Move(dst, imm.handle); | 846 __ Move(dst, |
| 847 isolate()->factory()->NewNumber(src.ToFloat32(), TENURED)); |
| 848 break; |
| 849 case Constant::kFloat64: |
| 850 __ Move(dst, |
| 851 isolate()->factory()->NewNumber(src.ToFloat64(), TENURED)); |
| 852 break; |
| 853 case Constant::kExternalReference: |
| 854 __ Move(dst, src.ToExternalReference()); |
| 855 break; |
| 856 case Constant::kHeapObject: |
| 857 __ Move(dst, src.ToHeapObject()); |
981 break; | 858 break; |
982 } | 859 } |
983 if (destination->IsStackSlot()) { | 860 if (destination->IsStackSlot()) { |
984 __ movq(g.ToOperand(destination), kScratchRegister); | 861 __ movq(g.ToOperand(destination), kScratchRegister); |
985 } | 862 } |
986 } else if (src.type() == Constant::kFloat32) { | 863 } else if (src.type() == Constant::kFloat32) { |
987 // TODO(turbofan): Can we do better here? | 864 // TODO(turbofan): Can we do better here? |
988 __ movl(kScratchRegister, Immediate(bit_cast<int32_t>(src.ToFloat32()))); | 865 __ movl(kScratchRegister, Immediate(bit_cast<int32_t>(src.ToFloat32()))); |
989 if (destination->IsDoubleRegister()) { | 866 if (destination->IsDoubleRegister()) { |
990 XMMRegister dst = g.ToDoubleRegister(destination); | 867 XMMRegister dst = g.ToDoubleRegister(destination); |
(...skipping 101 matching lines...) |
1092 } | 969 } |
1093 } | 970 } |
1094 MarkLazyDeoptSite(); | 971 MarkLazyDeoptSite(); |
1095 } | 972 } |
1096 | 973 |
1097 #undef __ | 974 #undef __ |
1098 | 975 |
1099 } // namespace internal | 976 } // namespace internal |
1100 } // namespace compiler | 977 } // namespace compiler |
1101 } // namespace v8 | 978 } // namespace v8 |