Chromium Code Reviews

Unified Diff: src/compiler/x64/instruction-selector-x64.cc

Issue 652363006: [turbofan] First step towards correctified 64-bit addressing. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Fixes2 (created 6 years, 1 month ago)
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/compiler/generic-node-inl.h"
 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"

 namespace v8 {
 namespace internal {
 namespace compiler {

 // Adds X64-specific methods for generating operands.
 class X64OperandGenerator FINAL : public OperandGenerator {
  public:
   explicit X64OperandGenerator(InstructionSelector* selector)
       : OperandGenerator(selector) {}

   InstructionOperand* TempRegister(Register reg) {
     return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
                                            Register::ToAllocationIndex(reg));
   }

   bool CanBeImmediate(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kInt32Constant:
         return true;
+      case IrOpcode::kInt64Constant: {
+        const int64_t value = OpParameter<int64_t>(node);
+        return value == static_cast<int64_t>(static_cast<int32_t>(value));
+      }
       default:
         return false;
     }
   }

   bool CanBeBetterLeftOperand(Node* node) const {
     return !selector()->IsLive(node);
   }
 };

-// Get the AddressingMode of scale factor N from the AddressingMode of scale
-// factor 1.
-static AddressingMode AdjustAddressingMode(AddressingMode base_mode,
-                                           int power) {
-  DCHECK(0 <= power && power < 4);
-  return static_cast<AddressingMode>(static_cast<int>(base_mode) + power);
-}
-
-
-class AddressingModeMatcher {
- public:
-  AddressingModeMatcher(X64OperandGenerator* g, Node* base, Node* index)
-      : base_operand_(NULL),
-        index_operand_(NULL),
-        displacement_operand_(NULL),
-        mode_(kMode_None) {
-    Int32Matcher index_imm(index);
-    if (index_imm.HasValue()) {
-      int32_t value = index_imm.Value();
-      if (value == 0) {
-        mode_ = kMode_MR;
-      } else {
-        mode_ = kMode_MRI;
-        index_operand_ = g->UseImmediate(index);
-      }
-      base_operand_ = g->UseRegister(base);
-    } else {
-      // Compute base operand.
-      Int64Matcher base_imm(base);
-      if (!base_imm.HasValue() || base_imm.Value() != 0) {
-        base_operand_ = g->UseRegister(base);
-      }
-      // Compute index and displacement.
-      IndexAndDisplacementMatcher matcher(index);
-      index_operand_ = g->UseRegister(matcher.index_node());
-      if (matcher.displacement() != 0) {
-        displacement_operand_ = g->TempImmediate(matcher.displacement());
-      }
-      // Compute mode with scale factor one.
-      if (base_operand_ == NULL) {
-        if (displacement_operand_ == NULL) {
-          mode_ = kMode_M1;
-        } else {
-          mode_ = kMode_M1I;
-        }
-      } else {
-        if (displacement_operand_ == NULL) {
-          mode_ = kMode_MR1;
-        } else {
-          mode_ = kMode_MR1I;
-        }
-      }
-      // Adjust mode to actual scale factor.
-      mode_ = AdjustAddressingMode(mode_, matcher.power());
-    }
-    DCHECK_NE(kMode_None, mode_);
-  }
-
-  size_t SetInputs(InstructionOperand** inputs) {
-    size_t input_count = 0;
-    // Compute inputs_ and input_count.
-    if (base_operand_ != NULL) {
-      inputs[input_count++] = base_operand_;
-    }
-    if (index_operand_ != NULL) {
-      inputs[input_count++] = index_operand_;
-    }
-    if (displacement_operand_ != NULL) {
-      // Pure displacement mode not supported by x64.
-      DCHECK_NE(static_cast<int>(input_count), 0);
-      inputs[input_count++] = displacement_operand_;
-    }
-    DCHECK_NE(static_cast<int>(input_count), 0);
-    return input_count;
-  }
-
-  static const int kMaxInputCount = 3;
-  InstructionOperand* base_operand_;
-  InstructionOperand* index_operand_;
-  InstructionOperand* displacement_operand_;
-  AddressingMode mode_;
-};
-
-
-static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
-                           Node* node) {
-  X64OperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsRegister(node),
-                 g.UseRegister(node->InputAt(0)));
-}
-
-
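For readers of the deleted matcher above: AdjustAddressingMode relied on the AddressingMode enum laying out the scale variants consecutively, which is what its DCHECK(0 <= power && power < 4) implies. A hedged sketch of that layout assumption (the enumerator order below is assumed for illustration, not quoted from instruction-codes-x64.h):

    // Sketch only: why "base_mode + power" selected the scaled variant.
    enum AddressingMode {
      kMode_None,
      kMode_MR,   // [%base]
      kMode_MRI,  // [%base + #disp]
      kMode_MR1,  // [%base + %index*1]
      kMode_MR2,  // [%base + %index*2]  (assumed consecutive)
      kMode_MR4,  // [%base + %index*4]
      kMode_MR8,  // [%base + %index*8]
    };

    AddressingMode AdjustAddressingMode(AddressingMode base_mode, int power) {
      // power in [0, 3] encodes scale 1 << power; consecutive enumerators
      // turn the scale adjustment into plain integer addition.
      return static_cast<AddressingMode>(static_cast<int>(base_mode) + power);
    }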
 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
   MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
+  X64OperandGenerator g(this);
+  Node* const base = node->InputAt(0);
+  Node* const index = node->InputAt(1);

   ArchOpcode opcode;
-  // TODO(titzer): signed/unsigned small loads
   switch (rep) {
     case kRepFloat32:
       opcode = kX64Movss;
       break;
     case kRepFloat64:
       opcode = kX64Movsd;
       break;
     case kRepBit:  // Fall through.
     case kRepWord8:
       opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
       break;
     case kRepWord16:
       opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
       break;
     case kRepWord32:
       opcode = kX64Movl;
       break;
     case kRepTagged:  // Fall through.
     case kRepWord64:
       opcode = kX64Movq;
       break;
     default:
       UNREACHABLE();
       return;
   }
-
-  X64OperandGenerator g(this);
-  AddressingModeMatcher matcher(&g, base, index);
-  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
-  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
-  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount];
-  size_t input_count = matcher.SetInputs(inputs);
-  Emit(code, 1, outputs, input_count, inputs);
+  if (g.CanBeImmediate(base)) {
+    // load [#base + %index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
+  } else if (g.CanBeImmediate(index)) {
+    // load [%base + #index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    // load [%base + %index*1]
+    Emit(opcode | AddressingModeField::encode(kMode_MR1),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+  }
 }
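The rewritten VisitLoad distinguishes three operand shapes. Note that the first two branches share kMode_MRI: the inputs are evidently consumed positionally (register, then immediate), so [#base + %index] is emitted as the equivalent [%index + #base]. A standalone sketch of the dispatch (names are illustrative):

    #include <cstdio>

    // Illustrative only: base_imm/index_imm stand for g.CanBeImmediate(...).
    const char* SelectLoadShape(bool base_imm, bool index_imm) {
      if (base_imm) return "MRI: load [%index + #base]";
      if (index_imm) return "MRI: load [%base + #index]";
      return "MR1: load [%base + %index*1]";
    }

    int main() {
      printf("%s\n", SelectLoadShape(true, false));   // immediate base
      printf("%s\n", SelectLoadShape(false, true));   // immediate index
      printf("%s\n", SelectLoadShape(false, false));  // both in registers
      return 0;
    }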


 void InstructionSelector::VisitStore(Node* node) {
   X64OperandGenerator g(this);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);

   StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
(...skipping 29 matching lines...)
       opcode = kX64Movl;
       break;
     case kRepTagged:  // Fall through.
     case kRepWord64:
       opcode = kX64Movq;
       break;
     default:
       UNREACHABLE();
       return;
   }
-
-  InstructionOperand* val;
-  if (g.CanBeImmediate(value)) {
-    val = g.UseImmediate(value);
+  InstructionOperand* value_operand =
+      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+  if (g.CanBeImmediate(base)) {
+    // store [#base + %index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
+         g.UseRegister(index), g.UseImmediate(base), value_operand);
+  } else if (g.CanBeImmediate(index)) {
+    // store [%base + #index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
+         g.UseRegister(base), g.UseImmediate(index), value_operand);
   } else {
-    val = g.UseRegister(value);
+    // store [%base + %index*1], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
+         g.UseRegister(base), g.UseRegister(index), value_operand);
   }
-
-  AddressingModeMatcher matcher(&g, base, index);
-  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
-  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1];
-  size_t input_count = matcher.SetInputs(inputs);
-  inputs[input_count++] = val;
-  Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
 }


 // Shared routine for multiple binary operations.
 static void VisitBinop(InstructionSelector* selector, Node* node,
                        InstructionCode opcode, FlagsContinuation* cont) {
   X64OperandGenerator g(selector);
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
   Node* right = m.right().node();
(...skipping 90 matching lines...)
   X64OperandGenerator g(this);
   Uint64BinopMatcher m(node);
   if (m.right().Is(-1)) {
     Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
   } else {
     VisitBinop(this, node, kX64Xor);
   }
 }


+namespace {
+
 // Shared routine for multiple 32-bit shift operations.
 // TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
-static void VisitWord32Shift(InstructionSelector* selector, Node* node,
+void VisitWord32Shift(InstructionSelector* selector, Node* node,
                       ArchOpcode opcode) {
   X64OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
+  Int32BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();

   if (g.CanBeImmediate(right)) {
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
-    Int32BinopMatcher m(node);
     if (m.right().IsWord32And()) {
       Int32BinopMatcher mright(right);
       if (mright.right().Is(0x1F)) {
         right = mright.left().node();
       }
     }
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseFixed(right, rcx));
   }
 }


 // Shared routine for multiple 64-bit shift operations.
 // TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
-static void VisitWord64Shift(InstructionSelector* selector, Node* node,
+void VisitWord64Shift(InstructionSelector* selector, Node* node,
                       ArchOpcode opcode) {
   X64OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
+  Int64BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();

   if (g.CanBeImmediate(right)) {
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
-    Int64BinopMatcher m(node);
     if (m.right().IsWord64And()) {
       Int64BinopMatcher mright(right);
       if (mright.right().Is(0x3F)) {
         right = mright.left().node();
       }
     }
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseFixed(right, rcx));
   }
 }

+}  // namespace
+

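Both shift helpers strip a mask of 0x1F (or 0x3F) from a variable shift count before pinning it in rcx. That is sound because the hardware already reduces the count modulo the operand width, so the Word32And/Word64And node is redundant. A checkable sketch of the identity being relied on (plain C++, not selector code):

    #include <cassert>
    #include <cstdint>

    // Models the x64 behavior: shifts take the count mod 32 (or mod 64).
    uint32_t HardwareShl32(uint32_t x, uint32_t count) { return x << (count & 0x1F); }
    uint64_t HardwareShl64(uint64_t x, uint64_t count) { return x << (count & 0x3F); }

    int main() {
      for (uint32_t n = 0; n < 64; ++n) {
        // Masking the count in the IR changes nothing once it reaches CL.
        assert(HardwareShl32(0xDEADBEEFu, n & 0x1F) == HardwareShl32(0xDEADBEEFu, n));
        assert(HardwareShl64(0xDEADBEEFCAFEULL, n & 0x3F) ==
               HardwareShl64(0xDEADBEEFCAFEULL, n));
      }
      return 0;
    }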
 void InstructionSelector::VisitWord32Shl(Node* node) {
   VisitWord32Shift(this, node, kX64Shl32);
 }


 void InstructionSelector::VisitWord64Shl(Node* node) {
   VisitWord64Shift(this, node, kX64Shl);
 }

(...skipping 21 matching lines...)
 void InstructionSelector::VisitWord32Ror(Node* node) {
   VisitWord32Shift(this, node, kX64Ror32);
 }


 void InstructionSelector::VisitWord64Ror(Node* node) {
   VisitWord64Shift(this, node, kX64Ror);
 }

-static bool TryEmitLeaMultAdd(InstructionSelector* selector, Node* node,
-                              ArchOpcode opcode) {
-  int32_t displacement_value;
-  Node* left;
-  {
-    Int32BinopMatcher m32(node);
-    left = m32.left().node();
-    if (m32.right().HasValue()) {
-      displacement_value = m32.right().Value();
-    } else {
-      Int64BinopMatcher m64(node);
-      if (!m64.right().HasValue()) {
-        return false;
-      }
-      int64_t value_64 = m64.right().Value();
-      displacement_value = static_cast<int32_t>(value_64);
-      if (displacement_value != value_64) return false;
-    }
-  }
-  LeaMultiplyMatcher lmm(left);
-  if (!lmm.Matches()) return false;
-  AddressingMode mode;
-  size_t input_count;
-  X64OperandGenerator g(selector);
-  InstructionOperand* index = g.UseRegister(lmm.Left());
-  InstructionOperand* displacement = g.TempImmediate(displacement_value);
-  InstructionOperand* inputs[] = {index, displacement, displacement};
-  if (lmm.Displacement() != 0) {
-    input_count = 3;
-    inputs[1] = index;
-    mode = kMode_MR1I;
-  } else {
-    input_count = 2;
-    mode = kMode_M1I;
-  }
-  mode = AdjustAddressingMode(mode, lmm.Power());
-  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
-  selector->Emit(opcode | AddressingModeField::encode(mode), 1, outputs,
-                 input_count, inputs);
-  return true;
-}
-
-
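Context for the deletion above (and for TryEmitLeaMult further down): these helpers folded small constant multiplies, and multiply-adds, into a single lea, which computes base + index*scale + displacement in one instruction without touching the flags. The exact factor set is whatever LeaMultiplyMatcher accepted; the sketch below assumes the usual lea-friendly factors:

    #include <cassert>
    #include <cstdint>

    // Illustrative only: the arithmetic one x64 lea can perform.
    // scale must be 1, 2, 4, or 8.
    uint64_t Lea(uint64_t base, uint64_t index, int scale, int32_t disp) {
      return base + index * static_cast<uint64_t>(scale) +
             static_cast<uint64_t>(static_cast<int64_t>(disp));
    }

    int main() {
      uint64_t x = 42;
      assert(Lea(0, x, 2, 0) == x * 2);        // lea r, [x*2]
      assert(Lea(x, x, 2, 0) == x * 3);        // lea r, [x + x*2]
      assert(Lea(x, x, 4, 0) == x * 5);        // lea r, [x + x*4]
      assert(Lea(x, x, 8, 0) == x * 9);        // lea r, [x + x*8]
      assert(Lea(0, x, 4, 12) == x * 4 + 12);  // multiply-add in one lea
      return 0;
    }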
 void InstructionSelector::VisitInt32Add(Node* node) {
-  if (TryEmitLeaMultAdd(this, node, kX64Lea32)) return;
   VisitBinop(this, node, kX64Add32);
 }


 void InstructionSelector::VisitInt64Add(Node* node) {
-  if (TryEmitLeaMultAdd(this, node, kX64Lea)) return;
   VisitBinop(this, node, kX64Add);
 }


 void InstructionSelector::VisitInt32Sub(Node* node) {
   X64OperandGenerator g(this);
   Int32BinopMatcher m(node);
   if (m.left().Is(0)) {
     Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
   } else {
     VisitBinop(this, node, kX64Sub32);
   }
 }


 void InstructionSelector::VisitInt64Sub(Node* node) {
   X64OperandGenerator g(this);
   Int64BinopMatcher m(node);
   if (m.left().Is(0)) {
     Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
   } else {
     VisitBinop(this, node, kX64Sub);
   }
 }


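Just above, VisitInt32Sub and VisitInt64Sub strength-reduce a subtraction from zero to a single neg instruction. The identity, as a tiny sketch (INT32_MIN is excluded since negating it would overflow):

    #include <cassert>
    #include <cstdint>

    // Illustrative only: Int32Sub(0, x) equals a one-instruction negation.
    int32_t NegViaSub(int32_t x) { return 0 - x; }

    int main() {
      for (int32_t x : {0, 1, -1, 42, -42}) {
        assert(NegViaSub(x) == -x);
      }
      return 0;
    }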
-static bool TryEmitLeaMult(InstructionSelector* selector, Node* node,
-                           ArchOpcode opcode) {
-  LeaMultiplyMatcher lea(node);
-  // Try to match lea.
-  if (!lea.Matches()) return false;
-  AddressingMode mode;
-  size_t input_count;
-  X64OperandGenerator g(selector);
-  InstructionOperand* left = g.UseRegister(lea.Left());
-  InstructionOperand* inputs[] = {left, left};
-  if (lea.Displacement() != 0) {
-    input_count = 2;
-    mode = kMode_MR1;
-  } else {
-    input_count = 1;
-    mode = kMode_M1;
-  }
-  mode = AdjustAddressingMode(mode, lea.Power());
-  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
-  selector->Emit(opcode | AddressingModeField::encode(mode), 1, outputs,
-                 input_count, inputs);
-  return true;
-}
+namespace {

-static void VisitMul(InstructionSelector* selector, Node* node,
-                     ArchOpcode opcode) {
+void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   X64OperandGenerator g(selector);
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
   Node* right = m.right().node();
   if (g.CanBeImmediate(right)) {
     selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                    g.UseImmediate(right));
   } else {
     if (g.CanBeBetterLeftOperand(right)) {
       std::swap(left, right);
     }
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.Use(right));
   }
 }

+}  // namespace
+

 void InstructionSelector::VisitInt32Mul(Node* node) {
-  if (TryEmitLeaMult(this, node, kX64Lea32)) return;
   VisitMul(this, node, kX64Imul32);
 }


 void InstructionSelector::VisitInt64Mul(Node* node) {
-  if (TryEmitLeaMult(this, node, kX64Lea)) return;
   VisitMul(this, node, kX64Imul);
 }


 void InstructionSelector::VisitInt32MulHigh(Node* node) {
   X64OperandGenerator g(this);
   InstructionOperand* temps[] = {g.TempRegister(rax)};
   Emit(kX64ImulHigh32, g.DefineAsFixed(node, rdx),
        g.UseFixed(node->InputAt(0), rax), g.UseUniqueRegister(node->InputAt(1)),
        arraysize(temps), temps);
(...skipping 91 matching lines...)


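VisitInt32MulHigh pins its operands because the underlying one-operand imul reads the multiplicand from rax and writes the high half of the 64-bit product to rdx, hence DefineAsFixed(node, rdx); the rax temp apparently marks the clobber. What the instruction computes, as a checkable sketch:

    #include <cassert>
    #include <cstdint>

    // Illustrative only: the value kX64ImulHigh32 leaves in rdx.
    int32_t Int32MulHigh(int32_t a, int32_t b) {
      return static_cast<int32_t>(
          (static_cast<int64_t>(a) * static_cast<int64_t>(b)) >> 32);
    }

    int main() {
      assert(Int32MulHigh(1 << 16, 1 << 16) == 1);  // 2^32 >> 32
      assert(Int32MulHigh(-1, 1) == -1);            // high word of -1 is all ones
      assert(Int32MulHigh(0x40000000, 4) == 1);     // another 2^32
      return 0;
    }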
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
   X64OperandGenerator g(this);
   Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }


 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  Node* value = node->InputAt(0);
+  switch (value->opcode()) {
+    case IrOpcode::kWord32And:
+    case IrOpcode::kWord32Or:
+    case IrOpcode::kWord32Xor:
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord32Shr:
+    case IrOpcode::kWord32Sar:
+    case IrOpcode::kWord32Ror:
+    case IrOpcode::kWord32Equal:
+    case IrOpcode::kInt32Add:
+    case IrOpcode::kInt32Sub:
+    case IrOpcode::kInt32Mul:
+    case IrOpcode::kInt32MulHigh:
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kInt32LessThan:
+    case IrOpcode::kInt32LessThanOrEqual:
+    case IrOpcode::kInt32Mod:
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kUint32LessThan:
+    case IrOpcode::kUint32LessThanOrEqual:
+    case IrOpcode::kUint32Mod: {
+      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so
+      // the zero-extension is a no-op.
+      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+      return;
+    }
+    default:
+      break;
+  }
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
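The new fast path in VisitChangeUint32ToUint64 exploits an x64 ISA guarantee: writing a 32-bit register clears bits 63:32 of the full register. Every operation in the list therefore already leaves a correctly zero-extended value, and the conversion degenerates to a nop. The effect, modeled in plain C++:

    #include <cassert>
    #include <cstdint>

    int main() {
      // A 32-bit add that wraps: on x64 the destination register then holds
      // the 32-bit result zero-extended, never a 33-bit carry.
      uint32_t a = 0xFFFFFFFFu, b = 1u;
      uint32_t sum32 = a + b;                       // wraps to 0
      uint64_t reg = static_cast<uint64_t>(sum32);  // what the full register holds
      assert(reg == 0);                             // bits 63:32 are zero
      // So ChangeUint32ToUint64(Int32Add(...)) needs no extra movl.
      return 0;
    }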


 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
   X64OperandGenerator g(this);
   Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }


 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  Node* value = node->InputAt(0);
+  if (CanCover(node, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord64Sar:
+      case IrOpcode::kWord64Shr: {
+        Int64BinopMatcher m(value);
+        if (m.right().Is(32)) {
+          Emit(kX64Shr, g.DefineSameAsFirst(node),
+               g.UseRegister(m.left().node()), g.TempImmediate(32));
+          return;
+        }
+        break;
+      }
+      default:
+        break;
+    }
+  }
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
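The new VisitTruncateInt64ToInt32 path: if the input is a 64-bit right shift by exactly 32 and nothing else uses it (CanCover), the truncated result is just the high word of the shift's input, so a single 64-bit shr produces it directly and the separate movl disappears. Emitting the logical kX64Shr is valid even for the arithmetic Word64Sar case, because only the low 32 bits of the shifted value are observed and those agree for both shifts when the count is 32. A checkable sketch:

    #include <cassert>
    #include <cstdint>

    // Illustrative only: TruncateInt64ToInt32(Word64Shr(x, 32)) is the high
    // word of x, and Word64Sar(x, 32) has the same low 32 bits.
    int main() {
      uint64_t x = 0x89ABCDEF01234567ULL;  // high bit set, to exercise sar
      uint32_t via_shr = static_cast<uint32_t>(x >> 32);
      // Arithmetic shift on signed values (two's complement on x64):
      uint32_t via_sar = static_cast<uint32_t>(static_cast<int64_t>(x) >> 32);
      assert(via_shr == 0x89ABCDEFu);
      assert(via_sar == via_shr);  // low 32 bits agree for a count of 32
      return 0;
    }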


 void InstructionSelector::VisitFloat64Add(Node* node) {
   X64OperandGenerator g(this);
   Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
        g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
 }


(...skipping 26 matching lines...)
        temps);
 }


 void InstructionSelector::VisitFloat64Sqrt(Node* node) {
   X64OperandGenerator g(this);
   Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }


+namespace {
+
+void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
+  X64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+}  // namespace
+
+
 void InstructionSelector::VisitFloat64Floor(Node* node) {
   DCHECK(CpuFeatures::IsSupported(SSE4_1));
   VisitRRFloat64(this, kSSEFloat64Floor, node);
 }


 void InstructionSelector::VisitFloat64Ceil(Node* node) {
   DCHECK(CpuFeatures::IsSupported(SSE4_1));
   VisitRRFloat64(this, kSSEFloat64Ceil, node);
 }
(...skipping 402 matching lines...)
   if (CpuFeatures::IsSupported(SSE4_1)) {
     return MachineOperatorBuilder::kFloat64Floor |
            MachineOperatorBuilder::kFloat64Ceil |
            MachineOperatorBuilder::kFloat64RoundTruncate;
   }
   return MachineOperatorBuilder::kNoFlags;
 }

 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8