Chromium Code Reviews

Unified Diff: src/compiler/x64/instruction-selector-x64.cc

Issue 652363006: [turbofan] First step towards correctified 64-bit addressing. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 1 month ago
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/compiler/generic-node-inl.h"
 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
 // Adds X64-specific methods for generating operands.
 class X64OperandGenerator FINAL : public OperandGenerator {
  public:
   explicit X64OperandGenerator(InstructionSelector* selector)
       : OperandGenerator(selector) {}
 
   InstructionOperand* TempRegister(Register reg) {
     return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
                                            Register::ToAllocationIndex(reg));
   }
 
   bool CanBeImmediate(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kInt32Constant:
         return true;
+      case IrOpcode::kInt64Constant: {
+        const int64_t value = OpParameter<int64_t>(node);
+        return value == static_cast<int64_t>(static_cast<int32_t>(value));
+      }
       default:
         return false;
     }
   }
 
   bool CanBeBetterLeftOperand(Node* node) const {
     return !selector()->IsLive(node);
   }
 };
 
 
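The new kInt64Constant case above is the core of the change: a 64-bit constant can be used as an x64 immediate only when sign-extending its low 32 bits reproduces the full value. A minimal standalone sketch of that predicate (illustrative only, not V8 code; the helper name is invented):

    #include <cassert>
    #include <cstdint>

    // True iff |value| survives a round trip through int32_t, i.e. it can be
    // encoded as a 32-bit immediate that the CPU sign-extends back to the
    // identical 64-bit value.
    bool IsSignExtendedInt32(int64_t value) {
      return value == static_cast<int64_t>(static_cast<int32_t>(value));
    }

    int main() {
      assert(IsSignExtendedInt32(-1));             // all-ones sign-extends fine
      assert(IsSignExtendedInt32(0x7FFFFFFF));     // INT32_MAX fits
      assert(!IsSignExtendedInt32(0x80000000LL));  // would need zero-extension
      assert(!IsSignExtendedInt32(INT64_C(1) << 40));  // too wide
      return 0;
    }
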
-// Get the AddressingMode of scale factor N from the AddressingMode of scale
-// factor 1.
-static AddressingMode AdjustAddressingMode(AddressingMode base_mode,
-                                           int power) {
-  DCHECK(0 <= power && power < 4);
-  return static_cast<AddressingMode>(static_cast<int>(base_mode) + power);
-}
-
-
-class AddressingModeMatcher {
- public:
-  AddressingModeMatcher(X64OperandGenerator* g, Node* base, Node* index)
-      : base_operand_(NULL),
-        index_operand_(NULL),
-        displacement_operand_(NULL),
-        mode_(kMode_None) {
-    Int32Matcher index_imm(index);
-    if (index_imm.HasValue()) {
-      int32_t value = index_imm.Value();
-      if (value == 0) {
-        mode_ = kMode_MR;
-      } else {
-        mode_ = kMode_MRI;
-        index_operand_ = g->UseImmediate(index);
-      }
-      base_operand_ = g->UseRegister(base);
-    } else {
-      // Compute base operand.
-      Int64Matcher base_imm(base);
-      if (!base_imm.HasValue() || base_imm.Value() != 0) {
-        base_operand_ = g->UseRegister(base);
-      }
-      // Compute index and displacement.
-      IndexAndDisplacementMatcher matcher(index);
-      index_operand_ = g->UseRegister(matcher.index_node());
-      if (matcher.displacement() != 0) {
-        displacement_operand_ = g->TempImmediate(matcher.displacement());
-      }
-      // Compute mode with scale factor one.
-      if (base_operand_ == NULL) {
-        if (displacement_operand_ == NULL) {
-          mode_ = kMode_M1;
-        } else {
-          mode_ = kMode_M1I;
-        }
-      } else {
-        if (displacement_operand_ == NULL) {
-          mode_ = kMode_MR1;
-        } else {
-          mode_ = kMode_MR1I;
-        }
-      }
-      // Adjust mode to actual scale factor.
-      mode_ = AdjustAddressingMode(mode_, matcher.power());
-    }
-    DCHECK_NE(kMode_None, mode_);
-  }
-
-  size_t SetInputs(InstructionOperand** inputs) {
-    size_t input_count = 0;
-    // Compute inputs_ and input_count.
-    if (base_operand_ != NULL) {
-      inputs[input_count++] = base_operand_;
-    }
-    if (index_operand_ != NULL) {
-      inputs[input_count++] = index_operand_;
-    }
-    if (displacement_operand_ != NULL) {
-      // Pure displacement mode not supported by x64.
-      DCHECK_NE(static_cast<int>(input_count), 0);
-      inputs[input_count++] = displacement_operand_;
-    }
-    DCHECK_NE(static_cast<int>(input_count), 0);
-    return input_count;
-  }
-
-  static const int kMaxInputCount = 3;
-  InstructionOperand* base_operand_;
-  InstructionOperand* index_operand_;
-  InstructionOperand* displacement_operand_;
-  AddressingMode mode_;
-};
-
-
 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
   MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
+  X64OperandGenerator g(this);
+  Node* const base = node->InputAt(0);
+  Node* const index = node->InputAt(1);
 
   ArchOpcode opcode;
-  // TODO(titzer): signed/unsigned small loads
   switch (rep) {
     case kRepFloat32:
       opcode = kX64Movss;
       break;
     case kRepFloat64:
       opcode = kX64Movsd;
       break;
     case kRepBit:  // Fall through.
     case kRepWord8:
       opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
       break;
     case kRepWord16:
       opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
       break;
     case kRepWord32:
       opcode = kX64Movl;
       break;
     case kRepTagged:  // Fall through.
     case kRepWord64:
       opcode = kX64Movq;
       break;
     default:
       UNREACHABLE();
       return;
   }
-
-  X64OperandGenerator g(this);
-  AddressingModeMatcher matcher(&g, base, index);
-  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
-  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
-  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount];
-  size_t input_count = matcher.SetInputs(inputs);
-  Emit(code, 1, outputs, input_count, inputs);
+  if (g.CanBeImmediate(base)) {
+    // load [#base + %index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
+  } else if (g.CanBeImmediate(index)) {
+    // load [%base + #index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    // load [%base + %index*1]
+    Emit(opcode | AddressingModeField::encode(kMode_MR1),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+  }
 }
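VisitLoad above (and VisitStore below) now chooses between three fixed addressing modes instead of running the deleted AddressingModeMatcher. The kMode_MRI case with an immediate base works because address arithmetic commutes: the register and the constant displacement can swap roles. A standalone illustration (plain C++, not V8 code):

    #include <cassert>
    #include <cstdint>

    // A memory access at [base + index] does not care which addend is the
    // register and which is the constant displacement; that is what lets
    // VisitLoad emit kMode_MRI with the roles of base and index swapped.
    int32_t Load(intptr_t reg, intptr_t disp) {
      return *reinterpret_cast<int32_t*>(reg + disp);
    }

    int main() {
      int32_t slot = 42;
      intptr_t addr = reinterpret_cast<intptr_t>(&slot);
      assert(Load(addr, 0) == Load(0, addr));  // [%r + #0] == [#0 + %r]
      return 0;
    }
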
 
 
 void InstructionSelector::VisitStore(Node* node) {
   X64OperandGenerator g(this);
   Node* base = node->InputAt(0);
   Node* index = node->InputAt(1);
   Node* value = node->InputAt(2);
 
   StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
(...skipping 29 matching lines...)
       opcode = kX64Movl;
       break;
     case kRepTagged:  // Fall through.
     case kRepWord64:
       opcode = kX64Movq;
       break;
     default:
       UNREACHABLE();
       return;
   }
-
-  InstructionOperand* val;
-  if (g.CanBeImmediate(value)) {
-    val = g.UseImmediate(value);
+  InstructionOperand* value_operand =
+      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+  if (g.CanBeImmediate(base)) {
+    // store [#base + %index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
+         g.UseRegister(index), g.UseImmediate(base), value_operand);
+  } else if (g.CanBeImmediate(index)) {
+    // store [%base + #index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
+         g.UseRegister(base), g.UseImmediate(index), value_operand);
   } else {
-    val = g.UseRegister(value);
+    // store [%base + %index*1], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
+         g.UseRegister(base), g.UseRegister(index), value_operand);
   }
-
-  AddressingModeMatcher matcher(&g, base, index);
-  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
-  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1];
-  size_t input_count = matcher.SetInputs(inputs);
-  inputs[input_count++] = val;
-  Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
 }
 
 
 // Shared routine for multiple binary operations.
 static void VisitBinop(InstructionSelector* selector, Node* node,
                        InstructionCode opcode, FlagsContinuation* cont) {
   X64OperandGenerator g(selector);
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
   Node* right = m.right().node();
(...skipping 90 matching lines...)
   X64OperandGenerator g(this);
   Uint64BinopMatcher m(node);
   if (m.right().Is(-1)) {
     Emit(kX64Not, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()));
   } else {
     VisitBinop(this, node, kX64Xor);
   }
 }
 
 
+namespace {
+
 // Shared routine for multiple 32-bit shift operations.
 // TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
-static void VisitWord32Shift(InstructionSelector* selector, Node* node,
-                             ArchOpcode opcode) {
+void VisitWord32Shift(InstructionSelector* selector, Node* node,
+                      ArchOpcode opcode) {
   X64OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
+  Int32BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
 
   if (g.CanBeImmediate(right)) {
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
-    Int32BinopMatcher m(node);
     if (m.right().IsWord32And()) {
       Int32BinopMatcher mright(right);
       if (mright.right().Is(0x1F)) {
         right = mright.left().node();
       }
     }
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseFixed(right, rcx));
   }
 }
 
 
 // Shared routine for multiple 64-bit shift operations.
 // TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
-static void VisitWord64Shift(InstructionSelector* selector, Node* node,
-                             ArchOpcode opcode) {
+void VisitWord64Shift(InstructionSelector* selector, Node* node,
+                      ArchOpcode opcode) {
   X64OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
+  Int64BinopMatcher m(node);
+  Node* left = m.left().node();
+  Node* right = m.right().node();
 
   if (g.CanBeImmediate(right)) {
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
-    Int64BinopMatcher m(node);
     if (m.right().IsWord64And()) {
       Int64BinopMatcher mright(right);
       if (mright.right().Is(0x3F)) {
         right = mright.left().node();
       }
     }
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseFixed(right, rcx));
   }
 }
 
+}  // namespace
+
 
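Stripping a Word32And(count, 0x1F) or Word64And(count, 0x3F) around a variable shift count, as the two routines above do, is sound because x64 shift instructions already reduce the count in CL modulo the operand width. A standalone sketch of the identity being exploited (illustrative, not V8 code):

    #include <cassert>
    #include <cstdint>

    // x64's shl/shr/sar with a variable count use only the low 5 bits of CL
    // for 32-bit operands (low 6 bits for 64-bit ones), so an explicit mask
    // in the graph is redundant and can be dropped before emitting the shift.
    uint32_t Shl32(uint32_t value, uint32_t count) {
      return value << (count & 0x1F);  // what the hardware computes anyway
    }

    int main() {
      assert(Shl32(1, 33) == 2);  // a count of 33 behaves like a count of 1
      return 0;
    }
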
 void InstructionSelector::VisitWord32Shl(Node* node) {
   VisitWord32Shift(this, node, kX64Shl32);
 }
 
 
 void InstructionSelector::VisitWord64Shl(Node* node) {
   VisitWord64Shift(this, node, kX64Shl);
 }
 
(...skipping 21 matching lines...)
 void InstructionSelector::VisitWord32Ror(Node* node) {
   VisitWord32Shift(this, node, kX64Ror32);
 }
 
 
 void InstructionSelector::VisitWord64Ror(Node* node) {
   VisitWord64Shift(this, node, kX64Ror);
 }
 
 
-static bool TryEmitLeaMultAdd(InstructionSelector* selector, Node* node,
-                              ArchOpcode opcode) {
-  int32_t displacement_value;
-  Node* left;
-  {
-    Int32BinopMatcher m32(node);
-    left = m32.left().node();
-    if (m32.right().HasValue()) {
-      displacement_value = m32.right().Value();
-    } else {
-      Int64BinopMatcher m64(node);
-      if (!m64.right().HasValue()) {
-        return false;
-      }
-      int64_t value_64 = m64.right().Value();
-      displacement_value = static_cast<int32_t>(value_64);
-      if (displacement_value != value_64) return false;
-    }
-  }
-  LeaMultiplyMatcher lmm(left);
-  if (!lmm.Matches()) return false;
-  AddressingMode mode;
-  size_t input_count;
-  X64OperandGenerator g(selector);
-  InstructionOperand* index = g.UseRegister(lmm.Left());
-  InstructionOperand* displacement = g.TempImmediate(displacement_value);
-  InstructionOperand* inputs[] = {index, displacement, displacement};
-  if (lmm.Displacement() != 0) {
-    input_count = 3;
-    inputs[1] = index;
-    mode = kMode_MR1I;
-  } else {
-    input_count = 2;
-    mode = kMode_M1I;
-  }
-  mode = AdjustAddressingMode(mode, lmm.Power());
-  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
-  selector->Emit(opcode | AddressingModeField::encode(mode), 1, outputs,
-                 input_count, inputs);
-  return true;
-}
-
-
 void InstructionSelector::VisitInt32Add(Node* node) {
-  if (TryEmitLeaMultAdd(this, node, kX64Lea32)) return;
   VisitBinop(this, node, kX64Add32);
 }
 
 
 void InstructionSelector::VisitInt64Add(Node* node) {
-  if (TryEmitLeaMultAdd(this, node, kX64Lea)) return;
   VisitBinop(this, node, kX64Add);
 }
 
 
 void InstructionSelector::VisitInt32Sub(Node* node) {
   X64OperandGenerator g(this);
   Int32BinopMatcher m(node);
   if (m.left().Is(0)) {
     Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
   } else {
     VisitBinop(this, node, kX64Sub32);
   }
 }
 
 
 void InstructionSelector::VisitInt64Sub(Node* node) {
   X64OperandGenerator g(this);
   Int64BinopMatcher m(node);
   if (m.left().Is(0)) {
     Emit(kX64Neg, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
   } else {
     VisitBinop(this, node, kX64Sub);
   }
 }
 
 
-static bool TryEmitLeaMult(InstructionSelector* selector, Node* node,
-                           ArchOpcode opcode) {
-  LeaMultiplyMatcher lea(node);
-  // Try to match lea.
-  if (!lea.Matches()) return false;
-  AddressingMode mode;
-  size_t input_count;
-  X64OperandGenerator g(selector);
-  InstructionOperand* left = g.UseRegister(lea.Left());
-  InstructionOperand* inputs[] = {left, left};
-  if (lea.Displacement() != 0) {
-    input_count = 2;
-    mode = kMode_MR1;
-  } else {
-    input_count = 1;
-    mode = kMode_M1;
-  }
-  mode = AdjustAddressingMode(mode, lea.Power());
-  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
-  selector->Emit(opcode | AddressingModeField::encode(mode), 1, outputs,
-                 input_count, inputs);
-  return true;
-}
+namespace {
 
-
-static void VisitMul(InstructionSelector* selector, Node* node,
-                     ArchOpcode opcode) {
+void VisitMul(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
   X64OperandGenerator g(selector);
   Int32BinopMatcher m(node);
   Node* left = m.left().node();
   Node* right = m.right().node();
   if (g.CanBeImmediate(right)) {
     selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
                    g.UseImmediate(right));
   } else {
     if (g.CanBeBetterLeftOperand(right)) {
       std::swap(left, right);
     }
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.Use(right));
   }
 }
 
+}  // namespace
+
 
 void InstructionSelector::VisitInt32Mul(Node* node) {
-  if (TryEmitLeaMult(this, node, kX64Lea32)) return;
   VisitMul(this, node, kX64Imul32);
 }
 
 
 void InstructionSelector::VisitInt64Mul(Node* node) {
-  if (TryEmitLeaMult(this, node, kX64Lea)) return;
   VisitMul(this, node, kX64Imul);
 }
 
 
 void InstructionSelector::VisitInt32MulHigh(Node* node) {
   X64OperandGenerator g(this);
   InstructionOperand* temps[] = {g.TempRegister(rax)};
   Emit(kX64ImulHigh32, g.DefineAsFixed(node, rdx),
        g.UseFixed(node->InputAt(0), rax), g.UseUniqueRegister(node->InputAt(1)),
        arraysize(temps), temps);
(...skipping 91 matching lines...)
 
 
 void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
   X64OperandGenerator g(this);
   Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }
 
 
 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  Node* value = node->InputAt(0);
+  switch (value->opcode()) {
+    case IrOpcode::kWord32And:
+    case IrOpcode::kWord32Or:
+    case IrOpcode::kWord32Xor:
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord32Shr:
+    case IrOpcode::kWord32Sar:
+    case IrOpcode::kWord32Ror:
+    case IrOpcode::kWord32Equal:
+    case IrOpcode::kInt32Add:
+    case IrOpcode::kInt32Sub:
+    case IrOpcode::kInt32Mul:
+    case IrOpcode::kInt32MulHigh:
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kInt32LessThan:
+    case IrOpcode::kInt32LessThanOrEqual:
+    case IrOpcode::kInt32Mod:
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kUint32LessThan:
+    case IrOpcode::kUint32LessThanOrEqual:
+    case IrOpcode::kUint32Mod: {
+      // These 32-bit operations implicitly zero-extend to 64-bit on x64, so
+      // the zero-extension is a no-op.
+      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+      return;
+    }
+    default:
+      break;
+  }
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
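The "zero-extension is a no-op" claim above rests on the x64 rule that any instruction writing a 32-bit register clears bits 63:32, so the uint32 result of the listed operations is already its own zero-extension. The semantics being preserved, as a standalone sketch (not V8 code):

    #include <cassert>
    #include <cstdint>

    // What ChangeUint32ToUint64 must produce: the 32-bit result, zero-extended.
    // On x64 a 32-bit operation already leaves exactly this in the full
    // register, which is why the selector can emit kArchNop instead of a move.
    uint64_t ChangeUint32ToUint64(uint32_t x) { return static_cast<uint64_t>(x); }

    int main() {
      uint32_t a = 0xFFFFFFFFu;
      uint32_t sum = a + 1u;  // the 32-bit add wraps to 0
      assert(ChangeUint32ToUint64(sum) == 0u);  // zero-extended wrapped result,
      assert(ChangeUint32ToUint64(sum) !=
             static_cast<uint64_t>(a) + 1u);    // not the 64-bit sum
      return 0;
    }
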
 
 
 void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
   X64OperandGenerator g(this);
   Emit(kSSECvtsd2ss, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
 }
 
 
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+  Node* value = node->InputAt(0);
+  if (CanCover(node, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord64Sar:
+      case IrOpcode::kWord64Shr: {
+        Int64BinopMatcher m(value);
+        if (m.right().Is(32)) {
+          Emit(kX64Shr, g.DefineSameAsFirst(node),
+               g.UseRegister(m.left().node()), g.TempImmediate(32));
+          return;
+        }
+        break;
+      }
+      default:
+        break;
+    }
+  }
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(value));
 }
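The new pattern above folds the truncation into the shift: after a 64-bit value is shifted right by 32, its low 32 bits are exactly the original high word, so a single kX64Shr produces the final result and the separate truncating move disappears. A standalone sketch of the equivalence (not V8 code):

    #include <cassert>
    #include <cstdint>

    // TruncateInt64ToInt32(Word64Shr(x, 32)) is the high 32 bits of x. A
    // Word64Sar by 32 differs only in bits 63:32, which the truncation
    // drops, so both patterns can be lowered to one shift.
    int32_t TruncateShiftedRight(int64_t x) {
      return static_cast<int32_t>(static_cast<uint64_t>(x) >> 32);
    }

    int main() {
      int64_t x = INT64_C(0x123456789ABCDEF0);
      assert(TruncateShiftedRight(x) == INT32_C(0x12345678));
      assert(static_cast<int32_t>(x >> 32) == TruncateShiftedRight(x));
      return 0;
    }
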
 
 
 void InstructionSelector::VisitFloat64Add(Node* node) {
   X64OperandGenerator g(this);
   Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
        g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
 }
 
 
(...skipping 421 matching lines...)
 
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
   return MachineOperatorBuilder::kNoFlags;
 }
 
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8