Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(177)

Side by Side Diff: src/compiler/arm64/instruction-selector-arm64.cc

Issue 447203002: Add Uint32AddWithOverflow and Uint32SubWithOverflow machine operators. Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/compiler/instruction-selector-impl.h" 5 #include "src/compiler/instruction-selector-impl.h"
6 #include "src/compiler/node-matchers.h" 6 #include "src/compiler/node-matchers.h"
7 7
8 namespace v8 { 8 namespace v8 {
9 namespace internal { 9 namespace internal {
10 namespace compiler { 10 namespace compiler {
(...skipping 88 matching lines...) Expand 10 before | Expand all | Expand 10 after
99 99
100 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, 100 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
101 Node* node, ImmediateMode operand_mode) { 101 Node* node, ImmediateMode operand_mode) {
102 Arm64OperandGenerator g(selector); 102 Arm64OperandGenerator g(selector);
103 selector->Emit(opcode, g.DefineAsRegister(node), 103 selector->Emit(opcode, g.DefineAsRegister(node),
104 g.UseRegister(node->InputAt(0)), 104 g.UseRegister(node->InputAt(0)),
105 g.UseOperand(node->InputAt(1), operand_mode)); 105 g.UseOperand(node->InputAt(1), operand_mode));
106 } 106 }
107 107
108 108
109 // Shared routine for multiple binary operations.
110 static void VisitBinop(InstructionSelector* selector, Node* node, 109 static void VisitBinop(InstructionSelector* selector, Node* node,
111 InstructionCode opcode, ImmediateMode operand_mode, 110 InstructionCode opcode, ImmediateMode operand_mode,
112 FlagsContinuation* cont) { 111 FlagsContinuation* cont) {
113 Arm64OperandGenerator g(selector); 112 Arm64OperandGenerator g(selector);
114 Int32BinopMatcher m(node); 113 Int32BinopMatcher m(node);
115 InstructionOperand* inputs[4]; 114 InstructionOperand* inputs[4];
116 size_t input_count = 0; 115 size_t input_count = 0;
117 InstructionOperand* outputs[2]; 116 InstructionOperand* outputs[2];
118 size_t output_count = 0; 117 size_t output_count = 0;
119 118
(...skipping 14 matching lines...) Expand all
134 DCHECK_NE(0, output_count); 133 DCHECK_NE(0, output_count);
135 DCHECK_GE(ARRAY_SIZE(inputs), input_count); 134 DCHECK_GE(ARRAY_SIZE(inputs), input_count);
136 DCHECK_GE(ARRAY_SIZE(outputs), output_count); 135 DCHECK_GE(ARRAY_SIZE(outputs), output_count);
137 136
138 Instruction* instr = selector->Emit(cont->Encode(opcode), output_count, 137 Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
139 outputs, input_count, inputs); 138 outputs, input_count, inputs);
140 if (cont->IsBranch()) instr->MarkAsControl(); 139 if (cont->IsBranch()) instr->MarkAsControl();
141 } 140 }
142 141
143 142
144 // Shared routine for multiple binary operations.
145 static void VisitBinop(InstructionSelector* selector, Node* node, 143 static void VisitBinop(InstructionSelector* selector, Node* node,
146 ArchOpcode opcode, ImmediateMode operand_mode) { 144 ArchOpcode opcode, ImmediateMode operand_mode) {
147 FlagsContinuation cont; 145 FlagsContinuation cont;
148 VisitBinop(selector, node, opcode, operand_mode, &cont); 146 VisitBinop(selector, node, opcode, operand_mode, &cont);
149 } 147 }
150 148
151 149
150 static void VisitBinop(InstructionSelector* selector, Node* node,
151 InstructionCode opcode,
152 FlagsCondition overflow_condition) {
153 if (Node* overflow = node->FindProjection(1)) {
154 FlagsContinuation cont(overflow_condition, overflow);
155 return VisitBinop(selector, node, opcode, kArithimeticImm, &cont);
156 }
157 FlagsContinuation cont;
158 return VisitBinop(selector, node, opcode, kArithimeticImm, &cont);
159 }
160
161
152 void InstructionSelector::VisitLoad(Node* node) { 162 void InstructionSelector::VisitLoad(Node* node) {
153 MachineRepresentation rep = OpParameter<MachineRepresentation>(node); 163 MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
154 Arm64OperandGenerator g(this); 164 Arm64OperandGenerator g(this);
155 Node* base = node->InputAt(0); 165 Node* base = node->InputAt(0);
156 Node* index = node->InputAt(1); 166 Node* index = node->InputAt(1);
157 167
158 InstructionOperand* result = rep == kMachineFloat64 168 InstructionOperand* result = rep == kMachineFloat64
159 ? g.DefineAsDoubleRegister(node) 169 ? g.DefineAsDoubleRegister(node)
160 : g.DefineAsRegister(node); 170 : g.DefineAsRegister(node);
161 171
(...skipping 165 matching lines...) Expand 10 before | Expand all | Expand 10 after
// 64-bit arithmetic shift right; the shift amount may be a 64-bit shift
// immediate.
void InstructionSelector::VisitWord64Sar(Node* node) {
  VisitRRO(this, kArm64Sar, node, kShift64Imm);
}
330 340
331 341
// 32-bit integer addition; the right input may be an arithmetic immediate.
void InstructionSelector::VisitInt32Add(Node* node) {
  VisitBinop(this, node, kArm64Add32, kArithimeticImm);
}
335 345
336 346
// Signed 32-bit addition with an overflow projection; the overflow output,
// if used, is taken from the kOverflow condition.
void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
  VisitBinop(this, node, kArm64Add32, kOverflow);
}
350
351
// Unsigned 32-bit addition with an overflow projection. Uses
// kUnsignedGreaterThanOrEqual — presumably this encodes the ARM CS/HS
// (carry set) condition, which signals unsigned add overflow; TODO confirm
// the condition-code mapping in the flags encoding.
void InstructionSelector::VisitUint32AddWithOverflow(Node* node) {
  VisitBinop(this, node, kArm64Add32, kUnsignedGreaterThanOrEqual);
}
355
356
// 64-bit integer addition; the right input may be an arithmetic immediate.
void InstructionSelector::VisitInt64Add(Node* node) {
  VisitBinop(this, node, kArm64Add, kArithimeticImm);
}
340 360
341 361
342 template <typename T> 362 template <typename T>
343 static void VisitSub(InstructionSelector* selector, Node* node, 363 static void VisitSub(InstructionSelector* selector, Node* node,
344 ArchOpcode sub_opcode, ArchOpcode neg_opcode) { 364 ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
345 Arm64OperandGenerator g(selector); 365 Arm64OperandGenerator g(selector);
346 BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node); 366 BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
347 if (m.left().Is(0)) { 367 if (m.left().Is(0)) {
348 selector->Emit(neg_opcode, g.DefineAsRegister(node), 368 selector->Emit(neg_opcode, g.DefineAsRegister(node),
349 g.UseRegister(m.right().node())); 369 g.UseRegister(m.right().node()));
350 } else { 370 } else {
351 VisitBinop(selector, node, sub_opcode, kArithimeticImm); 371 VisitBinop(selector, node, sub_opcode, kArithimeticImm);
352 } 372 }
353 } 373 }
354 374
355 375
// 32-bit integer subtraction (with 0 - x => neg folding, see VisitSub).
void InstructionSelector::VisitInt32Sub(Node* node) {
  VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32);
}
359 379
360 380
// Signed 32-bit subtraction with an overflow projection; the overflow
// output, if used, is taken from the kOverflow condition.
void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
  VisitBinop(this, node, kArm64Sub32, kOverflow);
}
384
385
// Unsigned 32-bit subtraction with an overflow projection. Uses
// kUnsignedLessThan — presumably this encodes the ARM CC/LO (carry clear)
// condition, i.e. a borrow occurred on SUBS; TODO confirm the
// condition-code mapping in the flags encoding.
void InstructionSelector::VisitUint32SubWithOverflow(Node* node) {
  VisitBinop(this, node, kArm64Sub32, kUnsignedLessThan);
}
389
390
// 64-bit integer subtraction (with 0 - x => neg folding, see VisitSub).
void InstructionSelector::VisitInt64Sub(Node* node) {
  VisitSub<int64_t>(this, node, kArm64Sub, kArm64Neg);
}
364 394
365 395
// 32-bit integer multiplication; all operands in registers.
void InstructionSelector::VisitInt32Mul(Node* node) {
  VisitRRR(this, kArm64Mul32, node);
}
369 399
370 400
(...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after
472 502
473 503
// Float64 modulus is emitted as a call (MarkAsCall) with the two inputs
// pinned to d0/d1 and the result in d0 — presumably the calling convention
// of a runtime/C helper; confirm against the code generator for
// kArm64Float64Mod.
void InstructionSelector::VisitFloat64Mod(Node* node) {
  Arm64OperandGenerator g(this);
  Emit(kArm64Float64Mod, g.DefineAsFixedDouble(node, d0),
       g.UseFixedDouble(node->InputAt(0), d0),
       g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
}
480 510
481 511
// Signed 32-bit add-with-overflow, with the flags consumed by the supplied
// continuation (e.g. a fused branch).
void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kArm64Add32, kArithimeticImm, cont);
}
486
487
// Signed 32-bit subtract-with-overflow, with the flags consumed by the
// supplied continuation (e.g. a fused branch).
void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                    FlagsContinuation* cont) {
  VisitBinop(this, node, kArm64Sub32, kArithimeticImm, cont);
}
492
493
494 // Shared routine for multiple compare operations. 512 // Shared routine for multiple compare operations.
495 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode, 513 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
496 InstructionOperand* left, InstructionOperand* right, 514 InstructionOperand* left, InstructionOperand* right,
497 FlagsContinuation* cont) { 515 FlagsContinuation* cont) {
498 Arm64OperandGenerator g(selector); 516 Arm64OperandGenerator g(selector);
499 opcode = cont->Encode(opcode); 517 opcode = cont->Encode(opcode);
500 if (cont->IsBranch()) { 518 if (cont->IsBranch()) {
501 selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()), 519 selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
502 g.Label(cont->false_block()))->MarkAsControl(); 520 g.Label(cont->false_block()))->MarkAsControl();
503 } else { 521 } else {
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after
572 void InstructionSelector::VisitFloat64Compare(Node* node, 590 void InstructionSelector::VisitFloat64Compare(Node* node,
573 FlagsContinuation* cont) { 591 FlagsContinuation* cont) {
574 Arm64OperandGenerator g(this); 592 Arm64OperandGenerator g(this);
575 Node* left = node->InputAt(0); 593 Node* left = node->InputAt(0);
576 Node* right = node->InputAt(1); 594 Node* right = node->InputAt(1);
577 VisitCompare(this, kArm64Float64Cmp, g.UseDoubleRegister(left), 595 VisitCompare(this, kArm64Float64Cmp, g.UseDoubleRegister(left),
578 g.UseDoubleRegister(right), cont); 596 g.UseDoubleRegister(right), cont);
579 } 597 }
580 598
581 599
// Selects instructions for a branch node, trying to fuse the branch with the
// comparison (or the overflow check of an <Operation>WithOverflow node) that
// produces its condition, instead of materializing a boolean and testing it.
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                      BasicBlock* fbranch) {
  OperandGenerator g(this);
  Node* user = branch;
  Node* value = branch->InputAt(0);

  // Default continuation: take the true block when the value is non-zero.
  FlagsContinuation cont(kNotEqual, tbranch, fbranch);

  // If we can fall through to the true block, invert the branch.
  if (IsNextInAssemblyOrder(tbranch)) {
    cont.Negate();
    cont.SwapBlocks();
  }

  // Try to combine with comparisons against 0 by simply inverting the branch.
  while (CanCover(user, value)) {
    if (value->opcode() == IrOpcode::kWord32Equal) {
      Int32BinopMatcher m(value);
      if (m.right().Is(0)) {
        // branch(x == 0) is branch(x) with the continuation negated.
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else if (value->opcode() == IrOpcode::kWord64Equal) {
      Int64BinopMatcher m(value);
      if (m.right().Is(0)) {
        // Same folding for the 64-bit comparison against zero.
        user = value;
        value = m.left().node();
        cont.Negate();
      } else {
        break;
      }
    } else {
      break;
    }
  }

  // Try to combine the branch with a comparison.
  if (CanCover(user, value)) {
    switch (value->opcode()) {
      case IrOpcode::kWord32Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kInt32LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kInt32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kUint32LessThan:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kUint32LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
        return VisitWord32Compare(value, &cont);
      case IrOpcode::kWord64Equal:
        cont.OverwriteAndNegateIfEqual(kEqual);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kInt64LessThan:
        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kInt64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
        return VisitWord64Compare(value, &cont);
      case IrOpcode::kFloat64Equal:
        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kFloat64LessThan:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kFloat64LessThanOrEqual:
        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
        return VisitFloat64Compare(value, &cont);
      case IrOpcode::kProjection:
        // Check if this is the overflow output projection of an
        // <Operation>WithOverflow node.
        if (OpParameter<int32_t>(value) == 1) {
          // We cannot combine the <Operation>WithOverflow with this branch
          // unless the 0th projection (the use of the actual value of the
          // <Operation>) is either NULL, which means there's no use of the
          // actual value, or was already defined, which means it is scheduled
          // *AFTER* this branch.
          Node* node = value->InputAt(0);
          Node* result = node->FindProjection(0);
          if (result == NULL || IsDefined(result)) {
            // Re-emit the arithmetic with the branch as its flags user; the
            // condition per operation matches the standalone Visit*WithOverflow
            // handlers above.
            switch (node->opcode()) {
              case IrOpcode::kInt32AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kArm64Add32, kArithimeticImm,
                                  &cont);
              case IrOpcode::kUint32AddWithOverflow:
                cont.OverwriteAndNegateIfEqual(kUnsignedGreaterThanOrEqual);
                return VisitBinop(this, node, kArm64Add32, kArithimeticImm,
                                  &cont);
              case IrOpcode::kInt32SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kOverflow);
                return VisitBinop(this, node, kArm64Sub32, kArithimeticImm,
                                  &cont);
              case IrOpcode::kUint32SubWithOverflow:
                cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
                return VisitBinop(this, node, kArm64Sub32, kArithimeticImm,
                                  &cont);
              default:
                break;
            }
          }
        }
        break;
      default:
        break;
    }
  }

  // Branch could not be combined with a compare, emit compare against 0.
  VisitWord32Test(value, &cont);
}
718
719
582 void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation, 720 void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
583 BasicBlock* deoptimization) { 721 BasicBlock* deoptimization) {
584 Arm64OperandGenerator g(this); 722 Arm64OperandGenerator g(this);
585 CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call); 723 CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
586 CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here? 724 CallBuffer buffer(zone(), descriptor); // TODO(turbofan): temp zone here?
587 725
588 // Compute InstructionOperands for inputs and outputs. 726 // Compute InstructionOperands for inputs and outputs.
589 // TODO(turbofan): on ARM64 it's probably better to use the code object in a 727 // TODO(turbofan): on ARM64 it's probably better to use the code object in a
590 // register if there are multiple uses of it. Improve constant pool and the 728 // register if there are multiple uses of it. Improve constant pool and the
591 // heuristics in the register allocator for where to emit constants. 729 // heuristics in the register allocator for where to emit constants.
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after
658 // Caller clean up of stack for C-style calls. 796 // Caller clean up of stack for C-style calls.
659 if (is_c_frame && aligned_push_count > 0) { 797 if (is_c_frame && aligned_push_count > 0) {
660 DCHECK(deoptimization == NULL && continuation == NULL); 798 DCHECK(deoptimization == NULL && continuation == NULL);
661 Emit(kArm64Drop | MiscField::encode(aligned_push_count), NULL); 799 Emit(kArm64Drop | MiscField::encode(aligned_push_count), NULL);
662 } 800 }
663 } 801 }
664 802
665 } // namespace compiler 803 } // namespace compiler
666 } // namespace internal 804 } // namespace internal
667 } // namespace v8 805 } // namespace v8
OLDNEW
« no previous file with comments | « src/compiler/arm/instruction-selector-arm.cc ('k') | src/compiler/ia32/instruction-selector-ia32.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698