Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(463)

Side by Side Diff: src/compiler/arm64/instruction-selector-arm64.cc

Issue 415403005: [turbofan] Support for combining branches with <Operation>WithOverflow. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 4 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/compiler/instruction-selector-impl.h" 5 #include "src/compiler/instruction-selector-impl.h"
6 #include "src/compiler/node-matchers.h" 6 #include "src/compiler/node-matchers.h"
7 7
8 namespace v8 { 8 namespace v8 {
9 namespace internal { 9 namespace internal {
10 namespace compiler { 10 namespace compiler {
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after
103 Node* node, ImmediateMode operand_mode) { 103 Node* node, ImmediateMode operand_mode) {
104 Arm64OperandGenerator g(selector); 104 Arm64OperandGenerator g(selector);
105 selector->Emit(opcode, g.DefineAsRegister(node), 105 selector->Emit(opcode, g.DefineAsRegister(node),
106 g.UseRegister(node->InputAt(0)), 106 g.UseRegister(node->InputAt(0)),
107 g.UseOperand(node->InputAt(1), operand_mode)); 107 g.UseOperand(node->InputAt(1), operand_mode));
108 } 108 }
109 109
110 110
111 // Shared routine for multiple binary operations. 111 // Shared routine for multiple binary operations.
112 static void VisitBinop(InstructionSelector* selector, Node* node, 112 static void VisitBinop(InstructionSelector* selector, Node* node,
113 ArchOpcode opcode, ImmediateMode operand_mode, 113 InstructionCode opcode, ImmediateMode operand_mode,
114 bool commutative) { 114 FlagsContinuation* cont) {
115 VisitRRO(selector, opcode, node, operand_mode);
116 }
117
118
119 static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
120 InstructionCode opcode) {
121 Arm64OperandGenerator g(selector); 115 Arm64OperandGenerator g(selector);
122 Int32BinopMatcher m(node); 116 Int32BinopMatcher m(node);
123 InstructionOperand* inputs[2]; 117 InstructionOperand* inputs[4];
124 size_t input_count = 0; 118 size_t input_count = 0;
125 InstructionOperand* outputs[2]; 119 InstructionOperand* outputs[2];
126 size_t output_count = 0; 120 size_t output_count = 0;
127 121
128 inputs[input_count++] = g.UseRegister(m.left().node()); 122 inputs[input_count++] = g.UseRegister(m.left().node());
129 inputs[input_count++] = g.UseRegister(m.right().node()); 123 inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
130 124
131 // Define outputs depending on the projections. 125 if (cont->IsBranch()) {
132 Node* projections[2]; 126 inputs[input_count++] = g.Label(cont->true_block());
133 node->CollectProjections(ARRAY_SIZE(projections), projections); 127 inputs[input_count++] = g.Label(cont->false_block());
134 if (projections[0]) {
135 outputs[output_count++] = g.DefineAsRegister(projections[0]);
136 } 128 }
137 if (projections[1]) { 129
138 opcode |= FlagsModeField::encode(kFlags_set); 130 outputs[output_count++] = g.DefineAsRegister(node);
139 opcode |= FlagsConditionField::encode(kOverflow); 131 if (cont->IsSet()) {
140 outputs[output_count++] = g.DefineAsRegister(projections[1]); 132 outputs[output_count++] = g.DefineAsRegister(cont->result());
141 } 133 }
142 134
143 ASSERT_NE(0, input_count); 135 ASSERT_NE(0, input_count);
144 ASSERT_NE(0, output_count); 136 ASSERT_NE(0, output_count);
145 ASSERT_GE(ARRAY_SIZE(inputs), input_count); 137 ASSERT_GE(ARRAY_SIZE(inputs), input_count);
146 ASSERT_GE(ARRAY_SIZE(outputs), output_count); 138 ASSERT_GE(ARRAY_SIZE(outputs), output_count);
147 139
148 selector->Emit(opcode, output_count, outputs, input_count, inputs); 140 Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
141 outputs, input_count, inputs);
142 if (cont->IsBranch()) instr->MarkAsControl();
149 } 143 }
150 144
151 145
146 // Shared routine for multiple binary operations.
147 static void VisitBinop(InstructionSelector* selector, Node* node,
148 ArchOpcode opcode, ImmediateMode operand_mode) {
149 FlagsContinuation cont;
 150 VisitBinop(selector, node, opcode, operand_mode, &cont);
151 }
152
153
152 void InstructionSelector::VisitLoad(Node* node) { 154 void InstructionSelector::VisitLoad(Node* node) {
153 MachineRepresentation rep = OpParameter<MachineRepresentation>(node); 155 MachineRepresentation rep = OpParameter<MachineRepresentation>(node);
154 Arm64OperandGenerator g(this); 156 Arm64OperandGenerator g(this);
155 Node* base = node->InputAt(0); 157 Node* base = node->InputAt(0);
156 Node* index = node->InputAt(1); 158 Node* index = node->InputAt(1);
157 159
158 InstructionOperand* result = rep == kMachineFloat64 160 InstructionOperand* result = rep == kMachineFloat64
159 ? g.DefineAsDoubleRegister(node) 161 ? g.DefineAsDoubleRegister(node)
160 : g.DefineAsRegister(node); 162 : g.DefineAsRegister(node);
161 163
(...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after
249 Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, 251 Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
250 g.UseRegister(index), g.UseImmediate(base), val); 252 g.UseRegister(index), g.UseImmediate(base), val);
251 } else { 253 } else {
252 Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL, 254 Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
253 g.UseRegister(base), g.UseRegister(index), val); 255 g.UseRegister(base), g.UseRegister(index), val);
254 } 256 }
255 } 257 }
256 258
257 259
258 void InstructionSelector::VisitWord32And(Node* node) { 260 void InstructionSelector::VisitWord32And(Node* node) {
259 VisitBinop(this, node, kArm64And32, kLogical32Imm, true); 261 VisitBinop(this, node, kArm64And32, kLogical32Imm);
260 } 262 }
261 263
262 264
263 void InstructionSelector::VisitWord64And(Node* node) { 265 void InstructionSelector::VisitWord64And(Node* node) {
264 VisitBinop(this, node, kArm64And, kLogical64Imm, true); 266 VisitBinop(this, node, kArm64And, kLogical64Imm);
265 } 267 }
266 268
267 269
268 void InstructionSelector::VisitWord32Or(Node* node) { 270 void InstructionSelector::VisitWord32Or(Node* node) {
269 VisitBinop(this, node, kArm64Or32, kLogical32Imm, true); 271 VisitBinop(this, node, kArm64Or32, kLogical32Imm);
270 } 272 }
271 273
272 274
273 void InstructionSelector::VisitWord64Or(Node* node) { 275 void InstructionSelector::VisitWord64Or(Node* node) {
274 VisitBinop(this, node, kArm64Or, kLogical64Imm, true); 276 VisitBinop(this, node, kArm64Or, kLogical64Imm);
275 } 277 }
276 278
277 279
278 template <typename T> 280 template <typename T>
279 static void VisitXor(InstructionSelector* selector, Node* node, 281 static void VisitXor(InstructionSelector* selector, Node* node,
280 ArchOpcode xor_opcode, ArchOpcode not_opcode) { 282 ArchOpcode xor_opcode, ArchOpcode not_opcode) {
281 Arm64OperandGenerator g(selector); 283 Arm64OperandGenerator g(selector);
282 BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node); 284 BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
283 if (m.right().Is(-1)) { 285 if (m.right().Is(-1)) {
284 selector->Emit(not_opcode, g.DefineAsRegister(node), 286 selector->Emit(not_opcode, g.DefineAsRegister(node),
285 g.UseRegister(m.left().node())); 287 g.UseRegister(m.left().node()));
286 } else { 288 } else {
287 VisitBinop(selector, node, xor_opcode, kLogical32Imm, true); 289 VisitBinop(selector, node, xor_opcode, kLogical32Imm);
288 } 290 }
289 } 291 }
290 292
291 293
292 void InstructionSelector::VisitWord32Xor(Node* node) { 294 void InstructionSelector::VisitWord32Xor(Node* node) {
293 VisitXor<int32_t>(this, node, kArm64Xor32, kArm64Not32); 295 VisitXor<int32_t>(this, node, kArm64Xor32, kArm64Not32);
294 } 296 }
295 297
296 298
297 void InstructionSelector::VisitWord64Xor(Node* node) { 299 void InstructionSelector::VisitWord64Xor(Node* node) {
(...skipping 25 matching lines...) Expand all
323 VisitRRO(this, kArm64Sar32, node, kShift32Imm); 325 VisitRRO(this, kArm64Sar32, node, kShift32Imm);
324 } 326 }
325 327
326 328
327 void InstructionSelector::VisitWord64Sar(Node* node) { 329 void InstructionSelector::VisitWord64Sar(Node* node) {
328 VisitRRO(this, kArm64Sar, node, kShift64Imm); 330 VisitRRO(this, kArm64Sar, node, kShift64Imm);
329 } 331 }
330 332
331 333
332 void InstructionSelector::VisitInt32Add(Node* node) { 334 void InstructionSelector::VisitInt32Add(Node* node) {
333 VisitBinop(this, node, kArm64Add32, kArithimeticImm, true); 335 VisitBinop(this, node, kArm64Add32, kArithimeticImm);
334 }
335
336
337 void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
338 VisitBinopWithOverflow(this, node, kArm64Add32);
339 } 336 }
340 337
341 338
342 void InstructionSelector::VisitInt64Add(Node* node) { 339 void InstructionSelector::VisitInt64Add(Node* node) {
343 VisitBinop(this, node, kArm64Add, kArithimeticImm, true); 340 VisitBinop(this, node, kArm64Add, kArithimeticImm);
344 } 341 }
345 342
346 343
347 template <typename T> 344 template <typename T>
348 static void VisitSub(InstructionSelector* selector, Node* node, 345 static void VisitSub(InstructionSelector* selector, Node* node,
349 ArchOpcode sub_opcode, ArchOpcode neg_opcode) { 346 ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
350 Arm64OperandGenerator g(selector); 347 Arm64OperandGenerator g(selector);
351 BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node); 348 BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
352 if (m.left().Is(0)) { 349 if (m.left().Is(0)) {
353 selector->Emit(neg_opcode, g.DefineAsRegister(node), 350 selector->Emit(neg_opcode, g.DefineAsRegister(node),
354 g.UseRegister(m.right().node())); 351 g.UseRegister(m.right().node()));
355 } else { 352 } else {
356 VisitBinop(selector, node, sub_opcode, kArithimeticImm, false); 353 VisitBinop(selector, node, sub_opcode, kArithimeticImm);
357 } 354 }
358 } 355 }
359 356
360 357
361 void InstructionSelector::VisitInt32Sub(Node* node) { 358 void InstructionSelector::VisitInt32Sub(Node* node) {
362 VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32); 359 VisitSub<int32_t>(this, node, kArm64Sub32, kArm64Neg32);
363 } 360 }
364 361
365 362
366 void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
367 VisitBinopWithOverflow(this, node, kArm64Sub32);
368 }
369
370
371 void InstructionSelector::VisitInt64Sub(Node* node) { 363 void InstructionSelector::VisitInt64Sub(Node* node) {
372 VisitSub<int64_t>(this, node, kArm64Sub, kArm64Neg); 364 VisitSub<int64_t>(this, node, kArm64Sub, kArm64Neg);
373 } 365 }
374 366
375 367
376 void InstructionSelector::VisitInt32Mul(Node* node) { 368 void InstructionSelector::VisitInt32Mul(Node* node) {
377 VisitRRR(this, kArm64Mul32, node); 369 VisitRRR(this, kArm64Mul32, node);
378 } 370 }
379 371
380 372
(...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after
482 474
483 475
484 void InstructionSelector::VisitFloat64Mod(Node* node) { 476 void InstructionSelector::VisitFloat64Mod(Node* node) {
485 Arm64OperandGenerator g(this); 477 Arm64OperandGenerator g(this);
486 Emit(kArm64Float64Mod, g.DefineAsFixedDouble(node, d0), 478 Emit(kArm64Float64Mod, g.DefineAsFixedDouble(node, d0),
487 g.UseFixedDouble(node->InputAt(0), d0), 479 g.UseFixedDouble(node->InputAt(0), d0),
488 g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall(); 480 g.UseFixedDouble(node->InputAt(1), d1))->MarkAsCall();
489 } 481 }
490 482
491 483
484 void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
485 FlagsContinuation* cont) {
486 VisitBinop(this, node, kArm64Add32, kArithimeticImm, cont);
487 }
488
489
490 void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
491 FlagsContinuation* cont) {
492 VisitBinop(this, node, kArm64Sub32, kArithimeticImm, cont);
493 }
494
495
492 // Shared routine for multiple compare operations. 496 // Shared routine for multiple compare operations.
493 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode, 497 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
494 InstructionOperand* left, InstructionOperand* right, 498 InstructionOperand* left, InstructionOperand* right,
495 FlagsContinuation* cont) { 499 FlagsContinuation* cont) {
496 Arm64OperandGenerator g(selector); 500 Arm64OperandGenerator g(selector);
497 opcode = cont->Encode(opcode); 501 opcode = cont->Encode(opcode);
498 if (cont->IsBranch()) { 502 if (cont->IsBranch()) {
499 selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()), 503 selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
500 g.Label(cont->false_block()))->MarkAsControl(); 504 g.Label(cont->false_block()))->MarkAsControl();
501 } else { 505 } else {
(...skipping 156 matching lines...) Expand 10 before | Expand all | Expand 10 after
658 ASSERT(deoptimization == NULL && continuation == NULL); 662 ASSERT(deoptimization == NULL && continuation == NULL);
659 Emit(kArm64Drop | MiscField::encode(aligned_push_count), NULL); 663 Emit(kArm64Drop | MiscField::encode(aligned_push_count), NULL);
660 } 664 }
661 } 665 }
662 666
663 #endif // V8_TURBOFAN_TARGET 667 #endif // V8_TURBOFAN_TARGET
664 668
665 } // namespace compiler 669 } // namespace compiler
666 } // namespace internal 670 } // namespace internal
667 } // namespace v8 671 } // namespace v8
OLDNEW
« no previous file with comments | « src/compiler/arm/instruction-selector-arm.cc ('k') | src/compiler/ia32/instruction-selector-ia32.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698