Chromium Code Reviews

Unified Diff: src/compiler/x64/instruction-selector-x64.cc

Issue 415403005: [turbofan] Support for combining branches with <Operation>WithOverflow. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 4 months ago
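
For readers new to the pattern: this patch replaces the projection-based VisitBinopWithOverflow with a shared VisitBinop that takes a FlagsContinuation describing what should happen to the condition flags. The sketch below is a minimal, self-contained model of that idea, not V8 code: FlagsContinuation, Block, VReg and EmitBinop are simplified illustrative stand-ins, and only the member names IsBranch(), IsSet(), true_block(), false_block() and result() are taken from the diff.

// Toy model of the flags-continuation idea, for illustration only; the types
// below are simplified stand-ins modeled on the calls visible in this patch,
// not the real TurboFan classes.
#include <cstdio>

enum FlagsMode { kFlags_none, kFlags_branch, kFlags_set };

struct Block { const char* label; };  // stand-in for a basic block
struct VReg { const char* name; };    // stand-in for a virtual register

class FlagsContinuation {
 public:
  FlagsContinuation() : mode_(kFlags_none) {}  // "ignore the flags"
  static FlagsContinuation ForBranch(Block* t, Block* f) {
    FlagsContinuation c;
    c.mode_ = kFlags_branch;
    c.true_block_ = t;
    c.false_block_ = f;
    return c;
  }
  static FlagsContinuation ForSet(VReg* r) {
    FlagsContinuation c;
    c.mode_ = kFlags_set;
    c.result_ = r;
    return c;
  }
  bool IsBranch() const { return mode_ == kFlags_branch; }
  bool IsSet() const { return mode_ == kFlags_set; }
  Block* true_block() const { return true_block_; }
  Block* false_block() const { return false_block_; }
  VReg* result() const { return result_; }

 private:
  FlagsMode mode_;
  Block* true_block_ = nullptr;
  Block* false_block_ = nullptr;
  VReg* result_ = nullptr;
};

// Mirrors the shape of the new shared VisitBinop: exactly one instruction is
// emitted, and the continuation decides whether it also carries the two branch
// targets (and becomes a control instruction) or defines an extra boolean
// output holding the overflow bit.
void EmitBinop(const char* op, const FlagsContinuation& cont) {
  if (cont.IsBranch()) {
    std::printf("%s; branch on overflow to %s else %s (one control instr)\n",
                op, cont.true_block()->label, cont.false_block()->label);
  } else if (cont.IsSet()) {
    std::printf("%s; materialize overflow bit into %s\n",
                op, cont.result()->name);
  } else {
    std::printf("%s; overflow flag unused\n", op);
  }
}

int main() {
  Block if_true = {"B1"};
  Block if_false = {"B2"};
  VReg flag = {"v7"};
  // Branch(Projection(1, Int32AddWithOverflow(a, b))) folds into the add:
  EmitBinop("add32", FlagsContinuation::ForBranch(&if_true, &if_false));
  // Overflow projection used as an ordinary boolean value:
  EmitBinop("add32", FlagsContinuation::ForSet(&flag));
  // Plain Int32Add (or an overflow node whose flag output is unused):
  EmitBinop("add32", FlagsContinuation());
  return 0;
}

In the actual change, the plain VisitBinop(selector, node, opcode) wrapper default-constructs the continuation, while the new VisitInt32AddWithOverflow / VisitInt32SubWithOverflow overloads forward a continuation supplied by the caller, so a branch on the overflow projection can be folded into the add or sub itself; the emitted instruction is then marked as a control instruction.
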
 // Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"

 namespace v8 {
 namespace internal {
 namespace compiler {

(...skipping 158 matching lines...)

   } else {  // store [%base + %index], %|#value
     Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
          g.UseRegister(base), g.UseRegister(index), val);
   }
   // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
 }


 // Shared routine for multiple binary operations.
 static void VisitBinop(InstructionSelector* selector, Node* node,
-                       ArchOpcode opcode, bool commutative) {
-  X64OperandGenerator g(selector);
-  Node* left = node->InputAt(0);
-  Node* right = node->InputAt(1);
-  // TODO(turbofan): match complex addressing modes.
-  // TODO(turbofan): if commutative, pick the non-live-in operand as the left as
-  // this might be the last use and therefore its register can be reused.
-  if (g.CanBeImmediate(right)) {
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(left),
-                   g.UseImmediate(right));
-  } else if (commutative && g.CanBeImmediate(left)) {
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.Use(right),
-                   g.UseImmediate(left));
-  } else {
-    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
-                   g.Use(right));
-  }
-}
-
-
-static void VisitBinopWithOverflow(InstructionSelector* selector, Node* node,
-                                   InstructionCode opcode) {
+                       InstructionCode opcode, FlagsContinuation* cont) {
   X64OperandGenerator g(selector);
   Int32BinopMatcher m(node);
-  InstructionOperand* inputs[2];
+  InstructionOperand* inputs[4];
   size_t input_count = 0;
   InstructionOperand* outputs[2];
   size_t output_count = 0;

   // TODO(turbofan): match complex addressing modes.
   // TODO(turbofan): if commutative, pick the non-live-in operand as the left as
   // this might be the last use and therefore its register can be reused.
   if (g.CanBeImmediate(m.right().node())) {
     inputs[input_count++] = g.Use(m.left().node());
     inputs[input_count++] = g.UseImmediate(m.right().node());
   } else {
     inputs[input_count++] = g.UseRegister(m.left().node());
     inputs[input_count++] = g.Use(m.right().node());
   }

-  // Define outputs depending on the projections.
-  Node* projections[2];
-  node->CollectProjections(ARRAY_SIZE(projections), projections);
-  if (projections[0]) {
-    outputs[output_count++] = g.DefineSameAsFirst(projections[0]);
-  }
-  if (projections[1]) {
-    opcode |= FlagsModeField::encode(kFlags_set);
-    opcode |= FlagsConditionField::encode(kOverflow);
-    outputs[output_count++] =
-        (projections[0] ? g.DefineAsRegister(projections[1])
-                        : g.DefineSameAsFirst(projections[1]));
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineSameAsFirst(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
   }

   ASSERT_NE(0, input_count);
   ASSERT_NE(0, output_count);
   ASSERT_GE(ARRAY_SIZE(inputs), input_count);
   ASSERT_GE(ARRAY_SIZE(outputs), output_count);

-  selector->Emit(opcode, output_count, outputs, input_count, inputs);
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, &cont);
 }


 void InstructionSelector::VisitWord32And(Node* node) {
-  VisitBinop(this, node, kX64And32, true);
+  VisitBinop(this, node, kX64And32);
 }


 void InstructionSelector::VisitWord64And(Node* node) {
-  VisitBinop(this, node, kX64And, true);
+  VisitBinop(this, node, kX64And);
 }


 void InstructionSelector::VisitWord32Or(Node* node) {
-  VisitBinop(this, node, kX64Or32, true);
+  VisitBinop(this, node, kX64Or32);
 }


 void InstructionSelector::VisitWord64Or(Node* node) {
-  VisitBinop(this, node, kX64Or, true);
+  VisitBinop(this, node, kX64Or);
 }


 template <typename T>
 static void VisitXor(InstructionSelector* selector, Node* node,
                      ArchOpcode xor_opcode, ArchOpcode not_opcode) {
   X64OperandGenerator g(selector);
   BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
   if (m.right().Is(-1)) {
     selector->Emit(not_opcode, g.DefineSameAsFirst(node),
                    g.Use(m.left().node()));
   } else {
-    VisitBinop(selector, node, xor_opcode, true);
+    VisitBinop(selector, node, xor_opcode);
   }
 }


 void InstructionSelector::VisitWord32Xor(Node* node) {
   VisitXor<int32_t>(this, node, kX64Xor32, kX64Not32);
 }


 void InstructionSelector::VisitWord64Xor(Node* node) {

(...skipping 77 matching lines...)

   VisitWord32Shift(this, node, kX64Sar32);
 }


 void InstructionSelector::VisitWord64Sar(Node* node) {
   VisitWord64Shift(this, node, kX64Sar);
 }


 void InstructionSelector::VisitInt32Add(Node* node) {
-  VisitBinop(this, node, kX64Add32, true);
-}
-
-
-void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
-  VisitBinopWithOverflow(this, node, kX64Add32);
+  VisitBinop(this, node, kX64Add32);
 }


 void InstructionSelector::VisitInt64Add(Node* node) {
-  VisitBinop(this, node, kX64Add, true);
+  VisitBinop(this, node, kX64Add);
 }


 template <typename T>
 static void VisitSub(InstructionSelector* selector, Node* node,
                      ArchOpcode sub_opcode, ArchOpcode neg_opcode) {
   X64OperandGenerator g(selector);
   BinopMatcher<IntMatcher<T>, IntMatcher<T> > m(node);
   if (m.left().Is(0)) {
     selector->Emit(neg_opcode, g.DefineSameAsFirst(node),
                    g.Use(m.right().node()));
   } else {
-    VisitBinop(selector, node, sub_opcode, false);
+    VisitBinop(selector, node, sub_opcode);
   }
 }


 void InstructionSelector::VisitInt32Sub(Node* node) {
   VisitSub<int32_t>(this, node, kX64Sub32, kX64Neg32);
 }


-void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
-  VisitBinopWithOverflow(this, node, kX64Sub32);
-}
-
-
 void InstructionSelector::VisitInt64Sub(Node* node) {
   VisitSub<int64_t>(this, node, kX64Sub, kX64Neg);
 }


 static void VisitMul(InstructionSelector* selector, Node* node,
                      ArchOpcode opcode) {
   X64OperandGenerator g(selector);
   Node* left = node->InputAt(0);
   Node* right = node->InputAt(1);

(...skipping 160 matching lines...)


 void InstructionSelector::VisitConvertInt32ToInt64(Node* node) {
   X64OperandGenerator g(this);
   // TODO(dcarney): other modes
   Emit(kX64Int32ToInt64, g.DefineAsRegister(node),
        g.UseRegister(node->InputAt(0)));
 }


+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kX64Add32, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kX64Sub32, cont);
+}
+
+
 // Shared routine for multiple compare operations.
 static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
                          InstructionOperand* left, InstructionOperand* right,
                          FlagsContinuation* cont) {
   X64OperandGenerator g(selector);
   opcode = cont->Encode(opcode);
   if (cont->IsBranch()) {
     selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
                    g.Label(cont->false_block()))->MarkAsControl();
   } else {

(...skipping 133 matching lines...)

     ASSERT(deoptimization == NULL && continuation == NULL);
     Emit(kPopStack | MiscField::encode(buffer.pushed_count), NULL);
   }
 }

 #endif

 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8