| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| (...skipping 545 matching lines...) |
| 557 | 557 |
| 558 | 558 |
| 559 void LCodeGen::DoParallelMove(LParallelMove* move) { | 559 void LCodeGen::DoParallelMove(LParallelMove* move) { |
| 560 // xmm0 must always be a scratch register. | 560 // xmm0 must always be a scratch register. |
| 561 XMMRegister xmm_scratch = xmm0; | 561 XMMRegister xmm_scratch = xmm0; |
| 562 LUnallocated marker_operand(LUnallocated::NONE); | 562 LUnallocated marker_operand(LUnallocated::NONE); |
| 563 | 563 |
| 564 Register cpu_scratch = esi; | 564 Register cpu_scratch = esi; |
| 565 bool destroys_cpu_scratch = false; | 565 bool destroys_cpu_scratch = false; |
| 566 | 566 |
| 567 LGapResolver resolver(move->move_operands(), &marker_operand); | 567 const ZoneList<LMoveOperands>* moves = |
| 568 const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder(); | 568 resolver_.Resolve(move->move_operands(), &marker_operand); |
| 569 for (int i = moves->length() - 1; i >= 0; --i) { | 569 for (int i = moves->length() - 1; i >= 0; --i) { |
| 570 LMoveOperands move = moves->at(i); | 570 LMoveOperands move = moves->at(i); |
| 571 LOperand* from = move.from(); | 571 LOperand* from = move.from(); |
| 572 LOperand* to = move.to(); | 572 LOperand* to = move.to(); |
| 573 ASSERT(!from->IsDoubleRegister() || | 573 ASSERT(!from->IsDoubleRegister() || |
| 574 !ToDoubleRegister(from).is(xmm_scratch)); | 574 !ToDoubleRegister(from).is(xmm_scratch)); |
| 575 ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch)); | 575 ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch)); |
| 576 ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch)); | 576 ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch)); |
| 577 ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch)); | 577 ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch)); |
| 578 if (from->IsConstantOperand()) { | 578 if (from->IsConstantOperand()) { |
| (...skipping 2824 matching lines...) |
| 3403 ASSERT(!environment->HasBeenRegistered()); | 3403 ASSERT(!environment->HasBeenRegistered()); |
| 3404 RegisterEnvironmentForDeoptimization(environment); | 3404 RegisterEnvironmentForDeoptimization(environment); |
| 3405 ASSERT(osr_pc_offset_ == -1); | 3405 ASSERT(osr_pc_offset_ == -1); |
| 3406 osr_pc_offset_ = masm()->pc_offset(); | 3406 osr_pc_offset_ = masm()->pc_offset(); |
| 3407 } | 3407 } |
| 3408 | 3408 |
| 3409 | 3409 |
| 3410 #undef __ | 3410 #undef __ |
| 3411 | 3411 |
| 3412 } } // namespace v8::internal | 3412 } } // namespace v8::internal |
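The substantive change in the first hunk is that DoParallelMove now reuses a member `resolver_` (calling `Resolve(move->move_operands(), &marker_operand)`) instead of constructing a fresh local `LGapResolver` and calling `ResolveInReverseOrder()` on every gap. A minimal sketch of that reuse pattern follows; `Move`, `GapResolver`, and `CodeGen` are hypothetical simplified stand-ins for V8's `LMoveOperands`, `LGapResolver`, and `LCodeGen`, and the cycle-breaking work a real gap resolver does is elided.

```cpp
#include <vector>

// Illustrative stand-in for LMoveOperands: one register/slot move.
struct Move { int from; int to; };

// Illustrative stand-in for LGapResolver. Holding the result buffer as
// a member lets one resolver instance be reused across every gap.
class GapResolver {
 public:
  // Orders `moves` so they can be emitted sequentially and returns the
  // result. (A real resolver would also break dependency cycles here.)
  const std::vector<Move>* Resolve(const std::vector<Move>* moves) {
    result_ = *moves;
    return &result_;
  }

 private:
  std::vector<Move> result_;
};

class CodeGen {
 public:
  void DoParallelMove(const std::vector<Move>* moves) {
    // After the change: one member resolver reused for every gap,
    // rather than a local resolver constructed per call.
    const std::vector<Move>* ordered = resolver_.Resolve(moves);
    for (int i = static_cast<int>(ordered->size()) - 1; i >= 0; --i) {
      // ... emit each move, as LCodeGen::DoParallelMove does ...
    }
  }

 private:
  GapResolver resolver_;
};

int main() {
  std::vector<Move> gap = {{0, 1}, {1, 2}};
  CodeGen cg;
  cg.DoParallelMove(&gap);
  return 0;
}
```

The design point, under the assumptions above, is lifetime and allocation: a per-gap local resolver re-creates its scratch state on every call, while a member resolver amortizes that state across all gaps in the function being compiled.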