| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 205 matching lines...) |
| 216 return GeneratePrologue() && | 216 return GeneratePrologue() && |
| 217 GenerateBody() && | 217 GenerateBody() && |
| 218 GenerateDeferredCode() && | 218 GenerateDeferredCode() && |
| 219 GenerateSafepointTable(); | 219 GenerateSafepointTable(); |
| 220 } | 220 } |
| 221 | 221 |
| 222 | 222 |
| 223 void LCodeGen::FinishCode(Handle<Code> code) { | 223 void LCodeGen::FinishCode(Handle<Code> code) { |
| 224 ASSERT(is_done()); | 224 ASSERT(is_done()); |
| 225 code->set_stack_slots(StackSlotCount()); | 225 code->set_stack_slots(StackSlotCount()); |
| 226 code->set_safepoint_table_start(safepoints_.GetCodeOffset()); | 226 code->set_safepoint_table_offset(safepoints_.GetCodeOffset()); |
| 227 PopulateDeoptimizationData(code); | 227 PopulateDeoptimizationData(code); |
| 228 } | 228 } |
| 229 | 229 |
| 230 | 230 |
| 231 void LCodeGen::Abort(const char* format, ...) { | 231 void LCodeGen::Abort(const char* format, ...) { |
| 232 if (FLAG_trace_bailout) { | 232 if (FLAG_trace_bailout) { |
| 233 SmartPointer<char> debug_name = graph()->debug_name()->ToCString(); | 233 SmartPointer<char> debug_name = graph()->debug_name()->ToCString(); |
| 234 PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name); | 234 PrintF("Aborting LCodeGen in @\"%s\": ", *debug_name); |
| 235 va_list arguments; | 235 va_list arguments; |
| 236 va_start(arguments, format); | 236 va_start(arguments, format); |
| (...skipping 318 matching lines...) |
| 555 translation->StoreLiteral(src_index); | 555 translation->StoreLiteral(src_index); |
| 556 } else { | 556 } else { |
| 557 UNREACHABLE(); | 557 UNREACHABLE(); |
| 558 } | 558 } |
| 559 } | 559 } |
| 560 | 560 |
| 561 | 561 |
| 562 void LCodeGen::CallCode(Handle<Code> code, | 562 void LCodeGen::CallCode(Handle<Code> code, |
| 563 RelocInfo::Mode mode, | 563 RelocInfo::Mode mode, |
| 564 LInstruction* instr) { | 564 LInstruction* instr) { |
| 565 if (instr != NULL) { | 565 ASSERT(instr != NULL); |
| 566 LPointerMap* pointers = instr->pointer_map(); | 566 LPointerMap* pointers = instr->pointer_map(); |
| 567 RecordPosition(pointers->position()); | 567 RecordPosition(pointers->position()); |
| 568 __ Call(code, mode); | 568 __ Call(code, mode); |
| 569 RegisterLazyDeoptimization(instr); | 569 RegisterLazyDeoptimization(instr); |
| 570 } else { | |
| 571 LPointerMap no_pointers(0); | |
| 572 RecordPosition(no_pointers.position()); | |
| 573 __ Call(code, mode); | |
| 574 RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex); | |
| 575 } | |
| 576 } | 570 } |
| 577 | 571 |
| 578 | 572 |
| 579 void LCodeGen::CallRuntime(const Runtime::Function* function, | 573 void LCodeGen::CallRuntime(const Runtime::Function* function, |
| 580 int num_arguments, | 574 int num_arguments, |
| 581 LInstruction* instr) { | 575 LInstruction* instr) { |
| 582 ASSERT(instr != NULL); | 576 ASSERT(instr != NULL); |
| 583 LPointerMap* pointers = instr->pointer_map(); | 577 LPointerMap* pointers = instr->pointer_map(); |
| 584 ASSERT(pointers != NULL); | 578 ASSERT(pointers != NULL); |
| 585 RecordPosition(pointers->position()); | 579 RecordPosition(pointers->position()); |
| 586 | 580 |
| 587 __ CallRuntime(function, num_arguments); | 581 __ CallRuntime(function, num_arguments); |
| 588 // Runtime calls to Throw are not supposed to ever return at the | 582 RegisterLazyDeoptimization(instr); |
| 589 // call site, so don't register lazy deoptimization for these. We do | |
| 590 // however have to record a safepoint since throwing exceptions can | |
| 591 // cause garbage collections. | |
| 592 if (!instr->IsThrow()) { | |
| 593 RegisterLazyDeoptimization(instr); | |
| 594 } else { | |
| 595 RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex); | |
| 596 } | |
| 597 } | 583 } |
| 598 | 584 |
| 599 | 585 |
| 600 void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) { | 586 void LCodeGen::RegisterLazyDeoptimization(LInstruction* instr) { |
| 601 // Create the environment to bailout to. If the call has side effects | 587 // Create the environment to bailout to. If the call has side effects |
| 602 // execution has to continue after the call otherwise execution can continue | 588 // execution has to continue after the call otherwise execution can continue |
| 603 // from a previous bailout point repeating the call. | 589 // from a previous bailout point repeating the call. |
| 604 LEnvironment* deoptimization_environment; | 590 LEnvironment* deoptimization_environment; |
| 605 if (instr->HasDeoptimizationEnvironment()) { | 591 if (instr->HasDeoptimizationEnvironment()) { |
| 606 deoptimization_environment = instr->deoptimization_environment(); | 592 deoptimization_environment = instr->deoptimization_environment(); |
| (...skipping 47 matching lines...) |
| 654 } | 640 } |
| 655 | 641 |
| 656 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM. | 642 ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM. |
| 657 | 643 |
| 658 if (FLAG_deopt_every_n_times == 1 && | 644 if (FLAG_deopt_every_n_times == 1 && |
| 659 info_->shared_info()->opt_count() == id) { | 645 info_->shared_info()->opt_count() == id) { |
| 660 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | 646 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); |
| 661 return; | 647 return; |
| 662 } | 648 } |
| 663 | 649 |
| 664 if (cc == kNoCondition) { | 650 if (cc == al) { |
| 665 if (FLAG_trap_on_deopt) __ stop("trap_on_deopt"); | 651 if (FLAG_trap_on_deopt) __ stop("trap_on_deopt"); |
| 666 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | 652 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); |
| 667 } else { | 653 } else { |
| 668 if (FLAG_trap_on_deopt) { | 654 if (FLAG_trap_on_deopt) { |
| 669 Label done; | 655 Label done; |
| 670 __ b(&done, NegateCondition(cc)); | 656 __ b(&done, NegateCondition(cc)); |
| 671 __ stop("trap_on_deopt"); | 657 __ stop("trap_on_deopt"); |
| 672 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); | 658 __ Jump(entry, RelocInfo::RUNTIME_ENTRY); |
| 673 __ bind(&done); | 659 __ bind(&done); |
| 674 } else { | 660 } else { |
| (...skipping 54 matching lines...) |
| 729 for (int i = 0, length = inlined_closures->length(); | 715 for (int i = 0, length = inlined_closures->length(); |
| 730 i < length; | 716 i < length; |
| 731 i++) { | 717 i++) { |
| 732 DefineDeoptimizationLiteral(inlined_closures->at(i)); | 718 DefineDeoptimizationLiteral(inlined_closures->at(i)); |
| 733 } | 719 } |
| 734 | 720 |
| 735 inlined_function_count_ = deoptimization_literals_.length(); | 721 inlined_function_count_ = deoptimization_literals_.length(); |
| 736 } | 722 } |
| 737 | 723 |
| 738 | 724 |
| 725 void LCodeGen::RecordSafepoint( |
| 726 LPointerMap* pointers, |
| 727 Safepoint::Kind kind, |
| 728 int arguments, |
| 729 int deoptimization_index) { |
| 730 const ZoneList<LOperand*>* operands = pointers->operands(); |
| 731 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), |
| 732 kind, arguments, deoptimization_index); |
| 733 for (int i = 0; i < operands->length(); i++) { |
| 734 LOperand* pointer = operands->at(i); |
| 735 if (pointer->IsStackSlot()) { |
| 736 safepoint.DefinePointerSlot(pointer->index()); |
| 737 } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) { |
| 738 safepoint.DefinePointerRegister(ToRegister(pointer)); |
| 739 } |
| 740 } |
| 741 if (kind & Safepoint::kWithRegisters) { |
| 742 // Register cp always contains a pointer to the context. |
| 743 safepoint.DefinePointerRegister(cp); |
| 744 } |
| 745 } |
| 746 |
| 747 |
| 739 void LCodeGen::RecordSafepoint(LPointerMap* pointers, | 748 void LCodeGen::RecordSafepoint(LPointerMap* pointers, |
| 740 int deoptimization_index) { | 749 int deoptimization_index) { |
| 741 const ZoneList<LOperand*>* operands = pointers->operands(); | 750 RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index); |
| 742 Safepoint safepoint = safepoints_.DefineSafepoint(masm(), | |
| 743 deoptimization_index); | |
| 744 for (int i = 0; i < operands->length(); i++) { | |
| 745 LOperand* pointer = operands->at(i); | |
| 746 if (pointer->IsStackSlot()) { | |
| 747 safepoint.DefinePointerSlot(pointer->index()); | |
| 748 } | |
| 749 } | |
| 750 } | 751 } |
| 751 | 752 |
| 752 | 753 |
| 753 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, | 754 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, |
| 754 int arguments, | 755 int arguments, |
| 755 int deoptimization_index) { | 756 int deoptimization_index) { |
| 756 const ZoneList<LOperand*>* operands = pointers->operands(); | 757 RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, |
| 757 Safepoint safepoint = | 758 deoptimization_index); |
| 758 safepoints_.DefineSafepointWithRegisters( | |
| 759 masm(), arguments, deoptimization_index); | |
| 760 for (int i = 0; i < operands->length(); i++) { | |
| 761 LOperand* pointer = operands->at(i); | |
| 762 if (pointer->IsStackSlot()) { | |
| 763 safepoint.DefinePointerSlot(pointer->index()); | |
| 764 } else if (pointer->IsRegister()) { | |
| 765 safepoint.DefinePointerRegister(ToRegister(pointer)); | |
| 766 } | |
| 767 } | |
| 768 // Register cp always contains a pointer to the context. | |
| 769 safepoint.DefinePointerRegister(cp); | |
| 770 } | 759 } |
| 771 | 760 |
| 772 | 761 |
| 773 void LCodeGen::RecordSafepointWithRegistersAndDoubles( | 762 void LCodeGen::RecordSafepointWithRegistersAndDoubles( |
| 774 LPointerMap* pointers, | 763 LPointerMap* pointers, |
| 775 int arguments, | 764 int arguments, |
| 776 int deoptimization_index) { | 765 int deoptimization_index) { |
| 777 const ZoneList<LOperand*>* operands = pointers->operands(); | 766 RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments, |
| 778 Safepoint safepoint = | 767 deoptimization_index); |
| 779 safepoints_.DefineSafepointWithRegistersAndDoubles( | |
| 780 masm(), arguments, deoptimization_index); | |
| 781 for (int i = 0; i < operands->length(); i++) { | |
| 782 LOperand* pointer = operands->at(i); | |
| 783 if (pointer->IsStackSlot()) { | |
| 784 safepoint.DefinePointerSlot(pointer->index()); | |
| 785 } else if (pointer->IsRegister()) { | |
| 786 safepoint.DefinePointerRegister(ToRegister(pointer)); | |
| 787 } | |
| 788 } | |
| 789 // Register cp always contains a pointer to the context. | |
| 790 safepoint.DefinePointerRegister(cp); | |
| 791 } | 768 } |
| 792 | 769 |
| 793 | 770 |
| 794 void LCodeGen::RecordPosition(int position) { | 771 void LCodeGen::RecordPosition(int position) { |
| 795 if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return; | 772 if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return; |
| 796 masm()->positions_recorder()->RecordPosition(position); | 773 masm()->positions_recorder()->RecordPosition(position); |
| 797 } | 774 } |
| 798 | 775 |
| 799 | 776 |
| 800 void LCodeGen::DoLabel(LLabel* label) { | 777 void LCodeGen::DoLabel(LLabel* label) { |
| (...skipping 254 matching lines...) |
| 1055 // the right hand side. | 1032 // the right hand side. |
| 1056 __ cmp(scratch, right); | 1033 __ cmp(scratch, right); |
| 1057 __ mov(result, scratch, LeaveCC, lt); | 1034 __ mov(result, scratch, LeaveCC, lt); |
| 1058 __ b(lt, &done); | 1035 __ b(lt, &done); |
| 1059 // If not, reduce the left hand side by the right hand | 1036 // If not, reduce the left hand side by the right hand |
| 1060 // side and check again. | 1037 // side and check again. |
| 1061 if (i < kUnfolds - 1) __ sub(scratch, scratch, right); | 1038 if (i < kUnfolds - 1) __ sub(scratch, scratch, right); |
| 1062 } | 1039 } |
| 1063 | 1040 |
| 1064 // Check for power of two on the right hand side. | 1041 // Check for power of two on the right hand side. |
| 1065 __ sub(scratch, right, Operand(1), SetCC); | 1042 __ JumpIfNotPowerOfTwoOrZero(right, scratch, &call_stub); |
| 1066 __ b(mi, &call_stub); | 1043 // Perform modulo operation (scratch contains right - 1). |
| 1067 __ tst(scratch, right); | |
| 1068 __ b(ne, &call_stub); | |
| 1069 // Perform modulo operation. | |
| 1070 __ and_(result, scratch, Operand(left)); | 1044 __ and_(result, scratch, Operand(left)); |
| 1071 | 1045 |
| 1072 __ bind(&call_stub); | 1046 __ bind(&call_stub); |
| 1073 // Call the generic stub. The numbers in r0 and r1 have | 1047 // Call the generic stub. The numbers in r0 and r1 have |
| 1074 // to be tagged to Smis. If that is not possible, deoptimize. | 1048 // to be tagged to Smis. If that is not possible, deoptimize. |
| 1075 DeferredModI* deferred = new DeferredModI(this, instr); | 1049 DeferredModI* deferred = new DeferredModI(this, instr); |
| 1076 __ TrySmiTag(left, &deoptimize, scratch); | 1050 __ TrySmiTag(left, &deoptimize, scratch); |
| 1077 __ TrySmiTag(right, &deoptimize, scratch); | 1051 __ TrySmiTag(right, &deoptimize, scratch); |
| 1078 | 1052 |
| 1079 __ b(al, deferred->entry()); | 1053 __ b(al, deferred->entry()); |
| (...skipping 96 matching lines...) |
| 1176 Register left = ToRegister(instr->InputAt(0)); | 1150 Register left = ToRegister(instr->InputAt(0)); |
| 1177 Register right = ToRegister(instr->InputAt(1)); | 1151 Register right = ToRegister(instr->InputAt(1)); |
| 1178 | 1152 |
| 1179 __ PushSafepointRegistersAndDoubles(); | 1153 __ PushSafepointRegistersAndDoubles(); |
| 1180 GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right); | 1154 GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right); |
| 1181 __ CallStub(&stub); | 1155 __ CallStub(&stub); |
| 1182 RecordSafepointWithRegistersAndDoubles(instr->pointer_map(), | 1156 RecordSafepointWithRegistersAndDoubles(instr->pointer_map(), |
| 1183 0, | 1157 0, |
| 1184 Safepoint::kNoDeoptimizationIndex); | 1158 Safepoint::kNoDeoptimizationIndex); |
| 1185 // Overwrite the stored value of r0 with the result of the stub. | 1159 // Overwrite the stored value of r0 with the result of the stub. |
| 1186 __ str(r0, MemOperand(sp, DwVfpRegister::kNumAllocatableRegisters * | 1160 __ StoreToSafepointRegistersAndDoublesSlot(r0); |
| 1187 kDoubleSize)); | |
| 1188 __ PopSafepointRegistersAndDoubles(); | 1161 __ PopSafepointRegistersAndDoubles(); |
| 1189 } | 1162 } |
| 1190 | 1163 |
| 1191 | 1164 |
| 1192 void LCodeGen::DoMulI(LMulI* instr) { | 1165 void LCodeGen::DoMulI(LMulI* instr) { |
| 1193 Register scratch = scratch0(); | 1166 Register scratch = scratch0(); |
| 1194 Register left = ToRegister(instr->InputAt(0)); | 1167 Register left = ToRegister(instr->InputAt(0)); |
| 1195 Register right = EmitLoadRegister(instr->InputAt(1), scratch); | 1168 Register right = EmitLoadRegister(instr->InputAt(1), scratch); |
| 1196 | 1169 |
| 1197 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) && | 1170 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) && |
| 1198 !instr->InputAt(1)->IsConstantOperand()) { | 1171 !instr->InputAt(1)->IsConstantOperand()) { |
| 1199 __ orr(ToRegister(instr->TempAt(0)), left, right); | 1172 __ orr(ToRegister(instr->TempAt(0)), left, right); |
| 1200 } | 1173 } |
| 1201 | 1174 |
| 1202 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { | 1175 if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) { |
| 1203 // scratch:left = left * right. | 1176 // scratch:left = left * right. |
| 1204 __ smull(scratch, left, left, right); | 1177 __ smull(left, scratch, left, right); |
| 1205 __ mov(ip, Operand(left, ASR, 31)); | 1178 __ mov(ip, Operand(left, ASR, 31)); |
| 1206 __ cmp(ip, Operand(scratch)); | 1179 __ cmp(ip, Operand(scratch)); |
| 1207 DeoptimizeIf(ne, instr->environment()); | 1180 DeoptimizeIf(ne, instr->environment()); |
| 1208 } else { | 1181 } else { |
| 1209 __ mul(left, left, right); | 1182 __ mul(left, left, right); |
| 1210 } | 1183 } |
| 1211 | 1184 |
| 1212 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1185 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1213 // Bail out if the result is supposed to be negative zero. | 1186 // Bail out if the result is supposed to be negative zero. |
| 1214 Label done; | 1187 Label done; |
| 1215 __ tst(left, Operand(left)); | 1188 __ tst(left, Operand(left)); |
| 1216 __ b(ne, &done); | 1189 __ b(ne, &done); |
| 1217 if (instr->InputAt(1)->IsConstantOperand()) { | 1190 if (instr->InputAt(1)->IsConstantOperand()) { |
| 1218 if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) < 0) { | 1191 if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) <= 0) { |
| 1219 DeoptimizeIf(kNoCondition, instr->environment()); | 1192 DeoptimizeIf(al, instr->environment()); |
| 1220 } | 1193 } |
| 1221 } else { | 1194 } else { |
| 1222 // Test the non-zero operand for negative sign. | 1195 // Test the non-zero operand for negative sign. |
| 1223 __ cmp(ToRegister(instr->TempAt(0)), Operand(0)); | 1196 __ cmp(ToRegister(instr->TempAt(0)), Operand(0)); |
| 1224 DeoptimizeIf(mi, instr->environment()); | 1197 DeoptimizeIf(mi, instr->environment()); |
| 1225 } | 1198 } |
| 1226 __ bind(&done); | 1199 __ bind(&done); |
| 1227 } | 1200 } |
| 1228 } | 1201 } |
| 1229 | 1202 |
| (...skipping 188 matching lines...) |
| 1418 case Token::SUB: | 1391 case Token::SUB: |
| 1419 __ vsub(left, left, right); | 1392 __ vsub(left, left, right); |
| 1420 break; | 1393 break; |
| 1421 case Token::MUL: | 1394 case Token::MUL: |
| 1422 __ vmul(left, left, right); | 1395 __ vmul(left, left, right); |
| 1423 break; | 1396 break; |
| 1424 case Token::DIV: | 1397 case Token::DIV: |
| 1425 __ vdiv(left, left, right); | 1398 __ vdiv(left, left, right); |
| 1426 break; | 1399 break; |
| 1427 case Token::MOD: { | 1400 case Token::MOD: { |
| 1428 Abort("DoArithmeticD unimplemented for MOD."); | 1401 // Save r0-r3 on the stack. |
| 1402 __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); |
| 1403 |
| 1404 __ PrepareCallCFunction(4, scratch0()); |
| 1405 __ vmov(r0, r1, left); |
| 1406 __ vmov(r2, r3, right); |
| 1407 __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4); |
| 1408 // Move the result in the double result register. |
| 1409 __ vmov(ToDoubleRegister(instr->result()), r0, r1); |
| 1410 |
| 1411 // Restore r0-r3. |
| 1412 __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit()); |
| 1429 break; | 1413 break; |
| 1430 } | 1414 } |
| 1431 default: | 1415 default: |
| 1432 UNREACHABLE(); | 1416 UNREACHABLE(); |
| 1433 break; | 1417 break; |
| 1434 } | 1418 } |
| 1435 } | 1419 } |
| 1436 | 1420 |
| 1437 | 1421 |
| 1438 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { | 1422 void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
| (...skipping 176 matching lines...) |
| 1615 case Token::INSTANCEOF: | 1599 case Token::INSTANCEOF: |
| 1616 default: | 1600 default: |
| 1617 UNREACHABLE(); | 1601 UNREACHABLE(); |
| 1618 } | 1602 } |
| 1619 return cond; | 1603 return cond; |
| 1620 } | 1604 } |
| 1621 | 1605 |
| 1622 | 1606 |
| 1623 void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) { | 1607 void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) { |
| 1624 __ cmp(ToRegister(left), ToOperand(right)); | 1608 __ cmp(ToRegister(left), ToOperand(right)); |
| 1625 Abort("EmitCmpI untested."); | |
| 1626 } | 1609 } |
| 1627 | 1610 |
| 1628 | 1611 |
| 1629 void LCodeGen::DoCmpID(LCmpID* instr) { | 1612 void LCodeGen::DoCmpID(LCmpID* instr) { |
| 1630 Abort("DoCmpID unimplemented."); | 1613 LOperand* left = instr->InputAt(0); |
| 1614 LOperand* right = instr->InputAt(1); |
| 1615 LOperand* result = instr->result(); |
| 1616 Register scratch = scratch0(); |
| 1617 |
| 1618 Label unordered, done; |
| 1619 if (instr->is_double()) { |
| 1620 // Compare left and right as doubles and load the |
| 1621 // resulting flags into the normal status register. |
| 1622 __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right)); |
| 1623 __ vmrs(pc); |
| 1624 // If a NaN is involved, i.e. the result is unordered (V set), |
| 1625 // jump to unordered to return false. |
| 1626 __ b(vs, &unordered); |
| 1627 } else { |
| 1628 EmitCmpI(left, right); |
| 1629 } |
| 1630 |
| 1631 Condition cc = TokenToCondition(instr->op(), instr->is_double()); |
| 1632 __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex); |
| 1633 __ b(cc, &done); |
| 1634 |
| 1635 __ bind(&unordered); |
| 1636 __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex); |
| 1637 __ bind(&done); |
| 1631 } | 1638 } |
| 1632 | 1639 |
| 1633 | 1640 |
| 1634 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { | 1641 void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) { |
| 1635 Abort("DoCmpIDAndBranch unimplemented."); | 1642 LOperand* left = instr->InputAt(0); |
| 1643 LOperand* right = instr->InputAt(1); |
| 1644 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 1645 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 1646 |
| 1647 if (instr->is_double()) { |
| 1648 // Compare left and right as doubles and load the |
| 1649 // resulting flags into the normal status register. |
| 1650 __ vcmp(ToDoubleRegister(left), ToDoubleRegister(right)); |
| 1651 __ vmrs(pc); |
| 1652 // If a NaN is involved, i.e. the result is unordered (V set), |
| 1653 // jump to false block label. |
| 1654 __ b(vs, chunk_->GetAssemblyLabel(false_block)); |
| 1655 } else { |
| 1656 EmitCmpI(left, right); |
| 1657 } |
| 1658 |
| 1659 Condition cc = TokenToCondition(instr->op(), instr->is_double()); |
| 1660 EmitBranch(true_block, false_block, cc); |
| 1636 } | 1661 } |
| 1637 | 1662 |
| 1638 | 1663 |
| 1639 void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) { | 1664 void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) { |
| 1640 Register left = ToRegister(instr->InputAt(0)); | 1665 Register left = ToRegister(instr->InputAt(0)); |
| 1641 Register right = ToRegister(instr->InputAt(1)); | 1666 Register right = ToRegister(instr->InputAt(1)); |
| 1642 Register result = ToRegister(instr->result()); | 1667 Register result = ToRegister(instr->result()); |
| 1643 | 1668 |
| 1644 __ cmp(left, Operand(right)); | 1669 __ cmp(left, Operand(right)); |
| 1645 __ LoadRoot(result, Heap::kTrueValueRootIndex, eq); | 1670 __ LoadRoot(result, Heap::kTrueValueRootIndex, eq); |
| (...skipping 347 matching lines...) |
| 1993 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 2018 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 1994 | 2019 |
| 1995 Label true_value, done; | 2020 Label true_value, done; |
| 1996 __ tst(r0, r0); | 2021 __ tst(r0, r0); |
| 1997 __ mov(r0, Operand(FACTORY->false_value()), LeaveCC, ne); | 2022 __ mov(r0, Operand(FACTORY->false_value()), LeaveCC, ne); |
| 1998 __ mov(r0, Operand(FACTORY->true_value()), LeaveCC, eq); | 2023 __ mov(r0, Operand(FACTORY->true_value()), LeaveCC, eq); |
| 1999 } | 2024 } |
| 2000 | 2025 |
| 2001 | 2026 |
| 2002 void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) { | 2027 void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) { |
| 2003 Abort("DoInstanceOfAndBranch unimplemented."); | 2028 ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0. |
| 2029 ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1. |
| 2030 |
| 2031 int true_block = chunk_->LookupDestination(instr->true_block_id()); |
| 2032 int false_block = chunk_->LookupDestination(instr->false_block_id()); |
| 2033 |
| 2034 InstanceofStub stub(InstanceofStub::kArgsInRegisters); |
| 2035 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 2036 __ tst(r0, Operand(r0)); |
| 2037 EmitBranch(true_block, false_block, eq); |
| 2004 } | 2038 } |
| 2005 | 2039 |
| 2006 | 2040 |
| 2007 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { | 2041 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
| 2008 class DeferredInstanceOfKnownGlobal: public LDeferredCode { | 2042 class DeferredInstanceOfKnownGlobal: public LDeferredCode { |
| 2009 public: | 2043 public: |
| 2010 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, | 2044 DeferredInstanceOfKnownGlobal(LCodeGen* codegen, |
| 2011 LInstanceOfKnownGlobal* instr) | 2045 LInstanceOfKnownGlobal* instr) |
| 2012 : LDeferredCode(codegen), instr_(instr) { } | 2046 : LDeferredCode(codegen), instr_(instr) { } |
| 2013 virtual void Generate() { | 2047 virtual void Generate() { |
| (...skipping 174 matching lines...) |
| 2188 if (instr->hydrogen()->check_hole_value()) { | 2222 if (instr->hydrogen()->check_hole_value()) { |
| 2189 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 2223 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 2190 __ cmp(result, ip); | 2224 __ cmp(result, ip); |
| 2191 DeoptimizeIf(eq, instr->environment()); | 2225 DeoptimizeIf(eq, instr->environment()); |
| 2192 } | 2226 } |
| 2193 } | 2227 } |
| 2194 | 2228 |
| 2195 | 2229 |
| 2196 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) { | 2230 void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) { |
| 2197 Register value = ToRegister(instr->InputAt(0)); | 2231 Register value = ToRegister(instr->InputAt(0)); |
| 2198 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell()))); | 2232 Register scratch = scratch0(); |
| 2199 __ str(value, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset)); | 2233 |
| 2234 // Load the cell. |
| 2235 __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell()))); |
| 2236 |
| 2237 // If the cell we are storing to contains the hole it could have |
| 2238 // been deleted from the property dictionary. In that case, we need |
| 2239 // to update the property details in the property dictionary to mark |
| 2240 // it as no longer deleted. |
| 2241 if (instr->hydrogen()->check_hole_value()) { |
| 2242 Register scratch2 = ToRegister(instr->TempAt(0)); |
| 2243 __ ldr(scratch2, |
| 2244 FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); |
| 2245 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 2246 __ cmp(scratch2, ip); |
| 2247 DeoptimizeIf(eq, instr->environment()); |
| 2248 } |
| 2249 |
| 2250 // Store the value. |
| 2251 __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset)); |
| 2200 } | 2252 } |
| 2201 | 2253 |
| 2202 | 2254 |
| 2203 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 2255 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 2204 // TODO(antonm): load a context with a separate instruction. | 2256 Register context = ToRegister(instr->context()); |
| 2205 Register result = ToRegister(instr->result()); | 2257 Register result = ToRegister(instr->result()); |
| 2206 __ LoadContext(result, instr->context_chain_length()); | 2258 __ ldr(result, |
| 2259 MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX))); |
| 2207 __ ldr(result, ContextOperand(result, instr->slot_index())); | 2260 __ ldr(result, ContextOperand(result, instr->slot_index())); |
| 2208 } | 2261 } |
| 2209 | 2262 |
| 2210 | 2263 |
| 2264 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
| 2265 Register context = ToRegister(instr->context()); |
| 2266 Register value = ToRegister(instr->value()); |
| 2267 __ ldr(context, |
| 2268 MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX))); |
| 2269 __ str(value, ContextOperand(context, instr->slot_index())); |
| 2270 if (instr->needs_write_barrier()) { |
| 2271 int offset = Context::SlotOffset(instr->slot_index()); |
| 2272 __ RecordWrite(context, Operand(offset), value, scratch0()); |
| 2273 } |
| 2274 } |
| 2275 |
| 2276 |
| 2211 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { | 2277 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { |
| 2212 Register object = ToRegister(instr->InputAt(0)); | 2278 Register object = ToRegister(instr->InputAt(0)); |
| 2213 Register result = ToRegister(instr->result()); | 2279 Register result = ToRegister(instr->result()); |
| 2214 if (instr->hydrogen()->is_in_object()) { | 2280 if (instr->hydrogen()->is_in_object()) { |
| 2215 __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset())); | 2281 __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset())); |
| 2216 } else { | 2282 } else { |
| 2217 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); | 2283 __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
| 2218 __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset())); | 2284 __ ldr(result, FieldMemOperand(result, instr->hydrogen()->offset())); |
| 2219 } | 2285 } |
| 2220 } | 2286 } |
| (...skipping 206 matching lines...) |
| 2427 // length is a small non-negative integer, due to the test above. | 2493 // length is a small non-negative integer, due to the test above. |
| 2428 __ tst(length, Operand(length)); | 2494 __ tst(length, Operand(length)); |
| 2429 __ b(eq, &invoke); | 2495 __ b(eq, &invoke); |
| 2430 __ bind(&loop); | 2496 __ bind(&loop); |
| 2431 __ ldr(scratch, MemOperand(elements, length, LSL, 2)); | 2497 __ ldr(scratch, MemOperand(elements, length, LSL, 2)); |
| 2432 __ push(scratch); | 2498 __ push(scratch); |
| 2433 __ sub(length, length, Operand(1), SetCC); | 2499 __ sub(length, length, Operand(1), SetCC); |
| 2434 __ b(ne, &loop); | 2500 __ b(ne, &loop); |
| 2435 | 2501 |
| 2436 __ bind(&invoke); | 2502 __ bind(&invoke); |
| 2437 // Invoke the function. The number of arguments is stored in receiver | 2503 ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment()); |
| 2438 // which is r0, as expected by InvokeFunction. | 2504 LPointerMap* pointers = instr->pointer_map(); |
| 2505 LEnvironment* env = instr->deoptimization_environment(); |
| 2506 RecordPosition(pointers->position()); |
| 2507 RegisterEnvironmentForDeoptimization(env); |
| 2508 SafepointGenerator safepoint_generator(this, |
| 2509 pointers, |
| 2510 env->deoptimization_index()); |
| 2511 // The number of arguments is stored in receiver which is r0, as expected |
| 2512 // by InvokeFunction. |
| 2439 v8::internal::ParameterCount actual(receiver); | 2513 v8::internal::ParameterCount actual(receiver); |
| 2440 SafepointGenerator safepoint_generator(this, | |
| 2441 instr->pointer_map(), | |
| 2442 Safepoint::kNoDeoptimizationIndex); | |
| 2443 __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator); | 2514 __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator); |
| 2444 } | 2515 } |
| 2445 | 2516 |
| 2446 | 2517 |
| 2447 void LCodeGen::DoPushArgument(LPushArgument* instr) { | 2518 void LCodeGen::DoPushArgument(LPushArgument* instr) { |
| 2448 LOperand* argument = instr->InputAt(0); | 2519 LOperand* argument = instr->InputAt(0); |
| 2449 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { | 2520 if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) { |
| 2450 Abort("DoPushArgument not implemented for double type."); | 2521 Abort("DoPushArgument not implemented for double type."); |
| 2451 } else { | 2522 } else { |
| 2452 Register argument_reg = EmitLoadRegister(argument, ip); | 2523 Register argument_reg = EmitLoadRegister(argument, ip); |
| 2453 __ push(argument_reg); | 2524 __ push(argument_reg); |
| 2454 } | 2525 } |
| 2455 } | 2526 } |
| 2456 | 2527 |
| 2457 | 2528 |
| 2529 void LCodeGen::DoContext(LContext* instr) { |
| 2530 Register result = ToRegister(instr->result()); |
| 2531 __ mov(result, cp); |
| 2532 } |
| 2533 |
| 2534 |
// Loads the context that lexically encloses |context|: first fetch the
// context's closure, then read that closure's context slot.
void LCodeGen::DoOuterContext(LOuterContext* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  // result <- context[CLOSURE_INDEX], the JSFunction owning |context|.
  __ ldr(result,
         MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
  // result <- closure->context(), i.e. the outer context.
  __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset));
}
| 2542 |
| 2543 |
void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
  // NOTE(review): |context| is claimed as an input but the load below reads
  // from cp (the current context register) instead. Confirm the unused
  // register is intentional (e.g. interface parity with other ports).
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  // Fetch the global object out of the current context.
  __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
}
| 2462 | 2549 |
| 2463 | 2550 |
| 2464 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { | 2551 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) { |
| 2552 Register global = ToRegister(instr->global()); |
| 2465 Register result = ToRegister(instr->result()); | 2553 Register result = ToRegister(instr->result()); |
| 2466 __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX)); | 2554 __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset)); |
| 2467 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset)); | |
| 2468 } | 2555 } |
| 2469 | 2556 |
| 2470 | 2557 |
| 2471 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, | 2558 void LCodeGen::CallKnownFunction(Handle<JSFunction> function, |
| 2472 int arity, | 2559 int arity, |
| 2473 LInstruction* instr) { | 2560 LInstruction* instr) { |
| 2474 // Change context if needed. | 2561 // Change context if needed. |
| 2475 bool change_context = | 2562 bool change_context = |
| 2476 (graph()->info()->closure()->context() != function->context()) || | 2563 (graph()->info()->closure()->context() != function->context()) || |
| 2477 scope()->contains_with() || | 2564 scope()->contains_with() || |
| (...skipping 24 matching lines...) Expand all Loading... |
| 2502 | 2589 |
| 2503 | 2590 |
// Calls a function known at compile time; the result is returned in r0.
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
  ASSERT(ToRegister(instr->result()).is(r0));
  // r1 holds the function object, as the call sequence expects.
  __ mov(r1, Operand(instr->function()));
  CallKnownFunction(instr->function(), instr->arity(), instr);
}
| 2509 | 2596 |
| 2510 | 2597 |
// Deferred (slow) path of Math.abs for a tagged input: the value was not a
// smi. Deoptimizes unless it is a heap number; for a negative heap number,
// allocates a fresh heap number with the sign bit cleared and stores it into
// |input|'s safepoint register slot so it becomes the result after the
// safepoint registers are popped.
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
  ASSERT(instr->InputAt(0)->Equals(instr->result()));
  Register input = ToRegister(instr->InputAt(0));
  Register scratch = scratch0();

  // Deoptimize if not a heap number.
  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
  __ cmp(scratch, Operand(ip));
  DeoptimizeIf(ne, instr->environment());

  Label done;
  // |exponent| aliases scratch0(); clear |scratch| so it cannot be reused
  // accidentally while the exponent word is live.
  Register exponent = scratch0();
  scratch = no_reg;
  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| would be restored
  // unchanged by popping safepoint registers.
  __ tst(exponent, Operand(HeapNumber::kSignMask));
  __ b(eq, &done);

  // Input is negative. Reverse its sign.
  // Preserve the value of all registers.
  __ PushSafepointRegisters();

  // Registers were saved at the safepoint, so we can use
  // many scratch registers. Each tmp avoids colliding with |input|.
  Register tmp1 = input.is(r1) ? r0 : r1;
  Register tmp2 = input.is(r2) ? r0 : r2;
  Register tmp3 = input.is(r3) ? r0 : r3;
  Register tmp4 = input.is(r4) ? r0 : r4;

  // exponent: floating point exponent value.

  Label allocated, slow;
  __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
  __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
  __ b(&allocated);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);

  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
  // Set the pointer to the new heap number in tmp.
  if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
  // Restore input_reg after call to runtime (the call clobbered it), then
  // re-load the exponent word since scratch0() may have been clobbered too.
  __ LoadFromSafepointRegisterSlot(input);
  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));

  __ bind(&allocated);
  // exponent: floating point exponent value.
  // tmp1: allocated heap number.
  __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
  __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
  __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
  __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));

  // Make the new heap number the value restored into |input| when the
  // safepoint registers are popped below.
  __ str(tmp1, masm()->SafepointRegisterSlot(input));
  __ PopSafepointRegisters();

  __ bind(&done);
}
| 2571 | 2663 |
| 2572 | 2664 |
// Emits abs() for an untagged int32 held in the instruction's input register
// (which is also its result register). Deoptimizes when the negation
// overflows (i.e. for kMinInt).
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
  Register input = ToRegister(instr->InputAt(0));
  __ cmp(input, Operand(0));
  // We can make rsb conditional because the previous cmp instruction
  // will clear the V (overflow) flag and rsb won't set this flag
  // if input is positive.
  __ rsb(input, input, Operand(0), SetCC, mi);
  // Deoptimize on overflow.
  DeoptimizeIf(vs, instr->environment());
}
| 2584 | 2675 |
| 2585 | 2676 |
// Math.abs, dispatched on the value's representation: vabs for doubles,
// an inline negate-if-negative for int32, and for tagged values an inline
// smi path with a deferred heap-number path.
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LUnaryMathOperation* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
   private:
    LUnaryMathOperation* instr_;
  };

  // Input and result share a register; all paths operate in place.
  ASSERT(instr->InputAt(0)->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsDouble()) {
    DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
    __ vabs(input, input);
  } else if (r.IsInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {
    // Representation is tagged.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input = ToRegister(instr->InputAt(0));
    // Smi check.
    __ JumpIfNotSmi(input, deferred->entry());
    // If smi, handle it directly.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}
| 2620 | 2710 |
| 2621 | 2711 |
// Truncates a double using a specific rounding mode.
// Leaves the Z flag clear (ne condition) if any VFP exception flag was
// raised by the conversion (e.g. invalid operation on overflow).
// Clobbers both scratch registers; restores the caller's FPSCR on exit.
void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode,
                               SwVfpRegister result,
                               DwVfpRegister double_input,
                               Register scratch1,
                               Register scratch2) {
  Register prev_fpscr = scratch1;
  Register scratch = scratch2;

  // Set custom FPSCR:
  //  - Set rounding mode.
  //  - Clear vfp cumulative exception flags.
  //  - Make sure Flush-to-zero mode control bit is unset.
  __ vmrs(prev_fpscr);
  __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask |
                                      kVFPRoundingModeMask |
                                      kVFPFlushToZeroMask));
  __ orr(scratch, scratch, Operand(rounding_mode));
  __ vmsr(scratch);

  // Convert the argument to an integer, honoring the FPSCR rounding mode
  // configured above.
  __ vcvt_s32_f64(result,
                  double_input,
                  kFPSCRRounding);

  // Retrieve FPSCR (with the exception flags the conversion may have set).
  __ vmrs(scratch);
  // Restore the caller's FPSCR.
  __ vmsr(prev_fpscr);
  // Check for vfp exceptions.
  __ tst(scratch, Operand(kVFPExceptionMask));
}
| 2745 |
| 2746 |
// Math.floor of a double into an int32 result. Deoptimizes if the VFP
// conversion raised an exception or if the result would be -0.
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
  DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
  Register result = ToRegister(instr->result());
  SwVfpRegister single_scratch = double_scratch0().low();
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));

  // Round towards minus infinity (floor semantics); a VFP exception leaves
  // the ne condition set.
  EmitVFPTruncate(kRoundToMinusInf,
                  single_scratch,
                  input,
                  scratch1,
                  scratch2);
  DeoptimizeIf(ne, instr->environment());

  // Move the result back to a general purpose register.
  __ vmov(result, single_scratch);

  // Test for -0: a zero integer result coming from a double with the sign
  // bit set means the input was -0 (or rounded up to it), so deoptimize.
  Label done;
  __ cmp(result, Operand(0));
  __ b(ne, &done);
  __ vmov(scratch1, input.high());
  __ tst(scratch1, Operand(HeapNumber::kSignMask));
  DeoptimizeIf(ne, instr->environment());
  __ bind(&done);
}
| 2664 | 2773 |
| 2665 | 2774 |
| 2666 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { | 2775 void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) { |
| 2667 DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); | 2776 DoubleRegister input = ToDoubleRegister(instr->InputAt(0)); |
| 2668 ASSERT(ToDoubleRegister(instr->result()).is(input)); | 2777 ASSERT(ToDoubleRegister(instr->result()).is(input)); |
| 2669 __ vsqrt(input, input); | 2778 __ vsqrt(input, input); |
| 2670 } | 2779 } |
| (...skipping 621 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3292 ASSERT(result->IsDoubleRegister()); | 3401 ASSERT(result->IsDoubleRegister()); |
| 3293 | 3402 |
| 3294 Register input_reg = ToRegister(input); | 3403 Register input_reg = ToRegister(input); |
| 3295 DoubleRegister result_reg = ToDoubleRegister(result); | 3404 DoubleRegister result_reg = ToDoubleRegister(result); |
| 3296 | 3405 |
| 3297 EmitNumberUntagD(input_reg, result_reg, instr->environment()); | 3406 EmitNumberUntagD(input_reg, result_reg, instr->environment()); |
| 3298 } | 3407 } |
| 3299 | 3408 |
| 3300 | 3409 |
// Converts a double to an int32 via a VFP conversion, deoptimizing when the
// conversion is lossy (VFP exception) or when -0 must be preserved.
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsDoubleRegister());
  LOperand* result = instr->result();
  ASSERT(result->IsRegister());

  DoubleRegister double_input = ToDoubleRegister(input);
  Register result_reg = ToRegister(result);
  SwVfpRegister single_scratch = double_scratch0().low();
  Register scratch1 = scratch0();
  Register scratch2 = ToRegister(instr->TempAt(0));

  // NOTE(review): truncation uses round-to-minus-infinity (floor), not
  // round-to-zero — confirm this matches the intended ToInt32 semantics for
  // negative non-integer inputs, since a lossy conversion deopts anyway.
  VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf
                                                      : kRoundToNearest;

  EmitVFPTruncate(rounding_mode,
                  single_scratch,
                  double_input,
                  scratch1,
                  scratch2);
  // Deoptimize if we had a vfp invalid exception.
  DeoptimizeIf(ne, instr->environment());
  // Retrieve the result.
  __ vmov(result_reg, single_scratch);

  // NOTE(review): the -0 bailout is emitted only on the truncating path —
  // verify this guard; non-truncating conversions of -0 also produce 0 here.
  if (instr->truncating() &&
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label done;
    __ cmp(result_reg, Operand(0));
    __ b(ne, &done);
    // Check for -0 by inspecting the sign bit of the double input.
    __ vmov(scratch1, double_input.high());
    __ tst(scratch1, Operand(HeapNumber::kSignMask));
    DeoptimizeIf(ne, instr->environment());

    __ bind(&done);
  }
}
| 3304 | 3448 |
| 3305 | 3449 |
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->InputAt(0);
  ASSERT(input->IsRegister());
  // Z is set iff the value is a smi (tag bits clear); deoptimize on the
  // instruction's own condition, which encodes whether a smi or a non-smi
  // is the failure case.
  __ tst(ToRegister(input), Operand(kSmiTagMask));
  DeoptimizeIf(instr->condition(), instr->environment());
}
| 3312 | 3456 |
| (...skipping 179 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3492 __ ldr(r3, FieldMemOperand(r1, size - kPointerSize)); | 3636 __ ldr(r3, FieldMemOperand(r1, size - kPointerSize)); |
| 3493 __ str(r3, FieldMemOperand(r0, size - kPointerSize)); | 3637 __ str(r3, FieldMemOperand(r0, size - kPointerSize)); |
| 3494 } | 3638 } |
| 3495 } | 3639 } |
| 3496 | 3640 |
| 3497 | 3641 |
| 3498 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { | 3642 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { |
| 3499 // Use the fast case closure allocation code that allocates in new | 3643 // Use the fast case closure allocation code that allocates in new |
| 3500 // space for nested functions that don't need literals cloning. | 3644 // space for nested functions that don't need literals cloning. |
| 3501 Handle<SharedFunctionInfo> shared_info = instr->shared_info(); | 3645 Handle<SharedFunctionInfo> shared_info = instr->shared_info(); |
| 3502 bool pretenure = !instr->hydrogen()->pretenure(); | 3646 bool pretenure = instr->hydrogen()->pretenure(); |
| 3503 if (shared_info->num_literals() == 0 && !pretenure) { | 3647 if (shared_info->num_literals() == 0 && !pretenure) { |
| 3504 FastNewClosureStub stub; | 3648 FastNewClosureStub stub; |
| 3505 __ mov(r1, Operand(shared_info)); | 3649 __ mov(r1, Operand(shared_info)); |
| 3506 __ push(r1); | 3650 __ push(r1); |
| 3507 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); | 3651 CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
| 3508 } else { | 3652 } else { |
| 3509 __ mov(r2, Operand(shared_info)); | 3653 __ mov(r2, Operand(shared_info)); |
| 3510 __ mov(r1, Operand(pretenure | 3654 __ mov(r1, Operand(pretenure |
| 3511 ? FACTORY->true_value() | 3655 ? FACTORY->true_value() |
| 3512 : FACTORY->false_value())); | 3656 : FACTORY->false_value())); |
| (...skipping 124 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3637 } else { | 3781 } else { |
| 3638 final_branch_condition = ne; | 3782 final_branch_condition = ne; |
| 3639 __ b(false_label); | 3783 __ b(false_label); |
| 3640 // A dead branch instruction will be generated after this point. | 3784 // A dead branch instruction will be generated after this point. |
| 3641 } | 3785 } |
| 3642 | 3786 |
| 3643 return final_branch_condition; | 3787 return final_branch_condition; |
| 3644 } | 3788 } |
| 3645 | 3789 |
| 3646 | 3790 |
| 3791 void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) { |
| 3792 Register result = ToRegister(instr->result()); |
| 3793 Label true_label; |
| 3794 Label false_label; |
| 3795 Label done; |
| 3796 |
| 3797 EmitIsConstructCall(result, scratch0()); |
| 3798 __ b(eq, &true_label); |
| 3799 |
| 3800 __ LoadRoot(result, Heap::kFalseValueRootIndex); |
| 3801 __ b(&done); |
| 3802 |
| 3803 |
| 3804 __ bind(&true_label); |
| 3805 __ LoadRoot(result, Heap::kTrueValueRootIndex); |
| 3806 |
| 3807 __ bind(&done); |
| 3808 } |
| 3809 |
| 3810 |
// Branch form of the construct-call test: jumps straight to the true/false
// basic blocks instead of materializing a boolean.
void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
  Register temp1 = ToRegister(instr->TempAt(0));
  int true_block = chunk_->LookupDestination(instr->true_block_id());
  int false_block = chunk_->LookupDestination(instr->false_block_id());

  // EmitIsConstructCall leaves eq set iff the caller is a construct frame.
  EmitIsConstructCall(temp1, scratch0());
  EmitBranch(true_block, false_block, eq);
}
| 3819 |
| 3820 |
// Sets the condition flags so that eq holds iff the calling frame is a
// construct frame. Clobbers both temp registers.
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
  ASSERT(!temp1.is(temp2));
  // Get the frame pointer for the calling frame.
  __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));

  // Skip the arguments adaptor frame if it exists, since the construct
  // marker would then live one frame further up.
  Label check_frame_marker;
  __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
  __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ b(ne, &check_frame_marker);
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));

  // Check the marker in the calling frame.
  __ bind(&check_frame_marker);
  __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
  __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
}
| 3838 |
| 3839 |
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  // No code for lazy bailout instruction. Used to capture environment after a
  // call for populating the safepoint data with deoptimization data.
}
| 3651 | 3844 |
| 3652 | 3845 |
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  // Unconditional deoptimization: the 'al' condition always holds.
  DeoptimizeIf(al, instr->environment());
}
| 3656 | 3849 |
| 3657 | 3850 |
// Implements the 'delete' operator via the DELETE builtin. The builtin can
// lazily deoptimize, so the call site records a safepoint that carries the
// deoptimization environment's index.
void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
  Register object = ToRegister(instr->object());
  Register key = ToRegister(instr->key());
  __ Push(object, key);
  ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
  LPointerMap* pointers = instr->pointer_map();
  LEnvironment* env = instr->deoptimization_environment();
  RecordPosition(pointers->position());
  RegisterEnvironmentForDeoptimization(env);
  SafepointGenerator safepoint_generator(this,
                                         pointers,
                                         env->deoptimization_index());
  __ InvokeBuiltin(Builtins::DELETE, CALL_JS, &safepoint_generator);
}
| 3668 | 3865 |
| 3669 | 3866 |
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  // Perform stack overflow check: call the StackCheckStub only when sp has
  // dropped below the stack limit.
  Label ok;
  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
  __ cmp(sp, Operand(ip));
  // hs: sp >= limit (unsigned), i.e. there is still headroom.
  __ b(hs, &ok);
  StackCheckStub stub;
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&ok);
}
| 3680 | 3877 |
| 3681 | 3878 |
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // On-stack replacement entry is not implemented on this port yet.
  Abort("DoOsrEntry unimplemented.");
}
| 3685 | 3882 |
| 3686 | 3883 |
| 3687 #undef __ | 3884 #undef __ |
| 3688 | 3885 |
| 3689 } } // namespace v8::internal | 3886 } } // namespace v8::internal |
| OLD | NEW |