| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 288 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 299 | 299 |
| 300 CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue); | 300 CorruptRegisters(&register_list, kCallerSavedRegisterCorruptionValue); |
| 301 CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue); | 301 CorruptRegisters(&fpregister_list, kCallerSavedFPRegisterCorruptionValue); |
| 302 } | 302 } |
| 303 #endif | 303 #endif |
| 304 | 304 |
| 305 | 305 |
| 306 // Extending the stack by 2 * 64 bits is required for stack alignment purposes. | 306 // Extending the stack by 2 * 64 bits is required for stack alignment purposes. |
| 307 // TODO(all): Insert a marker in the extra space allocated on the stack. | 307 // TODO(all): Insert a marker in the extra space allocated on the stack. |
| 308 uintptr_t Simulator::PushAddress(uintptr_t address) { | 308 uintptr_t Simulator::PushAddress(uintptr_t address) { |
| 309 ASSERT(sizeof(uintptr_t) < 2 * kXRegSizeInBytes); | 309 ASSERT(sizeof(uintptr_t) < 2 * kXRegSize); |
| 310 intptr_t new_sp = sp() - 2 * kXRegSizeInBytes; | 310 intptr_t new_sp = sp() - 2 * kXRegSize; |
| 311 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp); | 311 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp); |
| 312 *stack_slot = address; | 312 *stack_slot = address; |
| 313 set_sp(new_sp); | 313 set_sp(new_sp); |
| 314 return new_sp; | 314 return new_sp; |
| 315 } | 315 } |
| 316 | 316 |
| 317 | 317 |
| 318 uintptr_t Simulator::PopAddress() { | 318 uintptr_t Simulator::PopAddress() { |
| 319 intptr_t current_sp = sp(); | 319 intptr_t current_sp = sp(); |
| 320 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp); | 320 uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp); |
| 321 uintptr_t address = *stack_slot; | 321 uintptr_t address = *stack_slot; |
| 322 ASSERT(sizeof(uintptr_t) < 2 * kXRegSizeInBytes); | 322 ASSERT(sizeof(uintptr_t) < 2 * kXRegSize); |
| 323 set_sp(current_sp + 2 * kXRegSizeInBytes); | 323 set_sp(current_sp + 2 * kXRegSize); |
| 324 return address; | 324 return address; |
| 325 } | 325 } |
| 326 | 326 |
| 327 | 327 |
| 328 // Returns the limit of the stack area to enable checking for stack overflows. | 328 // Returns the limit of the stack area to enable checking for stack overflows. |
| 329 uintptr_t Simulator::StackLimit() const { | 329 uintptr_t Simulator::StackLimit() const { |
| 330 // Leave a safety margin of 1024 bytes to prevent overrunning the stack when | 330 // Leave a safety margin of 1024 bytes to prevent overrunning the stack when |
| 331 // pushing values. | 331 // pushing values. |
| 332 // TODO(all): Increase the stack limit protection. | 332 // TODO(all): Increase the stack limit protection. |
| 333 | 333 |
| (...skipping 273 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 607 } | 607 } |
| 608 | 608 |
| 609 | 609 |
| 610 // Helpers --------------------------------------------------------------------- | 610 // Helpers --------------------------------------------------------------------- |
| 611 int64_t Simulator::AddWithCarry(unsigned reg_size, | 611 int64_t Simulator::AddWithCarry(unsigned reg_size, |
| 612 bool set_flags, | 612 bool set_flags, |
| 613 int64_t src1, | 613 int64_t src1, |
| 614 int64_t src2, | 614 int64_t src2, |
| 615 int64_t carry_in) { | 615 int64_t carry_in) { |
| 616 ASSERT((carry_in == 0) || (carry_in == 1)); | 616 ASSERT((carry_in == 0) || (carry_in == 1)); |
| 617 ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize)); | 617 ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits)); |
| 618 | 618 |
| 619 uint64_t u1, u2; | 619 uint64_t u1, u2; |
| 620 int64_t result; | 620 int64_t result; |
| 621 int64_t signed_sum = src1 + src2 + carry_in; | 621 int64_t signed_sum = src1 + src2 + carry_in; |
| 622 | 622 |
| 623 uint32_t N, Z, C, V; | 623 uint32_t N, Z, C, V; |
| 624 | 624 |
| 625 if (reg_size == kWRegSize) { | 625 if (reg_size == kWRegSizeInBits) { |
| 626 u1 = static_cast<uint64_t>(src1) & kWRegMask; | 626 u1 = static_cast<uint64_t>(src1) & kWRegMask; |
| 627 u2 = static_cast<uint64_t>(src2) & kWRegMask; | 627 u2 = static_cast<uint64_t>(src2) & kWRegMask; |
| 628 | 628 |
| 629 result = signed_sum & kWRegMask; | 629 result = signed_sum & kWRegMask; |
| 630 // Compute the C flag by comparing the sum to the max unsigned integer. | 630 // Compute the C flag by comparing the sum to the max unsigned integer. |
| 631 C = ((kWMaxUInt - u1) < (u2 + carry_in)) || | 631 C = ((kWMaxUInt - u1) < (u2 + carry_in)) || |
| 632 ((kWMaxUInt - u1 - carry_in) < u2); | 632 ((kWMaxUInt - u1 - carry_in) < u2); |
| 633 // Overflow iff the sign bit is the same for the two inputs and different | 633 // Overflow iff the sign bit is the same for the two inputs and different |
| 634 // for the result. | 634 // for the result. |
| 635 int64_t s_src1 = src1 << (kXRegSize - kWRegSize); | 635 int64_t s_src1 = src1 << (kXRegSizeInBits - kWRegSizeInBits); |
| 636 int64_t s_src2 = src2 << (kXRegSize - kWRegSize); | 636 int64_t s_src2 = src2 << (kXRegSizeInBits - kWRegSizeInBits); |
| 637 int64_t s_result = result << (kXRegSize - kWRegSize); | 637 int64_t s_result = result << (kXRegSizeInBits - kWRegSizeInBits); |
| 638 V = ((s_src1 ^ s_src2) >= 0) && ((s_src1 ^ s_result) < 0); | 638 V = ((s_src1 ^ s_src2) >= 0) && ((s_src1 ^ s_result) < 0); |
| 639 | 639 |
| 640 } else { | 640 } else { |
| 641 u1 = static_cast<uint64_t>(src1); | 641 u1 = static_cast<uint64_t>(src1); |
| 642 u2 = static_cast<uint64_t>(src2); | 642 u2 = static_cast<uint64_t>(src2); |
| 643 | 643 |
| 644 result = signed_sum; | 644 result = signed_sum; |
| 645 // Compute the C flag by comparing the sum to the max unsigned integer. | 645 // Compute the C flag by comparing the sum to the max unsigned integer. |
| 646 C = ((kXMaxUInt - u1) < (u2 + carry_in)) || | 646 C = ((kXMaxUInt - u1) < (u2 + carry_in)) || |
| 647 ((kXMaxUInt - u1 - carry_in) < u2); | 647 ((kXMaxUInt - u1 - carry_in) < u2); |
| (...skipping 15 matching lines...) Expand all Loading... |
| 663 } | 663 } |
| 664 | 664 |
| 665 | 665 |
| 666 int64_t Simulator::ShiftOperand(unsigned reg_size, | 666 int64_t Simulator::ShiftOperand(unsigned reg_size, |
| 667 int64_t value, | 667 int64_t value, |
| 668 Shift shift_type, | 668 Shift shift_type, |
| 669 unsigned amount) { | 669 unsigned amount) { |
| 670 if (amount == 0) { | 670 if (amount == 0) { |
| 671 return value; | 671 return value; |
| 672 } | 672 } |
| 673 int64_t mask = reg_size == kXRegSize ? kXRegMask : kWRegMask; | 673 int64_t mask = reg_size == kXRegSizeInBits ? kXRegMask : kWRegMask; |
| 674 switch (shift_type) { | 674 switch (shift_type) { |
| 675 case LSL: | 675 case LSL: |
| 676 return (value << amount) & mask; | 676 return (value << amount) & mask; |
| 677 case LSR: | 677 case LSR: |
| 678 return static_cast<uint64_t>(value) >> amount; | 678 return static_cast<uint64_t>(value) >> amount; |
| 679 case ASR: { | 679 case ASR: { |
| 680 // Shift used to restore the sign. | 680 // Shift used to restore the sign. |
| 681 unsigned s_shift = kXRegSize - reg_size; | 681 unsigned s_shift = kXRegSizeInBits - reg_size; |
| 682 // Value with its sign restored. | 682 // Value with its sign restored. |
| 683 int64_t s_value = (value << s_shift) >> s_shift; | 683 int64_t s_value = (value << s_shift) >> s_shift; |
| 684 return (s_value >> amount) & mask; | 684 return (s_value >> amount) & mask; |
| 685 } | 685 } |
| 686 case ROR: { | 686 case ROR: { |
| 687 if (reg_size == kWRegSize) { | 687 if (reg_size == kWRegSizeInBits) { |
| 688 value &= kWRegMask; | 688 value &= kWRegMask; |
| 689 } | 689 } |
| 690 return (static_cast<uint64_t>(value) >> amount) | | 690 return (static_cast<uint64_t>(value) >> amount) | |
| 691 ((value & ((1L << amount) - 1L)) << (reg_size - amount)); | 691 ((value & ((1L << amount) - 1L)) << (reg_size - amount)); |
| 692 } | 692 } |
| 693 default: | 693 default: |
| 694 UNIMPLEMENTED(); | 694 UNIMPLEMENTED(); |
| 695 return 0; | 695 return 0; |
| 696 } | 696 } |
| 697 } | 697 } |
| (...skipping 21 matching lines...) Expand all Loading... |
| 719 break; | 719 break; |
| 720 case SXTW: | 720 case SXTW: |
| 721 value = (value << 32) >> 32; | 721 value = (value << 32) >> 32; |
| 722 break; | 722 break; |
| 723 case UXTX: | 723 case UXTX: |
| 724 case SXTX: | 724 case SXTX: |
| 725 break; | 725 break; |
| 726 default: | 726 default: |
| 727 UNREACHABLE(); | 727 UNREACHABLE(); |
| 728 } | 728 } |
| 729 int64_t mask = (reg_size == kXRegSize) ? kXRegMask : kWRegMask; | 729 int64_t mask = (reg_size == kXRegSizeInBits) ? kXRegMask : kWRegMask; |
| 730 return (value << left_shift) & mask; | 730 return (value << left_shift) & mask; |
| 731 } | 731 } |
| 732 | 732 |
| 733 | 733 |
| 734 template<> double Simulator::FPDefaultNaN<double>() const { |
| 735 return kFP64DefaultNaN; |
| 736 } |
| 737 |
| 738 |
| 739 template<> float Simulator::FPDefaultNaN<float>() const { |
| 740 return kFP32DefaultNaN; |
| 741 } |
| 742 |
| 743 |
| 734 void Simulator::FPCompare(double val0, double val1) { | 744 void Simulator::FPCompare(double val0, double val1) { |
| 735 AssertSupportedFPCR(); | 745 AssertSupportedFPCR(); |
| 736 | 746 |
| 737 // TODO(jbramley): This assumes that the C++ implementation handles | 747 // TODO(jbramley): This assumes that the C++ implementation handles |
| 738 // comparisons in the way that we expect (as per AssertSupportedFPCR()). | 748 // comparisons in the way that we expect (as per AssertSupportedFPCR()). |
| 739 if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) { | 749 if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) { |
| 740 nzcv().SetRawValue(FPUnorderedFlag); | 750 nzcv().SetRawValue(FPUnorderedFlag); |
| 741 } else if (val0 < val1) { | 751 } else if (val0 < val1) { |
| 742 nzcv().SetRawValue(FPLessThanFlag); | 752 nzcv().SetRawValue(FPLessThanFlag); |
| 743 } else if (val0 > val1) { | 753 } else if (val0 > val1) { |
| (...skipping 305 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1049 case CBNZ_x: take_branch = (xreg(rt) != 0); break; | 1059 case CBNZ_x: take_branch = (xreg(rt) != 0); break; |
| 1050 default: UNIMPLEMENTED(); | 1060 default: UNIMPLEMENTED(); |
| 1051 } | 1061 } |
| 1052 if (take_branch) { | 1062 if (take_branch) { |
| 1053 set_pc(instr->ImmPCOffsetTarget()); | 1063 set_pc(instr->ImmPCOffsetTarget()); |
| 1054 } | 1064 } |
| 1055 } | 1065 } |
| 1056 | 1066 |
| 1057 | 1067 |
| 1058 void Simulator::AddSubHelper(Instruction* instr, int64_t op2) { | 1068 void Simulator::AddSubHelper(Instruction* instr, int64_t op2) { |
| 1059 unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; | 1069 unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits |
| 1070 : kWRegSizeInBits; |
| 1060 bool set_flags = instr->FlagsUpdate(); | 1071 bool set_flags = instr->FlagsUpdate(); |
| 1061 int64_t new_val = 0; | 1072 int64_t new_val = 0; |
| 1062 Instr operation = instr->Mask(AddSubOpMask); | 1073 Instr operation = instr->Mask(AddSubOpMask); |
| 1063 | 1074 |
| 1064 switch (operation) { | 1075 switch (operation) { |
| 1065 case ADD: | 1076 case ADD: |
| 1066 case ADDS: { | 1077 case ADDS: { |
| 1067 new_val = AddWithCarry(reg_size, | 1078 new_val = AddWithCarry(reg_size, |
| 1068 set_flags, | 1079 set_flags, |
| 1069 reg(reg_size, instr->Rn(), instr->RnMode()), | 1080 reg(reg_size, instr->Rn(), instr->RnMode()), |
| (...skipping 10 matching lines...) Expand all Loading... |
| 1080 break; | 1091 break; |
| 1081 } | 1092 } |
| 1082 default: UNREACHABLE(); | 1093 default: UNREACHABLE(); |
| 1083 } | 1094 } |
| 1084 | 1095 |
| 1085 set_reg(reg_size, instr->Rd(), new_val, instr->RdMode()); | 1096 set_reg(reg_size, instr->Rd(), new_val, instr->RdMode()); |
| 1086 } | 1097 } |
| 1087 | 1098 |
| 1088 | 1099 |
| 1089 void Simulator::VisitAddSubShifted(Instruction* instr) { | 1100 void Simulator::VisitAddSubShifted(Instruction* instr) { |
| 1090 unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; | 1101 unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits |
| 1102 : kWRegSizeInBits; |
| 1091 int64_t op2 = ShiftOperand(reg_size, | 1103 int64_t op2 = ShiftOperand(reg_size, |
| 1092 reg(reg_size, instr->Rm()), | 1104 reg(reg_size, instr->Rm()), |
| 1093 static_cast<Shift>(instr->ShiftDP()), | 1105 static_cast<Shift>(instr->ShiftDP()), |
| 1094 instr->ImmDPShift()); | 1106 instr->ImmDPShift()); |
| 1095 AddSubHelper(instr, op2); | 1107 AddSubHelper(instr, op2); |
| 1096 } | 1108 } |
| 1097 | 1109 |
| 1098 | 1110 |
| 1099 void Simulator::VisitAddSubImmediate(Instruction* instr) { | 1111 void Simulator::VisitAddSubImmediate(Instruction* instr) { |
| 1100 int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0); | 1112 int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0); |
| 1101 AddSubHelper(instr, op2); | 1113 AddSubHelper(instr, op2); |
| 1102 } | 1114 } |
| 1103 | 1115 |
| 1104 | 1116 |
| 1105 void Simulator::VisitAddSubExtended(Instruction* instr) { | 1117 void Simulator::VisitAddSubExtended(Instruction* instr) { |
| 1106 unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; | 1118 unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits |
| 1119 : kWRegSizeInBits; |
| 1107 int64_t op2 = ExtendValue(reg_size, | 1120 int64_t op2 = ExtendValue(reg_size, |
| 1108 reg(reg_size, instr->Rm()), | 1121 reg(reg_size, instr->Rm()), |
| 1109 static_cast<Extend>(instr->ExtendMode()), | 1122 static_cast<Extend>(instr->ExtendMode()), |
| 1110 instr->ImmExtendShift()); | 1123 instr->ImmExtendShift()); |
| 1111 AddSubHelper(instr, op2); | 1124 AddSubHelper(instr, op2); |
| 1112 } | 1125 } |
| 1113 | 1126 |
| 1114 | 1127 |
| 1115 void Simulator::VisitAddSubWithCarry(Instruction* instr) { | 1128 void Simulator::VisitAddSubWithCarry(Instruction* instr) { |
| 1116 unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; | 1129 unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits |
| 1130 : kWRegSizeInBits; |
| 1117 int64_t op2 = reg(reg_size, instr->Rm()); | 1131 int64_t op2 = reg(reg_size, instr->Rm()); |
| 1118 int64_t new_val; | 1132 int64_t new_val; |
| 1119 | 1133 |
| 1120 if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) { | 1134 if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) { |
| 1121 op2 = ~op2; | 1135 op2 = ~op2; |
| 1122 } | 1136 } |
| 1123 | 1137 |
| 1124 new_val = AddWithCarry(reg_size, | 1138 new_val = AddWithCarry(reg_size, |
| 1125 instr->FlagsUpdate(), | 1139 instr->FlagsUpdate(), |
| 1126 reg(reg_size, instr->Rn()), | 1140 reg(reg_size, instr->Rn()), |
| 1127 op2, | 1141 op2, |
| 1128 C()); | 1142 C()); |
| 1129 | 1143 |
| 1130 set_reg(reg_size, instr->Rd(), new_val); | 1144 set_reg(reg_size, instr->Rd(), new_val); |
| 1131 } | 1145 } |
| 1132 | 1146 |
| 1133 | 1147 |
| 1134 void Simulator::VisitLogicalShifted(Instruction* instr) { | 1148 void Simulator::VisitLogicalShifted(Instruction* instr) { |
| 1135 unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; | 1149 unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits |
| 1150 : kWRegSizeInBits; |
| 1136 Shift shift_type = static_cast<Shift>(instr->ShiftDP()); | 1151 Shift shift_type = static_cast<Shift>(instr->ShiftDP()); |
| 1137 unsigned shift_amount = instr->ImmDPShift(); | 1152 unsigned shift_amount = instr->ImmDPShift(); |
| 1138 int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type, | 1153 int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type, |
| 1139 shift_amount); | 1154 shift_amount); |
| 1140 if (instr->Mask(NOT) == NOT) { | 1155 if (instr->Mask(NOT) == NOT) { |
| 1141 op2 = ~op2; | 1156 op2 = ~op2; |
| 1142 } | 1157 } |
| 1143 LogicalHelper(instr, op2); | 1158 LogicalHelper(instr, op2); |
| 1144 } | 1159 } |
| 1145 | 1160 |
| 1146 | 1161 |
| 1147 void Simulator::VisitLogicalImmediate(Instruction* instr) { | 1162 void Simulator::VisitLogicalImmediate(Instruction* instr) { |
| 1148 LogicalHelper(instr, instr->ImmLogical()); | 1163 LogicalHelper(instr, instr->ImmLogical()); |
| 1149 } | 1164 } |
| 1150 | 1165 |
| 1151 | 1166 |
| 1152 void Simulator::LogicalHelper(Instruction* instr, int64_t op2) { | 1167 void Simulator::LogicalHelper(Instruction* instr, int64_t op2) { |
| 1153 unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; | 1168 unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits |
| 1169 : kWRegSizeInBits; |
| 1154 int64_t op1 = reg(reg_size, instr->Rn()); | 1170 int64_t op1 = reg(reg_size, instr->Rn()); |
| 1155 int64_t result = 0; | 1171 int64_t result = 0; |
| 1156 bool update_flags = false; | 1172 bool update_flags = false; |
| 1157 | 1173 |
| 1158 // Switch on the logical operation, stripping out the NOT bit, as it has a | 1174 // Switch on the logical operation, stripping out the NOT bit, as it has a |
| 1159 // different meaning for logical immediate instructions. | 1175 // different meaning for logical immediate instructions. |
| 1160 switch (instr->Mask(LogicalOpMask & ~NOT)) { | 1176 switch (instr->Mask(LogicalOpMask & ~NOT)) { |
| 1161 case ANDS: update_flags = true; // Fall through. | 1177 case ANDS: update_flags = true; // Fall through. |
| 1162 case AND: result = op1 & op2; break; | 1178 case AND: result = op1 & op2; break; |
| 1163 case ORR: result = op1 | op2; break; | 1179 case ORR: result = op1 | op2; break; |
| 1164 case EOR: result = op1 ^ op2; break; | 1180 case EOR: result = op1 ^ op2; break; |
| 1165 default: | 1181 default: |
| 1166 UNIMPLEMENTED(); | 1182 UNIMPLEMENTED(); |
| 1167 } | 1183 } |
| 1168 | 1184 |
| 1169 if (update_flags) { | 1185 if (update_flags) { |
| 1170 nzcv().SetN(CalcNFlag(result, reg_size)); | 1186 nzcv().SetN(CalcNFlag(result, reg_size)); |
| 1171 nzcv().SetZ(CalcZFlag(result)); | 1187 nzcv().SetZ(CalcZFlag(result)); |
| 1172 nzcv().SetC(0); | 1188 nzcv().SetC(0); |
| 1173 nzcv().SetV(0); | 1189 nzcv().SetV(0); |
| 1174 } | 1190 } |
| 1175 | 1191 |
| 1176 set_reg(reg_size, instr->Rd(), result, instr->RdMode()); | 1192 set_reg(reg_size, instr->Rd(), result, instr->RdMode()); |
| 1177 } | 1193 } |
| 1178 | 1194 |
| 1179 | 1195 |
| 1180 void Simulator::VisitConditionalCompareRegister(Instruction* instr) { | 1196 void Simulator::VisitConditionalCompareRegister(Instruction* instr) { |
| 1181 unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; | 1197 unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits |
| 1198 : kWRegSizeInBits; |
| 1182 ConditionalCompareHelper(instr, reg(reg_size, instr->Rm())); | 1199 ConditionalCompareHelper(instr, reg(reg_size, instr->Rm())); |
| 1183 } | 1200 } |
| 1184 | 1201 |
| 1185 | 1202 |
| 1186 void Simulator::VisitConditionalCompareImmediate(Instruction* instr) { | 1203 void Simulator::VisitConditionalCompareImmediate(Instruction* instr) { |
| 1187 ConditionalCompareHelper(instr, instr->ImmCondCmp()); | 1204 ConditionalCompareHelper(instr, instr->ImmCondCmp()); |
| 1188 } | 1205 } |
| 1189 | 1206 |
| 1190 | 1207 |
| 1191 void Simulator::ConditionalCompareHelper(Instruction* instr, int64_t op2) { | 1208 void Simulator::ConditionalCompareHelper(Instruction* instr, int64_t op2) { |
| 1192 unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; | 1209 unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits |
| 1210 : kWRegSizeInBits; |
| 1193 int64_t op1 = reg(reg_size, instr->Rn()); | 1211 int64_t op1 = reg(reg_size, instr->Rn()); |
| 1194 | 1212 |
| 1195 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) { | 1213 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) { |
| 1196 // If the condition passes, set the status flags to the result of comparing | 1214 // If the condition passes, set the status flags to the result of comparing |
| 1197 // the operands. | 1215 // the operands. |
| 1198 if (instr->Mask(ConditionalCompareMask) == CCMP) { | 1216 if (instr->Mask(ConditionalCompareMask) == CCMP) { |
| 1199 AddWithCarry(reg_size, true, op1, ~op2, 1); | 1217 AddWithCarry(reg_size, true, op1, ~op2, 1); |
| 1200 } else { | 1218 } else { |
| 1201 ASSERT(instr->Mask(ConditionalCompareMask) == CCMN); | 1219 ASSERT(instr->Mask(ConditionalCompareMask) == CCMN); |
| 1202 AddWithCarry(reg_size, true, op1, op2, 0); | 1220 AddWithCarry(reg_size, true, op1, op2, 0); |
| (...skipping 24 matching lines...) Expand all Loading... |
| 1227 void Simulator::VisitLoadStorePostIndex(Instruction* instr) { | 1245 void Simulator::VisitLoadStorePostIndex(Instruction* instr) { |
| 1228 LoadStoreHelper(instr, instr->ImmLS(), PostIndex); | 1246 LoadStoreHelper(instr, instr->ImmLS(), PostIndex); |
| 1229 } | 1247 } |
| 1230 | 1248 |
| 1231 | 1249 |
| 1232 void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) { | 1250 void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) { |
| 1233 Extend ext = static_cast<Extend>(instr->ExtendMode()); | 1251 Extend ext = static_cast<Extend>(instr->ExtendMode()); |
| 1234 ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX)); | 1252 ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX)); |
| 1235 unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS(); | 1253 unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS(); |
| 1236 | 1254 |
| 1237 int64_t offset = ExtendValue(kXRegSize, xreg(instr->Rm()), ext, | 1255 int64_t offset = ExtendValue(kXRegSizeInBits, xreg(instr->Rm()), ext, |
| 1238 shift_amount); | 1256 shift_amount); |
| 1239 LoadStoreHelper(instr, offset, Offset); | 1257 LoadStoreHelper(instr, offset, Offset); |
| 1240 } | 1258 } |
| 1241 | 1259 |
| 1242 | 1260 |
| 1243 void Simulator::LoadStoreHelper(Instruction* instr, | 1261 void Simulator::LoadStoreHelper(Instruction* instr, |
| 1244 int64_t offset, | 1262 int64_t offset, |
| 1245 AddrMode addrmode) { | 1263 AddrMode addrmode) { |
| 1246 unsigned srcdst = instr->Rt(); | 1264 unsigned srcdst = instr->Rt(); |
| 1247 unsigned addr_reg = instr->Rn(); | 1265 unsigned addr_reg = instr->Rn(); |
| (...skipping 20 matching lines...) Expand all Loading... |
| 1268 switch (op) { | 1286 switch (op) { |
| 1269 case LDRB_w: | 1287 case LDRB_w: |
| 1270 case LDRH_w: | 1288 case LDRH_w: |
| 1271 case LDR_w: | 1289 case LDR_w: |
| 1272 case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes)); break; | 1290 case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes)); break; |
| 1273 case STRB_w: | 1291 case STRB_w: |
| 1274 case STRH_w: | 1292 case STRH_w: |
| 1275 case STR_w: | 1293 case STR_w: |
| 1276 case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break; | 1294 case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break; |
| 1277 case LDRSB_w: { | 1295 case LDRSB_w: { |
| 1278 set_wreg(srcdst, ExtendValue(kWRegSize, MemoryRead8(address), SXTB)); | 1296 set_wreg(srcdst, |
| 1297 ExtendValue(kWRegSizeInBits, MemoryRead8(address), SXTB)); |
| 1279 break; | 1298 break; |
| 1280 } | 1299 } |
| 1281 case LDRSB_x: { | 1300 case LDRSB_x: { |
| 1282 set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead8(address), SXTB)); | 1301 set_xreg(srcdst, |
| 1302 ExtendValue(kXRegSizeInBits, MemoryRead8(address), SXTB)); |
| 1283 break; | 1303 break; |
| 1284 } | 1304 } |
| 1285 case LDRSH_w: { | 1305 case LDRSH_w: { |
| 1286 set_wreg(srcdst, ExtendValue(kWRegSize, MemoryRead16(address), SXTH)); | 1306 set_wreg(srcdst, |
| 1307 ExtendValue(kWRegSizeInBits, MemoryRead16(address), SXTH)); |
| 1287 break; | 1308 break; |
| 1288 } | 1309 } |
| 1289 case LDRSH_x: { | 1310 case LDRSH_x: { |
| 1290 set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead16(address), SXTH)); | 1311 set_xreg(srcdst, |
| 1312 ExtendValue(kXRegSizeInBits, MemoryRead16(address), SXTH)); |
| 1291 break; | 1313 break; |
| 1292 } | 1314 } |
| 1293 case LDRSW_x: { | 1315 case LDRSW_x: { |
| 1294 set_xreg(srcdst, ExtendValue(kXRegSize, MemoryRead32(address), SXTW)); | 1316 set_xreg(srcdst, |
| 1317 ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW)); |
| 1295 break; | 1318 break; |
| 1296 } | 1319 } |
| 1297 case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break; | 1320 case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break; |
| 1298 case LDR_d: set_dreg(srcdst, MemoryReadFP64(address)); break; | 1321 case LDR_d: set_dreg(srcdst, MemoryReadFP64(address)); break; |
| 1299 case STR_s: MemoryWriteFP32(address, sreg(srcdst)); break; | 1322 case STR_s: MemoryWriteFP32(address, sreg(srcdst)); break; |
| 1300 case STR_d: MemoryWriteFP64(address, dreg(srcdst)); break; | 1323 case STR_d: MemoryWriteFP64(address, dreg(srcdst)); break; |
| 1301 default: UNIMPLEMENTED(); | 1324 default: UNIMPLEMENTED(); |
| 1302 } | 1325 } |
| 1303 | 1326 |
| 1304 // Handle the writeback for loads after the load to ensure safe pop | 1327 // Handle the writeback for loads after the load to ensure safe pop |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1365 | 1388 |
| 1366 LoadStorePairOp op = | 1389 LoadStorePairOp op = |
| 1367 static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask)); | 1390 static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask)); |
| 1368 | 1391 |
| 1369 // 'rt' and 'rt2' can only be aliased for stores. | 1392 // 'rt' and 'rt2' can only be aliased for stores. |
| 1370 ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2)); | 1393 ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2)); |
| 1371 | 1394 |
| 1372 switch (op) { | 1395 switch (op) { |
| 1373 case LDP_w: { | 1396 case LDP_w: { |
| 1374 set_wreg(rt, MemoryRead32(address)); | 1397 set_wreg(rt, MemoryRead32(address)); |
| 1375 set_wreg(rt2, MemoryRead32(address + kWRegSizeInBytes)); | 1398 set_wreg(rt2, MemoryRead32(address + kWRegSize)); |
| 1376 break; | 1399 break; |
| 1377 } | 1400 } |
| 1378 case LDP_s: { | 1401 case LDP_s: { |
| 1379 set_sreg(rt, MemoryReadFP32(address)); | 1402 set_sreg(rt, MemoryReadFP32(address)); |
| 1380 set_sreg(rt2, MemoryReadFP32(address + kSRegSizeInBytes)); | 1403 set_sreg(rt2, MemoryReadFP32(address + kSRegSize)); |
| 1381 break; | 1404 break; |
| 1382 } | 1405 } |
| 1383 case LDP_x: { | 1406 case LDP_x: { |
| 1384 set_xreg(rt, MemoryRead64(address)); | 1407 set_xreg(rt, MemoryRead64(address)); |
| 1385 set_xreg(rt2, MemoryRead64(address + kXRegSizeInBytes)); | 1408 set_xreg(rt2, MemoryRead64(address + kXRegSize)); |
| 1386 break; | 1409 break; |
| 1387 } | 1410 } |
| 1388 case LDP_d: { | 1411 case LDP_d: { |
| 1389 set_dreg(rt, MemoryReadFP64(address)); | 1412 set_dreg(rt, MemoryReadFP64(address)); |
| 1390 set_dreg(rt2, MemoryReadFP64(address + kDRegSizeInBytes)); | 1413 set_dreg(rt2, MemoryReadFP64(address + kDRegSize)); |
| 1391 break; | 1414 break; |
| 1392 } | 1415 } |
| 1393 case LDPSW_x: { | 1416 case LDPSW_x: { |
| 1394 set_xreg(rt, ExtendValue(kXRegSize, MemoryRead32(address), SXTW)); | 1417 set_xreg(rt, ExtendValue(kXRegSizeInBits, MemoryRead32(address), SXTW)); |
| 1395 set_xreg(rt2, ExtendValue(kXRegSize, | 1418 set_xreg(rt2, ExtendValue(kXRegSizeInBits, |
| 1396 MemoryRead32(address + kWRegSizeInBytes), SXTW)); | 1419 MemoryRead32(address + kWRegSize), SXTW)); |
| 1397 break; | 1420 break; |
| 1398 } | 1421 } |
| 1399 case STP_w: { | 1422 case STP_w: { |
| 1400 MemoryWrite32(address, wreg(rt)); | 1423 MemoryWrite32(address, wreg(rt)); |
| 1401 MemoryWrite32(address + kWRegSizeInBytes, wreg(rt2)); | 1424 MemoryWrite32(address + kWRegSize, wreg(rt2)); |
| 1402 break; | 1425 break; |
| 1403 } | 1426 } |
| 1404 case STP_s: { | 1427 case STP_s: { |
| 1405 MemoryWriteFP32(address, sreg(rt)); | 1428 MemoryWriteFP32(address, sreg(rt)); |
| 1406 MemoryWriteFP32(address + kSRegSizeInBytes, sreg(rt2)); | 1429 MemoryWriteFP32(address + kSRegSize, sreg(rt2)); |
| 1407 break; | 1430 break; |
| 1408 } | 1431 } |
| 1409 case STP_x: { | 1432 case STP_x: { |
| 1410 MemoryWrite64(address, xreg(rt)); | 1433 MemoryWrite64(address, xreg(rt)); |
| 1411 MemoryWrite64(address + kXRegSizeInBytes, xreg(rt2)); | 1434 MemoryWrite64(address + kXRegSize, xreg(rt2)); |
| 1412 break; | 1435 break; |
| 1413 } | 1436 } |
| 1414 case STP_d: { | 1437 case STP_d: { |
| 1415 MemoryWriteFP64(address, dreg(rt)); | 1438 MemoryWriteFP64(address, dreg(rt)); |
| 1416 MemoryWriteFP64(address + kDRegSizeInBytes, dreg(rt2)); | 1439 MemoryWriteFP64(address + kDRegSize, dreg(rt2)); |
| 1417 break; | 1440 break; |
| 1418 } | 1441 } |
| 1419 default: UNREACHABLE(); | 1442 default: UNREACHABLE(); |
| 1420 } | 1443 } |
| 1421 | 1444 |
| 1422 // Handle the writeback for loads after the load to ensure safe pop | 1445 // Handle the writeback for loads after the load to ensure safe pop |
| 1423 // operation even when interrupted in the middle of it. The stack pointer | 1446 // operation even when interrupted in the middle of it. The stack pointer |
| 1424 // is only updated after the load so pop(fp) will never break the invariant | 1447 // is only updated after the load so pop(fp) will never break the invariant |
| 1425 // sp <= fp expected while walking the stack in the sampler. | 1448 // sp <= fp expected while walking the stack in the sampler. |
| 1426 if (instr->IsLoad()) { | 1449 if (instr->IsLoad()) { |
| (...skipping 190 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1617 case CSEL_x: break; | 1640 case CSEL_x: break; |
| 1618 case CSINC_w: | 1641 case CSINC_w: |
| 1619 case CSINC_x: new_val++; break; | 1642 case CSINC_x: new_val++; break; |
| 1620 case CSINV_w: | 1643 case CSINV_w: |
| 1621 case CSINV_x: new_val = ~new_val; break; | 1644 case CSINV_x: new_val = ~new_val; break; |
| 1622 case CSNEG_w: | 1645 case CSNEG_w: |
| 1623 case CSNEG_x: new_val = -new_val; break; | 1646 case CSNEG_x: new_val = -new_val; break; |
| 1624 default: UNIMPLEMENTED(); | 1647 default: UNIMPLEMENTED(); |
| 1625 } | 1648 } |
| 1626 } | 1649 } |
| 1627 unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; | 1650 unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits |
| 1651 : kWRegSizeInBits; |
| 1628 set_reg(reg_size, instr->Rd(), new_val); | 1652 set_reg(reg_size, instr->Rd(), new_val); |
| 1629 } | 1653 } |
| 1630 | 1654 |
| 1631 | 1655 |
| 1632 void Simulator::VisitDataProcessing1Source(Instruction* instr) { | 1656 void Simulator::VisitDataProcessing1Source(Instruction* instr) { |
| 1633 unsigned dst = instr->Rd(); | 1657 unsigned dst = instr->Rd(); |
| 1634 unsigned src = instr->Rn(); | 1658 unsigned src = instr->Rn(); |
| 1635 | 1659 |
| 1636 switch (instr->Mask(DataProcessing1SourceMask)) { | 1660 switch (instr->Mask(DataProcessing1SourceMask)) { |
| 1637 case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSize)); break; | 1661 case RBIT_w: set_wreg(dst, ReverseBits(wreg(src), kWRegSizeInBits)); break; |
| 1638 case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSize)); break; | 1662 case RBIT_x: set_xreg(dst, ReverseBits(xreg(src), kXRegSizeInBits)); break; |
| 1639 case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break; | 1663 case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse16)); break; |
| 1640 case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break; | 1664 case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse16)); break; |
| 1641 case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break; | 1665 case REV_w: set_wreg(dst, ReverseBytes(wreg(src), Reverse32)); break; |
| 1642 case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break; | 1666 case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse32)); break; |
| 1643 case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break; | 1667 case REV_x: set_xreg(dst, ReverseBytes(xreg(src), Reverse64)); break; |
| 1644 case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSize)); break; | 1668 case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src), kWRegSizeInBits)); |
| 1645 case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSize)); break; | 1669 break; |
| 1670 case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src), kXRegSizeInBits)); |
| 1671 break; |
| 1646 case CLS_w: { | 1672 case CLS_w: { |
| 1647 set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSize)); | 1673 set_wreg(dst, CountLeadingSignBits(wreg(src), kWRegSizeInBits)); |
| 1648 break; | 1674 break; |
| 1649 } | 1675 } |
| 1650 case CLS_x: { | 1676 case CLS_x: { |
| 1651 set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSize)); | 1677 set_xreg(dst, CountLeadingSignBits(xreg(src), kXRegSizeInBits)); |
| 1652 break; | 1678 break; |
| 1653 } | 1679 } |
| 1654 default: UNIMPLEMENTED(); | 1680 default: UNIMPLEMENTED(); |
| 1655 } | 1681 } |
| 1656 } | 1682 } |
| 1657 | 1683 |
| 1658 | 1684 |
| 1659 uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) { | 1685 uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) { |
| 1660 ASSERT((num_bits == kWRegSize) || (num_bits == kXRegSize)); | 1686 ASSERT((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits)); |
| 1661 uint64_t result = 0; | 1687 uint64_t result = 0; |
| 1662 for (unsigned i = 0; i < num_bits; i++) { | 1688 for (unsigned i = 0; i < num_bits; i++) { |
| 1663 result = (result << 1) | (value & 1); | 1689 result = (result << 1) | (value & 1); |
| 1664 value >>= 1; | 1690 value >>= 1; |
| 1665 } | 1691 } |
| 1666 return result; | 1692 return result; |
| 1667 } | 1693 } |
| 1668 | 1694 |
| 1669 | 1695 |
| 1670 uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) { | 1696 uint64_t Simulator::ReverseBytes(uint64_t value, ReverseByteMode mode) { |
| (...skipping 84 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1755 case LSLV_x: shift_op = LSL; break; | 1781 case LSLV_x: shift_op = LSL; break; |
| 1756 case LSRV_w: | 1782 case LSRV_w: |
| 1757 case LSRV_x: shift_op = LSR; break; | 1783 case LSRV_x: shift_op = LSR; break; |
| 1758 case ASRV_w: | 1784 case ASRV_w: |
| 1759 case ASRV_x: shift_op = ASR; break; | 1785 case ASRV_x: shift_op = ASR; break; |
| 1760 case RORV_w: | 1786 case RORV_w: |
| 1761 case RORV_x: shift_op = ROR; break; | 1787 case RORV_x: shift_op = ROR; break; |
| 1762 default: UNIMPLEMENTED(); | 1788 default: UNIMPLEMENTED(); |
| 1763 } | 1789 } |
| 1764 | 1790 |
| 1765 unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; | 1791 unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits |
| 1792 : kWRegSizeInBits; |
| 1766 if (shift_op != NO_SHIFT) { | 1793 if (shift_op != NO_SHIFT) { |
| 1767 // Shift distance encoded in the least-significant five/six bits of the | 1794 // Shift distance encoded in the least-significant five/six bits of the |
| 1768 // register. | 1795 // register. |
| 1769 int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f; | 1796 int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f; |
| 1770 unsigned shift = wreg(instr->Rm()) & mask; | 1797 unsigned shift = wreg(instr->Rm()) & mask; |
| 1771 result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op, | 1798 result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op, |
| 1772 shift); | 1799 shift); |
| 1773 } | 1800 } |
| 1774 set_reg(reg_size, instr->Rd(), result); | 1801 set_reg(reg_size, instr->Rd(), result); |
| 1775 } | 1802 } |
| (...skipping 15 matching lines...) Expand all Loading... |
| 1791 t = u1 * v0 + (w0 >> 32); | 1818 t = u1 * v0 + (w0 >> 32); |
| 1792 w1 = t & 0xffffffffL; | 1819 w1 = t & 0xffffffffL; |
| 1793 w2 = t >> 32; | 1820 w2 = t >> 32; |
| 1794 w1 = u0 * v1 + w1; | 1821 w1 = u0 * v1 + w1; |
| 1795 | 1822 |
| 1796 return u1 * v1 + w2 + (w1 >> 32); | 1823 return u1 * v1 + w2 + (w1 >> 32); |
| 1797 } | 1824 } |
| 1798 | 1825 |
| 1799 | 1826 |
| 1800 void Simulator::VisitDataProcessing3Source(Instruction* instr) { | 1827 void Simulator::VisitDataProcessing3Source(Instruction* instr) { |
| 1801 unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; | 1828 unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits |
| 1829 : kWRegSizeInBits; |
| 1802 | 1830 |
| 1803 int64_t result = 0; | 1831 int64_t result = 0; |
| 1804 // Extract and sign- or zero-extend 32-bit arguments for widening operations. | 1832 // Extract and sign- or zero-extend 32-bit arguments for widening operations. |
| 1805 uint64_t rn_u32 = reg<uint32_t>(instr->Rn()); | 1833 uint64_t rn_u32 = reg<uint32_t>(instr->Rn()); |
| 1806 uint64_t rm_u32 = reg<uint32_t>(instr->Rm()); | 1834 uint64_t rm_u32 = reg<uint32_t>(instr->Rm()); |
| 1807 int64_t rn_s32 = reg<int32_t>(instr->Rn()); | 1835 int64_t rn_s32 = reg<int32_t>(instr->Rn()); |
| 1808 int64_t rm_s32 = reg<int32_t>(instr->Rm()); | 1836 int64_t rm_s32 = reg<int32_t>(instr->Rm()); |
| 1809 switch (instr->Mask(DataProcessing3SourceMask)) { | 1837 switch (instr->Mask(DataProcessing3SourceMask)) { |
| 1810 case MADD_w: | 1838 case MADD_w: |
| 1811 case MADD_x: | 1839 case MADD_x: |
| (...skipping 11 matching lines...) Expand all Loading... |
| 1823 ASSERT(instr->Ra() == kZeroRegCode); | 1851 ASSERT(instr->Ra() == kZeroRegCode); |
| 1824 result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm())); | 1852 result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm())); |
| 1825 break; | 1853 break; |
| 1826 default: UNIMPLEMENTED(); | 1854 default: UNIMPLEMENTED(); |
| 1827 } | 1855 } |
| 1828 set_reg(reg_size, instr->Rd(), result); | 1856 set_reg(reg_size, instr->Rd(), result); |
| 1829 } | 1857 } |
| 1830 | 1858 |
| 1831 | 1859 |
| 1832 void Simulator::VisitBitfield(Instruction* instr) { | 1860 void Simulator::VisitBitfield(Instruction* instr) { |
| 1833 unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize; | 1861 unsigned reg_size = instr->SixtyFourBits() ? kXRegSizeInBits |
| 1862 : kWRegSizeInBits; |
| 1834 int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask; | 1863 int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask; |
| 1835 int64_t R = instr->ImmR(); | 1864 int64_t R = instr->ImmR(); |
| 1836 int64_t S = instr->ImmS(); | 1865 int64_t S = instr->ImmS(); |
| 1837 int64_t diff = S - R; | 1866 int64_t diff = S - R; |
| 1838 int64_t mask; | 1867 int64_t mask; |
| 1839 if (diff >= 0) { | 1868 if (diff >= 0) { |
| 1840 mask = diff < reg_size - 1 ? (1L << (diff + 1)) - 1 | 1869 mask = diff < reg_size - 1 ? (1L << (diff + 1)) - 1 |
| 1841 : reg_mask; | 1870 : reg_mask; |
| 1842 } else { | 1871 } else { |
| 1843 mask = ((1L << (S + 1)) - 1); | 1872 mask = ((1L << (S + 1)) - 1); |
| (...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1877 | 1906 |
| 1878 // Merge sign extension, dest/zero and bitfield. | 1907 // Merge sign extension, dest/zero and bitfield. |
| 1879 result = signbits | (result & mask) | (dst & ~mask); | 1908 result = signbits | (result & mask) | (dst & ~mask); |
| 1880 | 1909 |
| 1881 set_reg(reg_size, instr->Rd(), result); | 1910 set_reg(reg_size, instr->Rd(), result); |
| 1882 } | 1911 } |
| 1883 | 1912 |
| 1884 | 1913 |
| 1885 void Simulator::VisitExtract(Instruction* instr) { | 1914 void Simulator::VisitExtract(Instruction* instr) { |
| 1886 unsigned lsb = instr->ImmS(); | 1915 unsigned lsb = instr->ImmS(); |
| 1887 unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize | 1916 unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits |
| 1888 : kWRegSize; | 1917 : kWRegSizeInBits; |
| 1889 set_reg(reg_size, | 1918 set_reg(reg_size, |
| 1890 instr->Rd(), | 1919 instr->Rd(), |
| 1891 (static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb) | | 1920 (static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb) | |
| 1892 (reg(reg_size, instr->Rn()) << (reg_size - lsb))); | 1921 (reg(reg_size, instr->Rn()) << (reg_size - lsb))); |
| 1893 } | 1922 } |
| 1894 | 1923 |
| 1895 | 1924 |
| 1896 void Simulator::VisitFPImmediate(Instruction* instr) { | 1925 void Simulator::VisitFPImmediate(Instruction* instr) { |
| 1897 AssertSupportedFPCR(); | 1926 AssertSupportedFPCR(); |
| 1898 | 1927 |
| (...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2074 } else if (value < 0.0) { | 2103 } else if (value < 0.0) { |
| 2075 return 0; | 2104 return 0; |
| 2076 } | 2105 } |
| 2077 return std::isnan(value) ? 0 : static_cast<uint64_t>(value); | 2106 return std::isnan(value) ? 0 : static_cast<uint64_t>(value); |
| 2078 } | 2107 } |
| 2079 | 2108 |
| 2080 | 2109 |
| 2081 void Simulator::VisitFPCompare(Instruction* instr) { | 2110 void Simulator::VisitFPCompare(Instruction* instr) { |
| 2082 AssertSupportedFPCR(); | 2111 AssertSupportedFPCR(); |
| 2083 | 2112 |
| 2084 unsigned reg_size = instr->FPType() == FP32 ? kSRegSize : kDRegSize; | 2113 unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits |
| 2114 : kSRegSizeInBits; |
| 2085 double fn_val = fpreg(reg_size, instr->Rn()); | 2115 double fn_val = fpreg(reg_size, instr->Rn()); |
| 2086 | 2116 |
| 2087 switch (instr->Mask(FPCompareMask)) { | 2117 switch (instr->Mask(FPCompareMask)) { |
| 2088 case FCMP_s: | 2118 case FCMP_s: |
| 2089 case FCMP_d: FPCompare(fn_val, fpreg(reg_size, instr->Rm())); break; | 2119 case FCMP_d: FPCompare(fn_val, fpreg(reg_size, instr->Rm())); break; |
| 2090 case FCMP_s_zero: | 2120 case FCMP_s_zero: |
| 2091 case FCMP_d_zero: FPCompare(fn_val, 0.0); break; | 2121 case FCMP_d_zero: FPCompare(fn_val, 0.0); break; |
| 2092 default: UNIMPLEMENTED(); | 2122 default: UNIMPLEMENTED(); |
| 2093 } | 2123 } |
| 2094 } | 2124 } |
| 2095 | 2125 |
| 2096 | 2126 |
| 2097 void Simulator::VisitFPConditionalCompare(Instruction* instr) { | 2127 void Simulator::VisitFPConditionalCompare(Instruction* instr) { |
| 2098 AssertSupportedFPCR(); | 2128 AssertSupportedFPCR(); |
| 2099 | 2129 |
| 2100 switch (instr->Mask(FPConditionalCompareMask)) { | 2130 switch (instr->Mask(FPConditionalCompareMask)) { |
| 2101 case FCCMP_s: | 2131 case FCCMP_s: |
| 2102 case FCCMP_d: { | 2132 case FCCMP_d: { |
| 2103 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) { | 2133 if (ConditionPassed(static_cast<Condition>(instr->Condition()))) { |
| 2104 // If the condition passes, set the status flags to the result of | 2134 // If the condition passes, set the status flags to the result of |
| 2105 // comparing the operands. | 2135 // comparing the operands. |
| 2106 unsigned reg_size = instr->FPType() == FP32 ? kSRegSize : kDRegSize; | 2136 unsigned reg_size = (instr->Mask(FP64) == FP64) ? kDRegSizeInBits |
| 2137 : kSRegSizeInBits; |
| 2107 FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm())); | 2138 FPCompare(fpreg(reg_size, instr->Rn()), fpreg(reg_size, instr->Rm())); |
| 2108 } else { | 2139 } else { |
| 2109 // If the condition fails, set the status flags to the nzcv immediate. | 2140 // If the condition fails, set the status flags to the nzcv immediate. |
| 2110 nzcv().SetFlags(instr->Nzcv()); | 2141 nzcv().SetFlags(instr->Nzcv()); |
| 2111 } | 2142 } |
| 2112 break; | 2143 break; |
| 2113 } | 2144 } |
| 2114 default: UNIMPLEMENTED(); | 2145 default: UNIMPLEMENTED(); |
| 2115 } | 2146 } |
| 2116 } | 2147 } |
| (...skipping 23 matching lines...) Expand all Loading... |
| 2140 unsigned fd = instr->Rd(); | 2171 unsigned fd = instr->Rd(); |
| 2141 unsigned fn = instr->Rn(); | 2172 unsigned fn = instr->Rn(); |
| 2142 | 2173 |
| 2143 switch (instr->Mask(FPDataProcessing1SourceMask)) { | 2174 switch (instr->Mask(FPDataProcessing1SourceMask)) { |
| 2144 case FMOV_s: set_sreg(fd, sreg(fn)); break; | 2175 case FMOV_s: set_sreg(fd, sreg(fn)); break; |
| 2145 case FMOV_d: set_dreg(fd, dreg(fn)); break; | 2176 case FMOV_d: set_dreg(fd, dreg(fn)); break; |
| 2146 case FABS_s: set_sreg(fd, std::fabs(sreg(fn))); break; | 2177 case FABS_s: set_sreg(fd, std::fabs(sreg(fn))); break; |
| 2147 case FABS_d: set_dreg(fd, std::fabs(dreg(fn))); break; | 2178 case FABS_d: set_dreg(fd, std::fabs(dreg(fn))); break; |
| 2148 case FNEG_s: set_sreg(fd, -sreg(fn)); break; | 2179 case FNEG_s: set_sreg(fd, -sreg(fn)); break; |
| 2149 case FNEG_d: set_dreg(fd, -dreg(fn)); break; | 2180 case FNEG_d: set_dreg(fd, -dreg(fn)); break; |
| 2150 case FSQRT_s: set_sreg(fd, std::sqrt(sreg(fn))); break; | 2181 case FSQRT_s: set_sreg(fd, FPSqrt(sreg(fn))); break; |
| 2151 case FSQRT_d: set_dreg(fd, std::sqrt(dreg(fn))); break; | 2182 case FSQRT_d: set_dreg(fd, FPSqrt(dreg(fn))); break; |
| 2152 case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break; | 2183 case FRINTA_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieAway)); break; |
| 2153 case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break; | 2184 case FRINTA_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieAway)); break; |
| 2154 case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break; | 2185 case FRINTN_s: set_sreg(fd, FPRoundInt(sreg(fn), FPTieEven)); break; |
| 2155 case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break; | 2186 case FRINTN_d: set_dreg(fd, FPRoundInt(dreg(fn), FPTieEven)); break; |
| 2156 case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break; | 2187 case FRINTZ_s: set_sreg(fd, FPRoundInt(sreg(fn), FPZero)); break; |
| 2157 case FRINTZ_d: set_dreg(fd, FPRoundInt(dreg(fn), FPZero)); break; | 2188 case FRINTZ_d: set_dreg(fd, FPRoundInt(dreg(fn), FPZero)); break; |
| 2158 case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); break; | 2189 case FCVT_ds: set_dreg(fd, FPToDouble(sreg(fn))); break; |
| 2159 case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); break; | 2190 case FCVT_sd: set_sreg(fd, FPToFloat(dreg(fn), FPTieEven)); break; |
| 2160 default: UNIMPLEMENTED(); | 2191 default: UNIMPLEMENTED(); |
| 2161 } | 2192 } |
| (...skipping 238 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2400 // 2^exponent. | 2431 // 2^exponent. |
| 2401 const int highest_significant_bit = 63 - CountLeadingZeros(src, 64); | 2432 const int highest_significant_bit = 63 - CountLeadingZeros(src, 64); |
| 2402 const int32_t exponent = highest_significant_bit - fbits; | 2433 const int32_t exponent = highest_significant_bit - fbits; |
| 2403 | 2434 |
| 2404 return FPRoundToFloat(0, exponent, src, round); | 2435 return FPRoundToFloat(0, exponent, src, round); |
| 2405 } | 2436 } |
| 2406 | 2437 |
| 2407 | 2438 |
| 2408 double Simulator::FPRoundInt(double value, FPRounding round_mode) { | 2439 double Simulator::FPRoundInt(double value, FPRounding round_mode) { |
| 2409 if ((value == 0.0) || (value == kFP64PositiveInfinity) || | 2440 if ((value == 0.0) || (value == kFP64PositiveInfinity) || |
| 2410 (value == kFP64NegativeInfinity) || std::isnan(value)) { | 2441 (value == kFP64NegativeInfinity)) { |
| 2411 return value; | 2442 return value; |
| 2443 } else if (std::isnan(value)) { |
| 2444 return FPProcessNaN(value); |
| 2412 } | 2445 } |
| 2413 | 2446 |
| 2414 double int_result = floor(value); | 2447 double int_result = floor(value); |
| 2415 double error = value - int_result; | 2448 double error = value - int_result; |
| 2416 switch (round_mode) { | 2449 switch (round_mode) { |
| 2417 case FPTieAway: { | 2450 case FPTieAway: { |
| 2418 // If the error is greater than 0.5, or is equal to 0.5 and the integer | 2451 // If the error is greater than 0.5, or is equal to 0.5 and the integer |
| 2419 // result is positive, round up. | 2452 // result is positive, round up. |
| 2420 if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) { | 2453 if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) { |
| 2421 int_result++; | 2454 int_result++; |
| (...skipping 23 matching lines...) Expand all Loading... |
| 2445 } | 2478 } |
| 2446 default: UNIMPLEMENTED(); | 2479 default: UNIMPLEMENTED(); |
| 2447 } | 2480 } |
| 2448 return int_result; | 2481 return int_result; |
| 2449 } | 2482 } |
| 2450 | 2483 |
| 2451 | 2484 |
| 2452 double Simulator::FPToDouble(float value) { | 2485 double Simulator::FPToDouble(float value) { |
| 2453 switch (std::fpclassify(value)) { | 2486 switch (std::fpclassify(value)) { |
| 2454 case FP_NAN: { | 2487 case FP_NAN: { |
| 2455 // Convert NaNs as the processor would, assuming that FPCR.DN (default | 2488 if (DN()) return kFP64DefaultNaN; |
| 2456 // NaN) is not set: | 2489 |
| 2490 // Convert NaNs as the processor would: |
| 2457 // - The sign is propagated. | 2491 // - The sign is propagated. |
| 2458 // - The payload (mantissa) is transferred entirely, except that the top | 2492 // - The payload (mantissa) is transferred entirely, except that the top |
| 2459 // bit is forced to '1', making the result a quiet NaN. The unused | 2493 // bit is forced to '1', making the result a quiet NaN. The unused |
| 2460 // (low-order) payload bits are set to 0. | 2494 // (low-order) payload bits are set to 0. |
| 2461 uint32_t raw = float_to_rawbits(value); | 2495 uint32_t raw = float_to_rawbits(value); |
| 2462 | 2496 |
| 2463 uint64_t sign = raw >> 31; | 2497 uint64_t sign = raw >> 31; |
| 2464 uint64_t exponent = (1 << 11) - 1; | 2498 uint64_t exponent = (1 << 11) - 1; |
| 2465 uint64_t payload = unsigned_bitextract_64(21, 0, raw); | 2499 uint64_t payload = unsigned_bitextract_64(21, 0, raw); |
| 2466 payload <<= (52 - 23); // The unused low-order bits should be 0. | 2500 payload <<= (52 - 23); // The unused low-order bits should be 0. |
| (...skipping 18 matching lines...) Expand all Loading... |
| 2485 } | 2519 } |
| 2486 | 2520 |
| 2487 | 2521 |
| 2488 float Simulator::FPToFloat(double value, FPRounding round_mode) { | 2522 float Simulator::FPToFloat(double value, FPRounding round_mode) { |
| 2489 // Only the FPTieEven rounding mode is implemented. | 2523 // Only the FPTieEven rounding mode is implemented. |
| 2490 ASSERT(round_mode == FPTieEven); | 2524 ASSERT(round_mode == FPTieEven); |
| 2491 USE(round_mode); | 2525 USE(round_mode); |
| 2492 | 2526 |
| 2493 switch (std::fpclassify(value)) { | 2527 switch (std::fpclassify(value)) { |
| 2494 case FP_NAN: { | 2528 case FP_NAN: { |
| 2495 // Convert NaNs as the processor would, assuming that FPCR.DN (default | 2529 if (DN()) return kFP32DefaultNaN; |
| 2496 // NaN) is not set: | 2530 |
| 2531 // Convert NaNs as the processor would: |
| 2497 // - The sign is propagated. | 2532 // - The sign is propagated. |
| 2498 // - The payload (mantissa) is transferred as much as possible, except | 2533 // - The payload (mantissa) is transferred as much as possible, except |
| 2499 // that the top bit is forced to '1', making the result a quiet NaN. | 2534 // that the top bit is forced to '1', making the result a quiet NaN. |
| 2500 uint64_t raw = double_to_rawbits(value); | 2535 uint64_t raw = double_to_rawbits(value); |
| 2501 | 2536 |
| 2502 uint32_t sign = raw >> 63; | 2537 uint32_t sign = raw >> 63; |
| 2503 uint32_t exponent = (1 << 8) - 1; | 2538 uint32_t exponent = (1 << 8) - 1; |
| 2504 uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw); | 2539 uint32_t payload = unsigned_bitextract_64(50, 52 - 23, raw); |
| 2505 payload |= (1 << 22); // Force a quiet NaN. | 2540 payload |= (1 << 22); // Force a quiet NaN. |
| 2506 | 2541 |
| (...skipping 30 matching lines...) Expand all Loading... |
| 2537 } | 2572 } |
| 2538 | 2573 |
| 2539 | 2574 |
| 2540 void Simulator::VisitFPDataProcessing2Source(Instruction* instr) { | 2575 void Simulator::VisitFPDataProcessing2Source(Instruction* instr) { |
| 2541 AssertSupportedFPCR(); | 2576 AssertSupportedFPCR(); |
| 2542 | 2577 |
| 2543 unsigned fd = instr->Rd(); | 2578 unsigned fd = instr->Rd(); |
| 2544 unsigned fn = instr->Rn(); | 2579 unsigned fn = instr->Rn(); |
| 2545 unsigned fm = instr->Rm(); | 2580 unsigned fm = instr->Rm(); |
| 2546 | 2581 |
| 2582 // Fmaxnm and Fminnm have special NaN handling. |
| 2547 switch (instr->Mask(FPDataProcessing2SourceMask)) { | 2583 switch (instr->Mask(FPDataProcessing2SourceMask)) { |
| 2548 case FADD_s: set_sreg(fd, sreg(fn) + sreg(fm)); break; | 2584 case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); return; |
| 2549 case FADD_d: set_dreg(fd, dreg(fn) + dreg(fm)); break; | 2585 case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); return; |
| 2550 case FSUB_s: set_sreg(fd, sreg(fn) - sreg(fm)); break; | 2586 case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); return; |
| 2551 case FSUB_d: set_dreg(fd, dreg(fn) - dreg(fm)); break; | 2587 case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); return; |
| 2552 case FMUL_s: set_sreg(fd, sreg(fn) * sreg(fm)); break; | 2588 default: |
| 2553 case FMUL_d: set_dreg(fd, dreg(fn) * dreg(fm)); break; | 2589 break; // Fall through. |
| 2554 case FDIV_s: set_sreg(fd, sreg(fn) / sreg(fm)); break; | 2590 } |
| 2555 case FDIV_d: set_dreg(fd, dreg(fn) / dreg(fm)); break; | 2591 |
| 2592 if (FPProcessNaNs(instr)) return; |
| 2593 |
| 2594 switch (instr->Mask(FPDataProcessing2SourceMask)) { |
| 2595 case FADD_s: set_sreg(fd, FPAdd(sreg(fn), sreg(fm))); break; |
| 2596 case FADD_d: set_dreg(fd, FPAdd(dreg(fn), dreg(fm))); break; |
| 2597 case FSUB_s: set_sreg(fd, FPSub(sreg(fn), sreg(fm))); break; |
| 2598 case FSUB_d: set_dreg(fd, FPSub(dreg(fn), dreg(fm))); break; |
| 2599 case FMUL_s: set_sreg(fd, FPMul(sreg(fn), sreg(fm))); break; |
| 2600 case FMUL_d: set_dreg(fd, FPMul(dreg(fn), dreg(fm))); break; |
| 2601 case FDIV_s: set_sreg(fd, FPDiv(sreg(fn), sreg(fm))); break; |
| 2602 case FDIV_d: set_dreg(fd, FPDiv(dreg(fn), dreg(fm))); break; |
| 2556 case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm))); break; | 2603 case FMAX_s: set_sreg(fd, FPMax(sreg(fn), sreg(fm))); break; |
| 2557 case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm))); break; | 2604 case FMAX_d: set_dreg(fd, FPMax(dreg(fn), dreg(fm))); break; |
| 2558 case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm))); break; | 2605 case FMIN_s: set_sreg(fd, FPMin(sreg(fn), sreg(fm))); break; |
| 2559 case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm))); break; | 2606 case FMIN_d: set_dreg(fd, FPMin(dreg(fn), dreg(fm))); break; |
| 2560 case FMAXNM_s: set_sreg(fd, FPMaxNM(sreg(fn), sreg(fm))); break; | 2607 case FMAXNM_s: |
| 2561 case FMAXNM_d: set_dreg(fd, FPMaxNM(dreg(fn), dreg(fm))); break; | 2608 case FMAXNM_d: |
| 2562 case FMINNM_s: set_sreg(fd, FPMinNM(sreg(fn), sreg(fm))); break; | 2609 case FMINNM_s: |
| 2563 case FMINNM_d: set_dreg(fd, FPMinNM(dreg(fn), dreg(fm))); break; | 2610 case FMINNM_d: |
| 2611 // These were handled before the standard FPProcessNaNs() stage. |
| 2612 UNREACHABLE(); |
| 2564 default: UNIMPLEMENTED(); | 2613 default: UNIMPLEMENTED(); |
| 2565 } | 2614 } |
| 2566 } | 2615 } |
| 2567 | 2616 |
| 2568 | 2617 |
| 2569 void Simulator::VisitFPDataProcessing3Source(Instruction* instr) { | 2618 void Simulator::VisitFPDataProcessing3Source(Instruction* instr) { |
| 2570 AssertSupportedFPCR(); | 2619 AssertSupportedFPCR(); |
| 2571 | 2620 |
| 2572 unsigned fd = instr->Rd(); | 2621 unsigned fd = instr->Rd(); |
| 2573 unsigned fn = instr->Rn(); | 2622 unsigned fn = instr->Rn(); |
| 2574 unsigned fm = instr->Rm(); | 2623 unsigned fm = instr->Rm(); |
| 2575 unsigned fa = instr->Ra(); | 2624 unsigned fa = instr->Ra(); |
| 2576 | 2625 |
| 2577 // The C99 (and C++11) fma function performs a fused multiply-accumulate. | 2626 // The C99 (and C++11) fma function performs a fused multiply-accumulate. |
| 2578 switch (instr->Mask(FPDataProcessing3SourceMask)) { | 2627 switch (instr->Mask(FPDataProcessing3SourceMask)) { |
| 2579 // fd = fa +/- (fn * fm) | 2628 // fd = fa +/- (fn * fm) |
| 2580 case FMADD_s: set_sreg(fd, fmaf(sreg(fn), sreg(fm), sreg(fa))); break; | 2629 case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); break; |
| 2581 case FMSUB_s: set_sreg(fd, fmaf(-sreg(fn), sreg(fm), sreg(fa))); break; | 2630 case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); break; |
| 2582 case FMADD_d: set_dreg(fd, fma(dreg(fn), dreg(fm), dreg(fa))); break; | 2631 case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); break; |
| 2583 case FMSUB_d: set_dreg(fd, fma(-dreg(fn), dreg(fm), dreg(fa))); break; | 2632 case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); break; |
| 2584 // Variants of the above where the result is negated. | 2633 // Negated variants of the above. |
| 2585 case FNMADD_s: set_sreg(fd, -fmaf(sreg(fn), sreg(fm), sreg(fa))); break; | 2634 case FNMADD_s: |
| 2586 case FNMSUB_s: set_sreg(fd, -fmaf(-sreg(fn), sreg(fm), sreg(fa))); break; | 2635 set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm))); |
| 2587 case FNMADD_d: set_dreg(fd, -fma(dreg(fn), dreg(fm), dreg(fa))); break; | 2636 break; |
| 2588 case FNMSUB_d: set_dreg(fd, -fma(-dreg(fn), dreg(fm), dreg(fa))); break; | 2637 case FNMSUB_s: |
| 2638 set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm))); |
| 2639 break; |
| 2640 case FNMADD_d: |
| 2641 set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm))); |
| 2642 break; |
| 2643 case FNMSUB_d: |
| 2644 set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm))); |
| 2645 break; |
| 2589 default: UNIMPLEMENTED(); | 2646 default: UNIMPLEMENTED(); |
| 2590 } | 2647 } |
| 2591 } | 2648 } |
| 2592 | 2649 |
| 2593 | 2650 |
| 2594 template <typename T> | 2651 template <typename T> |
| 2652 T Simulator::FPAdd(T op1, T op2) { |
| 2653 // NaNs should be handled elsewhere. |
| 2654 ASSERT(!std::isnan(op1) && !std::isnan(op2)); |
| 2655 |
| 2656 if (isinf(op1) && isinf(op2) && (op1 != op2)) { |
| 2657 // inf + -inf returns the default NaN. |
| 2658 return FPDefaultNaN<T>(); |
| 2659 } else { |
| 2660 // Other cases should be handled by standard arithmetic. |
| 2661 return op1 + op2; |
| 2662 } |
| 2663 } |
| 2664 |
| 2665 |
| 2666 template <typename T> |
| 2667 T Simulator::FPDiv(T op1, T op2) { |
| 2668 // NaNs should be handled elsewhere. |
| 2669 ASSERT(!std::isnan(op1) && !std::isnan(op2)); |
| 2670 |
| 2671 if ((isinf(op1) && isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) { |
| 2672 // inf / inf and 0.0 / 0.0 return the default NaN. |
| 2673 return FPDefaultNaN<T>(); |
| 2674 } else { |
| 2675 // Other cases should be handled by standard arithmetic. |
| 2676 return op1 / op2; |
| 2677 } |
| 2678 } |
| 2679 |
| 2680 |
| 2681 template <typename T> |
| 2595 T Simulator::FPMax(T a, T b) { | 2682 T Simulator::FPMax(T a, T b) { |
| 2596 if (IsSignallingNaN(a)) { | 2683 // NaNs should be handled elsewhere. |
| 2597 return a; | 2684 ASSERT(!std::isnan(a) && !std::isnan(b)); |
| 2598 } else if (IsSignallingNaN(b)) { | |
| 2599 return b; | |
| 2600 } else if (std::isnan(a)) { | |
| 2601 ASSERT(IsQuietNaN(a)); | |
| 2602 return a; | |
| 2603 } else if (std::isnan(b)) { | |
| 2604 ASSERT(IsQuietNaN(b)); | |
| 2605 return b; | |
| 2606 } | |
| 2607 | 2685 |
| 2608 if ((a == 0.0) && (b == 0.0) && | 2686 if ((a == 0.0) && (b == 0.0) && |
| 2609 (copysign(1.0, a) != copysign(1.0, b))) { | 2687 (copysign(1.0, a) != copysign(1.0, b))) { |
| 2610 // a and b are zero, and the sign differs: return +0.0. | 2688 // a and b are zero, and the sign differs: return +0.0. |
| 2611 return 0.0; | 2689 return 0.0; |
| 2612 } else { | 2690 } else { |
| 2613 return (a > b) ? a : b; | 2691 return (a > b) ? a : b; |
| 2614 } | 2692 } |
| 2615 } | 2693 } |
| 2616 | 2694 |
| 2617 | 2695 |
| 2618 template <typename T> | 2696 template <typename T> |
| 2619 T Simulator::FPMaxNM(T a, T b) { | 2697 T Simulator::FPMaxNM(T a, T b) { |
| 2620 if (IsQuietNaN(a) && !IsQuietNaN(b)) { | 2698 if (IsQuietNaN(a) && !IsQuietNaN(b)) { |
| 2621 a = kFP64NegativeInfinity; | 2699 a = kFP64NegativeInfinity; |
| 2622 } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { | 2700 } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { |
| 2623 b = kFP64NegativeInfinity; | 2701 b = kFP64NegativeInfinity; |
| 2624 } | 2702 } |
| 2625 return FPMax(a, b); | 2703 |
| 2704 T result = FPProcessNaNs(a, b); |
| 2705 return std::isnan(result) ? result : FPMax(a, b); |
| 2626 } | 2706 } |
| 2627 | 2707 |
| 2628 template <typename T> | 2708 template <typename T> |
| 2629 T Simulator::FPMin(T a, T b) { | 2709 T Simulator::FPMin(T a, T b) { |
| 2630 if (IsSignallingNaN(a)) { | 2710 // NaNs should be handled elsewhere. |
| 2631 return a; | 2711 ASSERT(!isnan(a) && !isnan(b)); |
| 2632 } else if (IsSignallingNaN(b)) { | |
| 2633 return b; | |
| 2634 } else if (std::isnan(a)) { | |
| 2635 ASSERT(IsQuietNaN(a)); | |
| 2636 return a; | |
| 2637 } else if (std::isnan(b)) { | |
| 2638 ASSERT(IsQuietNaN(b)); | |
| 2639 return b; | |
| 2640 } | |
| 2641 | 2712 |
| 2642 if ((a == 0.0) && (b == 0.0) && | 2713 if ((a == 0.0) && (b == 0.0) && |
| 2643 (copysign(1.0, a) != copysign(1.0, b))) { | 2714 (copysign(1.0, a) != copysign(1.0, b))) { |
| 2644 // a and b are zero, and the sign differs: return -0.0. | 2715 // a and b are zero, and the sign differs: return -0.0. |
| 2645 return -0.0; | 2716 return -0.0; |
| 2646 } else { | 2717 } else { |
| 2647 return (a < b) ? a : b; | 2718 return (a < b) ? a : b; |
| 2648 } | 2719 } |
| 2649 } | 2720 } |
| 2650 | 2721 |
| 2651 | 2722 |
| 2652 template <typename T> | 2723 template <typename T> |
| 2653 T Simulator::FPMinNM(T a, T b) { | 2724 T Simulator::FPMinNM(T a, T b) { |
| 2654 if (IsQuietNaN(a) && !IsQuietNaN(b)) { | 2725 if (IsQuietNaN(a) && !IsQuietNaN(b)) { |
| 2655 a = kFP64PositiveInfinity; | 2726 a = kFP64PositiveInfinity; |
| 2656 } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { | 2727 } else if (!IsQuietNaN(a) && IsQuietNaN(b)) { |
| 2657 b = kFP64PositiveInfinity; | 2728 b = kFP64PositiveInfinity; |
| 2658 } | 2729 } |
| 2659 return FPMin(a, b); | 2730 |
| 2731 T result = FPProcessNaNs(a, b); |
| 2732 return isnan(result) ? result : FPMin(a, b); |
| 2660 } | 2733 } |
| 2661 | 2734 |
| 2662 | 2735 |
| 2736 template <typename T> |
| 2737 T Simulator::FPMul(T op1, T op2) { |
| 2738 // NaNs should be handled elsewhere. |
| 2739 ASSERT(!std::isnan(op1) && !std::isnan(op2)); |
| 2740 |
| 2741 if ((isinf(op1) && (op2 == 0.0)) || (isinf(op2) && (op1 == 0.0))) { |
| 2742 // inf * 0.0 returns the default NaN. |
| 2743 return FPDefaultNaN<T>(); |
| 2744 } else { |
| 2745 // Other cases should be handled by standard arithmetic. |
| 2746 return op1 * op2; |
| 2747 } |
| 2748 } |
| 2749 |
| 2750 |
| 2751 template<typename T> |
| 2752 T Simulator::FPMulAdd(T a, T op1, T op2) { |
| 2753 T result = FPProcessNaNs3(a, op1, op2); |
| 2754 |
| 2755 T sign_a = copysign(1.0, a); |
| 2756 T sign_prod = copysign(1.0, op1) * copysign(1.0, op2); |
| 2757 bool isinf_prod = std::isinf(op1) || std::isinf(op2); |
| 2758 bool operation_generates_nan = |
| 2759 (std::isinf(op1) && (op2 == 0.0)) || // inf * 0.0 |
| 2760 (std::isinf(op2) && (op1 == 0.0)) || // 0.0 * inf |
| 2761 (std::isinf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf |
| 2762 |
| 2763 if (std::isnan(result)) { |
| 2764 // Generated NaNs override quiet NaNs propagated from a. |
| 2765 if (operation_generates_nan && IsQuietNaN(a)) { |
| 2766 return FPDefaultNaN<T>(); |
| 2767 } else { |
| 2768 return result; |
| 2769 } |
| 2770 } |
| 2771 |
| 2772 // If the operation would produce a NaN, return the default NaN. |
| 2773 if (operation_generates_nan) { |
| 2774 return FPDefaultNaN<T>(); |
| 2775 } |
| 2776 |
| 2777 // Work around broken fma implementations for exact zero results: The sign of |
| 2778 // exact 0.0 results is positive unless both a and op1 * op2 are negative. |
| 2779 if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) { |
| 2780 return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0; |
| 2781 } |
| 2782 |
| 2783 result = FusedMultiplyAdd(op1, op2, a); |
| 2784 ASSERT(!std::isnan(result)); |
| 2785 |
| 2786 // Work around broken fma implementations for rounded zero results: If a is |
| 2787 // 0.0, the sign of the result is the sign of op1 * op2 before rounding. |
| 2788 if ((a == 0.0) && (result == 0.0)) { |
| 2789 return copysign(0.0, sign_prod); |
| 2790 } |
| 2791 |
| 2792 return result; |
| 2793 } |
| 2794 |
| 2795 |
| 2796 template <typename T> |
| 2797 T Simulator::FPSqrt(T op) { |
| 2798 if (std::isnan(op)) { |
| 2799 return FPProcessNaN(op); |
| 2800 } else if (op < 0.0) { |
| 2801 return FPDefaultNaN<T>(); |
| 2802 } else { |
| 2803 return std::sqrt(op); |
| 2804 } |
| 2805 } |
| 2806 |
| 2807 |
| 2808 template <typename T> |
| 2809 T Simulator::FPSub(T op1, T op2) { |
| 2810 // NaNs should be handled elsewhere. |
| 2811 ASSERT(!std::isnan(op1) && !std::isnan(op2)); |
| 2812 |
| 2813 if (isinf(op1) && isinf(op2) && (op1 == op2)) { |
| 2814 // inf - inf returns the default NaN. |
| 2815 return FPDefaultNaN<T>(); |
| 2816 } else { |
| 2817 // Other cases should be handled by standard arithmetic. |
| 2818 return op1 - op2; |
| 2819 } |
| 2820 } |
| 2821 |
| 2822 |
| 2823 template <typename T> |
| 2824 T Simulator::FPProcessNaN(T op) { |
| 2825 ASSERT(std::isnan(op)); |
| 2826 return DN() ? FPDefaultNaN<T>() : ToQuietNaN(op); |
| 2827 } |
| 2828 |
| 2829 |
| 2830 template <typename T> |
| 2831 T Simulator::FPProcessNaNs(T op1, T op2) { |
| 2832 if (IsSignallingNaN(op1)) { |
| 2833 return FPProcessNaN(op1); |
| 2834 } else if (IsSignallingNaN(op2)) { |
| 2835 return FPProcessNaN(op2); |
| 2836 } else if (std::isnan(op1)) { |
| 2837 ASSERT(IsQuietNaN(op1)); |
| 2838 return FPProcessNaN(op1); |
| 2839 } else if (std::isnan(op2)) { |
| 2840 ASSERT(IsQuietNaN(op2)); |
| 2841 return FPProcessNaN(op2); |
| 2842 } else { |
| 2843 return 0.0; |
| 2844 } |
| 2845 } |
| 2846 |
| 2847 |
| 2848 template <typename T> |
| 2849 T Simulator::FPProcessNaNs3(T op1, T op2, T op3) { |
| 2850 if (IsSignallingNaN(op1)) { |
| 2851 return FPProcessNaN(op1); |
| 2852 } else if (IsSignallingNaN(op2)) { |
| 2853 return FPProcessNaN(op2); |
| 2854 } else if (IsSignallingNaN(op3)) { |
| 2855 return FPProcessNaN(op3); |
| 2856 } else if (std::isnan(op1)) { |
| 2857 ASSERT(IsQuietNaN(op1)); |
| 2858 return FPProcessNaN(op1); |
| 2859 } else if (std::isnan(op2)) { |
| 2860 ASSERT(IsQuietNaN(op2)); |
| 2861 return FPProcessNaN(op2); |
| 2862 } else if (std::isnan(op3)) { |
| 2863 ASSERT(IsQuietNaN(op3)); |
| 2864 return FPProcessNaN(op3); |
| 2865 } else { |
| 2866 return 0.0; |
| 2867 } |
| 2868 } |
| 2869 |
| 2870 |
| 2871 bool Simulator::FPProcessNaNs(Instruction* instr) { |
| 2872 unsigned fd = instr->Rd(); |
| 2873 unsigned fn = instr->Rn(); |
| 2874 unsigned fm = instr->Rm(); |
| 2875 bool done = false; |
| 2876 |
| 2877 if (instr->Mask(FP64) == FP64) { |
| 2878 double result = FPProcessNaNs(dreg(fn), dreg(fm)); |
| 2879 if (std::isnan(result)) { |
| 2880 set_dreg(fd, result); |
| 2881 done = true; |
| 2882 } |
| 2883 } else { |
| 2884 float result = FPProcessNaNs(sreg(fn), sreg(fm)); |
| 2885 if (std::isnan(result)) { |
| 2886 set_sreg(fd, result); |
| 2887 done = true; |
| 2888 } |
| 2889 } |
| 2890 |
| 2891 return done; |
| 2892 } |
| 2893 |
| 2894 |
| 2663 void Simulator::VisitSystem(Instruction* instr) { | 2895 void Simulator::VisitSystem(Instruction* instr) { |
| 2664 // Some system instructions hijack their Op and Cp fields to represent a | 2896 // Some system instructions hijack their Op and Cp fields to represent a |
| 2665 // range of immediates instead of indicating a different instruction. This | 2897 // range of immediates instead of indicating a different instruction. This |
| 2666 // makes the decoding tricky. | 2898 // makes the decoding tricky. |
| 2667 if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) { | 2899 if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) { |
| 2668 switch (instr->Mask(SystemSysRegMask)) { | 2900 switch (instr->Mask(SystemSysRegMask)) { |
| 2669 case MRS: { | 2901 case MRS: { |
| 2670 switch (instr->ImmSystemRegister()) { | 2902 switch (instr->ImmSystemRegister()) { |
| 2671 case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break; | 2903 case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break; |
| 2672 case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break; | 2904 case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break; |
| (...skipping 756 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3429 default: | 3661 default: |
| 3430 UNIMPLEMENTED(); | 3662 UNIMPLEMENTED(); |
| 3431 } | 3663 } |
| 3432 } | 3664 } |
| 3433 | 3665 |
| 3434 #endif // USE_SIMULATOR | 3666 #endif // USE_SIMULATOR |
| 3435 | 3667 |
| 3436 } } // namespace v8::internal | 3668 } } // namespace v8::internal |
| 3437 | 3669 |
| 3438 #endif // V8_TARGET_ARCH_A64 | 3670 #endif // V8_TARGET_ARCH_A64 |
| OLD | NEW |