OLD | NEW |
---|---|
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 899 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
910 Register tos) | 910 Register tos) |
911 : op_(op), | 911 : op_(op), |
912 value_(value), | 912 value_(value), |
913 reversed_(reversed), | 913 reversed_(reversed), |
914 overwrite_mode_(overwrite_mode), | 914 overwrite_mode_(overwrite_mode), |
915 tos_register_(tos) { | 915 tos_register_(tos) { |
916 set_comment("[ DeferredInlinedSmiOperation"); | 916 set_comment("[ DeferredInlinedSmiOperation"); |
917 } | 917 } |
918 | 918 |
919 virtual void Generate(); | 919 virtual void Generate(); |
920 // This stub makes explicit calls to SaveRegisters(), RestoreRegisters() and | |
921 // Exit(). Currently on ARM SaveRegisters() and RestoreRegisters() are empty | |
922 // methods, it is the responsibility of the deferred code to save and restore | |
923 // registers. | |
924 virtual bool AutoSaveAndRestore() { return false; } | |
925 | |
926 void JumpToNonSmiInput(Condition cond); | |
927 void JumpToAnswerOutOfRange(Condition cond); | |
920 | 928 |
921 private: | 929 private: |
930 void GenerateNonSmiInput(); | |
931 void GenerateAnswerOutOfRange(); | |
932 void WriteNonSmiAnswer(Register answer, | |
933 Register heap_number, | |
934 Register scratch); | |
935 | |
922 Token::Value op_; | 936 Token::Value op_; |
923 int value_; | 937 int value_; |
924 bool reversed_; | 938 bool reversed_; |
925 OverwriteMode overwrite_mode_; | 939 OverwriteMode overwrite_mode_; |
926 Register tos_register_; | 940 Register tos_register_; |
941 Label non_smi_input_; | |
942 Label answer_out_of_range_; | |
927 }; | 943 }; |
928 | 944 |
929 | 945 |
946 // For bit operations we try harder and handle the case where the input is not | |
947 // a Smi but a 32bits integer without calling the generic stub. | |
948 void DeferredInlineSmiOperation::JumpToNonSmiInput(Condition cond) { | |
949 ASSERT(Token::IsBitOp(op_)); | |
950 | |
951 __ b(cond, &non_smi_input_); | |
952 } | |
953 | |
954 | |
955 // For bit operations the result is always 32bits so we handle the case where | |
956 // the result does not fit in a Smi without calling the generic stub. | |
957 void DeferredInlineSmiOperation::JumpToAnswerOutOfRange(Condition cond) { | |
958 ASSERT(Token::IsBitOp(op_)); | |
959 | |
960 if ((op_ == Token::SHR) && !CpuFeatures::IsSupported(VFP3)) { | |
961 // >>> requires an unsigned to double conversion and the non VFP code | |
962 // does not support this conversion. | |
963 __ b(cond, entry_label()); | |
964 } else { | |
965 __ b(cond, &answer_out_of_range_); | |
966 } | |
967 } | |
968 | |
930 | 969 |
931 // On entry the non-constant side of the binary operation is in tos_register_ | 970 // On entry the non-constant side of the binary operation is in tos_register_ |
932 // and the constant smi side is nowhere. The tos_register_ is not used by the | 971 // and the constant smi side is nowhere. The tos_register_ is not used by the |
933 // virtual frame. On exit the answer is in the tos_register_ and the virtual | 972 // virtual frame. On exit the answer is in the tos_register_ and the virtual |
934 // frame is unchanged. | 973 // frame is unchanged. |
935 void DeferredInlineSmiOperation::Generate() { | 974 void DeferredInlineSmiOperation::Generate() { |
975 SaveRegisters(); // Currently does nothing. | |
Erik Corry
2010/09/02 08:26:48
I'm going to remove this, because it does nothing
| |
976 | |
936 VirtualFrame copied_frame(*frame_state()->frame()); | 977 VirtualFrame copied_frame(*frame_state()->frame()); |
937 copied_frame.SpillAll(); | 978 copied_frame.SpillAll(); |
938 | 979 |
939 Register lhs = r1; | 980 Register lhs = r1; |
940 Register rhs = r0; | 981 Register rhs = r0; |
941 switch (op_) { | 982 switch (op_) { |
942 case Token::ADD: { | 983 case Token::ADD: { |
943 // Revert optimistic add. | 984 // Revert optimistic add. |
944 if (reversed_) { | 985 if (reversed_) { |
945 __ sub(r0, tos_register_, Operand(Smi::FromInt(value_))); | 986 __ sub(r0, tos_register_, Operand(Smi::FromInt(value_))); |
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
998 // The generic stub returns its value in r0, but that's not | 1039 // The generic stub returns its value in r0, but that's not |
999 // necessarily what we want. We want whatever the inlined code | 1040 // necessarily what we want. We want whatever the inlined code |
1000 // expected, which is that the answer is in the same register as | 1041 // expected, which is that the answer is in the same register as |
1001 // the operand was. | 1042 // the operand was. |
1002 __ Move(tos_register_, r0); | 1043 __ Move(tos_register_, r0); |
1003 | 1044 |
1004 // The tos register was not in use for the virtual frame that we | 1045 // The tos register was not in use for the virtual frame that we |
1005 // came into this function with, so we can merge back to that frame | 1046 // came into this function with, so we can merge back to that frame |
1006 // without trashing it. | 1047 // without trashing it. |
1007 copied_frame.MergeTo(frame_state()->frame()); | 1048 copied_frame.MergeTo(frame_state()->frame()); |
1049 | |
1050 RestoreRegisters(); // Currently does nothing. | |
Erik Corry
2010/09/02 08:26:48
The registers were restored by the MergeTo above.
| |
1051 Exit(); | |
1052 | |
1053 if (non_smi_input_.is_linked()) { | |
1054 GenerateNonSmiInput(); | |
1055 } | |
1056 | |
1057 if (answer_out_of_range_.is_linked()) { | |
1058 GenerateAnswerOutOfRange(); | |
1059 } | |
1060 } | |
1061 | |
1062 | |
1063 // Convert and write the integer answer into heap_number. | |
1064 void DeferredInlineSmiOperation::WriteNonSmiAnswer(Register answer, | |
1065 Register heap_number, | |
1066 Register scratch) { | |
1067 if (CpuFeatures::IsSupported(VFP3)) { | |
1068 CpuFeatures::Scope scope(VFP3); | |
1069 __ vmov(s0, answer); | |
1070 if (op_ == Token::SHR) { | |
1071 __ vcvt_f64_u32(d0, s0); | |
1072 } else { | |
1073 __ vcvt_f64_s32(d0, s0); | |
1074 } | |
1075 __ sub(scratch, heap_number, Operand(kHeapObjectTag)); | |
1076 __ vstr(d0, scratch, HeapNumber::kValueOffset); | |
1077 } else { | |
1078 WriteInt32ToHeapNumberStub stub(answer, heap_number, scratch); | |
1079 __ CallStub(&stub); | |
1080 } | |
1081 } | |
1082 | |
1083 | |
1084 void DeferredInlineSmiOperation::GenerateNonSmiInput() { | |
1085 // We know the left hand side is not a Smi and the right hand side is an | |
1086 // immediate value (value_) which can be represented as a Smi. We only | |
1087 // handle bit operations. | |
1088 ASSERT(Token::IsBitOp(op_)); | |
1089 | |
1090 if (FLAG_debug_code) { | |
1091 __ Abort("Should not fall through!"); | |
1092 } | |
1093 | |
1094 __ bind(&non_smi_input_); | |
1095 if (FLAG_debug_code) { | |
1096 __ AbortIfSmi(tos_register_); | |
1097 } | |
1098 | |
1099 Register heap_number_map = r7; | |
1100 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
1101 __ ldr(r3, FieldMemOperand(tos_register_, HeapNumber::kMapOffset)); | |
1102 __ cmp(r3, heap_number_map); | |
1103 // Not a number, fall back to the GenericBinaryOpStub. | |
1104 __ b(ne, entry_label()); | |
1105 | |
1106 Register int32 = r2; | |
1107 // Not a 32bits signed int, fall back to the GenericBinaryOpStub. | |
1108 __ ConvertToInt32(tos_register_, int32, r4, r5, entry_label()); | |
1109 | |
1110 // tos_register_ (r0 or r1): Original heap number. | |
1111 // int32: signed 32bits int. | |
1112 | |
1113 Label result_not_a_smi; | |
1114 int shift_value = value_ & 0x1f; | |
1115 switch (op_) { | |
1116 case Token::BIT_OR: __ orr(int32, int32, Operand(value_)); break; | |
1117 case Token::BIT_XOR: __ eor(int32, int32, Operand(value_)); break; | |
1118 case Token::BIT_AND: __ and_(int32, int32, Operand(value_)); break; | |
1119 case Token::SAR: | |
1120 ASSERT(!reversed_); | |
1121 if (shift_value != 0) { | |
1122 __ mov(int32, Operand(int32, ASR, shift_value)); | |
1123 } | |
1124 break; | |
1125 case Token::SHR: | |
1126 ASSERT(!reversed_); | |
1127 if (shift_value != 0) { | |
1128 __ mov(int32, Operand(int32, LSR, shift_value), SetCC); | |
1129 } else { | |
1130 // SHR is special because it is required to produce a positive answer. | |
1131 __ cmp(int32, Operand(0)); | |
1132 } | |
1133 if (CpuFeatures::IsSupported(VFP3)) { | |
1134 __ b(mi, &result_not_a_smi); | |
1135 } else { | |
1136 // Non VFP code cannot convert from unsigned to double, so fall back | |
1137 // to GenericBinaryOpStub. | |
1138 __ b(mi, entry_label()); | |
1139 } | |
1140 break; | |
1141 case Token::SHL: | |
1142 ASSERT(!reversed_); | |
1143 if (shift_value != 0) { | |
1144 __ mov(int32, Operand(int32, LSL, shift_value)); | |
1145 } | |
1146 break; | |
1147 default: UNREACHABLE(); | |
1148 } | |
1149 // Check that the *signed* result fits in a smi. Not necessary for AND, SAR | |
1150 // if the shift is more than 0 or SHR if the shift is more than 1. | |
1151 if (!( (op_ == Token::AND) || | |
1152 ((op_ == Token::SAR) && (shift_value > 0)) || | |
1153 ((op_ == Token::SHR) && (shift_value > 1)))) { | |
1154 __ add(r3, int32, Operand(0x40000000), SetCC); | |
1155 __ b(mi, &result_not_a_smi); | |
1156 } | |
1157 __ mov(tos_register_, Operand(int32, LSL, kSmiTagSize)); | |
1158 Exit(); | |
1159 | |
1160 if (result_not_a_smi.is_linked()) { | |
1161 __ bind(&result_not_a_smi); | |
1162 if (overwrite_mode_ != OVERWRITE_LEFT) { | |
1163 ASSERT((overwrite_mode_ == NO_OVERWRITE) || | |
1164 (overwrite_mode_ == OVERWRITE_RIGHT)); | |
1165 // If the allocation fails, fall back to the GenericBinaryOpStub. | |
Erik Corry
2010/09/02 08:26:48
I've added a comment here that we should really sp
| |
1166 __ AllocateHeapNumber(r4, r5, r6, heap_number_map, entry_label()); | |
1167 // Nothing can go wrong now, so overwrite tos. | |
1168 __ mov(tos_register_, Operand(r4)); | |
1169 } | |
1170 | |
1171 // int32: answer as signed 32bits integer. | |
1172 // tos_register_: Heap number to write the answer into. | |
1173 WriteNonSmiAnswer(int32, tos_register_, r3); | |
1174 | |
1175 Exit(); | |
1176 } | |
1177 } | |
1178 | |
1179 | |
1180 void DeferredInlineSmiOperation::GenerateAnswerOutOfRange() { | |
1181 // The inputs from a bitwise operation were Smis but the result cannot fit | |
1182 // into a Smi, so we store it into a heap number. tos_register_ holds the | |
1183 // result to be converted. | |
1184 ASSERT(Token::IsBitOp(op_)); | |
1185 ASSERT(!reversed_); | |
1186 | |
1187 if (FLAG_debug_code) { | |
1188 __ Abort("Should not fall through!"); | |
1189 } | |
1190 | |
1191 __ bind(&answer_out_of_range_); | |
1192 if (((value_ & 0x1f) == 0) && (op_ == Token::SHR)) { | |
1193 // >>> 0 is a special case where the result is already tagged but wrong | |
1194 // because the Smi is negative. We untag it. | |
1195 __ mov(tos_register_, Operand(tos_register_, ASR, kSmiTagSize)); | |
1196 } | |
1197 | |
1198 // Allocate the result heap number. | |
1199 Register heap_number_map = r7; | |
1200 Register heap_number = r4; | |
1201 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
1202 // If the allocation fails, fall back to the GenericBinaryOpStub. | |
1203 __ AllocateHeapNumber(heap_number, r5, r6, heap_number_map, entry_label()); | |
1204 WriteNonSmiAnswer(tos_register_, heap_number, r3); | |
1205 __ mov(tos_register_, Operand(heap_number)); | |
1206 | |
1207 Exit(); | |
1008 } | 1208 } |
1009 | 1209 |
1010 | 1210 |
1011 static bool PopCountLessThanEqual2(unsigned int x) { | 1211 static bool PopCountLessThanEqual2(unsigned int x) { |
1012 x &= x - 1; | 1212 x &= x - 1; |
1013 return (x & (x - 1)) == 0; | 1213 return (x & (x - 1)) == 0; |
1014 } | 1214 } |
1015 | 1215 |
1016 | 1216 |
1017 // Returns the index of the lowest bit set. | 1217 // Returns the index of the lowest bit set. |
(...skipping 166 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1184 case Token::BIT_AND: { | 1384 case Token::BIT_AND: { |
1185 if (both_sides_are_smi) { | 1385 if (both_sides_are_smi) { |
1186 switch (op) { | 1386 switch (op) { |
1187 case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break; | 1387 case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break; |
1188 case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break; | 1388 case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break; |
1189 case Token::BIT_AND: __ And(tos, tos, Operand(value)); break; | 1389 case Token::BIT_AND: __ And(tos, tos, Operand(value)); break; |
1190 default: UNREACHABLE(); | 1390 default: UNREACHABLE(); |
1191 } | 1391 } |
1192 frame_->EmitPush(tos, TypeInfo::Smi()); | 1392 frame_->EmitPush(tos, TypeInfo::Smi()); |
1193 } else { | 1393 } else { |
1194 DeferredCode* deferred = | 1394 DeferredInlineSmiOperation* deferred = |
1195 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); | 1395 new DeferredInlineSmiOperation(op, int_value, reversed, mode, tos); |
1196 __ tst(tos, Operand(kSmiTagMask)); | 1396 __ tst(tos, Operand(kSmiTagMask)); |
1197 deferred->Branch(ne); | 1397 deferred->JumpToNonSmiInput(ne); |
1198 switch (op) { | 1398 switch (op) { |
1199 case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break; | 1399 case Token::BIT_OR: __ orr(tos, tos, Operand(value)); break; |
1200 case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break; | 1400 case Token::BIT_XOR: __ eor(tos, tos, Operand(value)); break; |
1201 case Token::BIT_AND: __ And(tos, tos, Operand(value)); break; | 1401 case Token::BIT_AND: __ And(tos, tos, Operand(value)); break; |
1202 default: UNREACHABLE(); | 1402 default: UNREACHABLE(); |
1203 } | 1403 } |
1204 deferred->BindExit(); | 1404 deferred->BindExit(); |
1205 TypeInfo result_type = | 1405 TypeInfo result_type = |
1206 (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32(); | 1406 (op == Token::BIT_AND) ? TypeInfo::Smi() : TypeInfo::Integer32(); |
1207 frame_->EmitPush(tos, result_type); | 1407 frame_->EmitPush(tos, result_type); |
(...skipping 25 matching lines...) Expand all Loading... | |
1233 __ mov(tos, Operand(tos, LSL, scratch)); // Shift constant. | 1433 __ mov(tos, Operand(tos, LSL, scratch)); // Shift constant. |
1234 deferred->BindExit(); | 1434 deferred->BindExit(); |
1235 TypeInfo result = TypeInfo::Integer32(); | 1435 TypeInfo result = TypeInfo::Integer32(); |
1236 frame_->EmitPush(tos, result); | 1436 frame_->EmitPush(tos, result); |
1237 break; | 1437 break; |
1238 } | 1438 } |
1239 // Fall through! | 1439 // Fall through! |
1240 case Token::SHR: | 1440 case Token::SHR: |
1241 case Token::SAR: { | 1441 case Token::SAR: { |
1242 ASSERT(!reversed); | 1442 ASSERT(!reversed); |
1243 int shift_amount = int_value & 0x1f; | 1443 int shift_value = int_value & 0x1f; |
1244 TypeInfo result = TypeInfo::Number(); | 1444 TypeInfo result = TypeInfo::Number(); |
1245 | 1445 |
1246 if (op == Token::SHR) { | 1446 if (op == Token::SHR) { |
1247 if (shift_amount > 1) { | 1447 if (shift_value > 1) { |
1248 result = TypeInfo::Smi(); | 1448 result = TypeInfo::Smi(); |
1249 } else if (shift_amount > 0) { | 1449 } else if (shift_value > 0) { |
1250 result = TypeInfo::Integer32(); | 1450 result = TypeInfo::Integer32(); |
1251 } | 1451 } |
1252 } else if (op == Token::SAR) { | 1452 } else if (op == Token::SAR) { |
1253 if (shift_amount > 0) { | 1453 if (shift_value > 0) { |
1254 result = TypeInfo::Smi(); | 1454 result = TypeInfo::Smi(); |
1255 } else { | 1455 } else { |
1256 result = TypeInfo::Integer32(); | 1456 result = TypeInfo::Integer32(); |
1257 } | 1457 } |
1258 } else { | 1458 } else { |
1259 ASSERT(op == Token::SHL); | 1459 ASSERT(op == Token::SHL); |
1260 result = TypeInfo::Integer32(); | 1460 result = TypeInfo::Integer32(); |
1261 } | 1461 } |
1262 | 1462 |
1263 Register scratch = VirtualFrame::scratch0(); | 1463 DeferredInlineSmiOperation* deferred = |
1264 Register scratch2 = VirtualFrame::scratch1(); | |
1265 int shift_value = int_value & 0x1f; // least significant 5 bits | |
1266 DeferredCode* deferred = | |
1267 new DeferredInlineSmiOperation(op, shift_value, false, mode, tos); | 1464 new DeferredInlineSmiOperation(op, shift_value, false, mode, tos); |
1268 uint32_t problematic_mask = kSmiTagMask; | 1465 if (!both_sides_are_smi) { |
1269 // For unsigned shift by zero all negative smis are problematic. | 1466 __ tst(tos, Operand(kSmiTagMask)); |
1270 bool skip_smi_test = both_sides_are_smi; | 1467 deferred->JumpToNonSmiInput(ne); |
1271 if (shift_value == 0 && op == Token::SHR) { | |
1272 problematic_mask |= 0x80000000; | |
1273 skip_smi_test = false; | |
1274 } | |
1275 if (!skip_smi_test) { | |
1276 __ tst(tos, Operand(problematic_mask)); | |
1277 deferred->Branch(ne); // Go slow for problematic input. | |
1278 } | 1468 } |
1279 switch (op) { | 1469 switch (op) { |
1280 case Token::SHL: { | 1470 case Token::SHL: { |
1281 if (shift_value != 0) { | 1471 if (shift_value != 0) { |
1472 Register scratch = VirtualFrame::scratch0(); | |
1282 int adjusted_shift = shift_value - kSmiTagSize; | 1473 int adjusted_shift = shift_value - kSmiTagSize; |
1283 ASSERT(adjusted_shift >= 0); | 1474 ASSERT(adjusted_shift >= 0); |
1475 | |
1284 if (adjusted_shift != 0) { | 1476 if (adjusted_shift != 0) { |
1285 __ mov(scratch, Operand(tos, LSL, adjusted_shift)); | 1477 __ mov(tos, Operand(tos, LSL, adjusted_shift)); |
1286 // Check that the *signed* result fits in a smi. | |
1287 __ add(scratch2, scratch, Operand(0x40000000), SetCC); | |
1288 deferred->Branch(mi); | |
1289 __ mov(tos, Operand(scratch, LSL, kSmiTagSize)); | |
1290 } else { | |
1291 // Check that the *signed* result fits in a smi. | |
1292 __ add(scratch2, tos, Operand(0x40000000), SetCC); | |
1293 deferred->Branch(mi); | |
1294 __ mov(tos, Operand(tos, LSL, kSmiTagSize)); | |
1295 } | 1478 } |
1479 // Check that the *signed* result fits in a smi. | |
1480 __ add(scratch, tos, Operand(0x40000000), SetCC); | |
1481 deferred->JumpToAnswerOutOfRange(mi); | |
1482 __ mov(tos, Operand(tos, LSL, kSmiTagSize)); | |
1296 } | 1483 } |
1297 break; | 1484 break; |
1298 } | 1485 } |
1299 case Token::SHR: { | 1486 case Token::SHR: { |
1300 if (shift_value != 0) { | 1487 if (shift_value != 0) { |
1488 Register scratch = VirtualFrame::scratch0(); | |
1301 __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Remove tag. | 1489 __ mov(scratch, Operand(tos, ASR, kSmiTagSize)); // Remove tag. |
1302 // LSR by immediate 0 means shifting 32 bits. | 1490 __ mov(tos, Operand(scratch, LSR, shift_value)); |
1303 __ mov(scratch, Operand(scratch, LSR, shift_value)); | |
1304 if (shift_value == 1) { | 1491 if (shift_value == 1) { |
1305 // check that the *unsigned* result fits in a smi | 1492 // Check that the *unsigned* result fits in a smi. |
1306 // neither of the two high-order bits can be set: | 1493 // Neither of the two high-order bits can be set: |
1307 // - 0x80000000: high bit would be lost when smi tagging | 1494 // - 0x80000000: high bit would be lost when smi tagging |
1308 // - 0x40000000: this number would convert to negative when | 1495 // - 0x40000000: this number would convert to negative when Smi |
1309 // smi tagging these two cases can only happen with shifts | 1496 // tagging. |
1310 // by 0 or 1 when handed a valid smi | 1497 // These two cases can only happen with shifts by 0 or 1 when |
1311 __ tst(scratch, Operand(0xc0000000)); | 1498 // handed a valid smi. |
1312 deferred->Branch(ne); | 1499 __ tst(tos, Operand(0xc0000000)); |
1313 } else { | 1500 if (!CpuFeatures::IsSupported(VFP3)) { |
1314 ASSERT(shift_value >= 2); | 1501 // If the unsigned result does not fit in a Smi, we require an |
1315 result = TypeInfo::Smi(); // SHR by at least 2 gives a Smi. | 1502 // unsigned to double conversion. Without VFP V8 has to fall |
1503 // back to the runtime. The deferred code will expect tos | |
1504 // to hold the original Smi to be shifted. | |
1505 __ mov(tos, Operand(scratch, LSL, kSmiTagSize), LeaveCC, ne); | |
1506 } | |
1507 deferred->JumpToAnswerOutOfRange(ne); | |
1316 } | 1508 } |
1317 __ mov(tos, Operand(scratch, LSL, kSmiTagSize)); | 1509 __ mov(tos, Operand(tos, LSL, kSmiTagSize)); |
1510 } else { | |
1511 __ cmp(tos, Operand(0)); | |
1512 deferred->JumpToAnswerOutOfRange(mi); | |
1318 } | 1513 } |
1319 break; | 1514 break; |
1320 } | 1515 } |
1321 case Token::SAR: { | 1516 case Token::SAR: { |
1322 // In the ARM instructions set, ASR by immediate 0 means shifting 32 | |
1323 // bits. | |
1324 if (shift_value != 0) { | 1517 if (shift_value != 0) { |
1325 // Do the shift and the tag removal in one operation. If the shift | 1518 // Do the shift and the tag removal in one operation. If the shift |
1326 // is 31 bits (the highest possible value) then we emit the | 1519 // is 31 bits (the highest possible value) then we emit the |
1327 // instruction as a shift by 0 which means shift arithmetically by | 1520 // instruction as a shift by 0 which in the ARM ISA means shift |
1328 // 32. | 1521 // arithmetically by 32. |
1329 __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f)); | 1522 __ mov(tos, Operand(tos, ASR, (kSmiTagSize + shift_value) & 0x1f)); |
1330 // Put tag back. | |
1331 __ mov(tos, Operand(tos, LSL, kSmiTagSize)); | 1523 __ mov(tos, Operand(tos, LSL, kSmiTagSize)); |
1332 // SAR by at least 1 gives a Smi. | |
1333 result = TypeInfo::Smi(); | |
1334 } | 1524 } |
1335 break; | 1525 break; |
1336 } | 1526 } |
1337 default: UNREACHABLE(); | 1527 default: UNREACHABLE(); |
1338 } | 1528 } |
1339 deferred->BindExit(); | 1529 deferred->BindExit(); |
1340 frame_->EmitPush(tos, result); | 1530 frame_->EmitPush(tos, result); |
1341 break; | 1531 break; |
1342 } | 1532 } |
1343 | 1533 |
(...skipping 5761 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
7105 BinaryOpIC::GetName(runtime_operands_type_)); | 7295 BinaryOpIC::GetName(runtime_operands_type_)); |
7106 return name_; | 7296 return name_; |
7107 } | 7297 } |
7108 | 7298 |
7109 | 7299 |
7110 #undef __ | 7300 #undef __ |
7111 | 7301 |
7112 } } // namespace v8::internal | 7302 } } // namespace v8::internal |
7113 | 7303 |
7114 #endif // V8_TARGET_ARCH_ARM | 7304 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |