Chromium Code Reviews

Unified Diff: src/x64/macro-assembler-x64.cc

Issue 21014003: Optionally use 31-bits SMI value for 64-bit system (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Addressed danno's comments. Created 7 years, 4 months ago.
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 966 matching lines...)
void MacroAssembler::Set(const Operand& dst, int64_t x) {
  if (is_int32(x)) {
    movq(dst, Immediate(static_cast<int32_t>(x)));
  } else {
    Set(kScratchRegister, x);
    movq(dst, kScratchRegister);
  }
}


+// ----------------------------------------------------------------------------
+// Smi tagging, untagging and tag detection.
+
+static inline Immediate SmiToImmediate(Smi* src) {

    danno 2013/08/19 21:47:44  Why not add a constructor to the Immediate constru…
    haitao.feng 2013/08/20 15:09:30  This is what I have done originally at https://cod…
    danno 2013/08/20 15:58:14  Ooops! I'm sorry, I apologize, that's embarrassing…
    haitao.feng 2013/08/21 13:18:03  I will use the constructor with an assertion in th…

+  if (SmiValuesAre32Bits()) {
+    UNREACHABLE();
+    return Immediate(2);
+  } else {
+    return Immediate(static_cast<int32_t>(reinterpret_cast<intptr_t>(src)));
+  }
+}


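Aside: why a 31-bit smi can be emitted as an imm32 at all. With kSmiShift == 1 the
tagged word is the value shifted left by one and sign-extended, so the entire
tagged pattern lives in the low 32 bits, and truncating the reinterpret-cast Smi*
to int32_t is lossless. A minimal standalone sketch (mine, not part of the patch;
TagSmi31 is a made-up name):

  // Assumes the 31-bit layout used by this patch: payload in bits 1..31,
  // tag bit 0 == 0 for smis.
  #include <cassert>
  #include <cstdint>

  int32_t TagSmi31(int32_t value) {
    assert(value >= -(1 << 30) && value <= (1 << 30) - 1);  // fits in 31 bits
    return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  }

  int main() {
    assert(TagSmi31(42) == 84);   // tag bit is 0, payload shifted up by one
    assert(TagSmi31(-1) == -2);   // whole tagged word fits in an imm32
    return 0;
  }

In the 32-bit layout the payload sits in the upper half of the word, which is why
the SmiValuesAre32Bits() arm is UNREACHABLE() here: a tagged smi generally does
not fit in a 32-bit immediate.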
bool MacroAssembler::IsUnsafeInt(const int x) {
  static const int kMaxBits = 17;
  return !is_intn(x, kMaxBits);
}


 void MacroAssembler::SafeMove(Register dst, Smi* src) {
   ASSERT(!dst.is(kScratchRegister));
-  ASSERT(kSmiValueSize == 32);  // JIT cookie can be converted to Smi.
-  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
-    Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
-    Move(kScratchRegister, Smi::FromInt(jit_cookie()));
-    xor_(dst, kScratchRegister);
+  if (SmiValuesAre32Bits()) {
+    if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+      Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
+      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+      xor_(dst, kScratchRegister);
+    } else {
+      Move(dst, src);
+    }
   } else {
-    Move(dst, src);
+    ASSERT(SmiValuesAre31Bits());
+    if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+      movq(dst, Immediate(static_cast<int>(reinterpret_cast<intptr_t>(src)) ^
+                          jit_cookie()));
+      movq(kScratchRegister, Immediate(jit_cookie()));
+      xor_(dst, kScratchRegister);
+    } else {
+      Move(dst, src);
+    }
   }
 }


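The jit_cookie() dance above exists so that attacker-chosen smi constants are
never emitted verbatim into executable memory (JIT spraying). A rough sketch of
the idea (mine; EmitSafeImmediate is a hypothetical stand-in for the emitted
sequence):

  #include <cassert>
  #include <cstdint>

  // At code-generation time only (untrusted ^ cookie) is baked into the
  // instruction stream; the generated code re-XORs with the cookie at run time.
  uint32_t EmitSafeImmediate(uint32_t untrusted, uint32_t cookie) {
    uint32_t masked = untrusted ^ cookie;  // what appears in the code
    return masked ^ cookie;                // what the xor_ instruction restores
  }

  int main() {
    assert(EmitSafeImmediate(0xdeadbeef, 0x12345678) == 0xdeadbeef);
    return 0;
  }

The 31-bit arm XORs the raw tagged bits with the cookie, and that result need not
be a valid smi, which is presumably why it uses raw movq/push of an Immediate
rather than Move/Push of a Smi.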
 void MacroAssembler::SafePush(Smi* src) {
-  ASSERT(kSmiValueSize == 32);  // JIT cookie can be converted to Smi.
-  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
-    Push(Smi::FromInt(src->value() ^ jit_cookie()));
-    Move(kScratchRegister, Smi::FromInt(jit_cookie()));
-    xor_(Operand(rsp, 0), kScratchRegister);
+  if (SmiValuesAre32Bits()) {
+    if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+      Push(Smi::FromInt(src->value() ^ jit_cookie()));
+      Move(kScratchRegister, Smi::FromInt(jit_cookie()));
+      xor_(Operand(rsp, 0), kScratchRegister);
+    } else {
+      Push(src);
+    }
   } else {
-    Push(src);
+    ASSERT(SmiValuesAre31Bits());
+    if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+      push(Immediate(static_cast<int>(reinterpret_cast<intptr_t>(src)) ^
+                     jit_cookie()));
+      movq(kScratchRegister, Immediate(jit_cookie()));
+      xor_(Operand(rsp, 0), kScratchRegister);
+    } else {
+      Push(src);
+    }
   }
 }


-// ----------------------------------------------------------------------------
-// Smi tagging, untagging and tag detection.
-
 Register MacroAssembler::GetSmiConstant(Smi* source) {
   int value = source->value();
   if (value == 0) {
     xorl(kScratchRegister, kScratchRegister);
     return kScratchRegister;
   }
   if (value == 1) {
     return kSmiConstantRegister;
   }
   LoadSmiConstant(kScratchRegister, source);
(...skipping 59 matching lines...)
     neg(dst);
   }
 }


 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
   STATIC_ASSERT(kSmiTag == 0);
   if (!dst.is(src)) {
     movl(dst, src);
   }
-  shl(dst, Immediate(kSmiShift));
+  if (SmiValuesAre32Bits()) {
+    shl(dst, Immediate(kSmiShift));
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    shll(dst, Immediate(kSmiShift));
+    movsxlq(dst, dst);
+  }
 }


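For reference, the two tagging schemes side by side (my illustration, not patch
code): 32-bit smis shift the payload into the upper word, while 31-bit smis shift
by one within the low word and then sign-extend so the full 64-bit register stays
canonical; that is what the shll + movsxlq pair above implements.

  #include <cassert>
  #include <cstdint>

  int64_t Tag32(int32_t v) {           // shl(dst, Immediate(32))
    return static_cast<int64_t>(v) * (1LL << 32);
  }
  int64_t Tag31(int32_t v) {           // shll(dst, Immediate(1)); movsxlq(dst, dst)
    int32_t low = static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
    return static_cast<int64_t>(low);  // sign extension
  }

  int main() {
    assert(Tag32(-1) == -(1LL << 32));
    assert(Tag31(-1) == -2);           // upper 33 bits are copies of the sign
    return 0;
  }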
 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
   if (emit_debug_code()) {
     testb(dst, Immediate(0x01));
     Label ok;
     j(zero, &ok, Label::kNear);
     if (allow_stub_calls()) {
       Abort(kInteger32ToSmiFieldWritingToNonSmiLocation);
     } else {
       int3();
     }
     bind(&ok);
   }
-  ASSERT(kSmiShift % kBitsPerByte == 0);
-  movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+  if (SmiValuesAre32Bits()) {
+    ASSERT(kSmiShift % kBitsPerByte == 0);
+    movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    Integer32ToSmi(kScratchRegister, src);
+    movq(dst, kScratchRegister);
+  }
 }


 void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
                                                 Register src,
                                                 int constant) {
   if (dst.is(src)) {
     addl(dst, Immediate(constant));
   } else {
     leal(dst, Operand(src, constant));
   }
-  shl(dst, Immediate(kSmiShift));
+  Integer32ToSmi(dst, dst);
 }


 void MacroAssembler::SmiToInteger32(Register dst, Register src) {
   STATIC_ASSERT(kSmiTag == 0);
   if (!dst.is(src)) {
     movq(dst, src);
   }
-  shr(dst, Immediate(kSmiShift));
+  if (SmiValuesAre32Bits()) {
+    shr(dst, Immediate(kSmiShift));
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    sarl(dst, Immediate(kSmiShift));
+  }
 }


 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
-  movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+  if (SmiValuesAre32Bits()) {
+    movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    movl(dst, src);
+    sarl(dst, Immediate(kSmiShift));
+  }
 }


 void MacroAssembler::SmiToInteger64(Register dst, Register src) {
   STATIC_ASSERT(kSmiTag == 0);
   if (!dst.is(src)) {
     movq(dst, src);
   }
   sar(dst, Immediate(kSmiShift));
 }


 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
-  movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+  if (SmiValuesAre32Bits()) {
+    movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    movq(dst, src);
+    SmiToInteger64(dst, dst);
+  }
 }


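Untagging mirrors the tagging: the 32-bit layout can use a plain shift by 32
(logical or arithmetic, since the low half of a tagged smi is all zero), while
the 31-bit layout must use an arithmetic 32-bit shift to preserve the sign,
hence the sarl above. A sketch (mine):

  #include <cassert>
  #include <cstdint>

  int32_t Untag32(int64_t w) { return static_cast<int32_t>(w >> 32); }  // shr/sar
  int32_t Untag31(int64_t w) { return static_cast<int32_t>(w) >> 1; }   // sarl

  int main() {
    assert(Untag32(-(1LL << 32)) == -1);
    assert(Untag31(-2) == -1);  // a logical shift would give a huge positive value
    return 0;
  }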
 void MacroAssembler::SmiTest(Register src) {
   AssertSmi(src);
-  testq(src, src);
+  if (SmiValuesAre32Bits()) {
+    testq(src, src);
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    testl(src, src);
+  }
 }


 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
   AssertSmi(smi1);
   AssertSmi(smi2);
-  cmpq(smi1, smi2);
+  if (SmiValuesAre32Bits()) {
+    cmpq(smi1, smi2);
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    cmpl(smi1, smi2);
+  }
 }


 void MacroAssembler::SmiCompare(Register dst, Smi* src) {
   AssertSmi(dst);
   Cmp(dst, src);
 }


 void MacroAssembler::Cmp(Register dst, Smi* src) {
   ASSERT(!dst.is(kScratchRegister));
   if (src->value() == 0) {
-    testq(dst, dst);
+    if (SmiValuesAre32Bits()) {
+      testq(dst, dst);
+    } else {
+      ASSERT(SmiValuesAre31Bits());
+      testl(dst, dst);
+    }
   } else {
     Register constant_reg = GetSmiConstant(src);
-    cmpq(dst, constant_reg);
+    if (SmiValuesAre32Bits()) {
+      cmpq(dst, constant_reg);
+    } else {
+      ASSERT(SmiValuesAre31Bits());
+      cmpl(dst, constant_reg);
+    }
   }
 }


 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
   AssertSmi(dst);
   AssertSmi(src);
-  cmpq(dst, src);
+  if (SmiValuesAre32Bits()) {
+    cmpq(dst, src);
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    cmpl(dst, src);
+  }
 }


 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
   AssertSmi(dst);
   AssertSmi(src);
-  cmpq(dst, src);
+  if (SmiValuesAre32Bits()) {
+    cmpq(dst, src);
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    cmpl(dst, src);
+  }
 }


 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
   AssertSmi(dst);
-  cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+  if (SmiValuesAre32Bits()) {
+    cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    cmpl(dst, SmiToImmediate(src));
+  }
 }


 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
-  // The Operand cannot use the smi register.
-  Register smi_reg = GetSmiConstant(src);
-  ASSERT(!dst.AddressUsesRegister(smi_reg));
-  cmpq(dst, smi_reg);
+  if (SmiValuesAre32Bits()) {
+    // The Operand cannot use the smi register.
+    Register smi_reg = GetSmiConstant(src);
+    ASSERT(!dst.AddressUsesRegister(smi_reg));
+    cmpq(dst, smi_reg);
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    cmpl(dst, SmiToImmediate(src));
+  }
 }


 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
-  cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+  if (SmiValuesAre32Bits()) {
+    cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    SmiToInteger32(kScratchRegister, dst);
+    cmpl(kScratchRegister, src);
+  }
 }


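The Operand(dst, kSmiShift / kBitsPerByte) pattern above is a small trick specific
to the 32-bit layout: the payload occupies the upper four bytes of the 8-byte
field, so on little-endian x64 the untagged value can be read or compared with a
4-byte access at byte offset 4, with no shifting. Sketch (mine):

  #include <cassert>
  #include <cstdint>
  #include <cstring>

  int main() {
    int64_t field = static_cast<int64_t>(-7) * (1LL << 32);  // 32-bit-layout smi
    int32_t payload;
    std::memcpy(&payload, reinterpret_cast<char*>(&field) + 4, sizeof payload);
    assert(payload == -7);  // direct 4-byte read of the untagged value
    return 0;
  }

With 31-bit smis the payload is not byte-aligned (it starts at bit 1), so the new
code loads the whole word and shifts instead.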
 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                            Register src,
                                                            int power) {
   ASSERT(power >= 0);
   ASSERT(power < 64);
   if (power == 0) {
     SmiToInteger64(dst, src);
     return;
   }
   if (!dst.is(src)) {
     movq(dst, src);
   }
   if (power < kSmiShift) {
     sar(dst, Immediate(kSmiShift - power));
   } else if (power > kSmiShift) {
     shl(dst, Immediate(power - kSmiShift));
   }
 }


 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                          Register src,
                                                          int power) {
   ASSERT((0 <= power) && (power < 32));
   if (dst.is(src)) {
-    shr(dst, Immediate(power + kSmiShift));
+    if (SmiValuesAre32Bits()) {
+      shr(dst, Immediate(power + kSmiShift));
+    } else {
+      ASSERT(SmiValuesAre31Bits());
+      shrl(dst, Immediate(power + kSmiShift));
+    }
   } else {
     UNIMPLEMENTED();  // Not used.
   }
 }


 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
                                  Label* on_not_smis,
                                  Label::Distance near_jump) {
   if (dst.is(src1) || dst.is(src2)) {
(...skipping 33 matching lines...)
   testb(kScratchRegister, Immediate(3));
   return zero;
 }


 Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
   if (first.is(second)) {
     return CheckSmi(first);
   }
   STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
-  leal(kScratchRegister, Operand(first, second, times_1, 0));
-  testb(kScratchRegister, Immediate(0x03));
+  if (SmiValuesAre32Bits()) {
+    leal(kScratchRegister, Operand(first, second, times_1, 0));
+    testb(kScratchRegister, Immediate(0x03));
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    movl(kScratchRegister, first);
+    orl(kScratchRegister, second);
+    testb(kScratchRegister, Immediate(kSmiTagMask));
+  }
   return zero;
 }


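The 31-bit arm of CheckBothSmi switches from adding the two words to OR-ing them,
and the difference matters. With 32-bit smis the low 32 bits of a smi are all
zero, so the low two bits of first + second enumerate the tag combinations (00
smi+smi, 01 smi+ptr, 10 ptr+ptr) and the testb against 0x03 catches everything.
With 31-bit smis, bit 1 carries payload and addition can carry into it, so the
new code ORs the words and tests only the tag bit; OR cannot carry, so bit 0 of
a | b is set exactly when either operand is a heap pointer. Sketch (mine):

  #include <cassert>
  #include <cstdint>

  bool BothSmi31(uint64_t a, uint64_t b) { return ((a | b) & 1) == 0; }

  int main() {
    uint64_t smi = 42u << 1;       // tag bit 0 == 0
    uint64_t ptr = 0x10000001;     // heap object: low two bits == 01
    assert(BothSmi31(smi, smi));
    assert(!BothSmi31(smi, ptr));
    assert(!BothSmi31(ptr, ptr));  // a bit-0 test of a + b would miss this case
    return 0;
  }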
 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
                                                   Register second) {
   if (first.is(second)) {
     return CheckNonNegativeSmi(first);
   }
   movq(kScratchRegister, first);
(...skipping 19 matching lines...)
     andl(scratch, second);
   }
   testb(scratch, Immediate(kSmiTagMask));
   return zero;
 }


 Condition MacroAssembler::CheckIsMinSmi(Register src) {
   ASSERT(!src.is(kScratchRegister));
   // If we overflow by subtracting one, it's the minimal smi value.
-  cmpq(src, kSmiConstantRegister);
+  if (SmiValuesAre32Bits()) {
+    cmpq(src, kSmiConstantRegister);
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    cmpl(src, kSmiConstantRegister);
+  }
   return overflow;
 }


 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
-  // A 32-bit integer value can always be converted to a smi.
-  return always;
+  if (SmiValuesAre32Bits()) {
+    // A 32-bit integer value can always be converted to a smi.
+    return always;
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    cmpl(src, Immediate(0xc0000000));
+    return positive;
+  }
 }


 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
-  // An unsigned 32-bit integer value is valid as long as the high bit
-  // is not set.
-  testl(src, src);
-  return positive;
+  if (SmiValuesAre32Bits()) {
+    // An unsigned 32-bit integer value is valid as long as the high bit
+    // is not set.
+    testl(src, src);
+    return positive;
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    testl(src, Immediate(0xc0000000));
+    return zero;
+  }
 }


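Both new range checks compress "fits in 31 bits" into a single instruction.
Signed: cmpl(src, Immediate(0xc0000000)) computes src - (-2^30), i.e. src + 2^30,
and that sum keeps the sign flag clear exactly for src in [-2^30, 2^30 - 1],
hence the positive condition. Unsigned: a value fits iff the top two bits are
both zero, hence testl against 0xc0000000 with condition zero. Sketch (mine):

  #include <cassert>
  #include <cstdint>

  bool ValidSmi31(int32_t v) {    // mirrors cmpl(src, Immediate(0xc0000000))
    // unsigned add reproduces the 32-bit wraparound the flags are based on
    return static_cast<int32_t>(static_cast<uint32_t>(v) + (1u << 30)) >= 0;
  }
  bool ValidUSmi31(uint32_t v) {  // mirrors testl(src, Immediate(0xc0000000))
    return (v & 0xc0000000u) == 0;
  }

  int main() {
    assert(ValidSmi31((1 << 30) - 1) && !ValidSmi31(1 << 30));
    assert(ValidSmi31(-(1 << 30)) && !ValidSmi31(-(1 << 30) - 1));
    assert(ValidUSmi31(0x3fffffffu) && !ValidUSmi31(0x40000000u));
    return 0;
  }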
 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
   if (dst.is(src)) {
     andl(dst, Immediate(kSmiTagMask));
   } else {
     movl(dst, Immediate(kSmiTagMask));
     andl(dst, src);
   }
(...skipping 12 matching lines...)


 void MacroAssembler::JumpIfNotValidSmiValue(Register src,
                                             Label* on_invalid,
                                             Label::Distance near_jump) {
   Condition is_valid = CheckInteger32ValidSmiValue(src);
   j(NegateCondition(is_valid), on_invalid, near_jump);
 }


+void MacroAssembler::JumpIfValidSmiValue(Register src,
+                                         Label* on_valid,
+                                         Label::Distance near_jump) {
+  Condition is_valid = CheckInteger32ValidSmiValue(src);
+  j(is_valid, on_valid, near_jump);
+}
+
+
 void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
                                                 Label* on_invalid,
                                                 Label::Distance near_jump) {
   Condition is_valid = CheckUInteger32ValidSmiValue(src);
   j(NegateCondition(is_valid), on_invalid, near_jump);
 }


+void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
+                                             Label* on_valid,
+                                             Label::Distance near_jump) {
+  Condition is_valid = CheckUInteger32ValidSmiValue(src);
+  j(is_valid, on_valid, near_jump);
+}
+
+
 void MacroAssembler::JumpIfSmi(Register src,
                                Label* on_smi,
                                Label::Distance near_jump) {
   Condition smi = CheckSmi(src);
   j(smi, on_smi, near_jump);
 }


 void MacroAssembler::JumpIfNotSmi(Register src,
                                   Label* on_not_smi,
(...skipping 45 matching lines...)
                                         Label::Distance near_jump) {
   // Does not assume that src is a smi.
   ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
   STATIC_ASSERT(kSmiTag == 0);
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src.is(kScratchRegister));

   JumpIfNotSmi(src, on_not_smi_result, near_jump);
   Register tmp = (dst.is(src) ? kScratchRegister : dst);
   LoadSmiConstant(tmp, constant);
-  addq(tmp, src);
+  if (SmiValuesAre32Bits()) {
+    addq(tmp, src);
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    addl(tmp, src);
+  }
   j(overflow, on_not_smi_result, near_jump);
   if (dst.is(src)) {
     movq(dst, tmp);
   }
+  if (SmiValuesAre31Bits()) {
+    movsxlq(dst, dst);
+  }
 }


 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
       movq(dst, src);
     }
     return;
   } else if (dst.is(src)) {
(...skipping 34 matching lines...)
       LoadSmiConstant(dst, constant);
       addq(dst, src);
       return;
     }
   }
 }


 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
   if (constant->value() != 0) {
-    addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
+    if (SmiValuesAre32Bits()) {
+      addl(Operand(dst, kSmiShift / kBitsPerByte),
+           Immediate(constant->value()));
+    } else {
+      ASSERT(SmiValuesAre31Bits());
+      addq(dst, SmiToImmediate(constant));
+    }
   }
 }


 void MacroAssembler::SmiAddConstant(Register dst,
                                     Register src,
                                     Smi* constant,
-                                    Label* on_not_smi_result,
-                                    Label::Distance near_jump) {
+                                    const SmiInstructionWrapper& wrapper) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
       movq(dst, src);
     }
-  } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-

    danno 2013/08/19 21:47:44  Here and everywhere else for the Add/Sub operation…
    haitao.feng 2013/08/20 15:09:30  Thanks for the recommendation. I will use that tri…

-    LoadSmiConstant(kScratchRegister, constant);
-    addq(kScratchRegister, src);
-    j(overflow, on_not_smi_result, near_jump);
-    movq(dst, kScratchRegister);
+  } else if (SmiValuesAre32Bits()) {
+    if (dst.is(src)) {
+      if (wrapper.NeedsKeepSourceOperandsIntact()) {
+        ASSERT(!dst.is(kScratchRegister));
+        ASSERT(wrapper.NeedsCheckOverflow());
+        LoadSmiConstant(kScratchRegister, constant);
+        addq(kScratchRegister, src);
+        wrapper.BailoutIf(overflow);
+        movq(dst, kScratchRegister);
+      } else {
+        UNIMPLEMENTED();  // Not used.
+      }
+    } else {
+      ASSERT(wrapper.NeedsCheckOverflow());
+      LoadSmiConstant(dst, constant);
+      addq(dst, src);
+      if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+    }
   } else {
-    LoadSmiConstant(dst, constant);
-    addq(dst, src);
-    j(overflow, on_not_smi_result, near_jump);
+    ASSERT(SmiValuesAre31Bits());
+    if (dst.is(src)) {
+      if (wrapper.NeedsKeepSourceOperandsIntact()) {
+        ASSERT(!dst.is(kScratchRegister));
+        ASSERT(wrapper.NeedsCheckOverflow());
+        LoadSmiConstant(kScratchRegister, constant);
+        addl(kScratchRegister, src);
+        wrapper.BailoutIf(overflow);
+        movsxlq(dst, kScratchRegister);
+      } else {
+        addl(dst, SmiToImmediate(constant));
+        if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+        movsxlq(dst, dst);
+      }
+    } else {
+      movl(dst, src);
+      addl(dst, SmiToImmediate(constant));
+      if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+      movsxlq(dst, dst);
+    }
   }
 }


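SmiInstructionWrapper is introduced elsewhere in this patch set, so only its
contract is visible here: it knows whether the caller wants an overflow check,
whether the source operands must survive a bailout, and where to jump when the
result is not a smi. A hypothetical minimal shape, inferred purely from the calls
in this file (the real class may well differ):

  // Hypothetical reconstruction for illustration only; not the patch's code.
  class SmiInstructionWrapper {
   public:
    SmiInstructionWrapper(MacroAssembler* masm,
                          Label* on_not_smi_result,
                          Label::Distance near_jump,
                          bool keep_sources)
        : masm_(masm), on_not_smi_result_(on_not_smi_result),
          near_jump_(near_jump), keep_sources_(keep_sources) {}

    bool NeedsCheckOverflow() const { return on_not_smi_result_ != NULL; }
    bool NeedsKeepSourceOperandsIntact() const { return keep_sources_; }
    void BailoutIf(Condition cc) const {
      masm_->j(cc, on_not_smi_result_, near_jump_);
    }

   private:
    MacroAssembler* masm_;
    Label* on_not_smi_result_;
    Label::Distance near_jump_;
    bool keep_sources_;
  };

This is also what danno's "Always call BailoutIf" comment below is about: guarding
each call behind if (wrapper.NeedsCheckOverflow()) duplicates a decision the
wrapper could make internally.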
 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
       movq(dst, src);
     }
   } else if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
     Register constant_reg = GetSmiConstant(constant);
     subq(dst, constant_reg);
   } else {
     if (constant->value() == Smi::kMinValue) {
-      LoadSmiConstant(dst, constant);
-      // Adding and subtracting the min-value gives the same result, it only
-      // differs on the overflow bit, which we don't check here.
-      addq(dst, src);
+      if (SmiValuesAre32Bits()) {
+        LoadSmiConstant(dst, constant);
+        // Adding and subtracting the min-value gives the same result, it only
+        // differs on the overflow bit, which we don't check here.
+        addq(dst, src);
+      } else {
+        ASSERT(SmiValuesAre31Bits());
+        movq(dst, src);
+        subq(dst, SmiToImmediate(constant));
+      }
     } else {
       // Subtract by adding the negation.
       LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
       addq(dst, src);
     }
   }
 }


 void MacroAssembler::SmiSubConstant(Register dst,
                                     Register src,
                                     Smi* constant,
-                                    Label* on_not_smi_result,
-                                    Label::Distance near_jump) {
+                                    const SmiInstructionWrapper& wrapper) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
       movq(dst, src);
     }
-  } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-    if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test the non-negativeness before doing the subtraction.
-      testq(src, src);
-      j(not_sign, on_not_smi_result, near_jump);
-      LoadSmiConstant(kScratchRegister, constant);
-      subq(dst, kScratchRegister);
-    } else {
-      // Subtract by adding the negation.
-      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
-      addq(kScratchRegister, dst);
-      j(overflow, on_not_smi_result, near_jump);
-      movq(dst, kScratchRegister);
-    }
-  } else {
-    if (constant->value() == Smi::kMinValue) {
-      // Subtracting min-value from any non-negative value will overflow.
-      // We test the non-negativeness before doing the subtraction.
-      testq(src, src);
-      j(not_sign, on_not_smi_result, near_jump);
-      LoadSmiConstant(dst, constant);
-      // Adding and subtracting the min-value gives the same result, it only
-      // differs on the overflow bit, which we don't check here.
-      addq(dst, src);
-    } else {
-      // Subtract by adding the negation.
-      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
-      addq(dst, src);
-      j(overflow, on_not_smi_result, near_jump);
-    }
+  } else if (SmiValuesAre32Bits()) {
+    if (dst.is(src)) {
+      ASSERT(!dst.is(kScratchRegister));
+      if (constant->value() == Smi::kMinValue) {
+        // Subtracting min-value from any non-negative value will overflow.
+        // We test the non-negativeness before doing the subtraction.
+        if (wrapper.NeedsCheckOverflow()) {
+          testq(src, src);
+          wrapper.BailoutIf(not_sign);
+        }
+        LoadSmiConstant(kScratchRegister, constant);
+        subq(dst, kScratchRegister);
+      } else {
+        // Subtract by adding the negation.
+        if (wrapper.NeedsKeepSourceOperandsIntact()) {
+          ASSERT(wrapper.NeedsCheckOverflow());
+          LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
+          addq(kScratchRegister, dst);
+          wrapper.BailoutIf(overflow);
+          movq(dst, kScratchRegister);
+        } else {
+          UNIMPLEMENTED();  // Not used.
+        }
+      }
+    } else {
+      if (constant->value() == Smi::kMinValue) {
+        // Subtracting min-value from any non-negative value will overflow.
+        // We test the non-negativeness before doing the subtraction.
+        if (wrapper.NeedsCheckOverflow()) {
+          testq(src, src);
+          wrapper.BailoutIf(not_sign);
+        }
+        LoadSmiConstant(dst, constant);
+        // Adding and subtracting the min-value gives the same result, it only
+        // differs on the overflow bit, which we don't check here.
+        addq(dst, src);
+      } else {
+        // Subtract by adding the negation.
+        LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
+        addq(dst, src);
+        if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+      }
+    }
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    if (dst.is(src)) {
+      ASSERT(!dst.is(kScratchRegister));
+      if (constant->value() == Smi::kMinValue) {
+        // Subtracting min-value from any non-negative value will overflow.
+        // We test the non-negativeness before doing the subtraction.
+        if (wrapper.NeedsCheckOverflow()) {
+          testl(src, src);
+          wrapper.BailoutIf(not_sign);
+        }
+        subq(dst, SmiToImmediate(constant));
+      } else {
+        if (wrapper.NeedsKeepSourceOperandsIntact()) {
+          ASSERT(wrapper.NeedsCheckOverflow());
+          // Subtract by adding the negation.
+          LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
+          addl(kScratchRegister, dst);
+          wrapper.BailoutIf(overflow);
+          movsxlq(dst, kScratchRegister);
+        } else {
+          subl(dst, SmiToImmediate(constant));
+          if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+          movsxlq(dst, dst);
+        }
+      }
+    } else {
+      if (constant->value() == Smi::kMinValue) {
+        // Subtracting min-value from any non-negative value will overflow.
+        // We test the non-negativeness before doing the subtraction.
+        if (wrapper.NeedsCheckOverflow()) {
+          testl(src, src);
+          wrapper.BailoutIf(not_sign);
+        }
+        movq(dst, src);
+        subq(dst, SmiToImmediate(constant));
+      } else {
+        movl(dst, src);
+        subl(dst, SmiToImmediate(constant));
+        if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+        movsxlq(dst, dst);
+      }
+    }
   }
 }


 void MacroAssembler::SmiNeg(Register dst,
                             Register src,
                             Label* on_smi_result,
                             Label::Distance near_jump) {
   if (dst.is(src)) {
     ASSERT(!dst.is(kScratchRegister));
     movq(kScratchRegister, src);
     neg(dst);  // Low 32 bits are retained as zero by negation.
     // Test if result is zero or Smi::kMinValue.
-    cmpq(dst, kScratchRegister);
+    if (SmiValuesAre32Bits()) {
+      cmpq(dst, kScratchRegister);
+    } else {
+      ASSERT(SmiValuesAre31Bits());
+      cmpl(dst, kScratchRegister);
+    }
     j(not_equal, on_smi_result, near_jump);
     movq(src, kScratchRegister);
   } else {
     movq(dst, src);
     neg(dst);
-    cmpq(dst, src);
+    if (SmiValuesAre32Bits()) {
+      cmpq(dst, src);
+    } else {
+      ASSERT(SmiValuesAre31Bits());
+      cmpl(dst, src);
+    }
     // If the result is zero or Smi::kMinValue, negation failed to create a smi.
     j(not_equal, on_smi_result, near_jump);
   }
 }


 void MacroAssembler::SmiAdd(Register dst,
                             Register src1,
                             Register src2,
-                            Label* on_not_smi_result,
-                            Label::Distance near_jump) {
-  ASSERT_NOT_NULL(on_not_smi_result);
+                            const SmiInstructionWrapper& wrapper) {
   ASSERT(!dst.is(src2));
-  if (dst.is(src1)) {
-    movq(kScratchRegister, src1);
-    addq(kScratchRegister, src2);
-    j(overflow, on_not_smi_result, near_jump);
-    movq(dst, kScratchRegister);
+  if (SmiValuesAre32Bits()) {
+    if (dst.is(src1)) {
+      if (wrapper.NeedsKeepSourceOperandsIntact()) {
+        ASSERT(wrapper.NeedsCheckOverflow());
+        movq(kScratchRegister, src1);
+        addq(kScratchRegister, src2);
+        wrapper.BailoutIf(overflow);
+        movq(dst, kScratchRegister);
+      } else {
+        addq(dst, src2);
+        if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+      }
+    } else {
+      movq(dst, src1);
+      addq(dst, src2);
+      if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+    }
   } else {
-    movq(dst, src1);
-    addq(dst, src2);
-    j(overflow, on_not_smi_result, near_jump);
+    ASSERT(SmiValuesAre31Bits());
+    if (dst.is(src1)) {
+      if (wrapper.NeedsKeepSourceOperandsIntact()) {
+        ASSERT(wrapper.NeedsCheckOverflow());
+        movl(kScratchRegister, src1);
+        addl(kScratchRegister, src2);
+        wrapper.BailoutIf(overflow);
+        movsxlq(dst, kScratchRegister);
+      } else {
+        addl(dst, src2);
+        if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+        movsxlq(dst, dst);
+      }
+    } else {
+      movl(dst, src1);
+      addl(dst, src2);
+      if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+      movsxlq(dst, dst);
+    }
   }
 }


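The 31-bit arm gets overflow detection almost for free: tagged smis are value * 2
held sign-extended in a 64-bit register, so a 32-bit addl of two tagged smis sets
the hardware overflow flag exactly when the mathematical sum leaves the 31-bit
smi range, and the final movsxlq re-canonicalizes the register. Sketch (mine):

  #include <cassert>
  #include <cstdint>

  // Returns false on smi overflow, mimicking addl's OF flag.
  bool AddTagged31(int32_t t1, int32_t t2, int32_t* out) {
    int64_t wide = static_cast<int64_t>(t1) + t2;     // what addl computes
    *out = static_cast<int32_t>(wide);                // low 32 bits; movsxlq widens
    return wide == static_cast<int32_t>(wide);        // OF clear?
  }

  int main() {
    int32_t r;
    assert(AddTagged31(5 << 1, 7 << 1, &r) && r == (12 << 1));
    int32_t max_smi = (1 << 30) - 1;
    assert(!AddTagged31(max_smi << 1, 1 << 1, &r));   // leaves the 31-bit range
    return 0;
  }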
 void MacroAssembler::SmiAdd(Register dst,
                             Register src1,
                             const Operand& src2,
-                            Label* on_not_smi_result,
-                            Label::Distance near_jump) {
-  ASSERT_NOT_NULL(on_not_smi_result);
+                            const SmiInstructionWrapper& wrapper) {

    danno 2013/08/19 21:47:44  This code seems identical to the version directly…
    haitao.feng 2013/08/20 15:09:30  I will do that.

-  if (dst.is(src1)) {
-    movq(kScratchRegister, src1);
-    addq(kScratchRegister, src2);
-    j(overflow, on_not_smi_result, near_jump);
-    movq(dst, kScratchRegister);
+  if (SmiValuesAre32Bits()) {
+    if (dst.is(src1)) {
+      if (wrapper.NeedsKeepSourceOperandsIntact()) {
+        ASSERT(wrapper.NeedsCheckOverflow());
+        movq(kScratchRegister, src1);
+        addq(kScratchRegister, src2);
+        wrapper.BailoutIf(overflow);
+        movq(dst, kScratchRegister);
+      } else {
+        addq(dst, src2);
+        if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);

    danno 2013/08/19 21:47:44  Always call BailoutIf, here and elsewhere.

+      }
+    } else {
+      ASSERT(!src2.AddressUsesRegister(dst));
+      movq(dst, src1);
+      addq(dst, src2);
+      if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+    }
   } else {
-    ASSERT(!src2.AddressUsesRegister(dst));
-    movq(dst, src1);
-    addq(dst, src2);
-    j(overflow, on_not_smi_result, near_jump);
+    if (dst.is(src1)) {
+      if (wrapper.NeedsKeepSourceOperandsIntact()) {
+        ASSERT(wrapper.NeedsCheckOverflow());
+        movl(kScratchRegister, src1);
+        addl(kScratchRegister, src2);
+        wrapper.BailoutIf(overflow);
+        movsxlq(dst, kScratchRegister);
+      } else {
+        addl(dst, src2);
+        if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+        movsxlq(dst, dst);
+      }
+    } else {
+      ASSERT(!src2.AddressUsesRegister(dst));
+      movl(dst, src1);
+      addl(dst, src2);
+      if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+      movsxlq(dst, dst);
+    }
   }
 }


 void MacroAssembler::SmiAdd(Register dst,
                             Register src1,
                             Register src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible.
   if (!dst.is(src1)) {
     if (emit_debug_code()) {
       movq(kScratchRegister, src1);
       addq(kScratchRegister, src2);
       Check(no_overflow, kSmiAdditionOverflow);
     }
     lea(dst, Operand(src1, src2, times_1, 0));
   } else {
     addq(dst, src2);
     Assert(no_overflow, kSmiAdditionOverflow);
   }
 }


 void MacroAssembler::SmiSub(Register dst,
                             Register src1,
                             Register src2,
-                            Label* on_not_smi_result,
-                            Label::Distance near_jump) {
-  ASSERT_NOT_NULL(on_not_smi_result);
+                            const SmiInstructionWrapper& wrapper) {
   ASSERT(!dst.is(src2));
-  if (dst.is(src1)) {
-    cmpq(dst, src2);
-    j(overflow, on_not_smi_result, near_jump);
-    subq(dst, src2);
+  if (SmiValuesAre32Bits()) {
+    if (dst.is(src1)) {
+      if (wrapper.NeedsCheckOverflow()) {
+        cmpq(dst, src2);
+        wrapper.BailoutIf(overflow);
+      }
+      subq(dst, src2);
+    } else {
+      movq(dst, src1);
+      subq(dst, src2);
+      if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+    }
   } else {
-    movq(dst, src1);
-    subq(dst, src2);
-    j(overflow, on_not_smi_result, near_jump);
+    ASSERT(SmiValuesAre31Bits());
+    if (dst.is(src1)) {
+      if (wrapper.NeedsCheckOverflow()) {
+        cmpl(dst, src2);
+        wrapper.BailoutIf(overflow);
+      }
+      subq(dst, src2);
+    } else {
+      movl(dst, src1);
+      subl(dst, src2);
+      if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+      movsxlq(dst, dst);
+    }
   }
 }


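Note how the in-place case checks overflow with cmp before mutating dst: cmp
performs the same subtraction as sub and sets the same flags, but discards the
result, so the bailout can be taken while dst still holds the original smi.
Sketch (mine) of the equivalent flag computation:

  #include <cassert>
  #include <cstdint>

  // True when a 32-bit subtraction a - b would set the overflow flag.
  bool SubWouldOverflow(int32_t a, int32_t b) {
    int64_t wide = static_cast<int64_t>(a) - b;
    return wide != static_cast<int32_t>(wide);
  }

  int main() {
    assert(!SubWouldOverflow(4, 6));
    assert(SubWouldOverflow(INT32_MIN, 1));  // would wrap; bail out first
    return 0;
  }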
 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible (e.g., subtracting two positive smis).
   ASSERT(!dst.is(src2));
   if (!dst.is(src1)) {
     movq(dst, src1);
   }
   subq(dst, src2);
   Assert(no_overflow, kSmiSubtractionOverflow);
 }


 void MacroAssembler::SmiSub(Register dst,
                             Register src1,
                             const Operand& src2,
-                            Label* on_not_smi_result,
-                            Label::Distance near_jump) {
-  ASSERT_NOT_NULL(on_not_smi_result);
+                            const SmiInstructionWrapper& wrapper) {

    danno 2013/08/19 21:47:44  This code seems identical to the version directly…

-  if (dst.is(src1)) {
-    movq(kScratchRegister, src2);
-    cmpq(src1, kScratchRegister);
-    j(overflow, on_not_smi_result, near_jump);
-    subq(src1, kScratchRegister);
+  if (SmiValuesAre32Bits()) {
+    if (dst.is(src1)) {
+      if (wrapper.NeedsCheckOverflow()) {
+        cmpq(dst, src2);
+        wrapper.BailoutIf(overflow);
+      }
+      subq(dst, src2);
+    } else {
+      movq(dst, src1);
+      subq(dst, src2);
+      if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+    }
   } else {
-    movq(dst, src1);
-    subq(dst, src2);
-    j(overflow, on_not_smi_result, near_jump);
+    ASSERT(SmiValuesAre31Bits());
+    if (dst.is(src1)) {
+      if (wrapper.NeedsCheckOverflow()) {
+        cmpl(dst, src2);
+        wrapper.BailoutIf(overflow);
+      }
+      subq(dst, src2);
+    } else {
+      movl(dst, src1);
+      subl(dst, src2);
+      if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow);
+      movsxlq(dst, dst);
+    }
   }
 }


 void MacroAssembler::SmiSub(Register dst,
                             Register src1,
                             const Operand& src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible (e.g., subtracting two positive smis).
   if (!dst.is(src1)) {
(...skipping 10 matching lines...)
                             Label* on_not_smi_result,
                             Label::Distance near_jump) {
   ASSERT(!dst.is(src2));
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src1.is(kScratchRegister));
   ASSERT(!src2.is(kScratchRegister));

   if (dst.is(src1)) {
     Label failure, zero_correct_result;
     movq(kScratchRegister, src1);  // Create backup for later testing.
-    SmiToInteger64(dst, src1);
-    imul(dst, src2);
+    if (SmiValuesAre32Bits()) {
+      SmiToInteger64(dst, src1);
+      imul(dst, src2);
+    } else {
+      ASSERT(SmiValuesAre31Bits());
+      SmiToInteger32(dst, src1);
+      imull(dst, src2);
+    }
     j(overflow, &failure, Label::kNear);

     // Check for negative zero result.  If product is zero, and one
     // argument is negative, go to slow case.
     Label correct_result;
     testq(dst, dst);
     j(not_zero, &correct_result, Label::kNear);

     movq(dst, kScratchRegister);
     xor_(dst, src2);
     // Result was positive zero.
     j(positive, &zero_correct_result, Label::kNear);

     bind(&failure);  // Reused failure exit, restores src1.
     movq(src1, kScratchRegister);
     jmp(on_not_smi_result, near_jump);

     bind(&zero_correct_result);
     Set(dst, 0);

     bind(&correct_result);
+    if (SmiValuesAre31Bits()) {
+      movsxlq(dst, dst);
+    }
   } else {
-    SmiToInteger64(dst, src1);
-    imul(dst, src2);
+    if (SmiValuesAre32Bits()) {
+      SmiToInteger64(dst, src1);
+      imul(dst, src2);
+    } else {
+      ASSERT(SmiValuesAre31Bits());
+      SmiToInteger32(dst, src1);
+      imull(dst, src2);
+    }
     j(overflow, on_not_smi_result, near_jump);
     // Check for negative zero result.  If product is zero, and one
     // argument is negative, go to slow case.
     Label correct_result;
     testq(dst, dst);
     j(not_zero, &correct_result, Label::kNear);
     // One of src1 and src2 is zero, the check whether the other is
     // negative.
     movq(kScratchRegister, src1);
     xor_(kScratchRegister, src2);
     j(negative, on_not_smi_result, near_jump);
     bind(&correct_result);
+    if (SmiValuesAre31Bits()) {
+      movsxlq(dst, dst);
+    }
   }
 }


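The extra zero-product logic in SmiMul is about JavaScript's negative zero: a smi
cannot represent -0, yet 0 * -5 must evaluate to -0, so a zero product whose
operands have opposite signs has to bail out to the slow path and produce a heap
number. Standalone illustration (mine):

  #include <cassert>
  #include <cmath>

  int main() {
    double d = 0.0 * -5.0;
    assert(std::signbit(d));   // doubles can carry the sign of zero
    int i = 0 * -5;
    assert(i == 0);            // integer (smi) math cannot; the sign is lost,
    return 0;                  // hence the xor_ sign test and the bailout above
  }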
 void MacroAssembler::SmiDiv(Register dst,
                             Register src1,
                             Register src2,
                             Label* on_not_smi_result,
                             Label::Distance near_jump) {
   ASSERT(!src1.is(kScratchRegister));
(...skipping 12 matching lines...)
   }
   SmiToInteger32(rax, src1);
   // We need to rule out dividing Smi::kMinValue by -1, since that would
   // overflow in idiv and raise an exception.
   // We combine this with negative zero test (negative zero only happens
   // when dividing zero by a negative number).

   // We overshoot a little and go to slow case if we divide min-value
   // by any negative value, not just -1.
   Label safe_div;
-  testl(rax, Immediate(0x7fffffff));
+  if (SmiValuesAre32Bits()) {
+    testl(rax, Immediate(0x7fffffff));
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    testl(rax, Immediate(0x3fffffff));
+  }
   j(not_zero, &safe_div, Label::kNear);
   testq(src2, src2);
   if (src1.is(rax)) {
     j(positive, &safe_div, Label::kNear);
     movq(src1, kScratchRegister);
     jmp(on_not_smi_result, near_jump);
   } else {
     j(negative, on_not_smi_result, near_jump);
   }
   bind(&safe_div);
(...skipping 73 matching lines...)
   testq(src1, src1);
   j(negative, on_not_smi_result, near_jump);
   bind(&smi_result);
   Integer32ToSmi(dst, rdx);
 }


 void MacroAssembler::SmiNot(Register dst, Register src) {
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src.is(kScratchRegister));
+  if (SmiValuesAre32Bits()) {
   // Set tag and padding bits before negating, so that they are zero afterwards.
   movl(kScratchRegister, Immediate(~0));
+  } else {
+    ASSERT(SmiValuesAre31Bits());
+    movl(kScratchRegister, Immediate(1));
+  }
   if (dst.is(src)) {
     xor_(dst, kScratchRegister);
   } else {
     lea(dst, Operand(src, kScratchRegister, times_1, 0));
   }
   not_(dst);
 }


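The scratch constant in SmiNot is chosen so that the tag bits are ones going into
the 64-bit NOT and therefore zeros coming out: all 32 low bits in the wide layout,
just the single tag bit in the narrow one. The result is the correctly tagged smi
for ~value in both cases. Sketch (mine):

  #include <cassert>
  #include <cstdint>

  int64_t SmiNot32(int64_t tagged) { return ~(tagged ^ 0xffffffffLL); }  // ~0 mask
  int64_t SmiNot31(int64_t tagged) { return ~(tagged ^ 1); }             // tag bit

  int main() {
    assert(SmiNot32(5LL * (1LL << 32)) == -6LL * (1LL << 32));  // ~5 == -6, tagged
    assert(SmiNot31(5 << 1) == -(6LL << 1));
    return 0;
  }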
 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
   ASSERT(!dst.is(src2));
   if (!dst.is(src1)) {
     movq(dst, src1);
   }
   and_(dst, src2);
 }


 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     Set(dst, 0);
-  } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-    Register constant_reg = GetSmiConstant(constant);
-    and_(dst, constant_reg);
+  } else if (SmiValuesAre32Bits()) {
+    if (dst.is(src)) {
+      ASSERT(!dst.is(kScratchRegister));
+      Register constant_reg = GetSmiConstant(constant);
+      and_(dst, constant_reg);
+    } else {
+      LoadSmiConstant(dst, constant);
+      and_(dst, src);
+    }
   } else {
-    LoadSmiConstant(dst, constant);
-    and_(dst, src);
+    ASSERT(SmiValuesAre31Bits());
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+    and_(dst, SmiToImmediate(constant));
   }
 }


 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
     ASSERT(!src1.is(src2));
     movq(dst, src1);
   }
   or_(dst, src2);
 }


 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
-  if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-    Register constant_reg = GetSmiConstant(constant);
-    or_(dst, constant_reg);
+  if (SmiValuesAre32Bits()) {
+    if (dst.is(src)) {
+      ASSERT(!dst.is(kScratchRegister));
+      Register constant_reg = GetSmiConstant(constant);
+      or_(dst, constant_reg);
+    } else {
+      LoadSmiConstant(dst, constant);
+      or_(dst, src);
+    }
   } else {
-    LoadSmiConstant(dst, constant);
-    or_(dst, src);
+    ASSERT(SmiValuesAre31Bits());
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+    or_(dst, SmiToImmediate(constant));
   }
 }


 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
     ASSERT(!src1.is(src2));
     movq(dst, src1);
   }
   xor_(dst, src2);
 }


 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
-  if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
-    Register constant_reg = GetSmiConstant(constant);
-    xor_(dst, constant_reg);
+  if (SmiValuesAre32Bits()) {
+    if (dst.is(src)) {
+      ASSERT(!dst.is(kScratchRegister));
+      Register constant_reg = GetSmiConstant(constant);
+      xor_(dst, constant_reg);
+    } else {
+      LoadSmiConstant(dst, constant);
+      xor_(dst, src);
+    }
   } else {
-    LoadSmiConstant(dst, constant);
-    xor_(dst, src);
+    ASSERT(SmiValuesAre31Bits());
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+    xor_(dst, SmiToImmediate(constant));
   }
 }


void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                     Register src,
                                                     int shift_value) {
  ASSERT(is_uint5(shift_value));
  if (shift_value > 0) {
    if (dst.is(src)) {
      sar(dst, Immediate(shift_value + kSmiShift));
      shl(dst, Immediate(kSmiShift));
    } else {
      UNIMPLEMENTED();  // Not used.
    }
  }
}
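For the 32-bit layout, the sar by shift_value + kSmiShift followed by shl by kSmiShift both shifts the payload and restores the tag in a single pair of instructions. A hedged worked model (assumes >> of a negative value is an arithmetic shift, as it is with x64 compilers):

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiShift = 32;  // 32-bit smi layout
  int shift_value = 2;
  int64_t tagged = static_cast<int64_t>(-20) << kSmiShift;  // smi -20
  // sar(dst, Immediate(shift_value + kSmiShift)); shl(dst, Immediate(kSmiShift));
  int64_t result = (tagged >> (shift_value + kSmiShift)) << kSmiShift;
  assert(result == static_cast<int64_t>(-5) << kSmiShift);  // smi -5 == -20 >> 2
  return 0;
}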


void MacroAssembler::SmiShiftLeftConstant(
    Register dst,
    Register src,
    int shift_value,
    const SmiInstructionWrapper& wrapper) {
  if (SmiValuesAre32Bits()) {
    if (!dst.is(src)) {
      movq(dst, src);
    }
    if (shift_value > 0) {
      shl(dst, Immediate(shift_value));
    }
  } else {
    ASSERT(SmiValuesAre31Bits());
    if (!dst.is(src)) {
      movq(dst, src);
    } else {
      ASSERT(!wrapper.NeedsKeepSourceOperandsIntact());
    }
    if (shift_value > 0) {
      SmiToInteger32(dst, dst);
      shll(dst, Immediate(shift_value));
      Condition is_valid = CheckInteger32ValidSmiValue(dst);
      wrapper.BailoutIf(NegateCondition(is_valid));
      Integer32ToSmi(dst, dst);
    }
  }
}
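A left shift is the one constant shift that can push a payload out of smi range, which is why the 31-bit branch untags, shifts as an integer32, and lets the wrapper bail out before retagging. A sketch of the range check, assuming a 31-bit payload spans [-2^30, 2^30):

#include <cassert>
#include <cstdint>

// Assumed validity predicate behind CheckInteger32ValidSmiValue for 31-bit smis.
bool FitsSmi31(int64_t value) {
  return value >= -(INT64_C(1) << 30) && value < (INT64_C(1) << 30);
}

int main() {
  int32_t payload = 3 << 28;                             // a valid smi payload
  assert(FitsSmi31(payload));
  int64_t shifted = static_cast<int64_t>(payload) << 2;  // shll(dst, Immediate(2))
  assert(!FitsSmi31(shifted));                           // wrapper.BailoutIf(...) fires
  return 0;
}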


void MacroAssembler::SmiShiftLogicalRightConstant(
    Register dst, Register src, int shift_value,
    Label* on_not_smi_result, Label::Distance near_jump) {
  // Logical right shift interprets its result as an *unsigned* number.
  if (dst.is(src)) {
    UNIMPLEMENTED();  // Not used.
  } else {
    movq(dst, src);
    if (shift_value == 0) {
      if (SmiValuesAre32Bits()) {
        testq(dst, dst);
      } else {
        ASSERT(SmiValuesAre31Bits());
        testl(dst, dst);
      }
      j(negative, on_not_smi_result, near_jump);
    }
    if (SmiValuesAre32Bits()) {
      shr(dst, Immediate(shift_value + kSmiShift));
      shl(dst, Immediate(kSmiShift));
    } else {
      ASSERT(SmiValuesAre31Bits());
      SmiToInteger32(dst, dst);
      shrl(dst, Immediate(shift_value));
      JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
      shll(dst, Immediate(kSmiShift));
    }
  }
}
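The zero-shift test above is not dead code: a logical shift reinterprets the value as unsigned, so even a shift by 0 of a negative smi produces a number above the smi maximum. A tiny model of the 31-bit bailout condition:

#include <cassert>
#include <cstdint>

int main() {
  int32_t payload = -1;                                    // negative smi payload
  uint32_t shifted = static_cast<uint32_t>(payload) >> 0;  // logical shift by zero
  // JumpIfUIntNotValidSmiValue: as an unsigned number this exceeds the smi max.
  assert(shifted > (1u << 30));                            // jump to on_not_smi_result
  return 0;
}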


void MacroAssembler::SmiShiftLeft(Register dst,
                                  Register src1,
                                  Register src2,
                                  Label* on_not_smi_result) {
  if (SmiValuesAre32Bits()) {
    ASSERT(!dst.is(rcx));
danno 2013/08/19 21:47:44 Here and elsewhere, I think it really makes testab
    // Untag shift amount.
    if (!dst.is(src1)) {
      movq(dst, src1);
    }
    SmiToInteger32(rcx, src2);
    // Shift amount is specified by the lower 5 bits, not six as in the shl opcode.
    and_(rcx, Immediate(0x1f));
    shl_cl(dst);
  } else {
    ASSERT(SmiValuesAre31Bits());
    ASSERT(!dst.is(kScratchRegister));
    ASSERT(!src1.is(kScratchRegister));
    ASSERT(!src2.is(kScratchRegister));
    ASSERT(!dst.is(rcx));
    Label result_ok;

    if (dst.is(src1)) {
      UNIMPLEMENTED();  // Not used.
    } else {
      if (src1.is(rcx) || src2.is(rcx)) {
        movq(kScratchRegister, rcx);
      }
      movq(dst, src1);
      SmiToInteger32(dst, dst);
      // Untag shift amount.
      SmiToInteger32(rcx, src2);
      // Shift amount is specified by the lower 5 bits, not six as in the shl opcode.
      andl(rcx, Immediate(0x1f));
      shll_cl(dst);
      JumpIfValidSmiValue(dst, &result_ok, Label::kNear);
      if (src1.is(rcx) || src2.is(rcx)) {
        if (src1.is(rcx)) {
          movq(src1, kScratchRegister);
        } else {
          movq(src2, kScratchRegister);
        }
      }
      jmp(on_not_smi_result);
      bind(&result_ok);
      Integer32ToSmi(dst, dst);
    }
  }
}


void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                          Register src1,
                                          Register src2,
                                          Label* on_not_smi_result,
                                          Label::Distance near_jump) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(rcx));
  Label result_ok;

  if (dst.is(src1)) {
    ASSERT(SmiValuesAre32Bits());
  } else {
    movq(dst, src1);
  }

  if (src1.is(rcx) || src2.is(rcx)) {
    movq(kScratchRegister, rcx);
  }
  movq(dst, src1);
  SmiToInteger32(dst, dst);
  SmiToInteger32(rcx, src2);
  shrl_cl(dst);
  JumpIfUIntValidSmiValue(dst, &result_ok, Label::kNear);
  if (src1.is(rcx) || src2.is(rcx)) {
    if (src1.is(rcx)) {
      movq(src1, kScratchRegister);
    } else {
      movq(src2, kScratchRegister);
    }
  }
  if (dst.is(src1)) {
    Integer32ToSmi(dst, dst);
  }
  jmp(on_not_smi_result);
  bind(&result_ok);
  Integer32ToSmi(dst, dst);
}


void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                             Register src1,
                                             Register src2) {
  ASSERT(!dst.is(kScratchRegister));
  ASSERT(!src1.is(kScratchRegister));
  ASSERT(!src2.is(kScratchRegister));
  ASSERT(!dst.is(rcx));
  if (src1.is(rcx)) {
    movq(kScratchRegister, src1);
  } else if (src2.is(rcx)) {
    movq(kScratchRegister, src2);
  }
  if (!dst.is(src1)) {
    movq(dst, src1);
  }
  SmiToInteger32(rcx, src2);
  if (SmiValuesAre32Bits()) {
    orl(rcx, Immediate(kSmiShift));
    sar_cl(dst);  // Shift 32 + original rcx & 0x1f.
  } else {
    ASSERT(SmiValuesAre31Bits());
    SmiToInteger32(dst, dst);
    sarl_cl(dst);
  }
  Integer32ToSmi(dst, dst);
  if (src1.is(rcx)) {
    movq(src1, kScratchRegister);
  } else if (src2.is(rcx)) {
    movq(src2, kScratchRegister);
  }
}
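Unlike the logical variant, the arithmetic shift needs no on_not_smi_result label: shifting toward zero can only shrink the magnitude, so a valid payload stays valid for every shift amount. A quick check over the 31-bit boundary values (again assuming arithmetic >> on negatives):

#include <cassert>
#include <cstdint>

bool FitsSmi31(int64_t v) {  // assumed 31-bit smi payload range
  return v >= -(INT64_C(1) << 30) && v < (INT64_C(1) << 30);
}

int main() {
  const int64_t samples[] = {(INT64_C(1) << 30) - 1, -(INT64_C(1) << 30), 42, -1, 0};
  for (int64_t payload : samples) {
    for (int shift = 0; shift < 32; shift++) {
      assert(FitsSmi31(payload >> shift));  // never needs to bail out
    }
  }
  return 0;
}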


void MacroAssembler::SelectNonSmi(Register dst,
                                  Register src1,
(...skipping 36 matching lines...)

SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                    Register src,
                                    int shift) {
  ASSERT(is_uint6(shift));
  // There is a possible optimization if shift is in the range 60-63, but that
  // will (and must) never happen.
  if (!dst.is(src)) {
    movq(dst, src);
  }
  if (SmiValuesAre32Bits()) {
    if (shift < kSmiShift) {
      sar(dst, Immediate(kSmiShift - shift));
    } else {
      shl(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    ASSERT(SmiValuesAre31Bits());
    if (shift == times_1) {
      sar(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    }
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
  }
}
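The ScaleFactor trick in the 31-bit branch: a tagged smi is already the payload times two, so scaling the tagged word by 2^(shift-1) addresses the same element as scaling the untagged payload by 2^shift, and the untagging sar disappears for every scale above times_1. A hedged model:

#include <cassert>
#include <cstdint>

int main() {
  int32_t index = 7;
  int64_t tagged = static_cast<int64_t>(index) << 1;  // 31-bit smi: payload * 2
  int shift = 3;                                      // e.g. times_8 element scaling
  // index * 2^shift == tagged * 2^(shift - 1), which is what
  // SmiIndex(dst, static_cast<ScaleFactor>(shift - 1)) exploits.
  assert((static_cast<int64_t>(index) << shift) == (tagged << (shift - 1)));
  return 0;
}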


SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                            Register src,
                                            int shift) {
  // Register src holds a positive smi.
  ASSERT(is_uint6(shift));
  if (!dst.is(src)) {
    movq(dst, src);
  }
  neg(dst);
  if (SmiValuesAre32Bits()) {
    if (shift < kSmiShift) {
      sar(dst, Immediate(kSmiShift - shift));
    } else {
      shl(dst, Immediate(shift - kSmiShift));
    }
    return SmiIndex(dst, times_1);
  } else {
    ASSERT(SmiValuesAre31Bits());
    if (shift == times_1) {
      sar(dst, Immediate(kSmiShift));
      return SmiIndex(dst, times_1);
    }
    return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
  }
}


void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
  if (SmiValuesAre32Bits()) {
    ASSERT_EQ(0, kSmiShift % kBitsPerByte);
    addl(dst, Operand(src, kSmiShift / kBitsPerByte));
  } else {
    ASSERT(SmiValuesAre31Bits());
    SmiToInteger32(kScratchRegister, src);
    addl(dst, kScratchRegister);
  }
}
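The 32-bit branch never untags because the payload occupies the upper four bytes of the slot, so addressing the operand at byte offset kSmiShift / kBitsPerByte reads the integer value directly. A standalone model of that addressing (assumes little-endian, as on x64):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  int64_t slot = static_cast<int64_t>(123) << 32;  // 32-bit smi 123, as stored in memory
  int32_t payload;
  // Operand(src, kSmiShift / kBitsPerByte) addresses bytes 4..7 of the slot:
  std::memcpy(&payload, reinterpret_cast<const char*>(&slot) + 4, sizeof(payload));
  assert(payload == 123);
  return 0;
}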


void MacroAssembler::Test(const Operand& src, Smi* source) {
  if (SmiValuesAre32Bits()) {
    testl(Operand(src, kIntSize), Immediate(source->value()));
  } else {
    ASSERT(SmiValuesAre31Bits());
    testl(src, SmiToImmediate(source));
  }
}
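The 31-bit branch of Test can compare tagged words directly because ANDing two tagged smis yields the tagged AND of their payloads, so the zero flag answers the same question either way. A one-line check of that identity:

#include <cassert>
#include <cstdint>

int32_t Tag31(int32_t v) { return v << 1; }  // illustrative 31-bit smi tagging

int main() {
  int32_t a = 0x0A, b = 0x05;
  // testl(src, SmiToImmediate(source)) sets ZF exactly when the payloads
  // share no bits, because (a << 1) & (b << 1) == (a & b) << 1.
  assert((Tag31(a) & Tag31(b)) == Tag31(a & b));
  assert(((Tag31(a) & Tag31(b)) == 0) == ((a & b) == 0));
  return 0;
}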


void MacroAssembler::TestBit(const Operand& src, int bits) {
  int byte_offset = bits / kBitsPerByte;
  int bit_in_byte = bits & (kBitsPerByte - 1);
  testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
}
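TestBit works for either smi width because it decomposes the bit index into a byte offset plus a bit within that byte, then probes a single byte with testb. A little-endian model of that decomposition:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const int kBitsPerByte = 8;
  int bits = 42;
  uint64_t word = UINT64_C(1) << bits;          // field with bit 42 set
  int byte_offset = bits / kBitsPerByte;        // 5
  int bit_in_byte = bits & (kBitsPerByte - 1);  // 2
  uint8_t probed;
  std::memcpy(&probed, reinterpret_cast<const char*>(&word) + byte_offset, 1);
  assert(probed & (1 << bit_in_byte));          // testb(Operand(src, byte_offset), ...)
  return 0;
}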


void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
  movq(scratch, src);
  // High bits.
  shr(src, Immediate(64 - kSmiShift));
  shl(src, Immediate(kSmiShift));
  push(src);
  // Low bits.
  shl(scratch, Immediate(kSmiShift));
  push(scratch);
}


void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
  pop(scratch);
  // Low bits.
  shr(scratch, Immediate(kSmiShift));
  pop(dst);
  shr(dst, Immediate(kSmiShift));
  // High bits.
  shl(dst, Immediate(64 - kSmiShift));
  or_(dst, scratch);
}
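Push/PopInt64AsTwoSmis split a raw 64-bit value into two smi-shaped words so it can sit on a GC-scanned stack, and the pop reverses the split exactly. A round-trip model for the 32-bit layout (kSmiShift == 32):

#include <cassert>
#include <cstdint>

int main() {
  const int kSmiShift = 32;
  uint64_t src = UINT64_C(0x0123456789ABCDEF);
  // PushInt64AsTwoSmis: high bits pushed first, low bits pushed second.
  uint64_t high = (src >> (64 - kSmiShift)) << kSmiShift;
  uint64_t low = src << kSmiShift;
  // PopInt64AsTwoSmis: low bits popped first, then the high bits are merged in.
  uint64_t dst = ((high >> kSmiShift) << (64 - kSmiShift)) | (low >> kSmiShift);
  assert(dst == src);
  return 0;
}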


bool MacroAssembler::IsUnsafeSmiOperator(Token::Value op) {
  return (op == Token::ADD || op == Token::SUB || op == Token::MUL ||
          op == Token::DIV || (SmiValuesAre31Bits() && op == Token::SHL) ||
          op == Token::SHR);
}


// End of smi tagging, untagging and tag detection.
// ----------------------------------------------------------------------------


void MacroAssembler::JumpIfNotString(Register object,
                                     Register object_map,
                                     Label* not_string,
                                     Label::Distance near_jump) {
  Condition is_smi = CheckSmi(object);
  j(is_smi, not_string, near_jump);
  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
  j(above_equal, not_string, near_jump);
}

(...skipping 237 matching lines...)
}


void MacroAssembler::Drop(int stack_elements) {
  if (stack_elements > 0) {
    addq(rsp, Immediate(stack_elements * kPointerSize));
  }
}


void MacroAssembler::Jump(ExternalReference ext) {
  LoadAddress(kScratchRegister, ext);
  jmp(kScratchRegister);
}


void MacroAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
  movq(kScratchRegister, destination, rmode);
  jmp(kScratchRegister);
}

(...skipping 2190 matching lines...)
  j(greater, &no_memento_available);
  CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
              Heap::kAllocationMementoMapRootIndex);
  bind(&no_memento_available);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64