Chromium Code Reviews

Unified Diff: src/mips/macro-assembler-mips.cc

Issue 1779713009: Implement optional TurboFan UnalignedLoad and UnalignedStore operators (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Fix review remarks Created 4 years, 9 months ago
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <limits.h>  // For LONG_MIN, LONG_MAX.

#if V8_TARGET_ARCH_MIPS

#include "src/base/bits.h"
(...skipping 1147 matching lines...)
    DCHECK(!tmp.is(rt));
    sll(tmp, rs, sa);
    Addu(rd, rt, tmp);
  }
}


// ------------Pseudo-instructions-------------

void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
-  lwr(rd, rs);
-  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    lw(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 3)) {
+      if (!rd.is(rs.rm())) {
+        lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+        lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+      } else {
+        lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+        lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+        mov(rd, at);
+      }
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+      lwr(rd, MemOperand(at, kMipsLwrOffset));
+      lwl(rd, MemOperand(at, kMipsLwlOffset));
+    }
+  }
}


void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
-  swr(rd, rs);
-  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    sw(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 3)) {
+      swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
+      swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
+    } else {
+      LoadRegPlusOffsetToAt(rs);
+      swr(rd, MemOperand(at, kMipsSwrOffset));
+      swl(rd, MemOperand(at, kMipsSwlOffset));
+    }
+  }
+}
+
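Note on Ulw/Usw: on MIPS32r6 a plain lw/sw suffices, since r6 handles unaligned word accesses in hardware. On the older variants the word is assembled with the lwr/lwl (swr/swl) pair, whose byte offsets kMipsLwrOffset/kMipsLwlOffset are endianness-dependent constants, and the load detours through at when rd aliases the base register, so the first partial load cannot clobber the address before the second executes. A minimal C++ sketch of the value the little-endian load path produces (the helper name UnalignedLoad32LE is illustrative, not part of the CL):

    #include <cstdint>

    // Assemble four bytes from an arbitrarily aligned address, least
    // significant byte first: the result Ulw leaves in rd on a
    // little-endian target.
    uint32_t UnalignedLoad32LE(const uint8_t* p) {
      return static_cast<uint32_t>(p[0]) |
             (static_cast<uint32_t>(p[1]) << 8) |
             (static_cast<uint32_t>(p[2]) << 16) |
             (static_cast<uint32_t>(p[3]) << 24);
    }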
+void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    lh(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(at, rs);
+      lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+      lb(rd, rs);
+#endif
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lb(rd, MemOperand(at, 1));
+      lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lb(rd, MemOperand(at, 0));
+      lbu(at, MemOperand(at, 1));
+#endif
+    }
+    sll(rd, rd, 8);
+    or_(rd, rd, at);
+  }
+}
+
+void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    lhu(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(at, rs);
+      lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+      lbu(rd, rs);
+#endif
+    } else {  // Offset > 16 bits, use multiple instructions to load.
+      LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(rd, MemOperand(at, 1));
+      lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(rd, MemOperand(at, 0));
+      lbu(at, MemOperand(at, 1));
+#endif
+    }
+    sll(rd, rd, 8);
+    or_(rd, rd, at);
+  }
+}
+
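Note on Ulh/Ulhu: pre-r6, the halfword is built from two byte loads. The less significant byte is zero-extended into at; the more significant byte goes into rd, sign-extended via lb for Ulh and zero-extended via lbu for Ulhu; sll and or_ then merge the two halves. A sketch of the little-endian result (helper names are illustrative):

    #include <cstdint>

    // Ulh on a little-endian target: high byte sign-extended (lb), low
    // byte zero-extended (lbu), merged by the sll/or_ pair.
    int32_t UnalignedLoadHalfSignedLE(const uint8_t* p) {
      int32_t high = static_cast<int8_t>(p[1]);  // lb: sign-extends
      uint32_t low = p[0];                       // lbu: zero-extends
      return static_cast<int32_t>((static_cast<uint32_t>(high) << 8) | low);
    }

    // Ulhu differs only in zero-extending the high byte (lbu, not lb).
    uint32_t UnalignedLoadHalfUnsignedLE(const uint8_t* p) {
      return (static_cast<uint32_t>(p[1]) << 8) | p[0];
    }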
+void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  DCHECK(!rs.rm().is(scratch));
+  DCHECK(!scratch.is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    sh(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    MemOperand source = rs;
+    // If offset > 16 bits, load address to at with offset 0.
+    if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
+      LoadRegPlusOffsetToAt(rs);
+      source = MemOperand(at, 0);
+    }
+
+    if (!scratch.is(rd)) {
+      mov(scratch, rd);
+    }
+
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+    sb(scratch, source);
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+    srl(scratch, scratch, 8);
+    sb(scratch, source);
+#endif
+  }
+}
+
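Note on Ush: the value is stored as two bytes with a logical shift in between, so a scratch copy of rd is needed (the srl would otherwise destroy the caller's value); the DCHECKs rule out aliasing with at and with the base register. Portable equivalent of the little-endian path (the helper name is illustrative):

    #include <cstdint>

    // Ush on a little-endian target: two byte stores with a shift in
    // between; the srl is why the value needs a scratch copy.
    void UnalignedStoreHalfLE(uint8_t* p, uint32_t value) {
      p[0] = static_cast<uint8_t>(value);       // sb(scratch, source)
      p[1] = static_cast<uint8_t>(value >> 8);  // srl by 8, then second sb
    }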
+void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  if (IsMipsArchVariant(kMips32r6)) {
+    lwc1(fd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    Ulw(scratch, rs);
+    mtc1(scratch, fd);
+  }
+}
+
+void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  if (IsMipsArchVariant(kMips32r6)) {
+    swc1(fd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    mfc1(scratch, fd);
+    Usw(scratch, rs);
+  }
+}
+
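Note on Ulwc1/Uswc1: unaligned single-precision accesses are routed through a general-purpose register, Ulw followed by mtc1 for the load and mfc1 followed by Usw for the store. A sketch of that round trip (memcpy stands in for the unaligned integer access; the helper name is illustrative):

    #include <cstdint>
    #include <cstring>

    // Single-precision path: fetch the 32 raw bits through the integer
    // route (Ulw), then reinterpret them as a float (mtc1's role).
    float UnalignedLoadFloat(const uint8_t* p) {
      uint32_t bits;
      std::memcpy(&bits, p, sizeof(bits));          // stands in for Ulw
      float result;
      std::memcpy(&result, &bits, sizeof(result));  // stands in for mtc1
      return result;
    }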
+void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  DCHECK(!scratch.is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    ldc1(fd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
+    mtc1(scratch, fd);
+    Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
+    Mthc1(scratch, fd);
+  }
+}
+
+void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  DCHECK(!scratch.is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    sdc1(fd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    mfc1(scratch, fd);
+    Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
+    Mfhc1(scratch, fd);
+    Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
+  }
}


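Note on Uldc1/Usdc1: a double moves as two 32-bit words. Register::kMantissaOffset and Register::kExponentOffset pick which word sits at the lower address (the mantissa word at +0 on little-endian), while mtc1/Mthc1 and mfc1/Mfhc1 access the low and high halves of the FPU register. A little-endian sketch (the helper name is illustrative):

    #include <cstdint>
    #include <cstring>

    // Double path, little-endian: mantissa (low) word at offset 0,
    // exponent (high) word at offset 4, each fetched unaligned.
    double UnalignedLoadDoubleLE(const uint8_t* p) {
      uint32_t lo, hi;
      std::memcpy(&lo, p, sizeof(lo));      // Ulw at kMantissaOffset; mtc1
      std::memcpy(&hi, p + 4, sizeof(hi));  // Ulw at kExponentOffset; Mthc1
      uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
      double result;
      std::memcpy(&result, &bits, sizeof(result));
      return result;
    }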
void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
  AllowDeferredHandleDereference smi_check;
  if (value->IsSmi()) {
    li(dst, Operand(value), mode);
  } else {
    DCHECK(value->IsHeapObject());
    if (isolate()->heap()->InNewSpace(*value)) {
(...skipping 4837 matching lines...)
  if (mag.shift > 0) sra(result, result, mag.shift);
  srl(at, dividend, 31);
  Addu(result, result, Operand(at));
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS
