OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS64 | 7 #if V8_TARGET_ARCH_MIPS64 |
8 | 8 |
9 #include "src/base/division-by-constant.h" | 9 #include "src/base/division-by-constant.h" |
10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
(...skipping 1276 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
1287 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask)); | 1287 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask)); |
1288 } else if (!(j.imm64_ & kHiMask)) { | 1288 } else if (!(j.imm64_ & kHiMask)) { |
1289 ori(rd, zero_reg, (j.imm64_ & kImm16Mask)); | 1289 ori(rd, zero_reg, (j.imm64_ & kImm16Mask)); |
1290 } else if (!(j.imm64_ & kImm16Mask)) { | 1290 } else if (!(j.imm64_ & kImm16Mask)) { |
1291 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); | 1291 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); |
1292 } else { | 1292 } else { |
1293 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); | 1293 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask); |
1294 ori(rd, rd, (j.imm64_ & kImm16Mask)); | 1294 ori(rd, rd, (j.imm64_ & kImm16Mask)); |
1295 } | 1295 } |
1296 } else { | 1296 } else { |
1297 if (is_int48(j.imm64_)) { | 1297 if (kArchVariant == kMips64r6) { |
1298 if ((j.imm64_ >> 32) & kImm16Mask) { | 1298 int64_t imm = j.imm64_; |
ivica.bogosavljevic
2015/12/21 16:15:55
This is the optimized case, so we need to get here
Alan Li
2016/01/20 14:26:59
Done.
| |
1299 lui(rd, (j.imm64_ >> 32) & kImm16Mask); | 1299 lui(rd, (imm >> kLuiShift) & kImm16Mask); |
1300 if ((j.imm64_ >> 16) & kImm16Mask) { | 1300 if (imm & kImm16Mask) { |
1301 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | 1301 ori(rd, rd, (imm & kImm16Mask)); |
1302 } | |
1303 if ((imm >> 31) & 0x1) { | |
ivica.bogosavljevic
2015/12/21 16:15:56
Personally, I don't like this shifting bit magic s
Alan Li
2016/01/20 14:26:59
Done.
| |
1304 imm = (imm >> 32) + 1; | |
1305 } else { | |
1306 imm = imm >> 32; | |
1307 } | |
1308 dahi(rd, imm & kImm16Mask); | |
1309 if (!((j.imm64_ >> 48 == 0xffff && (j.imm64_ >> 47) & 0x1) || |
1310 (j.imm64_ >> 48 == 0x0000 && ((j.imm64_ >> 47) & 0x1) == 0x0))) { | |
1311 if ((imm >> 15) & 0x1) { | |
1312 imm = (imm >> 16) + 1; | |
1313 } else { | |
1314 imm = imm >> 16; | |
1315 } | |
1316 dati(rd, imm & kImm16Mask); | |
1317 } | |
1318 } else { | |
1319 if (is_int48(j.imm64_)) { | |
1320 if ((j.imm64_ >> 32) & kImm16Mask) { | |
1321 lui(rd, (j.imm64_ >> 32) & kImm16Mask); | |
1322 if ((j.imm64_ >> 16) & kImm16Mask) { | |
1323 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | |
1324 } | |
1325 } else { | |
1326 ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask); | |
1327 } | |
1328 dsll(rd, rd, 16); | |
1329 if (j.imm64_ & kImm16Mask) { | |
1330 ori(rd, rd, j.imm64_ & kImm16Mask); | |
1302 } | 1331 } |
1303 } else { | 1332 } else { |
1304 ori(rd, zero_reg, (j.imm64_ >> 16) & kImm16Mask); | 1333 lui(rd, (j.imm64_ >> 48) & kImm16Mask); |
1305 } | 1334 if ((j.imm64_ >> 32) & kImm16Mask) { |
1306 dsll(rd, rd, 16); | 1335 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask); |
1307 if (j.imm64_ & kImm16Mask) { | 1336 } |
1308 ori(rd, rd, j.imm64_ & kImm16Mask); | 1337 if ((j.imm64_ >> 16) & kImm16Mask) { |
1309 } | |
1310 } else { | |
1311 lui(rd, (j.imm64_ >> 48) & kImm16Mask); | |
1312 if ((j.imm64_ >> 32) & kImm16Mask) { | |
1313 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask); | |
1314 } | |
1315 if ((j.imm64_ >> 16) & kImm16Mask) { | |
1316 dsll(rd, rd, 16); | |
1317 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | |
1318 if (j.imm64_ & kImm16Mask) { | |
1319 dsll(rd, rd, 16); | 1338 dsll(rd, rd, 16); |
1320 ori(rd, rd, j.imm64_ & kImm16Mask); | 1339 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); |
1340 if (j.imm64_ & kImm16Mask) { | |
1341 dsll(rd, rd, 16); | |
1342 ori(rd, rd, j.imm64_ & kImm16Mask); | |
1343 } else { | |
1344 dsll(rd, rd, 16); | |
1345 } | |
1321 } else { | 1346 } else { |
1322 dsll(rd, rd, 16); | 1347 if (j.imm64_ & kImm16Mask) { |
1323 } | 1348 dsll32(rd, rd, 0); |
1324 } else { | 1349 ori(rd, rd, j.imm64_ & kImm16Mask); |
1325 if (j.imm64_ & kImm16Mask) { | 1350 } else { |
1326 dsll32(rd, rd, 0); | 1351 dsll32(rd, rd, 0); |
1327 ori(rd, rd, j.imm64_ & kImm16Mask); | 1352 } |
1328 } else { | |
1329 dsll32(rd, rd, 0); | |
1330 } | 1353 } |
1331 } | 1354 } |
1332 } | 1355 } |
1333 } | 1356 } |
1334 } else if (MustUseReg(j.rmode_)) { | 1357 } else if (MustUseReg(j.rmode_)) { |
1335 RecordRelocInfo(j.rmode_, j.imm64_); | 1358 RecordRelocInfo(j.rmode_, j.imm64_); |
1336 lui(rd, (j.imm64_ >> 32) & kImm16Mask); | 1359 lui(rd, (j.imm64_ >> 32) & kImm16Mask); |
1337 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | 1360 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); |
1338 dsll(rd, rd, 16); | 1361 dsll(rd, rd, 16); |
1339 ori(rd, rd, j.imm64_ & kImm16Mask); | 1362 ori(rd, rd, j.imm64_ & kImm16Mask); |
1340 } else if (mode == ADDRESS_LOAD) { | 1363 } else if (mode == ADDRESS_LOAD) { |
1341 // We always need the same number of instructions as we may need to patch | 1364 // We always need the same number of instructions as we may need to patch |
1342 // this code to load another value which may need all 4 instructions. | 1365 // this code to load another value which may need all 4 instructions. |
1343 lui(rd, (j.imm64_ >> 32) & kImm16Mask); | 1366 lui(rd, (j.imm64_ >> 32) & kImm16Mask); |
1344 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | 1367 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); |
1345 dsll(rd, rd, 16); | 1368 dsll(rd, rd, 16); |
1346 ori(rd, rd, j.imm64_ & kImm16Mask); | 1369 ori(rd, rd, j.imm64_ & kImm16Mask); |
1347 } else { | 1370 } else { |
1348 lui(rd, (j.imm64_ >> 48) & kImm16Mask); | 1371 if (kArchVariant == kMips64r6) { |
1349 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask); | 1372 int64_t imm = j.imm64_; |
1350 dsll(rd, rd, 16); | 1373 lui(rd, (imm >> kLuiShift) & kImm16Mask); |
1351 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | 1374 if (imm & kImm16Mask) { |
ivica.bogosavljevic
2015/12/21 16:15:56
Looks good
Alan Li
2016/01/20 14:26:59
Acknowledged.
| |
1352 dsll(rd, rd, 16); | 1375 ori(rd, rd, (imm & kImm16Mask)); |
1353 ori(rd, rd, j.imm64_ & kImm16Mask); | 1376 } |
1377 if ((imm >> 31) & 0x1) { | |
1378 imm = (imm >> 32) + 1; | |
1379 } else { | |
1380 imm = imm >> 32; | |
1381 } | |
1382 dahi(rd, imm & kImm16Mask); | |
1383 if ((imm >> 15) & 0x1) { | |
1384 imm = (imm >> 16) + 1; | |
1385 } else { | |
1386 imm = imm >> 16; | |
1387 } | |
1388 dati(rd, imm & kImm16Mask); | |
1389 } else { | |
1390 lui(rd, (j.imm64_ >> 48) & kImm16Mask); | |
ivica.bogosavljevic
2015/12/21 16:15:55
This is the old code, nothing is changed here exce
Alan Li
2016/01/20 14:26:59
The old code is used in non-r6 architecture.
On 2
| |
1391 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask); | |
1392 dsll(rd, rd, 16); | |
1393 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask); | |
1394 dsll(rd, rd, 16); | |
1395 ori(rd, rd, j.imm64_ & kImm16Mask); | |
1396 } | |
1354 } | 1397 } |
1355 } | 1398 } |
1356 | 1399 |
1357 | 1400 |
1358 void MacroAssembler::MultiPush(RegList regs) { | 1401 void MacroAssembler::MultiPush(RegList regs) { |
1359 int16_t num_to_push = NumberOfBitsSet(regs); | 1402 int16_t num_to_push = NumberOfBitsSet(regs); |
1360 int16_t stack_offset = num_to_push * kPointerSize; | 1403 int16_t stack_offset = num_to_push * kPointerSize; |
1361 | 1404 |
1362 Dsubu(sp, sp, Operand(stack_offset)); | 1405 Dsubu(sp, sp, Operand(stack_offset)); |
1363 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { | 1406 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { |
(...skipping 4957 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
6321 if (mag.shift > 0) sra(result, result, mag.shift); | 6364 if (mag.shift > 0) sra(result, result, mag.shift); |
6322 srl(at, dividend, 31); | 6365 srl(at, dividend, 31); |
6323 Addu(result, result, Operand(at)); | 6366 Addu(result, result, Operand(at)); |
6324 } | 6367 } |
6325 | 6368 |
6326 | 6369 |
6327 } // namespace internal | 6370 } // namespace internal |
6328 } // namespace v8 | 6371 } // namespace v8 |
6329 | 6372 |
6330 #endif // V8_TARGET_ARCH_MIPS64 | 6373 #endif // V8_TARGET_ARCH_MIPS64 |
OLD | NEW |