OLD | NEW |
1 // Copyright 2006-2009 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
11 // with the distribution. | 11 // with the distribution. |
12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
15 // | 15 // |
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
| 28 #include <limits.h> // For LONG_MIN, LONG_MAX. |
| 29 |
28 #include "v8.h" | 30 #include "v8.h" |
29 | 31 |
30 #if defined(V8_TARGET_ARCH_ARM) | 32 #if defined(V8_TARGET_ARCH_ARM) |
31 | 33 |
32 #include "bootstrapper.h" | 34 #include "bootstrapper.h" |
33 #include "codegen-inl.h" | 35 #include "codegen-inl.h" |
34 #include "debug.h" | 36 #include "debug.h" |
35 #include "runtime.h" | 37 #include "runtime.h" |
36 | 38 |
37 namespace v8 { | 39 namespace v8 { |
(...skipping 1288 matching lines...) |
1326 void MacroAssembler::SmiToDoubleVFPRegister(Register smi, | 1328 void MacroAssembler::SmiToDoubleVFPRegister(Register smi, |
1327 DwVfpRegister value, | 1329 DwVfpRegister value, |
1328 Register scratch1, | 1330 Register scratch1, |
1329 SwVfpRegister scratch2) { | 1331 SwVfpRegister scratch2) { |
1330 mov(scratch1, Operand(smi, ASR, kSmiTagSize)); | 1332 mov(scratch1, Operand(smi, ASR, kSmiTagSize)); |
1331 vmov(scratch2, scratch1); | 1333 vmov(scratch2, scratch1); |
1332 vcvt_f64_s32(value, scratch2); | 1334 vcvt_f64_s32(value, scratch2); |
1333 } | 1335 } |
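For reference, a plain C++ sketch of what this three-instruction sequence computes, assuming V8's 32-bit Smi encoding (the value stored shifted left by kSmiTagSize == 1, tag bit 0). The helper is hypothetical, not part of the V8 API:

  #include <stdint.h>

  // mov ..., ASR kSmiTagSize untags the Smi; vmov + vcvt.f64.s32 then
  // convert the signed 32-bit result to a double in a VFP register.
  double SmiToDouble(int32_t tagged_smi) {
    int32_t untagged = tagged_smi >> 1;  // arithmetic shift drops the tag bit
    return static_cast<double>(untagged);
  }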
1334 | 1336 |
1335 | 1337 |
| 1338 // Tries to get a signed int32 out of a double-precision floating-point |
| 1339 // heap number. Rounds towards zero. Branches to 'not_int32' if the double |
| 1340 // is outside the 32-bit signed integer range. |
| 1341 void MacroAssembler::ConvertToInt32(Register source, |
| 1342 Register dest, |
| 1343 Register scratch, |
| 1344 Register scratch2, |
| 1345 Label* not_int32) { |
| 1346 if (CpuFeatures::IsSupported(VFP3)) { |
| 1347 CpuFeatures::Scope scope(VFP3); |
| 1348 sub(scratch, source, Operand(kHeapObjectTag)); |
| 1349 vldr(d0, scratch, HeapNumber::kValueOffset); |
| 1350 vcvt_s32_f64(s0, d0); |
| 1351 vmov(dest, s0); |
| 1352 // The signed vcvt instruction saturates to the minimum (0x80000000) or |
| 1353 // maximum (0x7fffffff) signed 32-bit integer when the double is out of |
| 1354 // range. After subtracting one, the minimum signed integer becomes the |
| 1355 // maximum signed integer. |
| 1356 sub(scratch, dest, Operand(1)); |
| 1357 cmp(scratch, Operand(LONG_MAX - 1)); |
| 1358 // If equal then dest was LONG_MAX, if greater dest was LONG_MIN. |
| 1359 b(ge, not_int32); |
| 1360 } else { |
| 1361 // This code is faster for doubles that are in the ranges -0x7fffffff to |
| 1362 // -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds almost exactly |
| 1363 // to the range of signed int32 values that are not Smis. Jumps to the |
| 1364 // label 'not_int32' if the double isn't in the range -0x80000000.0 to |
| 1365 // 0x80000000.0 (excluding the endpoints). |
| 1366 Label right_exponent, done; |
| 1367 // Get exponent word. |
| 1368 ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); |
| 1369 // Get exponent alone in scratch2. |
| 1370 Ubfx(scratch2, |
| 1371 scratch, |
| 1372 HeapNumber::kExponentShift, |
| 1373 HeapNumber::kExponentBits); |
| 1374 // Load dest with zero. We use this either for the final shift or |
| 1375 // for the answer. |
| 1376 mov(dest, Operand(0)); |
| 1377 // Check whether the exponent matches a 32-bit signed int that is not a Smi. |
| 1378 // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is |
| 1379 // the exponent that we are fastest at and also the highest exponent we can |
| 1380 // handle here. |
| 1381 const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30; |
| 1382 // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we |
| 1383 // split it up to avoid a constant pool entry. You can't do that in general |
| 1384 // for cmp because of the overflow flag, but we know the exponent is in the |
| 1385 // range 0-2047 so there is no overflow. |
| 1386 int fudge_factor = 0x400; |
| 1387 sub(scratch2, scratch2, Operand(fudge_factor)); |
| 1388 cmp(scratch2, Operand(non_smi_exponent - fudge_factor)); |
| 1389 // If we have a match of the int32-but-not-Smi exponent then skip some |
| 1390 // logic. |
| 1391 b(eq, &right_exponent); |
| 1392 // If the exponent is higher than that then go to slow case. This catches |
| 1393 // numbers that don't fit in a signed int32, infinities and NaNs. |
| 1394 b(gt, not_int32); |
| 1395 |
| 1396 // We know the exponent is smaller than 30 (biased). If it is less than |
| 1397 // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e. |
| 1398 // it rounds to zero. |
| 1399 const uint32_t zero_exponent = HeapNumber::kExponentBias + 0; |
| 1400 sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC); |
| 1401 // Dest already has a Smi zero. |
| 1402 b(lt, &done); |
| 1403 |
| 1404 // We have an exponent between 0 and 30 in scratch2. Subtract from 30 to |
| 1405 // get how much to shift down. |
| 1406 rsb(dest, scratch2, Operand(30)); |
| 1407 |
| 1408 bind(&right_exponent); |
| 1409 // Get the top bits of the mantissa. |
| 1410 and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); |
| 1411 // Put back the implicit 1. |
| 1412 orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift)); |
| 1413 // Shift up the mantissa bits to take up the space the exponent used to |
| 1414 // take. We just orred in the implicit bit, which took care of one bit; we |
| 1415 // also want to leave the sign bit 0, so we subtract 2 bits from the shift |
| 1416 // distance. |
| 1417 const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2; |
| 1418 mov(scratch2, Operand(scratch2, LSL, shift_distance)); |
| 1419 // Put sign in zero flag. |
| 1420 tst(scratch, Operand(HeapNumber::kSignMask)); |
| 1421 // Get the second half of the double. For some exponents we don't |
| 1422 // actually need this because the bits get shifted out again, but |
| 1423 // it's probably slower to test than just to do it. |
| 1424 ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); |
| 1425 // Shift down 22 bits to get the top 10 bits of the low mantissa word. |
| 1426 orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance)); |
| 1427 // Move down according to the exponent. |
| 1428 mov(dest, Operand(scratch, LSR, dest)); |
| 1429 // Fix sign if sign bit was set. |
| 1430 rsb(dest, dest, Operand(0), LeaveCC, ne); |
| 1431 bind(&done); |
| 1432 } |
| 1433 } |
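Two pieces of ConvertToInt32 are easier to see in portable form. In the VFP3 path, vcvt.s32.f64 saturates out-of-range inputs, and the subtract-one trick turns "result is INT32_MIN or INT32_MAX" into a single signed compare. The fallback path decodes the exponent and mantissa by hand. A minimal C++ sketch of both, assuming the IEEE-754 binary64 layout that HeapNumber stores; the helper names are hypothetical, and the unsigned arithmetic sidesteps the signed-overflow UB that the hardware sub does not have:

  #include <stdint.h>

  // Saturation check (VFP3 path): only INT32_MAX and INT32_MIN can be
  // saturated results. dest - 1 maps INT32_MAX to INT32_MAX - 1 (equal) and
  // wraps INT32_MIN to INT32_MAX (greater), so one signed 'ge' compare
  // mirrors the sub/cmp/b(ge, not_int32) sequence above.
  bool MayBeOutOfInt32Range(int32_t dest) {
    int32_t minus_one = static_cast<int32_t>(static_cast<uint32_t>(dest) - 1u);
    return minus_one >= INT32_MAX - 1;
  }

  // Fallback path: decode the double's bits and truncate towards zero.
  // Returns false for values outside (-2^31, 2^31), infinities, and NaNs,
  // matching the b(gt, not_int32) branch.
  bool TruncateDoubleToInt32(uint64_t bits, int32_t* dest) {
    uint32_t exponent = static_cast<uint32_t>(bits >> 52) & 0x7ff;
    if (exponent >= 1023 + 31) return false;          // too big, inf, or NaN
    if (exponent < 1023) { *dest = 0; return true; }  // |x| < 1.0 rounds to 0
    // Implicit 1 plus the 52 stored mantissa bits; shift so that only the
    // integer part of 1.mantissa * 2^(exponent - 1023) survives.
    uint64_t mantissa =
        (bits & ((uint64_t{1} << 52) - 1)) | (uint64_t{1} << 52);
    int32_t magnitude =
        static_cast<int32_t>(mantissa >> (52 - (exponent - 1023)));
    *dest = (bits >> 63) ? -magnitude : magnitude;
    return true;
  }

Note that the saturation check is deliberately conservative: a double exactly equal to INT32_MIN (or INT32_MAX) converts exactly rather than saturating, but still takes the slow path, which is why the fallback comment excludes the endpoints as well.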
| 1434 |
| 1435 |
1336 void MacroAssembler::GetLeastBitsFromSmi(Register dst, | 1436 void MacroAssembler::GetLeastBitsFromSmi(Register dst, |
1337 Register src, | 1437 Register src, |
1338 int num_least_bits) { | 1438 int num_least_bits) { |
1339 if (CpuFeatures::IsSupported(ARMv7)) { | 1439 if (CpuFeatures::IsSupported(ARMv7)) { |
1340 ubfx(dst, src, kSmiTagSize, num_least_bits); | 1440 ubfx(dst, src, kSmiTagSize, num_least_bits); |
1341 } else { | 1441 } else { |
1342 mov(dst, Operand(src, ASR, kSmiTagSize)); | 1442 mov(dst, Operand(src, ASR, kSmiTagSize)); |
1343 and_(dst, dst, Operand((1 << num_least_bits) - 1)); | 1443 and_(dst, dst, Operand((1 << num_least_bits) - 1)); |
1344 } | 1444 } |
1345 } | 1445 } |
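Both branches of GetLeastBitsFromSmi compute the same value: the low num_least_bits of the untagged Smi. On ARMv7 a single ubfx extracts bits [1, num_least_bits] directly, while the fallback untags with an arithmetic shift and masks. A one-line C++ sketch (hypothetical helper, assuming kSmiTagSize == 1):

  #include <stdint.h>

  // Equivalent of both code paths: drop the tag bit, keep the low n bits.
  uint32_t LeastBitsFromSmi(int32_t tagged_smi, int num_least_bits) {
    return static_cast<uint32_t>(tagged_smi >> 1) &
           ((1u << num_least_bits) - 1u);
  }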
(...skipping 516 matching lines...) |
1862 | 1962 |
1863 void CodePatcher::Emit(Address addr) { | 1963 void CodePatcher::Emit(Address addr) { |
1864 masm()->emit(reinterpret_cast<Instr>(addr)); | 1964 masm()->emit(reinterpret_cast<Instr>(addr)); |
1865 } | 1965 } |
1866 #endif // ENABLE_DEBUGGER_SUPPORT | 1966 #endif // ENABLE_DEBUGGER_SUPPORT |
1867 | 1967 |
1868 | 1968 |
1869 } } // namespace v8::internal | 1969 } } // namespace v8::internal |
1870 | 1970 |
1871 #endif // V8_TARGET_ARCH_ARM | 1971 #endif // V8_TARGET_ARCH_ARM |