OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1319 matching lines...)
1330 __ Ret(); | 1330 __ Ret(); |
1331 | 1331 |
1332 StubRuntimeCallHelper call_helper; | 1332 StubRuntimeCallHelper call_helper; |
1333 char_at_generator.GenerateSlow(masm, call_helper); | 1333 char_at_generator.GenerateSlow(masm, call_helper); |
1334 | 1334 |
1335 __ bind(&miss); | 1335 __ bind(&miss); |
1336 GenerateMiss(masm); | 1336 GenerateMiss(masm); |
1337 } | 1337 } |
1338 | 1338 |
1339 | 1339 |
| 1340 // Convert an unsigned integer with a given number of leading zeroes in its |
| 1341 // binary representation to an IEEE 754 double. |
| 1342 // The integer to convert is passed in register hiword. |
| 1343 // The resulting double is returned in the register pair hiword:loword. |
| 1344 // This function does not work correctly for 0. |
| 1345 static void GenerateUInt2Double(MacroAssembler* masm, |
| 1346 Register hiword, |
| 1347 Register loword, |
| 1348 Register scratch, |
| 1349 int leading_zeroes) { |
| 1350 const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; |
| 1351 const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; |
| 1352 |
| 1353 const int mantissa_shift_for_hi_word = |
| 1354 meaningful_bits - HeapNumber::kMantissaBitsInTopWord; |
| 1355 |
| 1356 const int mantissa_shift_for_lo_word = |
| 1357 kBitsPerInt - mantissa_shift_for_hi_word; |
| 1358 |
| 1359 __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); |
| 1360 if (mantissa_shift_for_hi_word > 0) { |
| 1361 __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); |
| 1362 __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word)); |
| 1363 } else { |
| 1364 __ mov(loword, Operand(0, RelocInfo::NONE)); |
| 1365 __ orr(hiword, scratch, Operand(hiword, LSL, -mantissa_shift_for_hi_word)); |
| 1366 } |
| 1367 |
| 1368 // If the least significant bit of the biased exponent was not 1, it has |
| 1369 // been corrupted by the most significant bit of the mantissa; fix it here. |
| 1370 if (!(biased_exponent & 1)) { |
| 1371 __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); |
| 1372 } |
| 1373 } |
| 1374 |
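Editor's note: the bit manipulation in GenerateUInt2Double is easier to check in plain C++. The sketch below mirrors the stub's algorithm for a nonzero value with a known number of leading zeros; UInt2DoubleBits is a hypothetical name, and the HeapNumber constants are inlined with their values on ARM.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Mirrors GenerateUInt2Double: encode a nonzero uint32 with exactly
    // 'leading_zeroes' leading zero bits as a raw IEEE 754 double (hi:lo).
    // Editor's sketch, not part of the patch.
    static void UInt2DoubleBits(uint32_t value, int leading_zeroes,
                                uint32_t* hi, uint32_t* lo) {
      const int kBitsPerInt = 32;
      const int kExponentBias = 1023;         // HeapNumber::kExponentBias
      const int kExponentShift = 20;          // HeapNumber::kExponentShift
      const int kMantissaBitsInTopWord = 20;  // HeapNumber::kMantissaBitsInTopWord

      const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
      const int biased_exponent = kExponentBias + meaningful_bits;
      const int hi_shift = meaningful_bits - kMantissaBitsInTopWord;

      uint32_t exponent = (uint32_t)biased_exponent << kExponentShift;
      if (hi_shift > 0) {
        *lo = value << (kBitsPerInt - hi_shift);
        *hi = exponent | (value >> hi_shift);
      } else {
        *lo = 0;
        *hi = exponent | (value << -hi_shift);
      }
      // The integer's leading 1 bit (the double's implicit bit) lands exactly
      // on the exponent's least significant bit; clear it when that exponent
      // bit should be 0.
      if (!(biased_exponent & 1)) *hi &= ~(1u << kExponentShift);
    }

    int main() {
      uint32_t hi, lo;
      UInt2DoubleBits(0x80000000u, 0, &hi, &lo);  // The box_int_0 case below.
      uint64_t bits = ((uint64_t)hi << 32) | lo;
      double d;
      memcpy(&d, &bits, sizeof(d));
      assert(d == 2147483648.0);
      return 0;
    }

The final bic/clear step is the interesting design point: rather than masking the implicit bit out of the mantissa before or'ing, the stub lets it overlap the exponent's low bit and repairs that bit afterwards, saving an instruction.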
| 1375 |
| 1376 void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, |
| 1377 ExternalArrayType array_type) { |
| 1378 // ---------- S t a t e -------------- |
| 1379 // -- lr : return address |
| 1380 // -- r0 : key |
| 1381 // -- r1 : receiver |
| 1382 // ----------------------------------- |
| 1383 Label slow, failed_allocation; |
| 1384 |
| 1385 Register key = r0; |
| 1386 Register receiver = r1; |
| 1387 |
| 1388 // Check that the object isn't a smi. |
| 1389 __ BranchOnSmi(receiver, &slow); |
| 1390 |
| 1391 // Check that the key is a smi. |
| 1392 __ BranchOnNotSmi(key, &slow); |
| 1393 |
| 1394 // Check that the object is a JS object. Load map into r2. |
| 1395 __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE); |
| 1396 __ b(lt, &slow); |
| 1397 |
| 1398 // Check that the receiver does not require access checks. We need |
| 1399 // to check this explicitly since this generic stub does not perform |
| 1400 // map checks. |
| 1401 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); |
| 1402 __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded)); |
| 1403 __ b(ne, &slow); |
| 1404 |
| 1405 // Check that the elements array is the appropriate type of |
| 1406 // ExternalArray. |
| 1407 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 1408 __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); |
| 1409 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); |
| 1410 __ cmp(r2, ip); |
| 1411 __ b(ne, &slow); |
| 1412 |
| 1413 // Check that the index is in range. |
| 1414 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); |
| 1415 __ cmp(ip, Operand(key, ASR, kSmiTagSize)); |
| 1416 // Unsigned comparison catches both negative and too-large values. |
| 1417 __ b(ls, &slow); |
| 1418 |
| 1419 // r3: elements array |
| 1420 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); |
| 1421 // r3: base pointer of external storage |
| 1422 |
| 1423 // We do not untag the smi key; instead we work with it |
| 1424 // as if it were premultiplied by 2. |
| 1425 ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); |
| 1426 |
| 1427 Register value = r2; |
| 1428 switch (array_type) { |
| 1429 case kExternalByteArray: |
| 1430 __ ldrsb(value, MemOperand(r3, key, LSR, 1)); |
| 1431 break; |
| 1432 case kExternalUnsignedByteArray: |
| 1433 __ ldrb(value, MemOperand(r3, key, LSR, 1)); |
| 1434 break; |
| 1435 case kExternalShortArray: |
| 1436 __ ldrsh(value, MemOperand(r3, key, LSL, 0)); |
| 1437 break; |
| 1438 case kExternalUnsignedShortArray: |
| 1439 __ ldrh(value, MemOperand(r3, key, LSL, 0)); |
| 1440 break; |
| 1441 case kExternalIntArray: |
| 1442 case kExternalUnsignedIntArray: |
| 1443 __ ldr(value, MemOperand(r3, key, LSL, 1)); |
| 1444 break; |
| 1445 case kExternalFloatArray: |
| 1446 if (CpuFeatures::IsSupported(VFP3)) { |
| 1447 CpuFeatures::Scope scope(VFP3); |
| 1448 __ add(r2, r3, Operand(key, LSL, 1)); |
| 1449 __ vldr(s0, r2, 0); |
| 1450 } else { |
| 1451 __ ldr(value, MemOperand(r3, key, LSL, 1)); |
| 1452 } |
| 1453 break; |
| 1454 default: |
| 1455 UNREACHABLE(); |
| 1456 break; |
| 1457 } |
| 1458 |
| 1459 // For integer array types: |
| 1460 // r2: value |
| 1461 // For the floating-point array type: |
| 1462 // s0: value (if VFP3 is supported) |
| 1463 // r2: value (if VFP3 is not supported) |
| 1464 |
| 1465 if (array_type == kExternalIntArray) { |
| 1466 // For the Int and UnsignedInt array types, we need to see whether |
| 1467 // the value can be represented in a Smi. If not, we need to convert |
| 1468 // it to a HeapNumber. |
| 1469 Label box_int; |
| 1470 __ cmp(value, Operand(0xC0000000)); |
| 1471 __ b(mi, &box_int); |
| 1472 // Tag integer as smi and return it. |
| 1473 __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
| 1474 __ Ret(); |
| 1475 |
| 1476 __ bind(&box_int); |
| 1477 // Allocate a HeapNumber for the result and perform int-to-double |
| 1478 // conversion. Don't touch r0 or r1 as they are needed if allocation |
| 1479 // fails. |
| 1480 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 1481 __ AllocateHeapNumber(r5, r3, r4, r6, &slow); |
| 1482 // Now we can use r0 for the result as key is not needed any more. |
| 1483 __ mov(r0, r5); |
| 1484 |
| 1485 if (CpuFeatures::IsSupported(VFP3)) { |
| 1486 CpuFeatures::Scope scope(VFP3); |
| 1487 __ vmov(s0, value); |
| 1488 __ vcvt_f64_s32(d0, s0); |
| 1489 __ sub(r3, r0, Operand(kHeapObjectTag)); |
| 1490 __ vstr(d0, r3, HeapNumber::kValueOffset); |
| 1491 __ Ret(); |
| 1492 } else { |
| 1493 WriteInt32ToHeapNumberStub stub(value, r0, r3); |
| 1494 __ TailCallStub(&stub); |
| 1495 } |
| 1496 } else if (array_type == kExternalUnsignedIntArray) { |
| 1497 // The test is different for unsigned int values. Since we need |
| 1498 // the value to be in the range of a positive smi, we can't |
| 1499 // handle either of the top two bits being set in the value. |
| 1500 if (CpuFeatures::IsSupported(VFP3)) { |
| 1501 CpuFeatures::Scope scope(VFP3); |
| 1502 Label box_int, done; |
| 1503 __ tst(value, Operand(0xC0000000)); |
| 1504 __ b(ne, &box_int); |
| 1505 // Tag integer as smi and return it. |
| 1506 __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
| 1507 __ Ret(); |
| 1508 |
| 1509 __ bind(&box_int); |
| 1510 __ vmov(s0, value); |
| 1511 // Allocate a HeapNumber for the result and perform int-to-double |
| 1512 // conversion. Don't use r0 and r1, as AllocateHeapNumber clobbers all |
| 1513 // registers, including when it jumps due to exhausted young space. |
| 1514 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 1515 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); |
| 1516 |
| 1517 __ vcvt_f64_u32(d0, s0); |
| 1518 __ sub(r1, r2, Operand(kHeapObjectTag)); |
| 1519 __ vstr(d0, r1, HeapNumber::kValueOffset); |
| 1520 |
| 1521 __ mov(r0, r2); |
| 1522 __ Ret(); |
| 1523 } else { |
| 1524 // Check whether unsigned integer fits into smi. |
| 1525 Label box_int_0, box_int_1, done; |
| 1526 __ tst(value, Operand(0x80000000)); |
| 1527 __ b(ne, &box_int_0); |
| 1528 __ tst(value, Operand(0x40000000)); |
| 1529 __ b(ne, &box_int_1); |
| 1530 // Tag integer as smi and return it. |
| 1531 __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
| 1532 __ Ret(); |
| 1533 |
| 1534 Register hiword = value; // r2. |
| 1535 Register loword = r3; |
| 1536 |
| 1537 __ bind(&box_int_0); |
| 1538 // Integer does not have leading zeros. |
| 1539 GenerateUInt2Double(masm, hiword, loword, r4, 0); |
| 1540 __ b(&done); |
| 1541 |
| 1542 __ bind(&box_int_1); |
| 1543 // Integer has one leading zero. |
| 1544 GenerateUInt2Double(masm, hiword, loword, r4, 1); |
| 1545 |
| 1546 |
| 1547 __ bind(&done); |
| 1548 // Integer was converted to double in registers hiword:loword. |
| 1549 // Wrap it into a HeapNumber. Don't use r0 and r1, as AllocateHeapNumber |
| 1550 // clobbers all registers, including when it jumps due to exhausted |
| 1551 // young space. |
| 1552 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 1553 __ AllocateHeapNumber(r4, r5, r7, r6, &slow); |
| 1554 |
| 1555 __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); |
| 1556 __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); |
| 1557 |
| 1558 __ mov(r0, r4); |
| 1559 __ Ret(); |
| 1560 } |
| 1561 } else if (array_type == kExternalFloatArray) { |
| 1562 // For the floating-point array type, we need to always allocate a |
| 1563 // HeapNumber. |
| 1564 if (CpuFeatures::IsSupported(VFP3)) { |
| 1565 CpuFeatures::Scope scope(VFP3); |
| 1566 // Allocate a HeapNumber for the result. Don't use r0 and r1, as |
| 1567 // AllocateHeapNumber clobbers all registers, including when it jumps |
| 1568 // due to exhausted young space. |
| 1569 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 1570 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); |
| 1571 __ vcvt_f64_f32(d0, s0); |
| 1572 __ sub(r1, r2, Operand(kHeapObjectTag)); |
| 1573 __ vstr(d0, r1, HeapNumber::kValueOffset); |
| 1574 |
| 1575 __ mov(r0, r2); |
| 1576 __ Ret(); |
| 1577 } else { |
| 1578 // Allocate a HeapNumber for the result. Don't use r0 and r1, as |
| 1579 // AllocateHeapNumber clobbers all registers, including when it jumps |
| 1580 // due to exhausted young space. |
| 1581 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 1582 __ AllocateHeapNumber(r3, r4, r5, r6, &slow); |
| 1583 // VFP is not available; do manual single-to-double conversion. |
| 1584 |
| 1585 // r2: floating point value (binary32) |
| 1586 // r3: heap number for result |
| 1587 |
| 1588 // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to |
| 1589 // the slow case from here. |
| 1590 __ and_(r0, value, Operand(kBinary32MantissaMask)); |
| 1591 |
| 1592 // Extract exponent to r1. OK to clobber r1 now as there are no jumps to |
| 1593 // the slow case from here. |
| 1594 __ mov(r1, Operand(value, LSR, kBinary32MantissaBits)); |
| 1595 __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); |
| 1596 |
| 1597 Label exponent_rebiased; |
| 1598 __ teq(r1, Operand(0x00, RelocInfo::NONE)); |
| 1599 __ b(eq, &exponent_rebiased); |
| 1600 |
| 1601 __ teq(r1, Operand(0xff)); |
| 1602 __ mov(r1, Operand(0x7ff), LeaveCC, eq); |
| 1603 __ b(eq, &exponent_rebiased); |
| 1604 |
| 1605 // Rebias exponent. |
| 1606 __ add(r1, |
| 1607 r1, |
| 1608 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); |
| 1609 |
| 1610 __ bind(&exponent_rebiased); |
| 1611 __ and_(r2, value, Operand(kBinary32SignMask)); |
| 1612 value = no_reg; |
| 1613 __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord)); |
| 1614 |
| 1615 // Shift mantissa. |
| 1616 static const int kMantissaShiftForHiWord = |
| 1617 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; |
| 1618 |
| 1619 static const int kMantissaShiftForLoWord = |
| 1620 kBitsPerInt - kMantissaShiftForHiWord; |
| 1621 |
| 1622 __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord)); |
| 1623 __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord)); |
| 1624 |
| 1625 __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); |
| 1626 __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); |
| 1627 |
| 1628 __ mov(r0, r3); |
| 1629 __ Ret(); |
| 1630 } |
| 1631 |
| 1632 } else { |
| 1633 // Tag integer as smi and return it. |
| 1634 __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
| 1635 __ Ret(); |
| 1636 } |
| 1637 |
| 1638 // Slow case, key and receiver still in r0 and r1. |
| 1639 __ bind(&slow); |
| 1640 __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3); |
| 1641 GenerateRuntimeGetProperty(masm); |
| 1642 } |
| 1643 |
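Editor's note: the non-VFP path above widens a binary32 value to binary64 purely with integer operations. The C++ sketch below performs the same transformation: rebias the exponent by 1023 - 127 = 896, map all-ones exponents (Infinity/NaN) to 0x7FF, and split the 23 mantissa bits as 20 high + 3 low. Binary32ToBinary64Bits is a hypothetical name; note that binary32 subnormals take the exponent == 0 path without rescaling, exactly as the stub does.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Mirrors the stub's manual binary32 -> binary64 widening.
    // Editor's sketch, not part of the patch.
    static uint64_t Binary32ToBinary64Bits(uint32_t f) {
      uint32_t mantissa = f & 0x007FFFFFu;      // kBinary32MantissaMask
      uint32_t exponent = (f >> 23) & 0xFFu;

      if (exponent == 0xFFu) {
        exponent = 0x7FF;                       // Infinity or NaN.
      } else if (exponent != 0) {
        exponent += 1023 - 127;                 // Rebias the exponent.
      }
      uint32_t hi = (f & 0x80000000u) | (exponent << 20) | (mantissa >> 3);
      uint32_t lo = mantissa << 29;             // Remaining 3 mantissa bits.
      return ((uint64_t)hi << 32) | lo;
    }

    int main() {
      float f = 1.5f;
      uint32_t fbits;
      memcpy(&fbits, &f, sizeof(fbits));
      uint64_t dbits = Binary32ToBinary64Bits(fbits);
      double d;
      memcpy(&d, &dbits, sizeof(d));
      assert(d == 1.5);
      return 0;
    }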
| 1644 |
1340 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { | 1645 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { |
1341 // ---------- S t a t e -------------- | 1646 // ---------- S t a t e -------------- |
1342 // -- lr : return address | 1647 // -- lr : return address |
1343 // -- r0 : key | 1648 // -- r0 : key |
1344 // -- r1 : receiver | 1649 // -- r1 : receiver |
1345 // ----------------------------------- | 1650 // ----------------------------------- |
1346 Label slow; | 1651 Label slow; |
1347 | 1652 |
1348 // Check that the receiver isn't a smi. | 1653 // Check that the receiver isn't a smi. |
1349 __ BranchOnSmi(r1, &slow); | 1654 __ BranchOnSmi(r1, &slow); |
(...skipping 176 matching lines...)
1526 __ tst(value, Operand(kSmiTagMask)); | 1831 __ tst(value, Operand(kSmiTagMask)); |
1527 __ Ret(eq); | 1832 __ Ret(eq); |
1528 // Update write barrier for the elements array address. | 1833 // Update write barrier for the elements array address. |
1529 __ sub(r4, r5, Operand(elements)); | 1834 __ sub(r4, r5, Operand(elements)); |
1530 __ RecordWrite(elements, Operand(r4), r5, r6); | 1835 __ RecordWrite(elements, Operand(r4), r5, r6); |
1531 | 1836 |
1532 __ Ret(); | 1837 __ Ret(); |
1533 } | 1838 } |
1534 | 1839 |
1535 | 1840 |
| 1841 // Convert the int passed in register ival to an IEEE 754 single-precision |
| 1842 // floating-point value and store it at memory location (dst + 4 * wordoffset). |
| 1843 // If VFP3 is available, use it for the conversion. |
| 1844 static void StoreIntAsFloat(MacroAssembler* masm, |
| 1845 Register dst, |
| 1846 Register wordoffset, |
| 1847 Register ival, |
| 1848 Register fval, |
| 1849 Register scratch1, |
| 1850 Register scratch2) { |
| 1851 if (CpuFeatures::IsSupported(VFP3)) { |
| 1852 CpuFeatures::Scope scope(VFP3); |
| 1853 __ vmov(s0, ival); |
| 1854 __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); |
| 1855 __ vcvt_f32_s32(s0, s0); |
| 1856 __ vstr(s0, scratch1, 0); |
| 1857 } else { |
| 1858 Label not_special, done; |
| 1859 // Move the sign bit from the source to the destination. This works |
| 1860 // because the sign bit of the binary32 value has the same position and |
| 1861 // polarity as the 2's complement sign bit of a 32-bit integer. |
| 1862 ASSERT(kBinary32SignMask == 0x80000000u); |
| 1863 |
| 1864 __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); |
| 1865 // Negate value if it is negative. |
| 1866 __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne); |
| 1867 |
| 1868 // We treat -1, 0 and 1 specially. Register ival now contains the |
| 1869 // absolute value: equal to 1 (the special cases -1 and 1), greater |
| 1870 // than 1 (not a special case), or less than 1 (the special case 0). |
| 1871 __ cmp(ival, Operand(1)); |
| 1872 __ b(gt, ¬_special); |
| 1873 |
| 1874 // For 1 or -1 we need to or in the 0 exponent (biased). |
| 1875 static const uint32_t exponent_word_for_1 = |
| 1876 kBinary32ExponentBias << kBinary32ExponentShift; |
| 1877 |
| 1878 __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq); |
| 1879 __ b(&done); |
| 1880 |
| 1881 __ bind(¬_special); |
| 1882 // Count leading zeros. |
| 1883 // Gets the wrong answer for 0, but we already checked for that case above. |
| 1884 Register zeros = scratch2; |
| 1885 __ CountLeadingZeros(zeros, ival, scratch1); |
| 1886 |
| 1887 // Compute exponent and or it into the exponent register. |
| 1888 __ rsb(scratch1, |
| 1889 zeros, |
| 1890 Operand((kBitsPerInt - 1) + kBinary32ExponentBias)); |
| 1891 |
| 1892 __ orr(fval, |
| 1893 fval, |
| 1894 Operand(scratch1, LSL, kBinary32ExponentShift)); |
| 1895 |
| 1896 // Shift the source up, chopping off the top (implicit) bit. |
| 1897 __ add(zeros, zeros, Operand(1)); |
| 1898 // This wouldn't work for 1 and -1, as the shift amount would be 32, yielding 0. |
| 1899 __ mov(ival, Operand(ival, LSL, zeros)); |
| 1900 // Or in the mantissa (the top 23 bits of ival). |
| 1901 __ orr(fval, |
| 1902 fval, |
| 1903 Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); |
| 1904 |
| 1905 __ bind(&done); |
| 1906 __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); |
| 1907 } |
| 1908 } |
| 1909 |
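Editor's note: a C++ rendering of the non-VFP branch of StoreIntAsFloat. Int32ToBinary32Bits is a hypothetical name, and __builtin_clz (GCC/Clang) stands in for MacroAssembler::CountLeadingZeros. Like the stub, it truncates mantissa bits that do not fit rather than rounding, so for |ival| >= 2^24 the result can differ from a C-style (float)ival cast.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Mirrors the non-VFP branch of StoreIntAsFloat: encode a 32-bit signed
    // integer as raw binary32 bits. Editor's sketch, not part of the patch.
    static uint32_t Int32ToBinary32Bits(int32_t ival) {
      const int kExponentBias = 127;   // kBinary32ExponentBias
      const int kExponentShift = 23;   // kBinary32ExponentShift
      const int kMantissaBits = 23;    // kBinary32MantissaBits

      uint32_t fval = (uint32_t)ival & 0x80000000u;  // Sign bit.
      uint32_t v = ival < 0 ? -(uint32_t)ival : (uint32_t)ival;
      if (v <= 1) {
        // 0 keeps the all-zero encoding; 1 and -1 just need the biased
        // exponent for 2^0 or'ed in.
        if (v == 1) fval |= (uint32_t)kExponentBias << kExponentShift;
        return fval;
      }
      int zeros = __builtin_clz(v);                  // CountLeadingZeros.
      fval |= (uint32_t)(31 - zeros + kExponentBias) << kExponentShift;
      v <<= zeros + 1;                               // Drop the implicit 1 bit.
      fval |= v >> (32 - kMantissaBits);             // Top 23 bits -> mantissa.
      return fval;
    }

    int main() {
      uint32_t bits = Int32ToBinary32Bits(-12345);   // Exact in binary32.
      float f;
      memcpy(&f, &bits, sizeof(f));
      assert(f == -12345.0f);
      return 0;
    }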
| 1910 |
| 1911 static bool IsElementTypeSigned(ExternalArrayType array_type) { |
| 1912 switch (array_type) { |
| 1913 case kExternalByteArray: |
| 1914 case kExternalShortArray: |
| 1915 case kExternalIntArray: |
| 1916 return true; |
| 1917 |
| 1918 case kExternalUnsignedByteArray: |
| 1919 case kExternalUnsignedShortArray: |
| 1920 case kExternalUnsignedIntArray: |
| 1921 return false; |
| 1922 |
| 1923 default: |
| 1924 UNREACHABLE(); |
| 1925 return false; |
| 1926 } |
| 1927 } |
| 1928 |
| 1929 |
| 1930 void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, |
| 1931 ExternalArrayType array_type) { |
| 1932 // ---------- S t a t e -------------- |
| 1933 // -- r0 : value |
| 1934 // -- r1 : key |
| 1935 // -- r2 : receiver |
| 1936 // -- lr : return address |
| 1937 // ----------------------------------- |
| 1938 Label slow, check_heap_number; |
| 1939 |
| 1940 // Register usage. |
| 1941 Register value = r0; |
| 1942 Register key = r1; |
| 1943 Register receiver = r2; |
| 1944 // r3 mostly holds the elements array or the destination external array. |
| 1945 |
| 1946 // Check that the object isn't a smi. |
| 1947 __ BranchOnSmi(receiver, &slow); |
| 1948 |
| 1949 // Check that the object is a JS object. Load map into r3. |
| 1950 __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE); |
| 1951 __ b(le, &slow); |
| 1952 |
| 1953 // Check that the receiver does not require access checks. We need |
| 1954 // to do this because this generic stub does not perform map checks. |
| 1955 __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset)); |
| 1956 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); |
| 1957 __ b(ne, &slow); |
| 1958 |
| 1959 // Check that the key is a smi. |
| 1960 __ BranchOnNotSmi(key, &slow); |
| 1961 |
| 1962 // Check that the elements array is the appropriate type of ExternalArray. |
| 1963 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 1964 __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); |
| 1965 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); |
| 1966 __ cmp(r4, ip); |
| 1967 __ b(ne, &slow); |
| 1968 |
| 1969 // Check that the index is in range. |
| 1970 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index. |
| 1971 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); |
| 1972 __ cmp(r4, ip); |
| 1973 // Unsigned comparison catches both negative and too-large values. |
| 1974 __ b(hs, &slow); |
| 1975 |
| 1976 // Handle both smis and HeapNumbers in the fast path. Go to the |
| 1977 // runtime for all other kinds of values. |
| 1978 // r3: external array. |
| 1979 // r4: key (integer). |
| 1980 __ BranchOnNotSmi(value, &check_heap_number); |
| 1981 __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. |
| 1982 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); |
| 1983 |
| 1984 // r3: base pointer of external storage. |
| 1985 // r4: key (integer). |
| 1986 // r5: value (integer). |
| 1987 switch (array_type) { |
| 1988 case kExternalByteArray: |
| 1989 case kExternalUnsignedByteArray: |
| 1990 __ strb(r5, MemOperand(r3, r4, LSL, 0)); |
| 1991 break; |
| 1992 case kExternalShortArray: |
| 1993 case kExternalUnsignedShortArray: |
| 1994 __ strh(r5, MemOperand(r3, r4, LSL, 1)); |
| 1995 break; |
| 1996 case kExternalIntArray: |
| 1997 case kExternalUnsignedIntArray: |
| 1998 __ str(r5, MemOperand(r3, r4, LSL, 2)); |
| 1999 break; |
| 2000 case kExternalFloatArray: |
| 2001 // Perform int-to-float conversion and store to memory. |
| 2002 StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9); |
| 2003 break; |
| 2004 default: |
| 2005 UNREACHABLE(); |
| 2006 break; |
| 2007 } |
| 2008 |
| 2009 // Entry registers are intact, r0 holds the value which is the return value. |
| 2010 __ Ret(); |
| 2011 |
| 2012 |
| 2013 // r3: external array. |
| 2014 // r4: index (integer). |
| 2015 __ bind(&check_heap_number); |
| 2016 __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE); |
| 2017 __ b(ne, &slow); |
| 2018 |
| 2019 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); |
| 2020 |
| 2021 // r3: base pointer of external storage. |
| 2022 // r4: key (integer). |
| 2023 |
| 2024 // The WebGL specification leaves the behavior of storing NaN and |
| 2025 // +/-Infinity into integer arrays basically undefined. For more |
| 2026 // reproducible behavior, convert these to zero. |
| 2027 if (CpuFeatures::IsSupported(VFP3)) { |
| 2028 CpuFeatures::Scope scope(VFP3); |
| 2029 |
| 2030 |
| 2031 if (array_type == kExternalFloatArray) { |
| 2032 // vldr requires the offset to be a multiple of 4, so we cannot |
| 2033 // include -kHeapObjectTag in it. |
| 2034 __ sub(r5, r0, Operand(kHeapObjectTag)); |
| 2035 __ vldr(d0, r5, HeapNumber::kValueOffset); |
| 2036 __ add(r5, r3, Operand(r4, LSL, 2)); |
| 2037 __ vcvt_f32_f64(s0, d0); |
| 2038 __ vstr(s0, r5, 0); |
| 2039 } else { |
| 2040 // Need to perform float-to-int conversion. |
| 2041 // Test for NaN or infinity (both give zero). |
| 2042 __ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset)); |
| 2043 |
| 2044 // Hoisted load. vldr requires the offset to be a multiple of 4, so we |
| 2045 // cannot include -kHeapObjectTag in it. |
| 2046 __ sub(r5, r0, Operand(kHeapObjectTag)); |
| 2047 __ vldr(d0, r5, HeapNumber::kValueOffset); |
| 2048 |
| 2049 __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
| 2050 // NaNs and Infinities have all-one exponents so they sign extend to -1. |
| 2051 __ cmp(r6, Operand(-1)); |
| 2052 __ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq); |
| 2053 |
| 2054 // Not infinity or NaN: simply convert to int. |
| 2055 if (IsElementTypeSigned(array_type)) { |
| 2056 __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne); |
| 2057 } else { |
| 2058 __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne); |
| 2059 } |
| 2060 __ vmov(r5, s0, ne); |
| 2061 |
| 2062 switch (array_type) { |
| 2063 case kExternalByteArray: |
| 2064 case kExternalUnsignedByteArray: |
| 2065 __ strb(r5, MemOperand(r3, r4, LSL, 0)); |
| 2066 break; |
| 2067 case kExternalShortArray: |
| 2068 case kExternalUnsignedShortArray: |
| 2069 __ strh(r5, MemOperand(r3, r4, LSL, 1)); |
| 2070 break; |
| 2071 case kExternalIntArray: |
| 2072 case kExternalUnsignedIntArray: |
| 2073 __ str(r5, MemOperand(r3, r4, LSL, 2)); |
| 2074 break; |
| 2075 default: |
| 2076 UNREACHABLE(); |
| 2077 break; |
| 2078 } |
| 2079 } |
| 2080 |
| 2081 // Entry registers are intact, r0 holds the value which is the return value. |
| 2082 __ Ret(); |
| 2083 } else { |
| 2084 // VFP3 is not available; do manual conversions. |
| 2085 __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset)); |
| 2086 __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset)); |
| 2087 |
| 2088 if (array_type == kExternalFloatArray) { |
| 2089 Label done, nan_or_infinity_or_zero; |
| 2090 static const int kMantissaInHiWordShift = |
| 2091 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; |
| 2092 |
| 2093 static const int kMantissaInLoWordShift = |
| 2094 kBitsPerInt - kMantissaInHiWordShift; |
| 2095 |
| 2096 // Test for all special exponent values: zeros, subnormal numbers, NaNs |
| 2097 // and infinities. All these should be converted to 0. |
| 2098 __ mov(r7, Operand(HeapNumber::kExponentMask)); |
| 2099 __ and_(r9, r5, Operand(r7), SetCC); |
| 2100 __ b(eq, &nan_or_infinity_or_zero); |
| 2101 |
| 2102 __ teq(r9, Operand(r7)); |
| 2103 __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq); |
| 2104 __ b(eq, &nan_or_infinity_or_zero); |
| 2105 |
| 2106 // Rebias exponent. |
| 2107 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); |
| 2108 __ add(r9, |
| 2109 r9, |
| 2110 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); |
| 2111 |
| 2112 __ cmp(r9, Operand(kBinary32MaxExponent)); |
| 2113 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt); |
| 2114 __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt); |
| 2115 __ b(gt, &done); |
| 2116 |
| 2117 __ cmp(r9, Operand(kBinary32MinExponent)); |
| 2118 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt); |
| 2119 __ b(lt, &done); |
| 2120 |
| 2121 __ and_(r7, r5, Operand(HeapNumber::kSignMask)); |
| 2122 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); |
| 2123 __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift)); |
| 2124 __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift)); |
| 2125 __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift)); |
| 2126 |
| 2127 __ bind(&done); |
| 2128 __ str(r5, MemOperand(r3, r4, LSL, 2)); |
| 2129 // Entry registers are intact, r0 holds the value which is the return |
| 2130 // value. |
| 2131 __ Ret(); |
| 2132 |
| 2133 __ bind(&nan_or_infinity_or_zero); |
| 2134 __ and_(r7, r5, Operand(HeapNumber::kSignMask)); |
| 2135 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); |
| 2136 __ orr(r9, r9, r7); |
| 2137 __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift)); |
| 2138 __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift)); |
| 2139 __ b(&done); |
| 2140 } else { |
| 2141 bool is_signed_type = IsElementTypeSigned(array_type); |
| 2142 int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; |
| 2143 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; |
| 2144 |
| 2145 Label done, sign; |
| 2146 |
| 2147 // Test for all special exponent values: zeros, subnormal numbers, NaNs |
| 2148 // and infinities. All these should be converted to 0. |
| 2149 __ mov(r7, Operand(HeapNumber::kExponentMask)); |
| 2150 __ and_(r9, r5, Operand(r7), SetCC); |
| 2151 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); |
| 2152 __ b(eq, &done); |
| 2153 |
| 2154 __ teq(r9, Operand(r7)); |
| 2155 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); |
| 2156 __ b(eq, &done); |
| 2157 |
| 2158 // Unbias exponent. |
| 2159 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); |
| 2160 __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); |
| 2161 // If the exponent is negative, the result is 0. |
| 2162 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi); |
| 2163 __ b(mi, &done); |
| 2164 |
| 2165 // If the exponent is too big, the result is the minimal value. |
| 2166 __ cmp(r9, Operand(meaningful_bits - 1)); |
| 2167 __ mov(r5, Operand(min_value), LeaveCC, ge); |
| 2168 __ b(ge, &done); |
| 2169 |
| 2170 __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); |
| 2171 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); |
| 2172 __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); |
| 2173 |
| 2174 __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); |
| 2175 __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); |
| 2176 __ b(pl, &sign); |
| 2177 |
| 2178 __ rsb(r9, r9, Operand(0, RelocInfo::NONE)); |
| 2179 __ mov(r5, Operand(r5, LSL, r9)); |
| 2180 __ rsb(r9, r9, Operand(kBitsPerInt)); // Lo-word shift is 32 - (exponent - 20). |
| 2181 __ orr(r5, r5, Operand(r6, LSR, r9)); |
| 2182 |
| 2183 __ bind(&sign); |
| 2184 __ teq(r7, Operand(0, RelocInfo::NONE)); |
| 2185 __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne); |
| 2186 |
| 2187 __ bind(&done); |
| 2188 switch (array_type) { |
| 2189 case kExternalByteArray: |
| 2190 case kExternalUnsignedByteArray: |
| 2191 __ strb(r5, MemOperand(r3, r4, LSL, 0)); |
| 2192 break; |
| 2193 case kExternalShortArray: |
| 2194 case kExternalUnsignedShortArray: |
| 2195 __ strh(r5, MemOperand(r3, r4, LSL, 1)); |
| 2196 break; |
| 2197 case kExternalIntArray: |
| 2198 case kExternalUnsignedIntArray: |
| 2199 __ str(r5, MemOperand(r3, r4, LSL, 2)); |
| 2200 break; |
| 2201 default: |
| 2202 UNREACHABLE(); |
| 2203 break; |
| 2204 } |
| 2205 } |
| 2206 } |
| 2207 |
| 2208 // Slow case: call runtime. |
| 2209 __ bind(&slow); |
| 2210 |
| 2211 // Entry registers are intact. |
| 2212 // r0: value |
| 2213 // r1: key |
| 2214 // r2: receiver |
| 2215 GenerateRuntimeSetProperty(masm); |
| 2216 } |
| 2217 |
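Editor's note: the manual double-to-integer conversion above is dense; the C++ sketch below follows the same steps (DoubleToIntBits is a hypothetical name). It reproduces the stub's clamping choices: NaNs, infinities, zeros and subnormals become 0, exponents below 0 truncate to 0, and exponents at or above meaningful_bits - 1 saturate to min_value, which is slightly conservative near the top of the range. Otherwise it truncates toward zero.

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <limits>

    // Mirrors the stub's non-VFP double -> int conversion for external array
    // stores. Editor's sketch, not part of the patch.
    static uint32_t DoubleToIntBits(double d, bool is_signed) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      uint32_t hi = (uint32_t)(bits >> 32);
      uint32_t lo = (uint32_t)bits;

      int meaningful_bits = is_signed ? 31 : 32;
      uint32_t min_value = is_signed ? 0x80000000u : 0u;

      int exponent = (int)((hi >> 20) & 0x7FF);
      if (exponent == 0 || exponent == 0x7FF) return 0;  // 0, subnormal, NaN, Inf.

      exponent -= 1023;                                  // Unbias.
      if (exponent < 0) return 0;                        // |d| < 1 truncates to 0.
      if (exponent >= meaningful_bits - 1) return min_value;  // Saturate.

      // 21-bit high part of the significand: implicit 1 plus top 20 fraction bits.
      uint32_t mantissa = (hi & 0x000FFFFFu) | 0x00100000u;
      uint32_t result;
      int shift = 20 - exponent;          // kMantissaBitsInTopWord - exponent.
      if (shift >= 0) {
        result = mantissa >> shift;
      } else {
        // Bits from the lo word fill in below the shifted high part.
        result = (mantissa << -shift) | (lo >> (32 + shift));
      }
      if (hi & 0x80000000u) result = (uint32_t)-(int32_t)result;  // Apply sign.
      return result;
    }

    int main() {
      assert(DoubleToIntBits(3.9, true) == 3);
      assert(DoubleToIntBits(-2.5, true) == (uint32_t)-2);
      double nan = std::numeric_limits<double>::quiet_NaN();
      assert(DoubleToIntBits(nan, true) == 0);  // The WebGL-motivated choice.
      return 0;
    }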
| 2218 |
1536 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { | 2219 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { |
1537 // ----------- S t a t e ------------- | 2220 // ----------- S t a t e ------------- |
1538 // -- r0 : value | 2221 // -- r0 : value |
1539 // -- r1 : receiver | 2222 // -- r1 : receiver |
1540 // -- r2 : name | 2223 // -- r2 : name |
1541 // -- lr : return address | 2224 // -- lr : return address |
1542 // ----------------------------------- | 2225 // ----------------------------------- |
1543 | 2226 |
1544 // Get the receiver from the stack and probe the stub cache. | 2227 // Get the receiver from the stack and probe the stub cache. |
1545 Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, | 2228 Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, |
(...skipping 158 matching lines...)
1704 | 2387 |
1705 | 2388 |
1706 void PatchInlinedSmiCode(Address address) { | 2389 void PatchInlinedSmiCode(Address address) { |
1707 UNIMPLEMENTED(); | 2390 UNIMPLEMENTED(); |
1708 } | 2391 } |
1709 | 2392 |
1710 | 2393 |
1711 } } // namespace v8::internal | 2394 } } // namespace v8::internal |
1712 | 2395 |
1713 #endif // V8_TARGET_ARCH_ARM | 2396 #endif // V8_TARGET_ARCH_ARM |