OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1351 matching lines...)
1362 } | 1362 } |
1363 | 1363 |
1364 // If the least significant bit of the biased exponent was not 1, it was | 1364 // If the least significant bit of the biased exponent was not 1, it was |
1365 // corrupted by the most significant bit of the mantissa, so fix it. | 1365 // corrupted by the most significant bit of the mantissa, so fix it. |
1366 if (!(biased_exponent & 1)) { | 1366 if (!(biased_exponent & 1)) { |
1367 __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); | 1367 __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); |
1368 } | 1368 } |
1369 } | 1369 } |
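Reviewer note, illustrative only: for a uint32 whose highest set bit is bit 31 - leading_zeroes, the biased binary64 exponent is 1023 + 31 - leading_zeroes, i.e. 1054 for zero leading zeros and 1053 for one leading zero. Only the even exponent (1054) can have its low bit overwritten by the top mantissa bit, which is why only that case needs the bic fix above. A minimal standalone check of the arithmetic, assuming the standard IEEE-754 layout used by HeapNumber (the constant below mirrors HeapNumber::kExponentBias):

#include <cassert>

int main() {
  const int kExponentBias = 1023;  // assumed to match HeapNumber::kExponentBias
  for (int leading_zeroes = 0; leading_zeroes <= 1; ++leading_zeroes) {
    // The value lies in [2^(31 - lz), 2^(32 - lz)), so its biased exponent is:
    int biased_exponent = kExponentBias + 31 - leading_zeroes;
    // Only the leading_zeroes == 0 case produces an even exponent.
    assert((biased_exponent & 1) == ((leading_zeroes == 0) ? 0 : 1));
  }
  return 0;
}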
1370 | 1370 |
1371 | 1371 |
1372 void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, | |
1373 ExternalArrayType array_type) { | |
1374 // ---------- S t a t e -------------- | |
1375 // -- lr : return address | |
1376 // -- r0 : key | |
1377 // -- r1 : receiver | |
1378 // ----------------------------------- | |
1379 Label slow, failed_allocation; | |
1380 | |
1381 Register key = r0; | |
1382 Register receiver = r1; | |
1383 | |
1384 // Check that the object isn't a smi. |
1385 __ BranchOnSmi(receiver, &slow); | |
1386 | |
1387 // Check that the key is a smi. | |
1388 __ BranchOnNotSmi(key, &slow); | |
1389 | |
1390 // Check that the object is a JS object. Load map into r2. | |
1391 __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE); | |
1392 __ b(lt, &slow); | |
1393 | |
1394 // Check that the receiver does not require access checks. We need | |
1395 // to check this explicitly since this generic stub does not perform | |
1396 // map checks. | |
1397 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); | |
1398 __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded)); | |
1399 __ b(ne, &slow); | |
1400 | |
1401 // Check that the elements array is the appropriate type of | |
1402 // ExternalArray. | |
1403 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
1404 __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); | |
1405 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); | |
1406 __ cmp(r2, ip); | |
1407 __ b(ne, &slow); | |
1408 | |
1409 // Check that the index is in range. | |
1410 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); | |
1411 __ cmp(ip, Operand(key, ASR, kSmiTagSize)); | |
1412 // Unsigned comparison catches both negative and too-large values. | |
1413 __ b(lo, &slow); | |
1414 | |
1415 // r3: elements array | |
1416 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); | |
1417 // r3: base pointer of external storage | |
1418 | |
1419 // We are not untagging the smi key; instead we work with it |
1420 // as if it were premultiplied by 2. |
1421 ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); | |
1422 | |
1423 Register value = r2; | |
1424 switch (array_type) { | |
1425 case kExternalByteArray: | |
1426 __ ldrsb(value, MemOperand(r3, key, LSR, 1)); | |
1427 break; | |
1428 case kExternalUnsignedByteArray: | |
1429 __ ldrb(value, MemOperand(r3, key, LSR, 1)); | |
1430 break; | |
1431 case kExternalShortArray: | |
1432 __ ldrsh(value, MemOperand(r3, key, LSL, 0)); | |
1433 break; | |
1434 case kExternalUnsignedShortArray: | |
1435 __ ldrh(value, MemOperand(r3, key, LSL, 0)); | |
1436 break; | |
1437 case kExternalIntArray: | |
1438 case kExternalUnsignedIntArray: | |
1439 __ ldr(value, MemOperand(r3, key, LSL, 1)); | |
1440 break; | |
1441 case kExternalFloatArray: | |
1442 if (CpuFeatures::IsSupported(VFP3)) { | |
1443 CpuFeatures::Scope scope(VFP3); | |
1444 __ add(r2, r3, Operand(key, LSL, 1)); | |
1445 __ vldr(s0, r2, 0); | |
1446 } else { | |
1447 __ ldr(value, MemOperand(r3, key, LSL, 1)); | |
1448 } | |
1449 break; | |
1450 default: | |
1451 UNREACHABLE(); | |
1452 break; | |
1453 } | |
1454 | |
1455 // For integer array types: | |
1456 // r2: value | |
1457 // For the floating-point array type: |
1458 // s0: value (if VFP3 is supported) | |
1459 // r2: value (if VFP3 is not supported) | |
1460 | |
1461 if (array_type == kExternalIntArray) { | |
1462 // For the Int and UnsignedInt array types, we need to see whether | |
1463 // the value can be represented in a Smi. If not, we need to convert | |
1464 // it to a HeapNumber. | |
1465 Label box_int; | |
1466 __ cmp(value, Operand(0xC0000000)); | |
1467 __ b(mi, &box_int); | |
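Reviewer note, illustrative only: the single cmp/mi pair above works because a 32-bit value fits in a 31-bit smi exactly when its top two bits are equal, and the 32-bit subtraction value - 0xC0000000 has its sign bit set exactly when they differ. A minimal standalone check:

#include <cassert>
#include <cstdint>

static bool FitsInSmi(int32_t v) {
  return v >= -(1 << 30) && v <= (1 << 30) - 1;  // 31-bit signed smi range
}

int main() {
  const int32_t samples[] = {0, 1, -1, (1 << 30) - 1, 1 << 30,
                             -(1 << 30), -(1 << 30) - 1, INT32_MIN, INT32_MAX};
  for (int32_t v : samples) {
    // Sign bit of (v - 0xC0000000) computed as a 32-bit subtraction ("mi").
    bool mi = ((static_cast<uint32_t>(v) - 0xC0000000u) & 0x80000000u) != 0;
    assert(mi == !FitsInSmi(v));  // branch taken exactly when boxing is needed
  }
  return 0;
}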
1468 // Tag integer as smi and return it. | |
1469 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | |
1470 __ Ret(); | |
1471 | |
1472 __ bind(&box_int); | |
1473 // Allocate a HeapNumber for the result and perform int-to-double | |
1474 // conversion. Don't touch r0 or r1 as they are needed if allocation | |
1475 // fails. | |
1476 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
1477 __ AllocateHeapNumber(r5, r3, r4, r6, &slow); | |
1478 // Now we can use r0 for the result as key is not needed any more. | |
1479 __ mov(r0, r5); | |
1480 | |
1481 if (CpuFeatures::IsSupported(VFP3)) { | |
1482 CpuFeatures::Scope scope(VFP3); | |
1483 __ vmov(s0, value); | |
1484 __ vcvt_f64_s32(d0, s0); | |
1485 __ sub(r3, r0, Operand(kHeapObjectTag)); | |
1486 __ vstr(d0, r3, HeapNumber::kValueOffset); | |
1487 __ Ret(); | |
1488 } else { | |
1489 WriteInt32ToHeapNumberStub stub(value, r0, r3); | |
1490 __ TailCallStub(&stub); | |
1491 } | |
1492 } else if (array_type == kExternalUnsignedIntArray) { | |
1493 // The test is different for unsigned int values. Since we need | |
1494 // the value to be in the range of a positive smi, we can't | |
1495 // handle either of the top two bits being set in the value. | |
1496 if (CpuFeatures::IsSupported(VFP3)) { | |
1497 CpuFeatures::Scope scope(VFP3); | |
1498 Label box_int, done; | |
1499 __ tst(value, Operand(0xC0000000)); | |
1500 __ b(ne, &box_int); | |
1501 // Tag integer as smi and return it. | |
1502 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | |
1503 __ Ret(); | |
1504 | |
1505 __ bind(&box_int); | |
1506 __ vmov(s0, value); | |
1507 // Allocate a HeapNumber for the result and perform int-to-double | |
1508 // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all | |
1509 // registers - also when jumping due to exhausted young space. | |
1510 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
1511 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); | |
1512 | |
1513 __ vcvt_f64_u32(d0, s0); | |
1514 __ sub(r1, r2, Operand(kHeapObjectTag)); | |
1515 __ vstr(d0, r1, HeapNumber::kValueOffset); | |
1516 | |
1517 __ mov(r0, r2); | |
1518 __ Ret(); | |
1519 } else { | |
1520 // Check whether unsigned integer fits into smi. | |
1521 Label box_int_0, box_int_1, done; | |
1522 __ tst(value, Operand(0x80000000)); | |
1523 __ b(ne, &box_int_0); | |
1524 __ tst(value, Operand(0x40000000)); | |
1525 __ b(ne, &box_int_1); | |
1526 // Tag integer as smi and return it. | |
1527 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | |
1528 __ Ret(); | |
1529 | |
1530 Register hiword = value; // r2. | |
1531 Register loword = r3; | |
1532 | |
1533 __ bind(&box_int_0); | |
1534 // Integer does not have leading zeros. | |
1535 GenerateUInt2Double(masm, hiword, loword, r4, 0); | |
1536 __ b(&done); | |
1537 | |
1538 __ bind(&box_int_1); | |
1539 // Integer has one leading zero. | |
1540 GenerateUInt2Double(masm, hiword, loword, r4, 1); | |
1541 | |
1542 | |
1543 __ bind(&done); | |
1544 // Integer was converted to double in registers hiword:loword. | |
1545 // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber | |
1546 // clobbers all registers - also when jumping due to exhausted young | |
1547 // space. | |
1548 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
1549 __ AllocateHeapNumber(r4, r5, r7, r6, &slow); | |
1550 | |
1551 __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); | |
1552 __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); | |
1553 | |
1554 __ mov(r0, r4); | |
1555 __ Ret(); | |
1556 } | |
1557 } else if (array_type == kExternalFloatArray) { | |
1558 // For the floating-point array type, we need to always allocate a | |
1559 // HeapNumber. | |
1560 if (CpuFeatures::IsSupported(VFP3)) { | |
1561 CpuFeatures::Scope scope(VFP3); | |
1562 // Allocate a HeapNumber for the result. Don't use r0 and r1 as | |
1563 // AllocateHeapNumber clobbers all registers - also when jumping due to | |
1564 // exhausted young space. | |
1565 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
1566 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); | |
1567 __ vcvt_f64_f32(d0, s0); | |
1568 __ sub(r1, r2, Operand(kHeapObjectTag)); | |
1569 __ vstr(d0, r1, HeapNumber::kValueOffset); | |
1570 | |
1571 __ mov(r0, r2); | |
1572 __ Ret(); | |
1573 } else { | |
1574 // Allocate a HeapNumber for the result. Don't use r0 and r1 as | |
1575 // AllocateHeapNumber clobbers all registers - also when jumping due to | |
1576 // exhausted young space. | |
1577 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
1578 __ AllocateHeapNumber(r3, r4, r5, r6, &slow); | |
1579 // VFP is not available, so do a manual single-to-double conversion. |
1580 | |
1581 // r2: floating point value (binary32) | |
1582 // r3: heap number for result | |
1583 | |
1584 // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to | |
1585 // the slow case from here. | |
1586 __ and_(r0, value, Operand(kBinary32MantissaMask)); | |
1587 | |
1588 // Extract exponent to r1. OK to clobber r1 now as there are no jumps to | |
1589 // the slow case from here. | |
1590 __ mov(r1, Operand(value, LSR, kBinary32MantissaBits)); | |
1591 __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); | |
1592 | |
1593 Label exponent_rebiased; | |
1594 __ teq(r1, Operand(0x00, RelocInfo::NONE)); | |
1595 __ b(eq, &exponent_rebiased); | |
1596 | |
1597 __ teq(r1, Operand(0xff)); | |
1598 __ mov(r1, Operand(0x7ff), LeaveCC, eq); | |
1599 __ b(eq, &exponent_rebiased); | |
1600 | |
1601 // Rebias exponent. | |
1602 __ add(r1, | |
1603 r1, | |
1604 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); | |
1605 | |
1606 __ bind(&exponent_rebiased); | |
1607 __ and_(r2, value, Operand(kBinary32SignMask)); | |
1608 value = no_reg; | |
1609 __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord)); | |
1610 | |
1611 // Shift mantissa. | |
1612 static const int kMantissaShiftForHiWord = | |
1613 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; | |
1614 | |
1615 static const int kMantissaShiftForLoWord = | |
1616 kBitsPerInt - kMantissaShiftForHiWord; | |
1617 | |
1618 __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord)); | |
1619 __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord)); | |
1620 | |
1621 __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); | |
1622 __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); | |
1623 | |
1624 __ mov(r0, r3); | |
1625 __ Ret(); | |
1626 } | |
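Reviewer note, illustrative only: a standalone model of the non-VFP single-to-double conversion above (sign copied, exponent rebiased by 1023 - 127 with the all-zeros and all-ones exponents passed through, mantissa left-aligned), checked bit-for-bit against the compiler's float-to-double conversion for zeros, normal values and infinity. Binary32 subnormals keep a zero exponent here, matching the code above, so they are not exercised by the check:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

// Combines the hi word (sign, rebiased exponent, top mantissa bits) and lo
// word (remaining mantissa bits) the way the stub stores them.
static uint64_t ManualF32ToF64Bits(uint32_t f) {
  uint32_t mantissa = f & 0x007FFFFFu;    // kBinary32MantissaMask
  uint32_t exponent = (f >> 23) & 0xFFu;  // biased binary32 exponent
  if (exponent != 0) {
    exponent = (exponent == 0xFFu) ? 0x7FFu                  // Inf/NaN
                                   : exponent - 127 + 1023;  // rebias
  }
  return (static_cast<uint64_t>(f & 0x80000000u) << 32) |  // sign
         (static_cast<uint64_t>(exponent) << 52) |         // exponent
         (static_cast<uint64_t>(mantissa) << (52 - 23));   // mantissa
}

int main() {
  const float samples[] = {0.0f, -0.0f, 1.0f, -1.5f, 123456.78f, 1.0e30f,
                           -3.0e-30f,
                           std::numeric_limits<float>::infinity()};
  for (float x : samples) {
    uint32_t fbits;
    std::memcpy(&fbits, &x, sizeof(fbits));
    double d = static_cast<double>(x);  // exact for binary32 inputs
    uint64_t dbits;
    std::memcpy(&dbits, &d, sizeof(dbits));
    assert(ManualF32ToF64Bits(fbits) == dbits);
  }
  return 0;
}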
1627 | |
1628 } else { | |
1629 // Tag integer as smi and return it. | |
1630 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | |
1631 __ Ret(); | |
1632 } | |
1633 | |
1634 // Slow case, key and receiver still in r0 and r1. | |
1635 __ bind(&slow); | |
1636 __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3); | |
1637 GenerateRuntimeGetProperty(masm); | |
1638 } | |
1639 | |
1640 | |
1641 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { | 1372 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { |
1642 // ---------- S t a t e -------------- | 1373 // ---------- S t a t e -------------- |
1643 // -- lr : return address | 1374 // -- lr : return address |
1644 // -- r0 : key | 1375 // -- r0 : key |
1645 // -- r1 : receiver | 1376 // -- r1 : receiver |
1646 // ----------------------------------- | 1377 // ----------------------------------- |
1647 Label slow; | 1378 Label slow; |
1648 | 1379 |
1649 // Check that the receiver isn't a smi. | 1380 // Check that the receiver isn't a smi. |
1650 __ BranchOnSmi(r1, &slow); | 1381 __ BranchOnSmi(r1, &slow); |
(...skipping 246 matching lines...)
1897 __ orr(fval, | 1628 __ orr(fval, |
1898 fval, | 1629 fval, |
1899 Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); | 1630 Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); |
1900 | 1631 |
1901 __ bind(&done); | 1632 __ bind(&done); |
1902 __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); | 1633 __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); |
1903 } | 1634 } |
1904 } | 1635 } |
1905 | 1636 |
1906 | 1637 |
1907 static bool IsElementTypeSigned(ExternalArrayType array_type) { | |
1908 switch (array_type) { | |
1909 case kExternalByteArray: | |
1910 case kExternalShortArray: | |
1911 case kExternalIntArray: | |
1912 return true; | |
1913 | |
1914 case kExternalUnsignedByteArray: | |
1915 case kExternalUnsignedShortArray: | |
1916 case kExternalUnsignedIntArray: | |
1917 return false; | |
1918 | |
1919 default: | |
1920 UNREACHABLE(); | |
1921 return false; | |
1922 } | |
1923 } | |
1924 | |
1925 | |
1926 void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, | |
1927 ExternalArrayType array_type) { | |
1928 // ---------- S t a t e -------------- | |
1929 // -- r0 : value | |
1930 // -- r1 : key | |
1931 // -- r2 : receiver | |
1932 // -- lr : return address | |
1933 // ----------------------------------- | |
1934 Label slow, check_heap_number; | |
1935 | |
1936 // Register usage. | |
1937 Register value = r0; | |
1938 Register key = r1; | |
1939 Register receiver = r2; | |
1940 // r3 mostly holds the elements array or the destination external array. | |
1941 | |
1942 // Check that the object isn't a smi. | |
1943 __ BranchOnSmi(receiver, &slow); | |
1944 | |
1945 // Check that the object is a JS object. Load map into r3. | |
1946 __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE); | |
1947 __ b(le, &slow); | |
1948 | |
1949 // Check that the receiver does not require access checks. We need | |
1950 // to do this because this generic stub does not perform map checks. | |
1951 __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset)); | |
1952 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); | |
1953 __ b(ne, &slow); | |
1954 | |
1955 // Check that the key is a smi. | |
1956 __ BranchOnNotSmi(key, &slow); | |
1957 | |
1958 // Check that the elements array is the appropriate type of ExternalArray. | |
1959 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
1960 __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); | |
1961 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); | |
1962 __ cmp(r4, ip); | |
1963 __ b(ne, &slow); | |
1964 | |
1965 // Check that the index is in range. | |
1966 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index. | |
1967 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); | |
1968 __ cmp(r4, ip); | |
1969 // Unsigned comparison catches both negative and too-large values. | |
1970 __ b(hs, &slow); | |
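Reviewer note, illustrative only: once the key is untagged, a negative index reinterpreted as an unsigned word is at least 2^31 and therefore never below a valid length, so the single unsigned hs branch above rejects both negative and too-large indices. A minimal standalone check:

#include <cassert>
#include <cstdint>

static bool InRange(int32_t index, uint32_t length) {
  // The inverse of the "hs -> slow" check above.
  return static_cast<uint32_t>(index) < length;
}

int main() {
  assert(InRange(0, 10));
  assert(InRange(9, 10));
  assert(!InRange(10, 10));  // too large
  assert(!InRange(-1, 10));  // negative becomes 0xFFFFFFFF unsigned
  return 0;
}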
1971 | |
1972 // Handle both smis and HeapNumbers in the fast path. Go to the | |
1973 // runtime for all other kinds of values. | |
1974 // r3: external array. | |
1975 // r4: key (integer). | |
1976 __ BranchOnNotSmi(value, &check_heap_number); | |
1977 __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value. | |
1978 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); | |
1979 | |
1980 // r3: base pointer of external storage. | |
1981 // r4: key (integer). | |
1982 // r5: value (integer). | |
1983 switch (array_type) { | |
1984 case kExternalByteArray: | |
1985 case kExternalUnsignedByteArray: | |
1986 __ strb(r5, MemOperand(r3, r4, LSL, 0)); | |
1987 break; | |
1988 case kExternalShortArray: | |
1989 case kExternalUnsignedShortArray: | |
1990 __ strh(r5, MemOperand(r3, r4, LSL, 1)); | |
1991 break; | |
1992 case kExternalIntArray: | |
1993 case kExternalUnsignedIntArray: | |
1994 __ str(r5, MemOperand(r3, r4, LSL, 2)); | |
1995 break; | |
1996 case kExternalFloatArray: | |
1997 // Perform int-to-float conversion and store to memory. | |
1998 StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9); | |
1999 break; | |
2000 default: | |
2001 UNREACHABLE(); | |
2002 break; | |
2003 } | |
2004 | |
2005 // Entry registers are intact; r0 holds the value, which is the return value. |
2006 __ Ret(); | |
2007 | |
2008 | |
2009 // r3: external array. | |
2010 // r4: index (integer). | |
2011 __ bind(&check_heap_number); | |
2012 __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE); | |
2013 __ b(ne, &slow); | |
2014 | |
2015 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); | |
2016 | |
2017 // r3: base pointer of external storage. | |
2018 // r4: key (integer). | |
2019 | |
2020 // The WebGL specification leaves the behavior of storing NaN and | |
2021 // +/-Infinity into integer arrays basically undefined. For more | |
2022 // reproducible behavior, convert these to zero. | |
2023 if (CpuFeatures::IsSupported(VFP3)) { | |
2024 CpuFeatures::Scope scope(VFP3); | |
2025 | |
2026 | |
2027 if (array_type == kExternalFloatArray) { | |
2028 // vldr requires the offset to be a multiple of 4, so we cannot |
2029 // include -kHeapObjectTag in it. |
2030 __ sub(r5, r0, Operand(kHeapObjectTag)); | |
2031 __ vldr(d0, r5, HeapNumber::kValueOffset); | |
2032 __ add(r5, r3, Operand(r4, LSL, 2)); | |
2033 __ vcvt_f32_f64(s0, d0); | |
2034 __ vstr(s0, r5, 0); | |
2035 } else { | |
2036 // Need to perform float-to-int conversion. | |
2037 // Test for NaN or infinity (both give zero). | |
2038 __ ldr(r6, FieldMemOperand(r5, HeapNumber::kExponentOffset)); | |
2039 | |
2040 // Hoisted load. vldr requires the offset to be a multiple of 4, so we |
2041 // cannot include -kHeapObjectTag in it. |
2042 __ sub(r5, r0, Operand(kHeapObjectTag)); | |
2043 __ vldr(d0, r5, HeapNumber::kValueOffset); | |
2044 | |
2045 __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits); | |
2046 // NaNs and Infinities have all-ones exponents, so they sign-extend to -1. |
2047 __ cmp(r6, Operand(-1)); | |
2048 __ mov(r5, Operand(Smi::FromInt(0)), LeaveCC, eq); | |
2049 | |
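Reviewer note, illustrative only: Sbfx extracts the 11 exponent bits of the hi word as a signed field, and only NaN and +/-Infinity have an all-ones exponent, so only they compare equal to -1. A standalone model of that extraction (the uint32-to-int32 cast and the arithmetic right shift are assumed to behave as they do on ARM):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

// Signed bit-field extract of bits 20..30 of the hi word, like the sbfx above.
static int32_t ExtractSignedExponent(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  return static_cast<int32_t>(hi << 1) >> 21;
}

int main() {
  assert(ExtractSignedExponent(std::numeric_limits<double>::infinity()) == -1);
  assert(ExtractSignedExponent(-std::numeric_limits<double>::infinity()) == -1);
  assert(ExtractSignedExponent(std::numeric_limits<double>::quiet_NaN()) == -1);
  assert(ExtractSignedExponent(1.0) == 1023);  // biased exponent of 1.0
  assert(ExtractSignedExponent(0.0) == 0);
  return 0;
}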
2050 // Not infinity or NaN, so simply convert to int. |
2051 if (IsElementTypeSigned(array_type)) { | |
2052 __ vcvt_s32_f64(s0, d0, Assembler::RoundToZero, ne); | |
2053 } else { | |
2054 __ vcvt_u32_f64(s0, d0, Assembler::RoundToZero, ne); | |
2055 } | |
2056 __ vmov(r5, s0, ne); | |
2057 | |
2058 switch (array_type) { | |
2059 case kExternalByteArray: | |
2060 case kExternalUnsignedByteArray: | |
2061 __ strb(r5, MemOperand(r3, r4, LSL, 0)); | |
2062 break; | |
2063 case kExternalShortArray: | |
2064 case kExternalUnsignedShortArray: | |
2065 __ strh(r5, MemOperand(r3, r4, LSL, 1)); | |
2066 break; | |
2067 case kExternalIntArray: | |
2068 case kExternalUnsignedIntArray: | |
2069 __ str(r5, MemOperand(r3, r4, LSL, 2)); | |
2070 break; | |
2071 default: | |
2072 UNREACHABLE(); | |
2073 break; | |
2074 } | |
2075 } | |
2076 | |
2077 // Entry registers are intact; r0 holds the value, which is the return value. |
2078 __ Ret(); | |
2079 } else { | |
2080 // VFP3 is not available, so do the conversions manually. |
2081 __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset)); | |
2082 __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset)); | |
2083 | |
2084 if (array_type == kExternalFloatArray) { | |
2085 Label done, nan_or_infinity_or_zero; | |
2086 static const int kMantissaInHiWordShift = | |
2087 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; | |
2088 | |
2089 static const int kMantissaInLoWordShift = | |
2090 kBitsPerInt - kMantissaInHiWordShift; | |
2091 | |
2092 // Test for all special exponent values: zeros, subnormal numbers, NaNs | |
2093 // and infinities. All these should be converted to 0. | |
2094 __ mov(r7, Operand(HeapNumber::kExponentMask)); | |
2095 __ and_(r9, r5, Operand(r7), SetCC); | |
2096 __ b(eq, &nan_or_infinity_or_zero); | |
2097 | |
2098 __ teq(r9, Operand(r7)); | |
2099 __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq); | |
2100 __ b(eq, &nan_or_infinity_or_zero); | |
2101 | |
2102 // Rebias exponent. | |
2103 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); | |
2104 __ add(r9, | |
2105 r9, | |
2106 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); | |
2107 | |
2108 __ cmp(r9, Operand(kBinary32MaxExponent)); | |
2109 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt); | |
2110 __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt); | |
2111 __ b(gt, &done); | |
2112 | |
2113 __ cmp(r9, Operand(kBinary32MinExponent)); | |
2114 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt); | |
2115 __ b(lt, &done); | |
2116 | |
2117 __ and_(r7, r5, Operand(HeapNumber::kSignMask)); | |
2118 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); | |
2119 __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift)); | |
2120 __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift)); | |
2121 __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift)); | |
2122 | |
2123 __ bind(&done); | |
2124 __ str(r5, MemOperand(r3, r4, LSL, 2)); | |
2125 // Entry registers are intact; r0 holds the value, which is the return |
2126 // value. |
2127 __ Ret(); | |
2128 | |
2129 __ bind(&nan_or_infinity_or_zero); | |
2130 __ and_(r7, r5, Operand(HeapNumber::kSignMask)); | |
2131 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); | |
2132 __ orr(r9, r9, r7); | |
2133 __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift)); | |
2134 __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift)); | |
2135 __ b(&done); | |
2136 } else { | |
2137 bool is_signed_type = IsElementTypeSigned(array_type); | |
2138 int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; | |
2139 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; | |
2140 | |
2141 Label done, sign; | |
2142 | |
2143 // Test for all special exponent values: zeros, subnormal numbers, NaNs | |
2144 // and infinities. All these should be converted to 0. | |
2145 __ mov(r7, Operand(HeapNumber::kExponentMask)); | |
2146 __ and_(r9, r5, Operand(r7), SetCC); | |
2147 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); | |
2148 __ b(eq, &done); | |
2149 | |
2150 __ teq(r9, Operand(r7)); | |
2151 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); | |
2152 __ b(eq, &done); | |
2153 | |
2154 // Unbias exponent. | |
2155 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); | |
2156 __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); | |
2157 // If the exponent is negative, the result is 0. |
2158 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi); | |
2159 __ b(mi, &done); | |
2160 | |
2161 // If the exponent is too big, the result is the minimal value. |
2162 __ cmp(r9, Operand(meaningfull_bits - 1)); | |
2163 __ mov(r5, Operand(min_value), LeaveCC, ge); | |
2164 __ b(ge, &done); | |
2165 | |
2166 __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); | |
2167 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); | |
2168 __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); | |
2169 | |
2170 __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); | |
2171 __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); | |
2172 __ b(pl, &sign); | |
2173 | |
2174 __ rsb(r9, r9, Operand(0, RelocInfo::NONE)); | |
2175 __ mov(r5, Operand(r5, LSL, r9)); | |
2176 __ rsb(r9, r9, Operand(meaningfull_bits)); | |
2177 __ orr(r5, r5, Operand(r6, LSR, r9)); | |
2178 | |
2179 __ bind(&sign); | |
2180 __ teq(r7, Operand(0, RelocInfo::NONE)); | |
2181 __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne); | |
2182 | |
2183 __ bind(&done); | |
2184 switch (array_type) { | |
2185 case kExternalByteArray: | |
2186 case kExternalUnsignedByteArray: | |
2187 __ strb(r5, MemOperand(r3, r4, LSL, 0)); | |
2188 break; | |
2189 case kExternalShortArray: | |
2190 case kExternalUnsignedShortArray: | |
2191 __ strh(r5, MemOperand(r3, r4, LSL, 1)); | |
2192 break; | |
2193 case kExternalIntArray: | |
2194 case kExternalUnsignedIntArray: | |
2195 __ str(r5, MemOperand(r3, r4, LSL, 2)); | |
2196 break; | |
2197 default: | |
2198 UNREACHABLE(); | |
2199 break; | |
2200 } | |
2201 } | |
2202 } | |
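Reviewer note, illustrative only: a standalone model of the non-VFP double-to-binary32 store path above (exponent rebiased by 127 - 1023, overflow clamped to a signed infinity, underflow to a signed zero, low 29 mantissa bits dropped), checked bit-for-bit against values that are exactly representable in binary32, where truncation and rounding agree. The clamp thresholds 1 and 254 below are an assumption standing in for kBinary32MinExponent and kBinary32MaxExponent:

#include <cassert>
#include <cstdint>
#include <cstring>

static uint32_t ManualF64ToF32Bits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  uint32_t lo = static_cast<uint32_t>(bits);
  uint32_t sign = hi & 0x80000000u;
  uint32_t exponent = (hi >> 20) & 0x7FFu;
  // Top 20 mantissa bits shifted up by 3, low 3 bits taken from the lo word.
  uint32_t mantissa = ((hi & 0x000FFFFFu) << 3) | (lo >> 29);
  if (exponent == 0) return sign | mantissa;                     // zero/subnormal
  if (exponent == 0x7FFu) return sign | 0x7F800000u | mantissa;  // Inf/NaN
  int32_t rebiased = static_cast<int32_t>(exponent) - 1023 + 127;
  if (rebiased > 254) return sign | 0x7F800000u;  // overflow -> signed infinity
  if (rebiased < 1) return sign;                  // underflow -> signed zero
  return sign | (static_cast<uint32_t>(rebiased) << 23) | mantissa;
}

int main() {
  const float samples[] = {0.0f, -0.0f, 1.0f, -1.5f, 123456.78f, 1.0e30f,
                           -3.0e-30f};
  for (float x : samples) {
    uint32_t fbits;
    std::memcpy(&fbits, &x, sizeof(fbits));
    assert(ManualF64ToF32Bits(static_cast<double>(x)) == fbits);
  }
  return 0;
}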
2203 | |
2204 // Slow case: call runtime. | |
2205 __ bind(&slow); | |
2206 | |
2207 // Entry registers are intact. | |
2208 // r0: value | |
2209 // r1: key | |
2210 // r2: receiver | |
2211 GenerateRuntimeSetProperty(masm); | |
2212 } | |
2213 | |
2214 | |
2215 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { | 1638 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { |
2216 // ----------- S t a t e ------------- | 1639 // ----------- S t a t e ------------- |
2217 // -- r0 : value | 1640 // -- r0 : value |
2218 // -- r1 : receiver | 1641 // -- r1 : receiver |
2219 // -- r2 : name | 1642 // -- r2 : name |
2220 // -- lr : return address | 1643 // -- lr : return address |
2221 // ----------------------------------- | 1644 // ----------------------------------- |
2222 | 1645 |
2223 // Get the receiver from the stack and probe the stub cache. | 1646 // Get the receiver from the stack and probe the stub cache. |
2224 Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, | 1647 Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, |
(...skipping 158 matching lines...)
2383 | 1806 |
2384 | 1807 |
2385 void PatchInlinedSmiCode(Address address) { | 1808 void PatchInlinedSmiCode(Address address) { |
2386 UNIMPLEMENTED(); | 1809 UNIMPLEMENTED(); |
2387 } | 1810 } |
2388 | 1811 |
2389 | 1812 |
2390 } } // namespace v8::internal | 1813 } } // namespace v8::internal |
2391 | 1814 |
2392 #endif // V8_TARGET_ARCH_ARM | 1815 #endif // V8_TARGET_ARCH_ARM |