| OLD | NEW |
| 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 1321 matching lines...) |
| 1332 Label box_int; | 1332 Label box_int; |
| 1333 __ cmp(value, Operand(0xC0000000)); | 1333 __ cmp(value, Operand(0xC0000000)); |
| 1334 __ b(mi, &box_int); | 1334 __ b(mi, &box_int); |
| 1335 // Tag integer as smi and return it. | 1335 // Tag integer as smi and return it. |
| 1336 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | 1336 __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
| 1337 __ Ret(); | 1337 __ Ret(); |
| 1338 | 1338 |
| 1339 __ bind(&box_int); | 1339 __ bind(&box_int); |
| 1340 // Allocate a HeapNumber for the result and perform int-to-double | 1340 // Allocate a HeapNumber for the result and perform int-to-double |
| 1341 // conversion. Use r0 for result as key is not needed any more. | 1341 // conversion. Use r0 for result as key is not needed any more. |
| 1342 __ AllocateHeapNumber(r0, r3, r4, &slow); | 1342 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| | 1343 __ AllocateHeapNumber(r0, r3, r4, r6, &slow); |
| 1343 | 1344 |
| 1344 if (CpuFeatures::IsSupported(VFP3)) { | 1345 if (CpuFeatures::IsSupported(VFP3)) { |
| 1345 CpuFeatures::Scope scope(VFP3); | 1346 CpuFeatures::Scope scope(VFP3); |
| 1346 __ vmov(s0, value); | 1347 __ vmov(s0, value); |
| 1347 __ vcvt_f64_s32(d0, s0); | 1348 __ vcvt_f64_s32(d0, s0); |
| 1348 __ sub(r3, r0, Operand(kHeapObjectTag)); | 1349 __ sub(r3, r0, Operand(kHeapObjectTag)); |
| 1349 __ vstr(d0, r3, HeapNumber::kValueOffset); | 1350 __ vstr(d0, r3, HeapNumber::kValueOffset); |
| 1350 __ Ret(); | 1351 __ Ret(); |
| 1351 } else { | 1352 } else { |
| 1352 WriteInt32ToHeapNumberStub stub(value, r0, r3); | 1353 WriteInt32ToHeapNumberStub stub(value, r0, r3); |
| (...skipping 10 matching lines...) |
| 1363 __ b(ne, &box_int); | 1364 __ b(ne, &box_int); |
| 1364 // Tag integer as smi and return it. | 1365 // Tag integer as smi and return it. |
| 1365 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | 1366 __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
| 1366 __ Ret(); | 1367 __ Ret(); |
| 1367 | 1368 |
| 1368 __ bind(&box_int); | 1369 __ bind(&box_int); |
| 1369 __ vmov(s0, value); | 1370 __ vmov(s0, value); |
| 1370 // Allocate a HeapNumber for the result and perform int-to-double | 1371 // Allocate a HeapNumber for the result and perform int-to-double |
| 1371 // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all | 1372 // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all |
| 1372 // registers - also when jumping due to exhausted young space. | 1373 // registers - also when jumping due to exhausted young space. |
| 1373 __ AllocateHeapNumber(r2, r3, r4, &slow); | 1374 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| | 1375 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); |
| 1374 | 1376 |
| 1375 __ vcvt_f64_u32(d0, s0); | 1377 __ vcvt_f64_u32(d0, s0); |
| 1376 __ sub(r1, r2, Operand(kHeapObjectTag)); | 1378 __ sub(r1, r2, Operand(kHeapObjectTag)); |
| 1377 __ vstr(d0, r1, HeapNumber::kValueOffset); | 1379 __ vstr(d0, r1, HeapNumber::kValueOffset); |
| 1378 | 1380 |
| 1379 __ mov(r0, r2); | 1381 __ mov(r0, r2); |
| 1380 __ Ret(); | 1382 __ Ret(); |
| 1381 } else { | 1383 } else { |
| 1382 // Check whether unsigned integer fits into smi. | 1384 // Check whether unsigned integer fits into smi. |
| 1383 Label box_int_0, box_int_1, done; | 1385 Label box_int_0, box_int_1, done; |
| (...skipping 16 matching lines...) |
| 1400 __ bind(&box_int_1); | 1402 __ bind(&box_int_1); |
| 1401 // Integer has one leading zero. | 1403 // Integer has one leading zero. |
| 1402 GenerateUInt2Double(masm, hiword, loword, r4, 1); | 1404 GenerateUInt2Double(masm, hiword, loword, r4, 1); |
| 1403 | 1405 |
| 1404 | 1406 |
| 1405 __ bind(&done); | 1407 __ bind(&done); |
| 1406 // Integer was converted to double in registers hiword:loword. | 1408 // Integer was converted to double in registers hiword:loword. |
| 1407 // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber | 1409 // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber |
| 1408 // clobbers all registers - also when jumping due to exhausted young | 1410 // clobbers all registers - also when jumping due to exhausted young |
| 1409 // space. | 1411 // space. |
| 1410 __ AllocateHeapNumber(r4, r5, r6, &slow); | 1412 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| | 1413 __ AllocateHeapNumber(r4, r5, r7, r6, &slow); |
| 1411 | 1414 |
| 1412 __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); | 1415 __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); |
| 1413 __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); | 1416 __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); |
| 1414 | 1417 |
| 1415 __ mov(r0, r4); | 1418 __ mov(r0, r4); |
| 1416 __ Ret(); | 1419 __ Ret(); |
| 1417 } | 1420 } |
| 1418 } else if (array_type == kExternalFloatArray) { | 1421 } else if (array_type == kExternalFloatArray) { |
| 1419 // For the floating-point array type, we need to always allocate a | 1422 // For the floating-point array type, we need to always allocate a |
| 1420 // HeapNumber. | 1423 // HeapNumber. |
| 1421 if (CpuFeatures::IsSupported(VFP3)) { | 1424 if (CpuFeatures::IsSupported(VFP3)) { |
| 1422 CpuFeatures::Scope scope(VFP3); | 1425 CpuFeatures::Scope scope(VFP3); |
| 1423 // Allocate a HeapNumber for the result. Don't use r0 and r1 as | 1426 // Allocate a HeapNumber for the result. Don't use r0 and r1 as |
| 1424 // AllocateHeapNumber clobbers all registers - also when jumping due to | 1427 // AllocateHeapNumber clobbers all registers - also when jumping due to |
| 1425 // exhausted young space. | 1428 // exhausted young space. |
| 1426 __ AllocateHeapNumber(r2, r3, r4, &slow); | 1429 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| | 1430 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); |
| 1427 __ vcvt_f64_f32(d0, s0); | 1431 __ vcvt_f64_f32(d0, s0); |
| 1428 __ sub(r1, r2, Operand(kHeapObjectTag)); | 1432 __ sub(r1, r2, Operand(kHeapObjectTag)); |
| 1429 __ vstr(d0, r1, HeapNumber::kValueOffset); | 1433 __ vstr(d0, r1, HeapNumber::kValueOffset); |
| 1430 | 1434 |
| 1431 __ mov(r0, r2); | 1435 __ mov(r0, r2); |
| 1432 __ Ret(); | 1436 __ Ret(); |
| 1433 } else { | 1437 } else { |
| 1434 // Allocate a HeapNumber for the result. Don't use r0 and r1 as | 1438 // Allocate a HeapNumber for the result. Don't use r0 and r1 as |
| 1435 // AllocateHeapNumber clobbers all registers - also when jumping due to | 1439 // AllocateHeapNumber clobbers all registers - also when jumping due to |
| 1436 // exhausted young space. | 1440 // exhausted young space. |
| 1437 __ AllocateHeapNumber(r3, r4, r5, &slow); | 1441 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| | 1442 __ AllocateHeapNumber(r3, r4, r5, r6, &slow); |
| 1438 // VFP is not available, do manual single to double conversion. | 1443 // VFP is not available, do manual single to double conversion. |
| 1439 | 1444 |
| 1440 // r2: floating point value (binary32) | 1445 // r2: floating point value (binary32) |
| 1441 // r3: heap number for result | 1446 // r3: heap number for result |
| 1442 | 1447 |
| 1443 // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to | 1448 // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to |
| 1444 // the slow case from here. | 1449 // the slow case from here. |
| 1445 __ and_(r0, value, Operand(kBinary32MantissaMask)); | 1450 __ and_(r0, value, Operand(kBinary32MantissaMask)); |
| 1446 | 1451 |
| 1447 // Extract exponent to r1. OK to clobber r1 now as there are no jumps to | 1452 // Extract exponent to r1. OK to clobber r1 now as there are no jumps to |
| (...skipping 706 matching lines...) |
| 2154 GenerateMiss(masm); | 2159 GenerateMiss(masm); |
| 2155 } | 2160 } |
| 2156 | 2161 |
| 2157 | 2162 |
| 2158 #undef __ | 2163 #undef __ |
| 2159 | 2164 |
| 2160 | 2165 |
| 2161 } } // namespace v8::internal | 2166 } } // namespace v8::internal |
| 2162 | 2167 |
| 2163 #endif // V8_TARGET_ARCH_ARM | 2168 #endif // V8_TARGET_ARCH_ARM |
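
The non-VFP fallback paths in this diff build the result double's exponent and mantissa words by hand (via GenerateUInt2Double and the binary32 unpacking) instead of using hardware conversion. As a rough standalone illustration of that bit construction for a non-zero 32-bit unsigned integer: UInt32ToDoubleBits is a hypothetical name, not the V8 helper, and it assumes the GCC/Clang __builtin_clz intrinsic.

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Sketch: build the IEEE-754 bit pattern of the double equal to a
    // non-zero 32-bit unsigned integer, without using the FPU.
    uint64_t UInt32ToDoubleBits(uint32_t value) {
      if (value == 0) return 0;                  // +0.0 is the all-zero pattern.
      int shift = 31 - __builtin_clz(value);     // index of the highest set bit
      uint64_t exponent = 1023 + shift;          // biased exponent (bias 1023)
      // Drop the implicit leading 1 and left-align the remaining bits into
      // the 52-bit mantissa field.
      uint64_t mantissa = (static_cast<uint64_t>(value) << (52 - shift)) &
                          ((uint64_t{1} << 52) - 1);
      return (exponent << 52) | mantissa;
    }

    int main() {
      uint32_t v = 0x80000001u;                  // does not fit in an int32
      uint64_t bits = UInt32ToDoubleBits(v);
      double d;
      std::memcpy(&d, &bits, sizeof d);
      std::printf("%u -> %.1f\n", static_cast<unsigned>(v), d);  // 2147483649 -> 2147483649.0
      return 0;
    }

The high and low 32-bit halves of the returned value correspond to the hiword:loword pair the ARM code stores into HeapNumber::kExponentOffset and HeapNumber::kMantissaOffset; the smi fast path above avoids all of this for values that fit in 31 bits.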