Chromium Code Reviews

Unified Diff: src/arm/code-stubs-arm.cc

Issue 3247008: Handle bitwise operations with literal Smi for 32bits integers without... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 3 months ago
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1445 matching lines...)
       // Only second argument is a string.
       __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);

       __ bind(&not_strings);
     }

   __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call.  No return.
 }

-// Tries to get a signed int32 out of a double precision floating point heap
-// number. Rounds towards 0. Fastest for doubles that are in the ranges
-// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
-// almost to the range of signed int32 values that are not Smis. Jumps to the
-// label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
-// (excluding the endpoints).
-static void GetInt32(MacroAssembler* masm,
-                     Register source,
-                     Register dest,
-                     Register scratch,
-                     Register scratch2,
-                     Label* slow) {
-  Label right_exponent, done;
-  // Get exponent word.
-  __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
-  // Get exponent alone in scratch2.
-  __ Ubfx(scratch2,
-          scratch,
-          HeapNumber::kExponentShift,
-          HeapNumber::kExponentBits);
-  // Load dest with zero.  We use this either for the final shift or
-  // for the answer.
-  __ mov(dest, Operand(0));
-  // Check whether the exponent matches a 32 bit signed int that is not a Smi.
-  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).  This is
-  // the exponent that we are fastest at and also the highest exponent we can
-  // handle here.
-  const uint32_t non_smi_exponent = HeapNumber::kExponentBias + 30;
-  // The non_smi_exponent, 0x41d, is too big for ARM's immediate field so we
-  // split it up to avoid a constant pool entry.  You can't do that in general
-  // for cmp because of the overflow flag, but we know the exponent is in the
-  // range 0-2047 so there is no overflow.
-  int fudge_factor = 0x400;
-  __ sub(scratch2, scratch2, Operand(fudge_factor));
-  __ cmp(scratch2, Operand(non_smi_exponent - fudge_factor));
-  // If we have a match of the int32-but-not-Smi exponent then skip some logic.
-  __ b(eq, &right_exponent);
-  // If the exponent is higher than that then go to slow case.  This catches
-  // numbers that don't fit in a signed int32, infinities and NaNs.
-  __ b(gt, slow);
-
-  // We know the exponent is smaller than 30 (biased).  If it is less than
-  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
-  // it rounds to zero.
-  const uint32_t zero_exponent = HeapNumber::kExponentBias + 0;
-  __ sub(scratch2, scratch2, Operand(zero_exponent - fudge_factor), SetCC);
-  // Dest already has a Smi zero.
-  __ b(lt, &done);
-  if (!CpuFeatures::IsSupported(VFP3)) {
-    // We have an exponent between 0 and 30 in scratch2.  Subtract from 30 to
-    // get how much to shift down.
-    __ rsb(dest, scratch2, Operand(30));
-  }
-  __ bind(&right_exponent);
-  if (CpuFeatures::IsSupported(VFP3)) {
-    CpuFeatures::Scope scope(VFP3);
-    // ARMv7 VFP3 instructions implementing double precision to integer
-    // conversion using round to zero.
-    __ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
-    __ vmov(d7, scratch2, scratch);
-    __ vcvt_s32_f64(s15, d7);
-    __ vmov(dest, s15);
-  } else {
-    // Get the top bits of the mantissa.
-    __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
-    // Put back the implicit 1.
-    __ orr(scratch2, scratch2, Operand(1 << HeapNumber::kExponentShift));
-    // Shift up the mantissa bits to take up the space the exponent used to
-    // take. We just orred in the implicit bit so that took care of one and
-    // we want to leave the sign bit 0 so we subtract 2 bits from the shift
-    // distance.
-    const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
-    __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
-    // Put sign in zero flag.
-    __ tst(scratch, Operand(HeapNumber::kSignMask));
-    // Get the second half of the double.  For some exponents we don't
-    // actually need this because the bits get shifted out again, but
-    // it's probably slower to test than just to do it.
-    __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
-    // Shift down 22 bits to get the last 10 bits.
-    __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
-    // Move down according to the exponent.
-    __ mov(dest, Operand(scratch, LSR, dest));
-    // Fix sign if sign bit was set.
-    __ rsb(dest, dest, Operand(0), LeaveCC, ne);
-  }
-  __ bind(&done);
-}
-
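
Reviewer note: the function removed above is the pre-VFP3 fallback for truncating a heap number's double toward zero; the patch replaces its call sites with the macro-assembler helper __ ConvertToInt32. For reference, here is a minimal C++ sketch of what the bit-twiddling computes on the raw IEEE-754 bits. The names and constants are illustrative stand-ins, not V8 identifiers, and the VFP3 fast path is omitted:

#include <cstdint>
#include <cstring>

// Sketch of the non-VFP3 path: truncate an IEEE-754 double toward zero
// using only integer operations.  Mirrors GetInt32's contract: fails
// (returns false, i.e. the 'slow' label) for |d| >= 2^31, infinities
// and NaNs.
bool TruncateDoubleToInt32(double d, int32_t* result) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);  // the two ldr's of the heap number
  // Extract and unbias the 11-bit exponent (the Ubfx above).
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent > 30) return false;                  // b(gt, slow)
  if (exponent < 0) { *result = 0; return true; }   // b(lt, &done): rounds to 0
  // Reinstate the implicit leading 1, then shift so that 'exponent' bits
  // of the mantissa remain above the binary point (the orr/LSL/LSR dance).
  uint64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
  int32_t magnitude = static_cast<int32_t>(mantissa >> (52 - exponent));
  *result = (bits >> 63) ? -magnitude : magnitude;  // tst kSignMask; rsb if ne
  return true;
}

The 0x400 fudge factor in the removed code is purely an ARM encoding detail: the biased comparison value 0x41d does not fit in an immediate operand, so the code subtracts 0x400 first and compares against 0x1d. In C the unbiased exponent can be compared directly, as above.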
 // For bitwise ops where the inputs are not both Smis we here try to determine
 // whether both inputs are either Smis or at least heap numbers that can be
 // represented by a 32 bit signed value.  We truncate towards zero as required
 // by the ES spec.  If this is the case we do the bitwise op and see if the
 // result is a Smi.  If so, great, otherwise we try to find a heap number to
 // write the answer into (either by allocating or by overwriting).
 // On entry the operands are in lhs and rhs.  On exit the answer is in r0.
 void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
                                                 Register lhs,
                                                 Register rhs) {
   Label slow, result_not_a_smi;
   Label rhs_is_smi, lhs_is_smi;
   Label done_checking_rhs, done_checking_lhs;

   Register heap_number_map = r6;
   __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

   __ tst(lhs, Operand(kSmiTagMask));
   __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number.
   __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  GetInt32(masm, lhs, r3, r5, r4, &slow);
+  __ ConvertToInt32(lhs, r3, r5, r4, &slow);
   __ jmp(&done_checking_lhs);
   __ bind(&lhs_is_smi);
   __ mov(r3, Operand(lhs, ASR, 1));
   __ bind(&done_checking_lhs);

   __ tst(rhs, Operand(kSmiTagMask));
   __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number.
   __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
   __ cmp(r4, heap_number_map);
   __ b(ne, &slow);
-  GetInt32(masm, rhs, r2, r5, r4, &slow);
+  __ ConvertToInt32(rhs, r2, r5, r4, &slow);
   __ jmp(&done_checking_rhs);
   __ bind(&rhs_is_smi);
   __ mov(r2, Operand(rhs, ASR, 1));
   __ bind(&done_checking_rhs);

   ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));

   // r0 and r1: Original operands (Smi or heap numbers).
   // r2 and r3: Signed int32 operands.
   switch (op_) {
(...skipping 834 matching lines...)
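
Aside for readers of the hunk above: both operand checks rely on V8's 32-bit Smi encoding, in which a small integer is stored shifted left by one with tag bit 0 clear, so tst tests the tag and ASR untags. A minimal sketch of the same two operations (helper names are illustrative):

#include <cstdint>

const intptr_t kSmiTagMaskSketch = 1;  // low bit is the tag; 0 means Smi

bool IsSmi(intptr_t value) {
  return (value & kSmiTagMaskSketch) == 0;  // __ tst(lhs, Operand(kSmiTagMask))
}

int32_t SmiValue(intptr_t value) {
  return static_cast<int32_t>(value) >> 1;  // __ mov(r3, Operand(lhs, ASR, 1))
}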
       __ mov(r0, Operand(r1));
     }
   } else if (op_ == Token::BIT_NOT) {
     // Check if the operand is a heap number.
     __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
     __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
     __ cmp(r1, heap_number_map);
     __ b(ne, &slow);

     // Convert the heap number in r0 to an untagged integer in r1.
-    GetInt32(masm, r0, r1, r2, r3, &slow);
+    __ ConvertToInt32(r0, r1, r2, r3, &slow);

     // Do the bitwise operation (move negated) and check if the result
     // fits in a smi.
     Label try_float;
     __ mvn(r1, Operand(r1));
     __ add(r2, r1, Operand(0x40000000), SetCC);
     __ b(mi, &try_float);
     __ mov(r0, Operand(r1, LSL, kSmiTagSize));
     __ b(&done);

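The add/b(mi) pair above is a compact Smi-range check: an int32 fits in a 31-bit Smi iff it lies in [-2^30, 2^30), and adding 2^30 maps exactly that interval onto the non-negative 32-bit values, so the N flag ('mi') is set precisely for values that do not fit. A sketch of the same test, with hypothetical helper names:

#include <cstdint>

// True iff n can be tagged as a 31-bit Smi, i.e. n is in [-2^30, 2^30).
bool FitsInSmi(int32_t n) {
  uint32_t shifted = static_cast<uint32_t>(n) + 0x40000000u;  // add ..., SetCC
  return static_cast<int32_t>(shifted) >= 0;                  // branch if 'mi'
}

// Tagging is then a single shift: __ mov(r0, Operand(r1, LSL, kSmiTagSize)).
// The unsigned cast sidesteps signed-shift UB in pre-C++20 dialects.
int32_t TagSmi(int32_t n) {
  return static_cast<int32_t>(static_cast<uint32_t>(n) << 1);
}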
(...skipping 2314 matching lines...)
   __ bind(&string_add_runtime);
   __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
 }


 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM
