Index: runtime/vm/intrinsifier_arm.cc |
=================================================================== |
--- runtime/vm/intrinsifier_arm.cc (revision 24410) |
+++ runtime/vm/intrinsifier_arm.cc (working copy) |
@@ -869,9 +869,9 @@ |
__ bx(LR, EQ); |
// Arguments are Smi but the shift produced an overflow to Mint. |
- __ CompareImmediate(R6, 0); |
+ __ CompareImmediate(R1, 0); |
__ b(&fall_through, LT); |
- __ SmiUntag(R6); |
+ __ SmiUntag(R1); |
// Pull off high bits that will be shifted off of R6 by making a mask |
regis
2013/06/25 17:05:16
R6 -> R1 here and all occurrences below.
zra
2013/06/25 17:34:30
Done.
|
// ((1 << R0) - 1), shifting it to the left, masking R6, then shifting back. |
@@ -882,10 +882,10 @@ |
__ sub(R7, R7, ShifterOperand(1)); // R7 <- R7 - 1 |
__ rsb(R8, R0, ShifterOperand(32)); // R8 <- 32 - R0 |
__ mov(R7, ShifterOperand(R7, LSL, R8)); // R7 <- R7 << R8 |
- __ and_(R7, R6, ShifterOperand(R7)); // R7 <- R7 & R6 |
+ __ and_(R7, R1, ShifterOperand(R7)); // R7 <- R7 & R1 |
__ mov(R7, ShifterOperand(R7, LSR, R8)); // R7 <- R7 >> R8 |
// Now R7 has the bits that fall off of R6 on a left shift. |
- __ mov(R1, ShifterOperand(R6, LSL, R0)); // R1 gets the low bits. |
+ __ mov(R1, ShifterOperand(R1, LSL, R0)); // R1 gets the low bits. |
regis
2013/06/25 17:05:16
That should work better :-)
|
const Class& mint_class = Class::Handle( |
Isolate::Current()->object_store()->mint_class()); |