Chromium Code Reviews

Unified Diff: runtime/vm/intrinsifier_arm.cc

Issue 1156593002: Cache current thread in a reserved register and use it in LoadIsolate (Closed)
Base URL: https://github.com/dart-lang/sdk.git@master
Patch Set: Added more comments. Created 5 years, 7 months ago
Index: runtime/vm/intrinsifier_arm.cc
diff --git a/runtime/vm/intrinsifier_arm.cc b/runtime/vm/intrinsifier_arm.cc
index 3cf09c59b9492f6be90d789f65796f11126365df..2037335ee9a2ef9e1b1e1ac07a5c11b9a094bacc 100644
--- a/runtime/vm/intrinsifier_arm.cc
+++ b/runtime/vm/intrinsifier_arm.cc
@@ -576,10 +576,10 @@ void Intrinsifier::Integer_shl(Assembler* assembler) {
__ LoadImmediate(R7, 1);
__ mov(R7, Operand(R7, LSL, R0)); // R7 <- 1 << R0
__ sub(R7, R7, Operand(1)); // R7 <- R7 - 1
- __ rsb(R8, R0, Operand(32)); // R8 <- 32 - R0
- __ mov(R7, Operand(R7, LSL, R8)); // R7 <- R7 << R8
+ __ rsb(R9, R0, Operand(32)); // R9 <- 32 - R0
+ __ mov(R7, Operand(R7, LSL, R9)); // R7 <- R7 << R9
__ and_(R7, R1, Operand(R7)); // R7 <- R7 & R1
- __ mov(R7, Operand(R7, LSR, R8)); // R7 <- R7 >> R8
+ __ mov(R7, Operand(R7, LSR, R9)); // R7 <- R7 >> R9
// Now R7 has the bits that fall off of R1 on a left shift.
__ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits.
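For reference, a minimal C++ sketch of the bit juggling this stub performs with R7 and R9, assuming 32-bit words and an untagged shift count in [1, 31]; bits_shifted_out is an illustrative name, not a runtime function:

#include <cstdint>
#include <cstdio>

// Collect the bits that would fall off the low word on a left shift by 'count':
// build a mask of the top 'count' bits, select them, and move them down.
static uint32_t bits_shifted_out(uint32_t low, uint32_t count) {
  uint32_t mask = ((1u << count) - 1u) << (32u - count);  // R7 after the LSL by R9
  return (low & mask) >> (32u - count);                   // R7 after the AND and LSR
}

int main() {
  std::printf("%x\n", bits_shifted_out(0x80000001u, 4));  // prints 8
  return 0;
}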
@@ -817,12 +817,12 @@ void Intrinsifier::Bigint_lsh(Assembler* assembler) {
__ add(R6, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
// R7 = &x_digits[x_used]
__ add(R7, R6, Operand(R2, LSL, 1));
- // R8 = &r_digits[1]
- __ add(R8, R4, Operand(TypedData::data_offset() - kHeapObjectTag +
+ // R9 = &r_digits[1]
+ __ add(R9, R4, Operand(TypedData::data_offset() - kHeapObjectTag +
Bigint::kBytesPerDigit));
- // R8 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
+ // R9 = &r_digits[x_used + n ~/ _DIGIT_BITS + 1]
__ add(R0, R0, Operand(R2, ASR, 1));
- __ add(R8, R8, Operand(R0, LSL, 2));
+ __ add(R9, R9, Operand(R0, LSL, 2));
// R3 = n % _DIGIT_BITS
__ and_(R3, R5, Operand(31));
// R2 = 32 - R3
@@ -832,11 +832,11 @@ void Intrinsifier::Bigint_lsh(Assembler* assembler) {
__ Bind(&loop);
__ ldr(R0, Address(R7, -Bigint::kBytesPerDigit, Address::PreIndex));
__ orr(R1, R1, Operand(R0, LSR, R2));
- __ str(R1, Address(R8, -Bigint::kBytesPerDigit, Address::PreIndex));
+ __ str(R1, Address(R9, -Bigint::kBytesPerDigit, Address::PreIndex));
__ mov(R1, Operand(R0, LSL, R3));
__ teq(R7, Operand(R6));
__ b(&loop, NE);
- __ str(R1, Address(R8, -Bigint::kBytesPerDigit, Address::PreIndex));
+ __ str(R1, Address(R9, -Bigint::kBytesPerDigit, Address::PreIndex));
// Returning Object::null() is not required, since this method is private.
__ Ret();
}
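The loop above walks x_digits from most significant to least, carrying the bits that cross a digit boundary into the next store. A hedged C++ sketch of the same walk, assuming 32-bit digits and 0 < n % 32 < 32 (function and variable names are illustrative, not the runtime's API):

#include <cstdint>
#include <vector>

std::vector<uint32_t> bigint_lsh(const std::vector<uint32_t>& x, uint32_t n) {
  const uint32_t digit_shift = n / 32;           // n ~/ _DIGIT_BITS
  const uint32_t bit_shift = n % 32;             // n % _DIGIT_BITS
  std::vector<uint32_t> r(x.size() + digit_shift + 1, 0);
  uint32_t carry = 0;                            // R1 at loop entry
  size_t out = x.size() + digit_shift;           // first store lands at r_digits[x_used + n ~/ 32]
  for (size_t i = x.size(); i-- > 0;) {          // pre-decrementing walk, like R7/R9
    uint32_t d = x[i];
    r[out--] = carry | (d >> (32 - bit_shift));
    carry = d << bit_shift;
  }
  r[out] = carry;                                // the final str after the loop
  return r;
}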
@@ -853,15 +853,15 @@ void Intrinsifier::Bigint_rsh(Assembler* assembler) {
__ SmiUntag(R5);
// R0 = n ~/ _DIGIT_BITS
__ Asr(R0, R5, Operand(5));
- // R8 = &r_digits[0]
- __ add(R8, R4, Operand(TypedData::data_offset() - kHeapObjectTag));
+ // R9 = &r_digits[0]
+ __ add(R9, R4, Operand(TypedData::data_offset() - kHeapObjectTag));
// R7 = &x_digits[n ~/ _DIGIT_BITS]
__ add(R7, R3, Operand(TypedData::data_offset() - kHeapObjectTag));
__ add(R7, R7, Operand(R0, LSL, 2));
// R6 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
__ add(R0, R0, Operand(1));
__ rsb(R0, R0, Operand(R2, ASR, 1));
- __ add(R6, R8, Operand(R0, LSL, 2));
+ __ add(R6, R9, Operand(R0, LSL, 2));
// R3 = n % _DIGIT_BITS
__ and_(R3, R5, Operand(31));
// R2 = 32 - R3
@@ -875,12 +875,12 @@ void Intrinsifier::Bigint_rsh(Assembler* assembler) {
__ Bind(&loop);
__ ldr(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
__ orr(R1, R1, Operand(R0, LSL, R2));
- __ str(R1, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex));
+ __ str(R1, Address(R9, Bigint::kBytesPerDigit, Address::PostIndex));
__ mov(R1, Operand(R0, LSR, R3));
__ Bind(&loop_entry);
- __ teq(R8, Operand(R6));
+ __ teq(R9, Operand(R6));
__ b(&loop, NE);
- __ str(R1, Address(R8, 0));
+ __ str(R1, Address(R9, 0));
// Returning Object::null() is not required, since this method is private.
__ Ret();
}
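The right shift mirrors it, walking from the least significant digit upward (PostIndex instead of PreIndex). A matching sketch under the same assumptions, plus n ~/ 32 < x.size():

#include <cstdint>
#include <vector>

std::vector<uint32_t> bigint_rsh(const std::vector<uint32_t>& x, uint32_t n) {
  const uint32_t digit_shift = n / 32;
  const uint32_t bit_shift = n % 32;
  std::vector<uint32_t> r(x.size() - digit_shift, 0);
  uint32_t carry = x[digit_shift] >> bit_shift;  // R1, initialised outside the shown hunks
  size_t out = 0;                                // &r_digits[0]
  for (size_t i = digit_shift + 1; i < x.size(); ++i) {
    uint32_t d = x[i];
    r[out++] = carry | (d << (32 - bit_shift));
    carry = d >> bit_shift;
  }
  r[out] = carry;                                // final str at R9
  return r;
}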
@@ -909,8 +909,8 @@ void Intrinsifier::Bigint_absAdd(Assembler* assembler) {
// R7 = &digits[a_used >> 1], a_used is Smi.
__ add(R7, R3, Operand(R4, LSL, 1));
- // R8 = &digits[used >> 1], used is Smi.
- __ add(R8, R3, Operand(R2, LSL, 1));
+ // R9 = &digits[used >> 1], used is Smi.
+ __ add(R9, R3, Operand(R2, LSL, 1));
__ adds(R0, R0, Operand(0)); // carry flag = 0
Label add_loop;
@@ -924,7 +924,7 @@ void Intrinsifier::Bigint_absAdd(Assembler* assembler) {
__ b(&add_loop, NE);
Label last_carry;
- __ teq(R3, Operand(R8)); // Does not affect carry flag.
+ __ teq(R3, Operand(R9)); // Does not affect carry flag.
__ b(&last_carry, EQ); // If used - a_used == 0.
Label carry_loop;
@@ -932,7 +932,7 @@ void Intrinsifier::Bigint_absAdd(Assembler* assembler) {
// Loop used - a_used times, used - a_used > 0.
__ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
__ adcs(R0, R0, Operand(0));
- __ teq(R3, Operand(R8)); // Does not affect carry flag.
+ __ teq(R3, Operand(R9)); // Does not affect carry flag.
__ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
__ b(&carry_loop, NE);
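A minimal C++ sketch of the three phases visible in this function (add_loop, carry_loop, last_carry), assuming 32-bit digits, a_used <= used, and an r_digits buffer with room for used + 1 digits; the signature is illustrative, not the runtime's:

#include <cstddef>
#include <cstdint>

void bigint_abs_add(const uint32_t* digits, size_t used,
                    const uint32_t* a_digits, size_t a_used,
                    uint32_t* r_digits) {
  uint64_t carry = 0;
  size_t i = 0;
  for (; i < a_used; ++i) {                      // add_loop: digit + a_digit + carry
    uint64_t sum = uint64_t{digits[i]} + a_digits[i] + carry;
    r_digits[i] = static_cast<uint32_t>(sum);
    carry = sum >> 32;
  }
  for (; i < used; ++i) {                        // carry_loop: propagate the carry only
    uint64_t sum = uint64_t{digits[i]} + carry;
    r_digits[i] = static_cast<uint32_t>(sum);
    carry = sum >> 32;
  }
  r_digits[used] = static_cast<uint32_t>(carry); // last_carry
}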
@@ -969,8 +969,8 @@ void Intrinsifier::Bigint_absSub(Assembler* assembler) {
// R7 = &digits[a_used >> 1], a_used is Smi.
__ add(R7, R3, Operand(R4, LSL, 1));
- // R8 = &digits[used >> 1], used is Smi.
- __ add(R8, R3, Operand(R2, LSL, 1));
+ // R9 = &digits[used >> 1], used is Smi.
+ __ add(R9, R3, Operand(R2, LSL, 1));
__ subs(R0, R0, Operand(0)); // carry flag = 1
Label sub_loop;
@@ -984,7 +984,7 @@ void Intrinsifier::Bigint_absSub(Assembler* assembler) {
__ b(&sub_loop, NE);
Label done;
- __ teq(R3, Operand(R8)); // Does not affect carry flag.
+ __ teq(R3, Operand(R9)); // Does not affect carry flag.
__ b(&done, EQ); // If used - a_used == 0.
Label carry_loop;
@@ -992,7 +992,7 @@ void Intrinsifier::Bigint_absSub(Assembler* assembler) {
// Loop used - a_used times, used - a_used > 0.
__ ldr(R0, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
__ sbcs(R0, R0, Operand(0));
- __ teq(R3, Operand(R8)); // Does not affect carry flag.
+ __ teq(R3, Operand(R9)); // Does not affect carry flag.
__ str(R0, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
__ b(&carry_loop, NE);
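The subtract has the same two-loop shape but propagates a borrow; on ARM, SBCS treats a set carry flag as "no borrow", which is why the stub seeds the flag with subs(R0, R0, Operand(0)). A hedged sketch under the same assumptions (no trailing borrow is written, matching the stub):

#include <cstddef>
#include <cstdint>

void bigint_abs_sub(const uint32_t* digits, size_t used,
                    const uint32_t* a_digits, size_t a_used,
                    uint32_t* r_digits) {
  uint64_t borrow = 0;
  size_t i = 0;
  for (; i < a_used; ++i) {                      // sub_loop: digit - a_digit - borrow
    uint64_t diff = uint64_t{digits[i]} - a_digits[i] - borrow;
    r_digits[i] = static_cast<uint32_t>(diff);
    borrow = (diff >> 32) & 1;                   // 1 iff the 32-bit subtraction wrapped
  }
  for (; i < used; ++i) {                        // carry_loop: propagate the borrow only
    uint64_t diff = uint64_t{digits[i]} - borrow;
    r_digits[i] = static_cast<uint32_t>(diff);
    borrow = (diff >> 32) & 1;
  }
}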
@@ -1161,9 +1161,9 @@ void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) {
// int n = used - i - 1; while (--n >= 0) ...
__ ldr(R0, Address(SP, 0 * kWordSize)); // used is Smi
- __ sub(R8, R0, Operand(R2));
+ __ sub(R9, R0, Operand(R2));
__ mov(R0, Operand(2)); // n = used - i - 2; if (n >= 0) ... while (--n >= 0)
- __ rsbs(R8, R0, Operand(R8, ASR, kSmiTagSize));
+ __ rsbs(R9, R0, Operand(R9, ASR, kSmiTagSize));
Label loop, done;
__ b(&done, MI);
@@ -1174,7 +1174,7 @@ void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) {
// ajp: R5
// c: R7:R6
// t: R2:R1:R0 (not live at loop entry)
- // n: R8
+ // n: R9
// uint32_t xi = *xip++
__ ldr(R2, Address(R4, Bigint::kBytesPerDigit, Address::PostIndex));
@@ -1197,7 +1197,7 @@ void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) {
__ str(R0, Address(R5, Bigint::kBytesPerDigit, Address::PostIndex));
// while (--n >= 0)
- __ subs(R8, R8, Operand(1)); // --n
+ __ subs(R9, R9, Operand(1)); // --n
__ b(&loop, PL);
__ Bind(&done);
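The counter set up in this function relies on Dart's 1-bit Smi tag: a Smi stores value << 1, so subtracting two Smis and arithmetically shifting right by kSmiTagSize yields the untagged difference, giving n = used - i - 2 as the comment says. A small sketch of that arithmetic (tag/untag are illustrative helpers, not VM functions):

#include <cstdint>
#include <cstdio>

static int32_t tag(int32_t v) { return v << 1; }       // Smi encoding: value << 1
static int32_t untag(int32_t smi) { return smi >> 1; } // arithmetic shift undoes it

int main() {
  int32_t used = tag(10), i = tag(3);
  // Mirrors sub(R9, used, i) followed by rsbs(R9, 2, Operand(R9, ASR, kSmiTagSize)).
  int32_t n = ((used - i) >> 1) - 2;
  std::printf("n = %d\n", n);  // prints n = 5, i.e. untag(used) - untag(i) - 2
  return 0;
}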
