Chromium Code Reviews

Unified Diff: runtime/vm/intrinsifier_arm.cc

Issue 1421253004: Use the iOS ABI when running SIMARM on Mac or targeting iOS. (Closed) Base URL: git@github.com:dart-lang/sdk.git@master
Patch Set: R7 -> NOTFP Created 5 years, 2 months ago
Index: runtime/vm/intrinsifier_arm.cc
diff --git a/runtime/vm/intrinsifier_arm.cc b/runtime/vm/intrinsifier_arm.cc
index 0a62b088ceab9f19663526403d270229cd7b703c..25b53b33227af0d9c848ebf7f8e4603ecd4603d2 100644
--- a/runtime/vm/intrinsifier_arm.cc
+++ b/runtime/vm/intrinsifier_arm.cc
@@ -531,14 +531,14 @@ void Intrinsifier::Integer_shl(Assembler* assembler) {
// ((1 << R0) - 1), shifting it to the left, masking R1, then shifting back.
// high bits = ((((1 << R0) - 1) << (32 - R0)) & R1) >> (32 - R0)
// lo bits = R1 << R0
- __ LoadImmediate(R7, 1);
- __ mov(R7, Operand(R7, LSL, R0)); // R7 <- 1 << R0
- __ sub(R7, R7, Operand(1)); // R7 <- R7 - 1
+ __ LoadImmediate(NOTFP, 1);
+ __ mov(NOTFP, Operand(NOTFP, LSL, R0)); // NOTFP <- 1 << R0
+ __ sub(NOTFP, NOTFP, Operand(1)); // NOTFP <- NOTFP - 1
__ rsb(R6, R0, Operand(32)); // R6 <- 32 - R0
- __ mov(R7, Operand(R7, LSL, R6)); // R7 <- R7 << R6
- __ and_(R7, R1, Operand(R7)); // R7 <- R7 & R1
- __ mov(R7, Operand(R7, LSR, R6)); // R7 <- R7 >> R6
- // Now R7 has the bits that fall off of R1 on a left shift.
+ __ mov(NOTFP, Operand(NOTFP, LSL, R6)); // NOTFP <- NOTFP << R6
+ __ and_(NOTFP, R1, Operand(NOTFP)); // NOTFP <- NOTFP & R1
+ __ mov(NOTFP, Operand(NOTFP, LSR, R6)); // NOTFP <- NOTFP >> R6
+ // Now NOTFP has the bits that fall off of R1 on a left shift.
__ mov(R1, Operand(R1, LSL, R0)); // R1 gets the low bits.
const Class& mint_class = Class::Handle(
@@ -547,7 +547,7 @@ void Intrinsifier::Integer_shl(Assembler* assembler) {
__ str(R1, FieldAddress(R0, Mint::value_offset()));
- __ str(R7, FieldAddress(R0, Mint::value_offset() + kWordSize));
+ __ str(NOTFP, FieldAddress(R0, Mint::value_offset() + kWordSize));
__ Pop(R6);
__ Ret();
__ Bind(&fall_through);
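
For reference, the bit trick in the hunk above can be written as plain C++ (a minimal sketch, not part of this CL; the value and shift below are made up for illustration): the mask captures exactly the bits that a left shift pushes out of the low word, so the pair (high, low) reproduces the full 64-bit shift result.

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t value = 0x89ABCDEFu;  // plays the role of R1
  const uint32_t shift = 5;            // plays the role of R0, 0 < shift < 32

  uint32_t mask = (1u << shift) - 1u;                      // NOTFP <- (1 << R0) - 1
  mask <<= (32u - shift);                                  // move mask to the top bits
  const uint32_t high = (mask & value) >> (32u - shift);   // bits that fall off the shift
  const uint32_t low = value << shift;                     // R1 <- R1 << R0

  // (high, low) is exactly the 64-bit result of the shift.
  const uint64_t wide = static_cast<uint64_t>(value) << shift;
  assert(high == static_cast<uint32_t>(wide >> 32));
  assert(low == static_cast<uint32_t>(wide));
  return 0;
}
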
@@ -621,13 +621,13 @@ static void CompareIntegers(Assembler* assembler, Condition true_condition) {
// Get left as 64 bit integer.
Get64SmiOrMint(assembler, R3, R2, R1, &fall_through);
// Get right as 64 bit integer.
- Get64SmiOrMint(assembler, R7, R8, R0, &fall_through);
+ Get64SmiOrMint(assembler, NOTFP, R8, R0, &fall_through);
// R3: left high.
// R2: left low.
- // R7: right high.
+ // NOTFP: right high.
// R8: right low.
- __ cmp(R3, Operand(R7)); // Compare left hi, right high.
+ __ cmp(R3, Operand(NOTFP)); // Compare left hi, right high.
__ b(&is_false, hi_false_cond);
__ b(&is_true, hi_true_cond);
__ cmp(R2, Operand(R8)); // Compare left lo, right lo.
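
The compare sequence above decides on the high words first and only falls back to an unsigned comparison of the low words on a tie. A rough C++ model (illustrative names, not VM code):

#include <cassert>
#include <cstdint>

static bool LessThan64(int32_t left_hi, uint32_t left_lo,
                       int32_t right_hi, uint32_t right_lo) {
  if (left_hi != right_hi) {
    return left_hi < right_hi;  // high words decide (signed)
  }
  return left_lo < right_lo;    // tie: low words compared unsigned
}

int main() {
  assert(LessThan64(-1, 0xFFFFFFFFu, 0, 1u));   // -1 < 1
  assert(!LessThan64(0, 2u, 0, 1u));            // 2 > 1
  return 0;
}
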
@@ -776,8 +776,8 @@ void Intrinsifier::Bigint_lsh(Assembler* assembler) {
__ Asr(R4, R3, Operand(5));
// R8 = &x_digits[0]
__ add(R8, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
- // R7 = &x_digits[x_used]
- __ add(R7, R8, Operand(R0, LSL, 1));
+ // NOTFP = &x_digits[x_used]
+ __ add(NOTFP, R8, Operand(R0, LSL, 1));
// R6 = &r_digits[1]
__ add(R6, R2, Operand(TypedData::data_offset() - kHeapObjectTag +
Bigint::kBytesPerDigit));
@@ -791,11 +791,11 @@ void Intrinsifier::Bigint_lsh(Assembler* assembler) {
__ mov(R9, Operand(0));
Label loop;
__ Bind(&loop);
- __ ldr(R4, Address(R7, -Bigint::kBytesPerDigit, Address::PreIndex));
+ __ ldr(R4, Address(NOTFP, -Bigint::kBytesPerDigit, Address::PreIndex));
__ orr(R9, R9, Operand(R4, LSR, R0));
__ str(R9, Address(R6, -Bigint::kBytesPerDigit, Address::PreIndex));
__ mov(R9, Operand(R4, LSL, R1));
- __ teq(R7, Operand(R8));
+ __ teq(NOTFP, Operand(R8));
__ b(&loop, NE);
__ str(R9, Address(R6, -Bigint::kBytesPerDigit, Address::PreIndex));
// Returning Object::null() is not required, since this method is private.
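
The loop above walks x_digits from the most significant digit downward, splitting each digit across two result digits. A rough C++ model of the same digit arithmetic (a sketch only; the little-endian digit array and helper name are assumptions, not the VM's representation):

#include <cstdint>
#include <vector>

// Little-endian 32-bit digit array shifted left by n bits.
static std::vector<uint32_t> ShiftDigitsLeft(const std::vector<uint32_t>& x,
                                             uint32_t n) {
  const uint32_t digit_shift = n / 32;  // n ~/ _DIGIT_BITS
  const uint32_t bit_shift = n % 32;    // n % _DIGIT_BITS
  std::vector<uint32_t> r(x.size() + digit_shift + 1, 0);
  for (size_t i = 0; i < x.size(); ++i) {
    r[i + digit_shift] |= x[i] << bit_shift;          // low part of this digit
    if (bit_shift != 0) {
      r[i + digit_shift + 1] |= x[i] >> (32 - bit_shift);  // bits carried upward
    }
  }
  return r;
}
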
@@ -816,9 +816,9 @@ void Intrinsifier::Bigint_rsh(Assembler* assembler) {
__ Asr(R4, R3, Operand(5));
// R6 = &r_digits[0]
__ add(R6, R2, Operand(TypedData::data_offset() - kHeapObjectTag));
- // R7 = &x_digits[n ~/ _DIGIT_BITS]
- __ add(R7, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
- __ add(R7, R7, Operand(R4, LSL, 2));
+ // NOTFP = &x_digits[n ~/ _DIGIT_BITS]
+ __ add(NOTFP, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(NOTFP, NOTFP, Operand(R4, LSL, 2));
// R8 = &r_digits[x_used - n ~/ _DIGIT_BITS - 1]
__ add(R4, R4, Operand(1));
__ rsb(R4, R4, Operand(R0, ASR, 1));
@@ -828,13 +828,13 @@ void Intrinsifier::Bigint_rsh(Assembler* assembler) {
// R0 = 32 - R1
__ rsb(R0, R1, Operand(32));
// R9 = x_digits[n ~/ _DIGIT_BITS] >> (n % _DIGIT_BITS)
- __ ldr(R9, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
+ __ ldr(R9, Address(NOTFP, Bigint::kBytesPerDigit, Address::PostIndex));
__ mov(R9, Operand(R9, LSR, R1));
Label loop_entry;
__ b(&loop_entry);
Label loop;
__ Bind(&loop);
- __ ldr(R4, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
+ __ ldr(R4, Address(NOTFP, Bigint::kBytesPerDigit, Address::PostIndex));
__ orr(R9, R9, Operand(R4, LSL, R0));
__ str(R9, Address(R6, Bigint::kBytesPerDigit, Address::PostIndex));
__ mov(R9, Operand(R4, LSR, R1));
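
Bigint_rsh is the mirror image: each result digit combines the low bits of one source digit with the high bits of the next. A comparable sketch, under the same assumptions as the one above:

#include <cstdint>
#include <vector>

static std::vector<uint32_t> ShiftDigitsRight(const std::vector<uint32_t>& x,
                                              uint32_t n) {
  const uint32_t digit_shift = n / 32;
  const uint32_t bit_shift = n % 32;
  if (digit_shift >= x.size()) return {0};
  std::vector<uint32_t> r(x.size() - digit_shift, 0);
  for (size_t i = 0; i < r.size(); ++i) {
    r[i] = x[i + digit_shift] >> bit_shift;           // low bits of this digit
    if (bit_shift != 0 && i + digit_shift + 1 < x.size()) {
      r[i] |= x[i + digit_shift + 1] << (32 - bit_shift);  // high bits from the next
    }
  }
  return r;
}
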
@@ -867,8 +867,8 @@ void Intrinsifier::Bigint_absAdd(Assembler* assembler) {
// R8 = &r_digits[0]
__ add(R8, R8, Operand(TypedData::data_offset() - kHeapObjectTag));
- // R7 = &digits[a_used >> 1], a_used is Smi.
- __ add(R7, R1, Operand(R2, LSL, 1));
+ // NOTFP = &digits[a_used >> 1], a_used is Smi.
+ __ add(NOTFP, R1, Operand(R2, LSL, 1));
// R6 = &digits[used >> 1], used is Smi.
__ add(R6, R1, Operand(R0, LSL, 1));
@@ -880,7 +880,7 @@ void Intrinsifier::Bigint_absAdd(Assembler* assembler) {
__ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex));
__ ldr(R9, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
__ adcs(R4, R4, Operand(R9));
- __ teq(R1, Operand(R7)); // Does not affect carry flag.
+ __ teq(R1, Operand(NOTFP)); // Does not affect carry flag.
__ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex));
__ b(&add_loop, NE);
@@ -927,8 +927,8 @@ void Intrinsifier::Bigint_absSub(Assembler* assembler) {
// R8 = &r_digits[0]
__ add(R8, R8, Operand(TypedData::data_offset() - kHeapObjectTag));
- // R7 = &digits[a_used >> 1], a_used is Smi.
- __ add(R7, R1, Operand(R2, LSL, 1));
+ // NOTFP = &digits[a_used >> 1], a_used is Smi.
+ __ add(NOTFP, R1, Operand(R2, LSL, 1));
// R6 = &digits[used >> 1], used is Smi.
__ add(R6, R1, Operand(R0, LSL, 1));
@@ -940,7 +940,7 @@ void Intrinsifier::Bigint_absSub(Assembler* assembler) {
__ ldr(R4, Address(R1, Bigint::kBytesPerDigit, Address::PostIndex));
__ ldr(R9, Address(R3, Bigint::kBytesPerDigit, Address::PostIndex));
__ sbcs(R4, R4, Operand(R9));
- __ teq(R1, Operand(R7)); // Does not affect carry flag.
+ __ teq(R1, Operand(NOTFP)); // Does not affect carry flag.
__ str(R4, Address(R8, Bigint::kBytesPerDigit, Address::PostIndex));
__ b(&sub_loop, NE);
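
The two loops above are classic multi-precision add/subtract: adcs/sbcs propagate the carry or borrow between digits, and teq is used for the loop test precisely because it leaves the carry flag untouched. In portable C++ the same arithmetic looks roughly like this (a sketch, not the VM code; it assumes digits is at least as long as a):

#include <cstdint>
#include <vector>

static std::vector<uint32_t> AbsAdd(const std::vector<uint32_t>& digits,
                                    const std::vector<uint32_t>& a) {
  std::vector<uint32_t> r(digits.size() + 1, 0);
  uint64_t carry = 0;
  for (size_t i = 0; i < digits.size(); ++i) {
    const uint64_t t = carry + digits[i] + (i < a.size() ? a[i] : 0u);
    r[i] = static_cast<uint32_t>(t);   // low 32 bits
    carry = t >> 32;                   // carry into the next digit
  }
  r[digits.size()] = static_cast<uint32_t>(carry);
  return r;
}

static std::vector<uint32_t> AbsSub(const std::vector<uint32_t>& digits,
                                    const std::vector<uint32_t>& a) {
  std::vector<uint32_t> r(digits.size(), 0);
  uint64_t borrow = 0;
  for (size_t i = 0; i < digits.size(); ++i) {
    const uint64_t t = static_cast<uint64_t>(digits[i]) -
                       (i < a.size() ? a[i] : 0u) - borrow;
    r[i] = static_cast<uint32_t>(t);   // low 32 bits
    borrow = (t >> 32) & 1;            // 1 if the subtraction wrapped
  }
  return r;
}
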
@@ -1103,18 +1103,18 @@ void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) {
__ tst(R3, Operand(R3));
__ b(&x_zero, EQ);
- // R7 = ajp = &a_digits[i]
+ // NOTFP = ajp = &a_digits[i]
__ ldr(R1, Address(SP, 1 * kWordSize)); // a_digits
__ add(R1, R1, Operand(R2, LSL, 2)); // j == 2*i, i is Smi.
- __ add(R7, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
+ __ add(NOTFP, R1, Operand(TypedData::data_offset() - kHeapObjectTag));
// R8:R0 = t = x*x + *ajp
- __ ldr(R0, Address(R7, 0));
+ __ ldr(R0, Address(NOTFP, 0));
__ mov(R8, Operand(0));
__ umaal(R0, R8, R3, R3); // R8:R0 = R3*R3 + R8 + R0.
// *ajp++ = low32(t) = R0
- __ str(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
+ __ str(R0, Address(NOTFP, Bigint::kBytesPerDigit, Address::PostIndex));
// R8 = low32(c) = high32(t)
// R9 = high32(c) = 0
@@ -1132,7 +1132,7 @@ void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) {
__ Bind(&loop);
// x: R3
// xip: R4
- // ajp: R7
+ // ajp: NOTFP
// c: R9:R8
// t: R2:R1:R0 (not live at loop entry)
// n: R6
@@ -1149,13 +1149,13 @@ void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) {
__ adds(R0, R0, Operand(R8));
__ adcs(R1, R1, Operand(R9));
__ adc(R2, R2, Operand(0)); // R2:R1:R0 = 2*x*xi + c.
- __ ldr(R8, Address(R7, 0)); // R8 = aj = *ajp.
+ __ ldr(R8, Address(NOTFP, 0)); // R8 = aj = *ajp.
__ adds(R0, R0, Operand(R8));
__ adcs(R8, R1, Operand(0));
__ adc(R9, R2, Operand(0)); // R9:R8:R0 = 2*x*xi + c + aj.
// *ajp++ = low32(t) = R0
- __ str(R0, Address(R7, Bigint::kBytesPerDigit, Address::PostIndex));
+ __ str(R0, Address(NOTFP, Bigint::kBytesPerDigit, Address::PostIndex));
// while (--n >= 0)
__ subs(R6, R6, Operand(1)); // --n
@@ -1163,7 +1163,7 @@ void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) {
__ Bind(&done);
// uint32_t aj = *ajp
- __ ldr(R0, Address(R7, 0));
+ __ ldr(R0, Address(NOTFP, 0));
// uint64_t t = aj + c
__ adds(R8, R8, Operand(R0));
@@ -1171,7 +1171,7 @@ void Intrinsifier::Bigint_sqrAdd(Assembler* assembler) {
// *ajp = low32(t) = R8
// *(ajp + 1) = high32(t) = R9
- __ strd(R8, R9, R7, 0);
+ __ strd(R8, R9, NOTFP, 0);
__ Bind(&x_zero);
__ mov(R0, Operand(Smi::RawValue(1))); // One digit processed.
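
Both multiply-accumulate steps above rely on umaal, whose defining property is that a 32x32 product plus two 32-bit addends always fits in 64 bits; the inner loop then widens to a 96-bit accumulator (R2:R1:R0) because 2*x*xi plus the running carry no longer fits in 64. A small C++ model of the umaal step and of the first `t = x*x + *ajp` computation (illustrative only, hypothetical helper names):

#include <cstdint>

// {*hi, *lo} = a * b + *hi + *lo. Always fits in 64 bits because
// (2^32 - 1)^2 + 2 * (2^32 - 1) == 2^64 - 1.
static void Umaal(uint32_t* lo, uint32_t* hi, uint32_t a, uint32_t b) {
  const uint64_t t = static_cast<uint64_t>(a) * b + *hi + *lo;
  *lo = static_cast<uint32_t>(t);
  *hi = static_cast<uint32_t>(t >> 32);
}

// First step of the squaring above: *ajp += x*x, returning the 32-bit carry.
static uint32_t SquareStep(uint32_t x, uint32_t* ajp) {
  uint32_t lo = *ajp;     // R0 = *ajp
  uint32_t hi = 0;        // R8 = 0
  Umaal(&lo, &hi, x, x);  // R8:R0 = x*x + R8 + R0
  *ajp = lo;              // *ajp++ = low32(t)
  return hi;              // low32(c) = high32(t)
}
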
@@ -1677,11 +1677,11 @@ void Intrinsifier::OneByteString_getHashCode(Assembler* assembler) {
// hash_ ^= hash_ >> 6;
// Get one character (ch).
__ Bind(&loop);
- __ ldrb(R7, Address(R8, 0));
- // R7: ch.
+ __ ldrb(NOTFP, Address(R8, 0));
+ // NOTFP: ch.
__ add(R3, R3, Operand(1));
__ add(R8, R8, Operand(1));
- __ add(R0, R0, Operand(R7));
+ __ add(R0, R0, Operand(NOTFP));
__ add(R0, R0, Operand(R0, LSL, 10));
__ eor(R0, R0, Operand(R0, LSR, 6));
__ cmp(R3, Operand(R2));
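
The three updates in the loop body correspond to the per-character hash step `hash += ch; hash += hash << 10; hash ^= hash >> 6`. In plain C++ (a sketch of just the loop; the setup and finalization the VM applies around it are omitted):

#include <cstdint>
#include <string>

static uint32_t HashBody(const std::string& s) {
  uint32_t hash = 0;
  for (unsigned char ch : s) {
    hash += ch;           // __ add(R0, R0, Operand(NOTFP));
    hash += hash << 10;   // __ add(R0, R0, Operand(R0, LSL, 10));
    hash ^= hash >> 6;    // __ eor(R0, R0, Operand(R0, LSR, 6));
  }
  return hash;
}
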
@@ -1739,8 +1739,8 @@ static void TryAllocateOnebyteString(Assembler* assembler,
// R1: potential next object start.
// R2: allocation size.
// R3: heap.
- __ ldr(R7, Address(R3, Heap::EndOffset(space)));
- __ cmp(R1, Operand(R7));
+ __ ldr(NOTFP, Address(R3, Heap::EndOffset(space)));
+ __ cmp(R1, Operand(NOTFP));
__ b(&fail, CS);
// Successfully allocated the object(s), now update top to point to
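
The CS branch above is the standard bump-pointer capacity check: the candidate next object start is compared, unsigned, against the space's end pointer, and the intrinsic bails out to the runtime if it does not fit. A simplified C++ model (struct and field names here are assumptions for illustration):

#include <cstdint>

struct Space { uintptr_t top; uintptr_t end; };

static bool TryBumpAllocate(Space* space, uintptr_t size, uintptr_t* result) {
  const uintptr_t start = space->top;
  const uintptr_t next = start + size;  // R1: potential next object start
  if (next >= space->end) {             // cmp(R1, Operand(NOTFP)); b(&fail, CS)
    return false;                       // not enough room; fall back to runtime
  }
  space->top = next;                    // update top on success
  *result = start;
  return true;
}
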
@@ -1824,20 +1824,20 @@ void Intrinsifier::OneByteString_substringUnchecked(Assembler* assembler) {
// R2: Untagged number of bytes to copy.
// R0: Tagged result string.
// R8: Pointer into R3.
- // R7: Pointer into R0.
+ // NOTFP: Pointer into R0.
// R1: Scratch register.
Label loop, done;
__ cmp(R2, Operand(0));
__ b(&done, LE);
__ mov(R8, Operand(R3));
- __ mov(R7, Operand(R0));
+ __ mov(NOTFP, Operand(R0));
__ Bind(&loop);
__ ldrb(R1, Address(R8, 0));
__ AddImmediate(R8, 1);
__ sub(R2, R2, Operand(1));
__ cmp(R2, Operand(0));
- __ strb(R1, FieldAddress(R7, OneByteString::data_offset()));
- __ AddImmediate(R7, 1);
+ __ strb(R1, FieldAddress(NOTFP, OneByteString::data_offset()));
+ __ AddImmediate(NOTFP, 1);
__ b(&loop, GT);
__ Bind(&done);
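
The loop above is a plain byte-by-byte copy from the source cursor (R8) into the freshly allocated result string (NOTFP); in C++ terms it is simply the following (illustrative sketch only; a portable version would just call memcpy):

#include <cstdint>

static void CopyBytes(const uint8_t* src, uint8_t* dst, intptr_t count) {
  while (count > 0) {   // __ cmp(R2, Operand(0)); __ b(&loop, GT);
    *dst++ = *src++;    // ldrb from R8, strb to NOTFP, both cursors advance
    --count;            // __ sub(R2, R2, Operand(1));
  }
}
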
