Chromium Code Reviews

Unified Diff: src/arm/codegen-arm.cc

Issue 993002: Port of optimized ICs for external and pixel arrays from ia32 to ARM. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 10 years, 9 months ago
Index: src/arm/codegen-arm.cc
===================================================================
--- src/arm/codegen-arm.cc (revision 4136)
+++ src/arm/codegen-arm.cc (working copy)
@@ -4684,42 +4684,6 @@
}
-// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
-// instruction. On pre-ARM5 hardware this routine gives the wrong answer for 0
-// (31 instead of 32).
-static void CountLeadingZeros(
- MacroAssembler* masm,
- Register source,
- Register scratch,
- Register zeros) {
-#ifdef CAN_USE_ARMV5_INSTRUCTIONS
- __ clz(zeros, source); // This instruction is only supported after ARM5.
-#else
- __ mov(zeros, Operand(0));
- __ mov(scratch, source);
- // Top 16.
- __ tst(scratch, Operand(0xffff0000));
- __ add(zeros, zeros, Operand(16), LeaveCC, eq);
- __ mov(scratch, Operand(scratch, LSL, 16), LeaveCC, eq);
- // Top 8.
- __ tst(scratch, Operand(0xff000000));
- __ add(zeros, zeros, Operand(8), LeaveCC, eq);
- __ mov(scratch, Operand(scratch, LSL, 8), LeaveCC, eq);
- // Top 4.
- __ tst(scratch, Operand(0xf0000000));
- __ add(zeros, zeros, Operand(4), LeaveCC, eq);
- __ mov(scratch, Operand(scratch, LSL, 4), LeaveCC, eq);
- // Top 2.
- __ tst(scratch, Operand(0xc0000000));
- __ add(zeros, zeros, Operand(2), LeaveCC, eq);
- __ mov(scratch, Operand(scratch, LSL, 2), LeaveCC, eq);
- // Top bit.
- __ tst(scratch, Operand(0x80000000u));
- __ add(zeros, zeros, Operand(1), LeaveCC, eq);
-#endif
-}
-
-
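
For reference, the pre-ARMv5 fallback above is a binary search over progressively smaller bit groups. A minimal C++ sketch of the same idea (illustrative only, not part of the patch):

#include <cstdint>

// Counts leading zeros without a clz instruction: test the top half-word,
// then the top byte, nibble, pair and bit, shifting the remaining bits up
// whenever the tested bits are all zero. Like the assembly fallback, it
// returns 31 (not 32) for an input of 0.
static int CountLeadingZerosFallback(uint32_t x) {
  int zeros = 0;
  if ((x & 0xffff0000u) == 0) { zeros += 16; x <<= 16; }
  if ((x & 0xff000000u) == 0) { zeros += 8;  x <<= 8;  }
  if ((x & 0xf0000000u) == 0) { zeros += 4;  x <<= 4;  }
  if ((x & 0xc0000000u) == 0) { zeros += 2;  x <<= 2;  }
  if ((x & 0x80000000u) == 0) { zeros += 1; }
  return zeros;
}
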
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
@@ -4787,21 +4751,20 @@
__ b(gt, &not_special);
// We have -1, 0 or 1, which we treat specially.
Mads Ager (chromium) 2010/03/23 11:46:54 Could we clarify the comment to state that source
- __ cmp(source_, Operand(0));
// For 1 or -1 we need to or in the 0 exponent (biased to 1023).
static const uint32_t exponent_word_for_1 =
HeapNumber::kExponentBias << HeapNumber::kExponentShift;
- __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, ne);
+ __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
// 1, 0 and -1 all have 0 for the second word.
__ mov(mantissa, Operand(0));
__ Ret();
__ bind(&not_special);
- // Count leading zeros. Uses result2 for a scratch register on pre-ARM5.
+ // Count leading zeros. Uses mantissa for a scratch register on pre-ARM5.
// Gets the wrong answer for 0, but we already checked for that case above.
- CountLeadingZeros(masm, source_, mantissa, zeros_);
+ __ CountLeadingZeros(source_, mantissa, zeros_);
// Compute exponent and or it into the exponent register.
- // We use result2 as a scratch register here.
+ // We use mantissa as a scratch register here.
__ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias));
__ orr(exponent,
exponent,
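
For a value outside the -1/0/1 special case, the biased exponent is 31 + HeapNumber::kExponentBias minus the leading-zero count, and the bits after the leading 1 become the 52-bit fraction, split 20/32 across the two result words. A C++ sketch of that packing for positive inputs (illustrative only; the stub handles the sign bit separately):

#include <cstdint>
#include <cassert>

// Packs a positive int32 into the high/low words of an IEEE 754 double:
// 1 sign bit, 11 biased exponent bits, then 20 + 32 fraction bits.
static void PackIntAsDouble(int32_t value, uint32_t* hi, uint32_t* lo) {
  assert(value > 1);                       // -1, 0 and 1 take the special path.
  uint32_t bits = static_cast<uint32_t>(value);
  int zeros = __builtin_clz(bits);         // clz, as on ARMv5 and later.
  int exponent = 31 - zeros;               // Position of the leading 1 bit.
  bits <<= zeros + 1;                      // Drop the implicit leading 1.
  *hi = ((exponent + 1023) << 20) | (bits >> 12);  // Exponent + top 20 bits.
  *lo = bits << 20;                                // Remaining 32 fraction bits.
}

For example, PackIntAsDouble(5, ...) yields 0x40140000 / 0x00000000, the bit pattern of 5.0.
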
@@ -4820,45 +4783,6 @@
}
-// This stub can convert a signed int32 to a heap number (double). It does
-// not work for int32s that are in Smi range! No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public CodeStub {
- public:
- WriteInt32ToHeapNumberStub(Register the_int,
- Register the_heap_number,
- Register scratch)
- : the_int_(the_int),
- the_heap_number_(the_heap_number),
- scratch_(scratch) { }
-
- private:
- Register the_int_;
- Register the_heap_number_;
- Register scratch_;
-
- // Minor key encoding in 16 bits.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 14> {};
-
- Major MajorKey() { return WriteInt32ToHeapNumber; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return the_int_.code() +
- (the_heap_number_.code() << 4) +
- (scratch_.code() << 8);
- }
-
- void Generate(MacroAssembler* masm);
-
- const char* GetName() { return "WriteInt32ToHeapNumberStub"; }
-
-#ifdef DEBUG
- void Print() { PrintF("WriteInt32ToHeapNumberStub\n"); }
-#endif
-};
-
-
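
The removed stub's MinorKey packs the three register codes into disjoint 4-bit fields, so each distinct register assignment gets its own stub key. A sketch of that encoding, assuming plain integer register codes 0-15:

// Illustrative only: mirrors the_int_.code() + (the_heap_number_.code() << 4)
// + (scratch_.code() << 8) from the removed class.
static int EncodeWriteInt32StubKey(int int_reg, int heap_number_reg,
                                   int scratch_reg) {
  return int_reg + (heap_number_reg << 4) + (scratch_reg << 8);
}
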
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
Label max_negative_int;
@@ -5041,7 +4965,7 @@
CpuFeatures::Scope scope(VFP3);
__ mov(r7, Operand(r1, ASR, kSmiTagSize));
__ vmov(s15, r7);
- __ vcvt(d7, s15);
+ __ vcvt_f64_s32(d7, s15);
// Load the double from rhs, tagged HeapNumber r0, to d6.
__ sub(r7, r0, Operand(kHeapObjectTag));
__ vldr(d6, r7, HeapNumber::kValueOffset);
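
On 32-bit ARM a Smi carries its value shifted left by one tag bit, so the ASR by kSmiTagSize untags it and vcvt_f64_s32 then converts the signed 32-bit integer to a double. The equivalent computation in C++ (a sketch, assuming the usual one-bit Smi tag):

#include <cstdint>

// What the ASR + vmov + vcvt_f64_s32 sequence produces for a tagged Smi.
static double SmiToDouble(int32_t tagged_smi) {
  int32_t untagged = tagged_smi >> 1;    // kSmiTagSize == 1 on 32-bit targets.
  return static_cast<double>(untagged);  // Signed int32 -> float64.
}
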
@@ -5084,7 +5008,7 @@
__ vldr(d7, r7, HeapNumber::kValueOffset);
__ mov(r7, Operand(r0, ASR, kSmiTagSize));
__ vmov(s13, r7);
- __ vcvt(d6, s13);
+ __ vcvt_f64_s32(d6, s13);
} else {
__ push(lr);
// Load lhs to a double in r2, r3.
@@ -5420,29 +5344,6 @@
}
-// Allocates a heap number or jumps to the label if the young space is full and
-// a scavenge is needed.
-static void AllocateHeapNumber(
- MacroAssembler* masm,
- Label* need_gc, // Jump here if young space is full.
- Register result, // The tagged address of the new heap number.
- Register scratch1, // A scratch register.
- Register scratch2) { // Another scratch register.
- // Allocate an object in the heap for the heap number and tag it as a heap
- // object.
- __ AllocateInNewSpace(HeapNumber::kSize / kPointerSize,
- result,
- scratch1,
- scratch2,
- need_gc,
- TAG_OBJECT);
-
- // Get heap number map and store it in the allocated object.
- __ LoadRoot(scratch1, Heap::kHeapNumberMapRootIndex);
- __ str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
-}
-
-
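
Conceptually the removed helper bump-allocates HeapNumber::kSize bytes in new space, jumps to the slow path when that space is exhausted, tags the raw address as a heap object, and writes the heap-number map into the first word. A rough C++ sketch of those steps, with hypothetical names standing in for V8's real allocator:

#include <cstdint>

// Illustrative only. new_top/limit model the new-space bump pointer; the
// constants assume a 32-bit heap number (4-byte map + 8-byte value) and a
// heap-object tag of 1.
static uint32_t AllocateHeapNumberSketch(uint32_t* new_top, uint32_t limit,
                                         uint32_t heap_number_map,
                                         bool* need_gc) {
  uint32_t object = *new_top;
  if (object + 12 > limit) {
    *need_gc = true;               // Young space is full; the caller takes the
    return 0;                      // slow path and a scavenge frees space.
  }
  *new_top = object + 12;
  *reinterpret_cast<uint32_t*>(object) = heap_number_map;  // Map at offset 0.
  return object + 1;               // Tag the address as a heap object.
}
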
// We fall into this code if the operands were Smis, but the result was
// not (eg. overflow). We branch into this code (to the not_smi label) if
// the operands were not both Smi. The operands are in r0 and r1. In order
@@ -5459,7 +5360,7 @@
// Smi-smi case (overflow).
// Since both are Smis there is no heap number to overwrite, so allocate.
// The new heap number is in r5. r6 and r7 are scratch.
- AllocateHeapNumber(masm, &slow, r5, r6, r7);
+ __ AllocateHeapNumber(r5, r6, r7, &slow);
// If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
// using registers d7 and d6 for the double values.
@@ -5469,10 +5370,10 @@
CpuFeatures::Scope scope(VFP3);
__ mov(r7, Operand(r0, ASR, kSmiTagSize));
__ vmov(s15, r7);
- __ vcvt(d7, s15);
+ __ vcvt_f64_s32(d7, s15);
__ mov(r7, Operand(r1, ASR, kSmiTagSize));
__ vmov(s13, r7);
- __ vcvt(d6, s13);
+ __ vcvt_f64_s32(d6, s13);
} else {
// Write Smi from r0 to r3 and r2 in double format. r6 is scratch.
__ mov(r7, Operand(r0));
@@ -5543,7 +5444,7 @@
if (mode == NO_OVERWRITE) {
// In the case where there is no chance of an overwritable float we may as
// well do the allocation immediately while r0 and r1 are untouched.
- AllocateHeapNumber(masm, &slow, r5, r6, r7);
+ __ AllocateHeapNumber(r5, r6, r7, &slow);
}
// Move r0 to a double in r2-r3.
@@ -5568,7 +5469,7 @@
__ bind(&r0_is_smi);
if (mode == OVERWRITE_RIGHT) {
// We can't overwrite a Smi so get address of new heap number into r5.
- AllocateHeapNumber(masm, &slow, r5, r6, r7);
+ __ AllocateHeapNumber(r5, r6, r7, &slow);
}
if (use_fp_registers) {
@@ -5576,7 +5477,7 @@
// Convert smi in r0 to double in d7.
__ mov(r7, Operand(r0, ASR, kSmiTagSize));
__ vmov(s15, r7);
- __ vcvt(d7, s15);
+ __ vcvt_f64_s32(d7, s15);
} else {
// Write Smi from r0 to r3 and r2 in double format.
__ mov(r7, Operand(r0));
@@ -5610,7 +5511,7 @@
__ bind(&r1_is_smi);
if (mode == OVERWRITE_LEFT) {
// We can't overwrite a Smi so get address of new heap number into r5.
- AllocateHeapNumber(masm, &slow, r5, r6, r7);
+ __ AllocateHeapNumber(r5, r6, r7, &slow);
}
if (use_fp_registers) {
@@ -5618,7 +5519,7 @@
// Convert smi in r1 to double in d6.
__ mov(r7, Operand(r1, ASR, kSmiTagSize));
__ vmov(s13, r7);
- __ vcvt(d6, s13);
+ __ vcvt_f64_s32(d6, s13);
} else {
// Write Smi from r1 to r1 and r0 in double format.
__ mov(r7, Operand(r1));
@@ -5745,7 +5646,7 @@
// conversion using round to zero.
__ ldr(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
__ vmov(d7, scratch2, scratch);
- __ vcvt(s15, d7);
+ __ vcvt_s32_f64(s15, d7);
__ vmov(dest, s15);
} else {
// Get the top bits of the mantissa.
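
vcvt_s32_f64 converts with round-toward-zero, the same truncation a C++ cast performs; the non-VFP branch below reaches the same result by extracting the mantissa bits directly. A minimal sketch of the value the fast path computes (assuming the input is within int32 range):

#include <cstdint>

// Truncate a double toward zero, as the VFP fast path does.
static int32_t TruncateToInt32(double value) {
  return static_cast<int32_t>(value);
}
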
@@ -5857,7 +5758,7 @@
}
case NO_OVERWRITE: {
// Get a new heap number in r5. r6 and r7 are scratch.
- AllocateHeapNumber(masm, &slow, r5, r6, r7);
+ __ AllocateHeapNumber(r5, r6, r7, &slow);
}
default: break;
}
@@ -5877,7 +5778,7 @@
if (mode_ != NO_OVERWRITE) {
__ bind(&have_to_allocate);
// Get a new heap number in r5. r6 and r7 are scratch.
- AllocateHeapNumber(masm, &slow, r5, r6, r7);
+ __ AllocateHeapNumber(r5, r6, r7, &slow);
__ jmp(&got_a_heap_number);
}
@@ -6295,7 +6196,7 @@
__ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
__ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
} else {
- AllocateHeapNumber(masm, &slow, r1, r2, r3);
+ __ AllocateHeapNumber(r1, r2, r3, &slow);
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
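
Negating the heap number only requires flipping the sign bit in the exponent word, which is what the eor with HeapNumber::kSignMask does; the mantissa word is copied unchanged. The same operation in C++ (a sketch):

#include <cstdint>
#include <cstring>

// Flip bit 63 of an IEEE 754 double, i.e. bit 31 of its high (exponent) word.
static double NegateBySignFlip(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  bits ^= 0x8000000000000000ull;           // Equivalent of HeapNumber::kSignMask.
  std::memcpy(&value, &bits, sizeof(value));
  return value;
}
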
@@ -6325,7 +6226,7 @@
// Allocate a fresh heap number, but don't overwrite r0 until
// we're sure we can do it without going through the slow case
// that needs the value in r0.
- AllocateHeapNumber(masm, &slow, r2, r3, r4);
+ __ AllocateHeapNumber(r2, r3, r4, &slow);
__ mov(r0, Operand(r2));
}
