Index: src/arm/lithium-codegen-arm.cc
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 5aebadaef36b96eb101224281b940ee4b6ac872f..696d3c24558781d3f1bbb0d6d881382d93e0ca47 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -3252,7 +3252,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
       ? (element_size_shift - kSmiTagSize) : element_size_shift;
-  int additional_offset = instr->additional_index() << element_size_shift;
+  int base_offset = instr->base_offset();
 
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
@@ -3262,17 +3262,16 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
         : Operand(key, LSL, shift_size);
     __ add(scratch0(), external_pointer, operand);
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-      __ vldr(double_scratch0().low(), scratch0(), additional_offset);
+      __ vldr(double_scratch0().low(), scratch0(), base_offset);
       __ vcvt_f64_f32(result, double_scratch0().low());
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ vldr(result, scratch0(), additional_offset);
+      __ vldr(result, scratch0(), base_offset);
     }
   } else {
     Register result = ToRegister(instr->result());
     MemOperand mem_operand = PrepareKeyedOperand(
         key, external_pointer, key_is_constant, constant_key,
-        element_size_shift, shift_size,
-        instr->additional_index(), additional_offset);
+        element_size_shift, shift_size, base_offset);
     switch (elements_kind) {
       case EXTERNAL_BYTE_ELEMENTS:
         __ ldrsb(result, mem_operand);
@@ -3323,15 +3322,13 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
-  int base_offset =
-      FixedDoubleArray::kHeaderSize - kHeapObjectTag +
-      (instr->additional_index() << element_size_shift);
+  int base_offset = instr->base_offset();
   if (key_is_constant) {
     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
     if (constant_key & 0xF0000000) {
       Abort(kArrayIndexConstantValueTooBig);
     }
-    base_offset += constant_key << element_size_shift;
+    base_offset += constant_key * kDoubleSize;
   }
   __ add(scratch, elements, Operand(base_offset));
@@ -3357,12 +3354,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
   Register store_base = scratch;
-  int offset = 0;
+  int offset = instr->base_offset();
 
   if (instr->key()->IsConstantOperand()) {
     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
-    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
-                                           instr->additional_index());
+    offset += ToInteger32(const_operand) * kPointerSize;
     store_base = elements;
   } else {
     Register key = ToRegister(instr->key());
@@ -3375,9 +3371,8 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
     } else {
       __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
     }
-    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
   }
-  __ ldr(result, FieldMemOperand(store_base, offset));
+  __ ldr(result, MemOperand(store_base, offset));
 
   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -3410,19 +3405,12 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
                                          int constant_key,
                                          int element_size,
                                          int shift_size,
-                                         int additional_index,
-                                         int additional_offset) {
-  if (additional_index != 0 && !key_is_constant) {
-    additional_index *= 1 << (element_size - shift_size);
-    __ add(scratch0(), key, Operand(additional_index));
-  }
-
+                                         int base_offset) {
   if (key_is_constant) {
-    return MemOperand(base,
-                      (constant_key << element_size) + additional_offset);
+    return MemOperand(base, (constant_key << element_size) + base_offset);
   }
 
-  if (additional_index == 0) {
+  if (base_offset == 0) {
     if (shift_size >= 0) {
       return MemOperand(base, key, LSL, shift_size);
     } else {
@@ -3432,9 +3420,12 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
   }
 
   if (shift_size >= 0) {
+    ASSERT((base_offset & ((1 << shift_size) - 1)) == 0);
+    __ add(scratch0(), key, Operand(base_offset >> shift_size));
     return MemOperand(base, scratch0(), LSL, shift_size);
   } else {
     ASSERT_EQ(-1, shift_size);
+    __ add(scratch0(), key, Operand(base_offset << 1));
     return MemOperand(base, scratch0(), LSR, 1);
   }
 }
@@ -4326,7 +4317,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
       ? (element_size_shift - kSmiTagSize) : element_size_shift;
-  int additional_offset = instr->additional_index() << element_size_shift;
+  int base_offset = instr->base_offset();
 
   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
       elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
@@ -4344,16 +4335,16 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
     }
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
       __ vcvt_f32_f64(double_scratch0().low(), value);
-      __ vstr(double_scratch0().low(), address, additional_offset);
+      __ vstr(double_scratch0().low(), address, base_offset);
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
-      __ vstr(value, address, additional_offset);
+      __ vstr(value, address, base_offset);
     }
   } else {
     Register value(ToRegister(instr->value()));
     MemOperand mem_operand = PrepareKeyedOperand(
         key, external_pointer, key_is_constant, constant_key,
         element_size_shift, shift_size,
-        instr->additional_index(), additional_offset);
+        base_offset);
     switch (elements_kind) {
       case EXTERNAL_PIXEL_ELEMENTS:
       case EXTERNAL_BYTE_ELEMENTS:
@@ -4391,6 +4382,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   Register scratch = scratch0();
   DwVfpRegister double_scratch = double_scratch0();
   bool key_is_constant = instr->key()->IsConstantOperand();
+  int base_offset = instr->base_offset();
 
   // Calculate the effective address of the slot in the array to store the
   // double value.
@@ -4401,13 +4393,11 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
       Abort(kArrayIndexConstantValueTooBig);
     }
     __ add(scratch, elements,
-           Operand((constant_key << element_size_shift) +
-                   FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+           Operand((constant_key << element_size_shift) + base_offset));
   } else {
     int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
         ? (element_size_shift - kSmiTagSize) : element_size_shift;
-    __ add(scratch, elements,
-           Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+    __ add(scratch, elements, Operand(base_offset));
     __ add(scratch, scratch,
            Operand(ToRegister(instr->key()), LSL, shift_size));
   }
@@ -4420,10 +4410,9 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
       __ Assert(ne, kDefaultNaNModeNotSet);
     }
     __ VFPCanonicalizeNaN(double_scratch, value);
-    __ vstr(double_scratch, scratch,
-            instr->additional_index() << element_size_shift);
+    __ vstr(double_scratch, scratch, 0);
   } else {
-    __ vstr(value, scratch, instr->additional_index() << element_size_shift);
+    __ vstr(value, scratch, 0);
   }
 }
 
@@ -4435,14 +4424,13 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
       : no_reg;
   Register scratch = scratch0();
   Register store_base = scratch;
-  int offset = 0;
+  int offset = instr->base_offset();
 
   // Do the store.
   if (instr->key()->IsConstantOperand()) {
     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
-    offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
-                                           instr->additional_index());
+    offset += ToInteger32(const_operand) * kPointerSize;
     store_base = elements;
   } else {
     // Even though the HLoadKeyed instruction forces the input
@@ -4454,16 +4442,15 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
     } else {
      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
    }
-    offset = FixedArray::OffsetOfElementAt(instr->additional_index());
  }
-  __ str(value, FieldMemOperand(store_base, offset));
+  __ str(value, MemOperand(store_base, offset));
 
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed =
        instr->hydrogen()->value()->IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
-    __ add(key, store_base, Operand(offset - kHeapObjectTag));
+    __ add(key, store_base, Operand(offset));
    __ RecordWrite(elements,
                   key,
                   value,
@@ -5864,7 +5851,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
   __ cmp(index, Operand::Zero());
   __ b(lt, &out_of_object);
-  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
+  __ add(scratch, object, Operand(index, LSL, kPointerSizeLog2));
   __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
   __ b(&done);
@@ -5872,8 +5859,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
   __ bind(&out_of_object);
   __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
   // Index is equal to negated out of object property index plus 1.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
+  __ sub(scratch, result, Operand(index, LSL, kPointerSizeLog2));
   __ ldr(result, FieldMemOperand(scratch,
                                  FixedArray::kHeaderSize - kPointerSize));
   __ bind(&done);
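
Not part of the patch, for context: the hunks above replace the hand-built
"FixedDoubleArray::kHeaderSize - kHeapObjectTag + (additional_index <<
element_size_shift)" term with a single byte offset obtained from
instr->base_offset(), and fold constant keys in as constant_key * kDoubleSize
(or * kPointerSize). A minimal standalone C++ sketch of that equivalence,
using assumed 32-bit placeholder constants rather than the real V8
definitions, is:

// Illustration only; the constants are placeholders assuming a 32-bit build.
#include <cassert>

int main() {
  const int kPointerSize = 4;
  const int kDoubleSize = 8;
  const int kHeapObjectTag = 1;
  const int kFixedDoubleArrayHeaderSize = 2 * kPointerSize;  // map + length

  const int element_size_shift = 3;  // log2(kDoubleSize), FAST_DOUBLE_ELEMENTS
  const int additional_index = 2;    // example value from the keyed instruction
  const int constant_key = 5;        // example constant key

  // Old scheme: header/tag adjustment and additional_index applied in codegen.
  const int old_offset = kFixedDoubleArrayHeaderSize - kHeapObjectTag +
                         (additional_index << element_size_shift) +
                         (constant_key << element_size_shift);

  // New scheme: the instruction publishes base_offset(); codegen only adds
  // the key term (constant_key * kDoubleSize for double elements).
  const int base_offset = kFixedDoubleArrayHeaderSize - kHeapObjectTag +
                          (additional_index << element_size_shift);
  const int new_offset = base_offset + constant_key * kDoubleSize;

  assert(old_offset == new_offset);  // both encodings address the same slot
  return 0;
}

Under those assumptions, constant_key * kDoubleSize equals the old
constant_key << element_size_shift term, so folding everything into one
base_offset lands on the same element slot.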