Chromium Code Reviews| Index: runtime/vm/intermediate_language_arm.cc |
| diff --git a/runtime/vm/intermediate_language_arm.cc b/runtime/vm/intermediate_language_arm.cc |
| index bb286e3db5bce80ef065a4207515e3bfdb9c0d83..8580bace7bbea802da12c6dd58e98da7224fe2a2 100644 |
| --- a/runtime/vm/intermediate_language_arm.cc |
| +++ b/runtime/vm/intermediate_language_arm.cc |
| @@ -1583,16 +1583,16 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| class StoreInstanceFieldSlowPath : public SlowPathCode { |
| public: |
| - explicit StoreInstanceFieldSlowPath(StoreInstanceFieldInstr* instruction) |
| - : instruction_(instruction) { } |
| + explicit StoreInstanceFieldSlowPath(const Class& cls, |
|
srdjan
2014/01/30 23:58:39
Remove explicit.
I would put cls as the second argument.
Cutch
2014/01/31 00:18:49
Done.
|
| + StoreInstanceFieldInstr* instruction) |
| + : instruction_(instruction), cls_(cls) { } |
| virtual void EmitNativeCode(FlowGraphCompiler* compiler) { |
| __ Comment("StoreInstanceFieldSlowPath"); |
| __ Bind(entry_label()); |
| - const Class& double_class = compiler->double_class(); |
| const Code& stub = |
| - Code::Handle(StubCode::GetAllocationStubForClass(double_class)); |
| - const ExternalLabel label(double_class.ToCString(), stub.EntryPoint()); |
| + Code::Handle(StubCode::GetAllocationStubForClass(cls_)); |
| + const ExternalLabel label(cls_.ToCString(), stub.EntryPoint()); |
| LocationSummary* locs = instruction_->locs(); |
| locs->live_registers()->Remove(locs->out()); |
| @@ -1610,6 +1610,7 @@ class StoreInstanceFieldSlowPath : public SlowPathCode { |
| private: |
| StoreInstanceFieldInstr* instruction_; |
| + const Class& cls_; |
| }; |
| @@ -1653,12 +1654,22 @@ void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| DRegister value = EvenDRegisterOf(locs()->in(1).fpu_reg()); |
| Register temp = locs()->temp(0).reg(); |
| Register temp2 = locs()->temp(1).reg(); |
| + const intptr_t cid = field().UnboxedFieldCid(); |
| if (is_initialization_) { |
| + const Class* cls = NULL; |
| + switch (cid) { |
| + case kDoubleCid: |
| + cls = &compiler->double_class(); |
| + break; |
| + // TODO(johnmccutchan): Add kFloat32x4Cid here. |
| + default: |
| + UNREACHABLE(); |
| + } |
| StoreInstanceFieldSlowPath* slow_path = |
| - new StoreInstanceFieldSlowPath(this); |
| + new StoreInstanceFieldSlowPath(*cls, this); |
| compiler->AddSlowPathCode(slow_path); |
| - __ TryAllocate(compiler->double_class(), |
| + __ TryAllocate(*cls, |
| slow_path->entry_label(), |
| temp, |
| temp2); |
| @@ -1670,7 +1681,15 @@ void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| } else { |
| __ ldr(temp, FieldAddress(instance_reg, field().Offset())); |
| } |
| - __ StoreDToOffset(value, temp, Double::value_offset() - kHeapObjectTag); |
| + switch (cid) { |
| + case kDoubleCid: |
| + __ StoreDToOffset(value, temp, Double::value_offset() - kHeapObjectTag); |
| + // TODO(johnmccutchan): Add kFloat32x4Cid here. |
| + break; |
| + default: |
| + UNREACHABLE(); |
| + } |
| + |
| return; |
| } |
| @@ -1680,25 +1699,36 @@ void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| Register temp2 = locs()->temp(1).reg(); |
| DRegister fpu_temp = EvenDRegisterOf(locs()->temp(2).fpu_reg()); |
| - Label store_pointer, copy_payload; |
| + Label store_pointer; |
| + Label copy_double; |
| + Label store_double; |
| + |
| __ LoadObject(temp, Field::ZoneHandle(field().raw())); |
| - __ ldr(temp2, FieldAddress(temp, Field::guarded_cid_offset())); |
| - __ CompareImmediate(temp2, kDoubleCid); |
| - __ b(&store_pointer, NE); |
| + |
| __ ldr(temp2, FieldAddress(temp, Field::is_nullable_offset())); |
| __ CompareImmediate(temp2, kNullCid); |
| __ b(&store_pointer, EQ); |
| + |
| __ ldrb(temp2, FieldAddress(temp, Field::kind_bits_offset())); |
| __ tst(temp2, ShifterOperand(1 << Field::kUnboxingCandidateBit)); |
| __ b(&store_pointer, EQ); |
| + __ ldr(temp2, FieldAddress(temp, Field::guarded_cid_offset())); |
| + __ CompareImmediate(temp2, kDoubleCid); |
| + __ b(&store_double, EQ); |
| + |
| + // Fall through. |
| + __ b(&store_pointer); |
| + |
| + __ Bind(&store_double); |
| + |
| __ ldr(temp, FieldAddress(instance_reg, field().Offset())); |
| __ CompareImmediate(temp, |
| reinterpret_cast<intptr_t>(Object::null())); |
| - __ b(©_payload, NE); |
| + __ b(©_double, NE); |
| StoreInstanceFieldSlowPath* slow_path = |
| - new StoreInstanceFieldSlowPath(this); |
| + new StoreInstanceFieldSlowPath(compiler->double_class(), this); |
| compiler->AddSlowPathCode(slow_path); |
| if (!compiler->is_optimizing()) { |
| @@ -1715,7 +1745,7 @@ void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| __ StoreIntoObject(instance_reg, |
| FieldAddress(instance_reg, field().Offset()), |
| temp2); |
| - __ Bind(©_payload); |
| + __ Bind(©_double); |
| __ LoadDFromOffset(fpu_temp, |
| value_reg, |
| Double::value_offset() - kHeapObjectTag); |
| @@ -1925,7 +1955,16 @@ void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| DRegister result = EvenDRegisterOf(locs()->out().fpu_reg()); |
| Register temp = locs()->temp(0).reg(); |
| __ ldr(temp, FieldAddress(instance_reg, offset_in_bytes())); |
| - __ LoadDFromOffset(result, temp, Double::value_offset() - kHeapObjectTag); |
| + intptr_t cid = field()->UnboxedFieldCid(); |
| + switch (cid) { |
| + case kDoubleCid: |
| + __ LoadDFromOffset(result, temp, |
| + Double::value_offset() - kHeapObjectTag); |
| + break; |
| + // TODO(johnmccutchan): Add Float32x4 path here. |
| + default: |
| + UNREACHABLE(); |
| + } |
| return; |
| } |
| @@ -1936,20 +1975,27 @@ void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| DRegister value = EvenDRegisterOf(locs()->temp(0).fpu_reg()); |
| Label load_pointer; |
| + Label load_double; |
| + |
| __ LoadObject(result_reg, Field::ZoneHandle(field()->raw())); |
| FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset()); |
| FieldAddress field_nullability_operand(result_reg, |
| Field::is_nullable_offset()); |
| - __ ldr(temp, field_cid_operand); |
| - __ CompareImmediate(temp, kDoubleCid); |
| - __ b(&load_pointer, NE); |
| - |
| __ ldr(temp, field_nullability_operand); |
| __ CompareImmediate(temp, kNullCid); |
| __ b(&load_pointer, EQ); |
| + __ ldr(temp, field_cid_operand); |
| + __ CompareImmediate(temp, kDoubleCid); |
| + __ b(&load_double, EQ); |
| + |
| + // Fall through. |
| + __ b(&load_pointer); |
| + |
| + __ Bind(&load_double); |
| + |
| BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this); |
| compiler->AddSlowPathCode(slow_path); |
| @@ -1968,6 +2014,9 @@ void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) { |
| result_reg, |
| Double::value_offset() - kHeapObjectTag); |
| __ b(&done); |
| + |
| + // TODO(johnmccutchan): Add Float32x4 path here. |
| + |
| __ Bind(&load_pointer); |
| } |
| __ LoadFromOffset(kWord, result_reg, |