Chromium Code Reviews

Unified Diff: runtime/vm/intermediate_language_ia32.cc

Issue 150063004: Support reusable boxes for Float32x4 fields (Closed)
Base URL: https://dart.googlecode.com/svn/branches/bleeding_edge/dart
Patch Set: Created 6 years, 10 months ago
Index: runtime/vm/intermediate_language_ia32.cc
diff --git a/runtime/vm/intermediate_language_ia32.cc b/runtime/vm/intermediate_language_ia32.cc
index c1b3e0435bfc0b34ac238c4fff8c7fa10052858e..5655f75ee9626477a74f01ed5bf04e88e8b8ccc3 100644
--- a/runtime/vm/intermediate_language_ia32.cc
+++ b/runtime/vm/intermediate_language_ia32.cc
@@ -1590,36 +1590,74 @@ void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
+bool Field::IsUnboxedField() const {
+ bool valid_class = (guarded_cid() == kDoubleCid) ||
+ (guarded_cid() == kFloat32x4Cid);
+ return is_unboxing_candidate() && !is_final() && !is_nullable() &&
+ valid_class;
+}
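
The predicate above is the gate for the whole patch: a field is stored unboxed only when it is an unboxing candidate, non-final, non-nullable, and its guarded class id is kDoubleCid or kFloat32x4Cid. A minimal standalone sketch of that rule, using hypothetical FieldInfo/ClassId types rather than the VM's real Field class:

// Hypothetical sketch, not VM code: the same eligibility rule as
// Field::IsUnboxedField() above, over a simplified field descriptor.
#include <cstdio>

enum ClassId { kDynamicCid, kDoubleCid, kFloat32x4Cid };

struct FieldInfo {
  ClassId guarded_cid;         // the single class id the guard has seen
  bool is_unboxing_candidate;  // cleared when unboxing is not wanted
  bool is_final;
  bool is_nullable;            // true once a null has been stored
};

static bool IsUnboxedField(const FieldInfo& f) {
  const bool valid_class = (f.guarded_cid == kDoubleCid) ||
                           (f.guarded_cid == kFloat32x4Cid);
  return f.is_unboxing_candidate && !f.is_final && !f.is_nullable &&
         valid_class;
}

int main() {
  const FieldInfo f = {kFloat32x4Cid, true, false, false};
  std::printf("unboxed: %d\n", IsUnboxedField(f));  // unboxed: 1
  return 0;
}
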
+
+
class StoreInstanceFieldSlowPath : public SlowPathCode {
public:
- StoreInstanceFieldSlowPath(StoreInstanceFieldInstr* instruction,
- const Class& cls)
- : instruction_(instruction), cls_(cls) { }
+ explicit StoreInstanceFieldSlowPath(StoreInstanceFieldInstr* instruction)
+ : instruction_(instruction) { }
virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
__ Comment("StoreInstanceFieldSlowPath");
- __ Bind(entry_label());
- const Code& stub =
- Code::Handle(StubCode::GetAllocationStubForClass(cls_));
- const ExternalLabel label(cls_.ToCString(), stub.EntryPoint());
-
- LocationSummary* locs = instruction_->locs();
- locs->live_registers()->Remove(locs->out());
-
- compiler->SaveLiveRegisters(locs);
- compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
- &label,
- PcDescriptors::kOther,
- locs);
- __ MoveRegister(locs->temp(0).reg(), EAX);
- compiler->RestoreLiveRegisters(locs);
+ {
+ __ Bind(double_entry_label());
+ const Class& cls = compiler->double_class();
+ const Code& stub =
+ Code::Handle(StubCode::GetAllocationStubForClass(cls));
+ const ExternalLabel label(cls.ToCString(), stub.EntryPoint());
+
+ LocationSummary* locs = instruction_->locs();
+ locs->live_registers()->Remove(locs->out());
+ compiler->SaveLiveRegisters(locs);
+ compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
+ &label,
+ PcDescriptors::kOther,
+ locs);
+ __ MoveRegister(locs->temp(0).reg(), EAX);
+ compiler->RestoreLiveRegisters(locs);
+ __ jmp(double_exit_label());
+ }
+ {
+ __ Bind(float32x4_entry_label());
+ const Class& cls = compiler->float32x4_class();
+ const Code& stub =
+ Code::Handle(StubCode::GetAllocationStubForClass(cls));
+ const ExternalLabel label(cls.ToCString(), stub.EntryPoint());
+ LocationSummary* locs = instruction_->locs();
+ locs->live_registers()->Remove(locs->out());
+ compiler->SaveLiveRegisters(locs);
+ compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
+ &label,
+ PcDescriptors::kOther,
+ locs);
+ __ MoveRegister(locs->temp(0).reg(), EAX);
+ compiler->RestoreLiveRegisters(locs);
+ __ jmp(float32x4_exit_label());
+ }
+ }
- __ jmp(exit_label());
+ Label* double_entry_label() {
+ // Use default SlowPathCode label for double.
+ return entry_label();
}
+ Label* double_exit_label() {
+ // Use default SlowPathCode label for double.
+ return exit_label();
+ }
+
+ Label* float32x4_entry_label() { return &float32x4_entry_label_; }
+ Label* float32x4_exit_label() { return &float32x4_exit_label_; }
private:
+ Label float32x4_entry_label_;
+ Label float32x4_exit_label_;
StoreInstanceFieldInstr* instruction_;
- const Class& cls_;
};
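
Compared to the old version, the slow path no longer captures a single cls_; it emits one stub-call sequence per supported class, each with its own entry and exit labels, so the TryAllocate fast path can branch to the right allocator and resume at the right spot. A rough standalone model of that fast-path/slow-path split, with hypothetical names (a bump-pointer nursery standing in for TryAllocate and plain new standing in for the allocation stubs):

// Hypothetical model, not VM code: one slow-path object serving two
// classes, as in StoreInstanceFieldSlowPath above.
#include <cstddef>
#include <new>

enum ClassId { kDoubleCid, kFloat32x4Cid };

struct Box {
  ClassId cid;
  alignas(16) unsigned char payload[16];  // 8 used for double, 16 for simd
};

// Fast path: bump allocation, like TryAllocate; nullptr means "branch to
// the class-specific entry label of the slow path".
alignas(16) static unsigned char nursery[4 * sizeof(Box)];
static std::size_t top = 0;

static Box* TryAllocate(ClassId cid) {
  if (top + sizeof(Box) > sizeof(nursery)) return nullptr;
  Box* box = ::new (&nursery[top]) Box{cid, {}};
  top += sizeof(Box);
  return box;
}

// Stands in for the per-class allocation stubs reached via
// double_entry_label() / float32x4_entry_label().
static Box* AllocateViaStub(ClassId cid) { return new Box{cid, {}}; }

static Box* AllocateBox(ClassId cid) {
  if (Box* box = TryAllocate(cid)) return box;  // fast path
  return AllocateViaStub(cid);  // slow path, resumes at the exit label
}

int main() {
  Box* box = AllocateBox(kDoubleCid);
  return (box->cid == kDoubleCid) ? 0 : 1;
}

In the generated code both blocks share the instruction's LocationSummary; only the allocation stub and the label pair differ between the double and Float32x4 sequences.
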
@@ -1666,25 +1704,34 @@ void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t cid = field().UnboxedFieldCid();
if (is_initialization_) {
+ StoreInstanceFieldSlowPath* slow_path =
+ new StoreInstanceFieldSlowPath(this);
+ compiler->AddSlowPathCode(slow_path);
+
const Class* cls = NULL;
+ Label* entry_label = NULL;
+ Label* exit_label = NULL;
switch (cid) {
case kDoubleCid:
cls = &compiler->double_class();
+ entry_label = slow_path->double_entry_label();
+ exit_label = slow_path->double_exit_label();
+ break;
+ case kFloat32x4Cid:
+ cls = &compiler->float32x4_class();
+ entry_label = slow_path->float32x4_entry_label();
+ exit_label = slow_path->float32x4_exit_label();
break;
- // TODO(johnmccutchan): Add kFloat32x4Cid here.
default:
UNREACHABLE();
}
- StoreInstanceFieldSlowPath* slow_path =
- new StoreInstanceFieldSlowPath(this, *cls);
- compiler->AddSlowPathCode(slow_path);
__ TryAllocate(*cls,
- slow_path->entry_label(),
+ entry_label,
Assembler::kFarJump,
temp,
temp2);
- __ Bind(slow_path->exit_label());
+ __ Bind(exit_label);
__ movl(temp2, temp);
__ StoreIntoObject(instance_reg,
FieldAddress(instance_reg, field().Offset()),
@@ -1694,8 +1741,12 @@ void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
switch (cid) {
case kDoubleCid:
- __ movsd(FieldAddress(temp, Double::value_offset()), value);
- // TODO(johnmccutchan): Add kFloat32x4Cid here.
+ __ Comment("UnboxedDoubleStoreInstanceFieldInstr");
+ __ movsd(FieldAddress(temp, Double::value_offset()), value);
+ break;
+ case kFloat32x4Cid:
+ __ Comment("UnboxedFloat32x4StoreInstanceFieldInstr");
+ __ movups(FieldAddress(temp, Float32x4::value_offset()), value);
break;
default:
UNREACHABLE();
@@ -1710,8 +1761,8 @@ void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
FpuRegister fpu_temp = locs()->temp(2).fpu_reg();
Label store_pointer;
- Label copy_double;
Label store_double;
+ Label store_float32x4;
__ LoadObject(temp, Field::ZoneHandle(field().raw()));
@@ -1727,41 +1778,80 @@ void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Immediate(kDoubleCid));
__ j(EQUAL, &store_double);
+ __ cmpl(FieldAddress(temp, Field::guarded_cid_offset()),
+ Immediate(kFloat32x4Cid));
+ __ j(EQUAL, &store_float32x4);
+
// Fall through.
__ jmp(&store_pointer);
- __ Bind(&store_double);
-
- const Immediate& raw_null =
- Immediate(reinterpret_cast<intptr_t>(Object::null()));
- __ movl(temp, FieldAddress(instance_reg, field().Offset()));
- __ cmpl(temp, raw_null);
- __ j(NOT_EQUAL, &copy_double);
-
StoreInstanceFieldSlowPath* slow_path =
- new StoreInstanceFieldSlowPath(this, compiler->double_class());
+ new StoreInstanceFieldSlowPath(this);
compiler->AddSlowPathCode(slow_path);
- if (!compiler->is_optimizing()) {
- locs()->live_registers()->Add(locs()->in(0));
- locs()->live_registers()->Add(locs()->in(1));
+ {
+ __ Bind(&store_double);
+ Label copy_double;
+
+ const Immediate& raw_null =
+ Immediate(reinterpret_cast<intptr_t>(Object::null()));
+ __ movl(temp, FieldAddress(instance_reg, field().Offset()));
+ __ cmpl(temp, raw_null);
+ __ j(NOT_EQUAL, &copy_double);
+
+ if (!compiler->is_optimizing()) {
+ locs()->live_registers()->Add(locs()->in(0));
+ locs()->live_registers()->Add(locs()->in(1));
+ }
+
+ __ TryAllocate(compiler->double_class(),
+ slow_path->double_entry_label(),
+ Assembler::kFarJump,
+ temp,
+ temp2);
+ __ Bind(slow_path->double_exit_label());
+ __ movl(temp2, temp);
+ __ StoreIntoObject(instance_reg,
+ FieldAddress(instance_reg, field().Offset()),
+ temp2);
+
+ __ Bind(&copy_double);
+ __ movsd(fpu_temp, FieldAddress(value_reg, Double::value_offset()));
+ __ movsd(FieldAddress(temp, Double::value_offset()), fpu_temp);
+ __ jmp(&skip_store);
}
- __ TryAllocate(compiler->double_class(),
- slow_path->entry_label(),
- Assembler::kFarJump,
- temp,
- temp2);
- __ Bind(slow_path->exit_label());
- __ movl(temp2, temp);
- __ StoreIntoObject(instance_reg,
- FieldAddress(instance_reg, field().Offset()),
- temp2);
+ {
+ __ Bind(&store_float32x4);
+ Label copy_float32x4;
+
+ const Immediate& raw_null =
+ Immediate(reinterpret_cast<intptr_t>(Object::null()));
+ __ movl(temp, FieldAddress(instance_reg, field().Offset()));
+ __ cmpl(temp, raw_null);
+ __ j(NOT_EQUAL, &copy_float32x4);
+
+ if (!compiler->is_optimizing()) {
+ locs()->live_registers()->Add(locs()->in(0));
+ locs()->live_registers()->Add(locs()->in(1));
+ }
+
+ __ TryAllocate(compiler->float32x4_class(),
+ slow_path->float32x4_entry_label(),
+ Assembler::kFarJump,
+ temp,
+ temp2);
+ __ Bind(slow_path->float32x4_exit_label());
+ __ movl(temp2, temp);
+ __ StoreIntoObject(instance_reg,
+ FieldAddress(instance_reg, field().Offset()),
+ temp2);
- __ Bind(&copy_double);
- __ movsd(fpu_temp, FieldAddress(value_reg, Double::value_offset()));
- __ movsd(FieldAddress(temp, Double::value_offset()), fpu_temp);
- __ jmp(&skip_store);
+ __ Bind(&copy_float32x4);
+ __ movups(fpu_temp, FieldAddress(value_reg, Float32x4::value_offset()));
+ __ movups(FieldAddress(temp, Float32x4::value_offset()), fpu_temp);
+ __ jmp(&skip_store);
+ }
__ Bind(&store_pointer);
}
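
This is the pattern the CL's title refers to: on a store into a potentially unboxed field, the existing box is reused when the field already holds one, and the per-class slow path is only reached when the field is still null and TryAllocate fails. A simplified standalone model of the Float32x4 store sequence above (hypothetical types; StoreIntoObject's write barrier reduced to a plain pointer store):

// Hypothetical model, not VM code: the reusable-box store emitted above
// for a Float32x4 field.
#include <cstring>

struct Float32x4Box {
  alignas(16) float value[4];
};

struct Instance {
  Float32x4Box* field;  // Object::null() modeled as nullptr
};

// Stands in for TryAllocate plus the float32x4 allocation stub.
static Float32x4Box* AllocateFloat32x4Box() { return new Float32x4Box(); }

static void StoreUnboxed(Instance* obj, const float v[4]) {
  Float32x4Box* box = obj->field;
  if (box == nullptr) {
    // First store: allocate a box and publish it (StoreIntoObject).
    box = AllocateFloat32x4Box();
    obj->field = box;
  }
  // copy_float32x4: overwrite the payload in place (the movups pair);
  // no allocation and no write barrier on any subsequent store.
  std::memcpy(box->value, v, sizeof(box->value));
}

int main() {
  Instance obj = {nullptr};
  const float v[4] = {1.0f, 2.0f, 3.0f, 4.0f};
  StoreUnboxed(&obj, v);  // allocates the box
  StoreUnboxed(&obj, v);  // reuses the same box
  return 0;
}
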
@@ -1906,34 +1996,70 @@ void AllocateObjectWithBoundsCheckInstr::EmitNativeCode(
}
-class BoxDoubleSlowPath : public SlowPathCode {
+class LoadFieldSlowPath : public SlowPathCode {
public:
- explicit BoxDoubleSlowPath(Instruction* instruction)
+ explicit LoadFieldSlowPath(Instruction* instruction)
: instruction_(instruction) { }
virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
- __ Comment("BoxDoubleSlowPath");
- __ Bind(entry_label());
- const Class& double_class = compiler->double_class();
- const Code& stub =
- Code::Handle(StubCode::GetAllocationStubForClass(double_class));
- const ExternalLabel label(double_class.ToCString(), stub.EntryPoint());
-
- LocationSummary* locs = instruction_->locs();
- locs->live_registers()->Remove(locs->out());
-
- compiler->SaveLiveRegisters(locs);
- compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
- &label,
- PcDescriptors::kOther,
- locs);
- __ MoveRegister(locs->out().reg(), EAX);
- compiler->RestoreLiveRegisters(locs);
+ __ Comment("LoadFieldSlowPath");
+ {
+ __ Bind(double_entry_label());
+ const Class& double_class = compiler->double_class();
+ const Code& stub =
+ Code::Handle(StubCode::GetAllocationStubForClass(double_class));
+ const ExternalLabel label(double_class.ToCString(), stub.EntryPoint());
+
+ LocationSummary* locs = instruction_->locs();
+ locs->live_registers()->Remove(locs->out());
+
+ compiler->SaveLiveRegisters(locs);
+ compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
+ &label,
+ PcDescriptors::kOther,
+ locs);
+ __ MoveRegister(locs->out().reg(), EAX);
+ compiler->RestoreLiveRegisters(locs);
+
+ __ jmp(double_exit_label());
+ }
+ {
+ __ Bind(float32x4_entry_label());
+ const Class& float32x4_class = compiler->float32x4_class();
+ const Code& stub =
+ Code::Handle(StubCode::GetAllocationStubForClass(float32x4_class));
+ const ExternalLabel label(float32x4_class.ToCString(), stub.EntryPoint());
+
+ LocationSummary* locs = instruction_->locs();
+ locs->live_registers()->Remove(locs->out());
+
+ compiler->SaveLiveRegisters(locs);
+ compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
+ &label,
+ PcDescriptors::kOther,
+ locs);
+ __ MoveRegister(locs->out().reg(), EAX);
+ compiler->RestoreLiveRegisters(locs);
+
+ __ jmp(float32x4_exit_label());
+ }
+ }
- __ jmp(exit_label());
+ Label* double_entry_label() {
+ // Use default SlowPathCode label for double.
+ return entry_label();
}
+ Label* double_exit_label() {
+ // Use default SlowPathCode label for double.
+ return exit_label();
+ }
+
+ Label* float32x4_entry_label() { return &float32x4_entry_label_; }
+ Label* float32x4_exit_label() { return &float32x4_exit_label_; }
private:
+ Label float32x4_entry_label_;
+ Label float32x4_exit_label_;
Instruction* instruction_;
};
@@ -1971,9 +2097,13 @@ void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t cid = field()->UnboxedFieldCid();
switch (cid) {
case kDoubleCid:
+ __ Comment("UnboxedDoubleLoadFieldInstr");
__ movsd(result, FieldAddress(temp, Double::value_offset()));
break;
- // TODO(johnmccutchan): Add Float32x4 path here.
+ case kFloat32x4Cid:
+ __ Comment("UnboxedFloat32x4LoadFieldInstr");
+ __ movups(result, FieldAddress(temp, Float32x4::value_offset()));
+ break;
default:
UNREACHABLE();
}
@@ -1985,9 +2115,13 @@ void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (IsPotentialUnboxedLoad()) {
Register temp = locs()->temp(1).reg();
XmmRegister value = locs()->temp(0).fpu_reg();
+ LoadFieldSlowPath* slow_path = new LoadFieldSlowPath(this);
+ compiler->AddSlowPathCode(slow_path);
Label load_pointer;
Label load_double;
+ Label load_float32x4;
+
__ LoadObject(result, Field::ZoneHandle(field()->raw()));
FieldAddress field_cid_operand(result, Field::guarded_cid_offset());
@@ -1999,29 +2133,49 @@ void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ cmpl(field_cid_operand, Immediate(kDoubleCid));
__ j(EQUAL, &load_double);
+ __ cmpl(field_cid_operand, Immediate(kFloat32x4Cid));
+ __ j(EQUAL, &load_float32x4);
+
// Fall through.
__ jmp(&load_pointer);
- __ Bind(&load_double);
- BoxDoubleSlowPath* slow_path = new BoxDoubleSlowPath(this);
- compiler->AddSlowPathCode(slow_path);
+ {
+ __ Bind(&load_double);
- if (!compiler->is_optimizing()) {
- locs()->live_registers()->Add(locs()->in(0));
+ if (!compiler->is_optimizing()) {
+ locs()->live_registers()->Add(locs()->in(0));
+ }
+
+ __ TryAllocate(compiler->double_class(),
+ slow_path->double_entry_label(),
+ Assembler::kFarJump,
+ result,
+ temp);
+ __ Bind(slow_path->double_exit_label());
+ __ movl(temp, FieldAddress(instance_reg, offset_in_bytes()));
+ __ movsd(value, FieldAddress(temp, Double::value_offset()));
+ __ movsd(FieldAddress(result, Double::value_offset()), value);
+ __ jmp(&done);
}
- __ TryAllocate(compiler->double_class(),
- slow_path->entry_label(),
- Assembler::kFarJump,
- result,
- temp);
- __ Bind(slow_path->exit_label());
- __ movl(temp, FieldAddress(instance_reg, offset_in_bytes()));
- __ movsd(value, FieldAddress(temp, Double::value_offset()));
- __ movsd(FieldAddress(result, Double::value_offset()), value);
- __ jmp(&done);
+ {
+ __ Bind(&load_float32x4);
+
+ if (!compiler->is_optimizing()) {
+ locs()->live_registers()->Add(locs()->in(0));
+ }
- // TODO(johnmccutchan): Add Float32x4 path here.
+ __ TryAllocate(compiler->float32x4_class(),
+ slow_path->float32x4_entry_label(),
+ Assembler::kFarJump,
+ result,
+ temp);
+ __ Bind(slow_path->float32x4_exit_label());
+ __ movl(temp, FieldAddress(instance_reg, offset_in_bytes()));
+ __ movups(value, FieldAddress(temp, Float32x4::value_offset()));
+ __ movups(FieldAddress(result, Float32x4::value_offset()), value);
+ __ jmp(&done);
+ }
__ Bind(&load_pointer);
}
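
Loads are the mirror image: the field's private box is never handed to the caller, so a load always materializes a fresh box and copies the payload out, 8 bytes via movsd for a double and 16 bytes via movups for a Float32x4. A simplified standalone model (hypothetical types) of the double case:

// Hypothetical model, not VM code: loading an unboxed double field.
#include <cstring>

struct DoubleBox {
  double value;
};

struct Instance {
  DoubleBox* field;  // the field's private, reusable box
};

// Stands in for TryAllocate plus the double allocation stub.
static DoubleBox* AllocateDoubleBox() { return new DoubleBox(); }

static DoubleBox* LoadBoxed(const Instance* obj) {
  DoubleBox* result = AllocateDoubleBox();
  // The movsd/movsd pair above: copy the 8-byte payload out of the
  // field's box into the freshly allocated result box.
  std::memcpy(&result->value, &obj->field->value, sizeof(double));
  return result;
}

int main() {
  DoubleBox box = {3.25};
  Instance obj = {&box};
  DoubleBox* copy = LoadBoxed(&obj);
  return (copy->value == 3.25) ? 0 : 1;
}
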
@@ -2877,6 +3031,38 @@ void CheckEitherNonSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
+class BoxDoubleSlowPath : public SlowPathCode {
+ public:
+ explicit BoxDoubleSlowPath(BoxDoubleInstr* instruction)
+ : instruction_(instruction) { }
+
+ virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
+ __ Comment("BoxDoubleSlowPath");
+ __ Bind(entry_label());
+ const Class& double_class = compiler->double_class();
+ const Code& stub =
+ Code::Handle(StubCode::GetAllocationStubForClass(double_class));
+ const ExternalLabel label(double_class.ToCString(), stub.EntryPoint());
+
+ LocationSummary* locs = instruction_->locs();
+ locs->live_registers()->Remove(locs->out());
+
+ compiler->SaveLiveRegisters(locs);
+ compiler->GenerateCall(Scanner::kNoSourcePos, // No token position.
+ &label,
+ PcDescriptors::kOther,
+ locs);
+ __ MoveRegister(locs->out().reg(), EAX);
+ compiler->RestoreLiveRegisters(locs);
+
+ __ jmp(exit_label());
+ }
+
+ private:
+ BoxDoubleInstr* instruction_;
+};
+
+
LocationSummary* BoxDoubleInstr::MakeLocationSummary(bool opt) const {
const intptr_t kNumInputs = 1;
const intptr_t kNumTemps = 0;
@@ -2953,19 +3139,6 @@ void UnboxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
-LocationSummary* BoxFloat32x4Instr::MakeLocationSummary(bool opt) const {
- const intptr_t kNumInputs = 1;
- const intptr_t kNumTemps = 0;
- LocationSummary* summary =
- new LocationSummary(kNumInputs,
- kNumTemps,
- LocationSummary::kCallOnSlowPath);
- summary->set_in(0, Location::RequiresFpuRegister());
- summary->set_out(Location::RequiresRegister());
- return summary;
-}
-
-
class BoxFloat32x4SlowPath : public SlowPathCode {
public:
explicit BoxFloat32x4SlowPath(BoxFloat32x4Instr* instruction)
@@ -2998,6 +3171,19 @@ class BoxFloat32x4SlowPath : public SlowPathCode {
};
+LocationSummary* BoxFloat32x4Instr::MakeLocationSummary(bool opt) const {
+ const intptr_t kNumInputs = 1;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* summary =
+ new LocationSummary(kNumInputs,
+ kNumTemps,
+ LocationSummary::kCallOnSlowPath);
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_out(Location::RequiresRegister());
+ return summary;
+}
+
+
void BoxFloat32x4Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
BoxFloat32x4SlowPath* slow_path = new BoxFloat32x4SlowPath(this);
compiler->AddSlowPathCode(slow_path);
