Chromium Code Reviews

Unified Diff: runtime/vm/intermediate_language_x64.cc

Issue 11956004: Fix vm code base so that it can be built for --arch=simarm (no snapshot yet). (Closed) Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 7 years, 11 months ago
Index: runtime/vm/intermediate_language_x64.cc
===================================================================
--- runtime/vm/intermediate_language_x64.cc (revision 17245)
+++ runtime/vm/intermediate_language_x64.cc (working copy)
@@ -1,4 +1,4 @@
-// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
+// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.
@@ -8,6 +8,7 @@
#include "vm/intermediate_language.h"
#include "lib/error.h"
+#include "vm/dart_entry.h"
#include "vm/flow_graph_compiler.h"
#include "vm/locations.h"
#include "vm/object_store.h"
@@ -274,8 +275,8 @@
const intptr_t kNumTemps = 0;
LocationSummary* locs =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- locs->set_in(0, Location::RequiresXmmRegister());
- locs->set_in(1, Location::RequiresXmmRegister());
+ locs->set_in(0, Location::RequiresFpuRegister());
+ locs->set_in(1, Location::RequiresFpuRegister());
locs->set_out(Location::RequiresRegister());
return locs;
}
@@ -338,7 +339,7 @@
const Array& kNoArgumentNames = Array::Handle();
const int kNumArgumentsChecked = 2;
- const Immediate raw_null =
+ const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
Label check_identity;
__ cmpq(Address(RSP, 0 * kWordSize), raw_null);
@@ -525,7 +526,7 @@
__ testq(left, Immediate(kSmiTagMask));
__ j(ZERO, deopt);
// 'left' is not Smi.
- const Immediate raw_null =
+ const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
Label identity_compare;
__ cmpq(right, raw_null);
@@ -576,7 +577,7 @@
ASSERT(!ic_data.IsNull() && (ic_data.NumberOfChecks() > 0));
Register left = locs->in(0).reg();
Register right = locs->in(1).reg();
- const Immediate raw_null =
+ const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
Label done, identity_compare, non_null_compare;
__ cmpq(right, raw_null);
@@ -661,8 +662,8 @@
const LocationSummary& locs,
Token::Kind kind,
BranchInstr* branch) {
- XmmRegister left = locs.in(0).xmm_reg();
- XmmRegister right = locs.in(1).xmm_reg();
+ XmmRegister left = locs.in(0).fpu_reg();
+ XmmRegister right = locs.in(1).fpu_reg();
Condition true_condition = TokenKindToDoubleCondition(kind);
if (branch != NULL) {
@@ -764,8 +765,8 @@
if (operands_class_id() == kDoubleCid) {
LocationSummary* summary =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- summary->set_in(0, Location::RequiresXmmRegister());
- summary->set_in(1, Location::RequiresXmmRegister());
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_in(1, Location::RequiresFpuRegister());
summary->set_out(Location::RequiresRegister());
return summary;
} else if (operands_class_id() == kSmiCid) {
@@ -1015,7 +1016,7 @@
? Location::RegisterOrSmiConstant(index())
: Location::RequiresRegister());
if (representation() == kUnboxedDouble) {
- locs->set_out(Location::RequiresXmmRegister());
+ locs->set_out(Location::RequiresFpuRegister());
} else {
locs->set_out(Location::RequiresRegister());
}
@@ -1056,12 +1057,12 @@
class_id(), array, Smi::Cast(index.constant()).Value());
if (representation() == kUnboxedDouble) {
- XmmRegister result = locs()->out().xmm_reg();
+ XmmRegister result = locs()->out().fpu_reg();
if (class_id() == kFloat32ArrayCid) {
// Load single precision float.
__ movss(result, element_address);
// Promote to double.
- __ cvtss2sd(result, locs()->out().xmm_reg());
+ __ cvtss2sd(result, locs()->out().fpu_reg());
} else {
ASSERT(class_id() == kFloat64ArrayCid);
__ movsd(result, element_address);
@@ -1135,11 +1136,11 @@
break;
case kFloat32ArrayCid:
// Need temp register for float-to-double conversion.
- locs->AddTemp(Location::RequiresXmmRegister());
+ locs->AddTemp(Location::RequiresFpuRegister());
// Fall through.
case kFloat64ArrayCid:
// TODO(srdjan): Support Float64 constants.
- locs->set_in(2, Location::RequiresXmmRegister());
+ locs->set_in(2, Location::RequiresFpuRegister());
break;
default:
UNREACHABLE();
@@ -1234,12 +1235,12 @@
}
case kFloat32ArrayCid:
// Convert to single precision.
- __ cvtsd2ss(locs()->temp(0).xmm_reg(), locs()->in(2).xmm_reg());
+ __ cvtsd2ss(locs()->temp(0).fpu_reg(), locs()->in(2).fpu_reg());
// Store.
- __ movss(element_address, locs()->temp(0).xmm_reg());
+ __ movss(element_address, locs()->temp(0).fpu_reg());
break;
case kFloat64ArrayCid:
- __ movsd(element_address, locs()->in(2).xmm_reg());
+ __ movsd(element_address, locs()->in(2).fpu_reg());
break;
default:
UNREACHABLE();
@@ -1444,7 +1445,7 @@
Label type_arguments_instantiated;
const intptr_t len = type_arguments().Length();
if (type_arguments().IsRawInstantiatedRaw(len)) {
- const Immediate raw_null =
+ const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
__ cmpq(instantiator_reg, raw_null);
__ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump);
@@ -1505,7 +1506,7 @@
Label type_arguments_instantiated;
const intptr_t len = type_arguments().Length();
if (type_arguments().IsRawInstantiatedRaw(len)) {
- const Immediate raw_null =
+ const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
__ cmpq(instantiator_reg, raw_null);
__ j(EQUAL, &type_arguments_instantiated, Assembler::kNearJump);
@@ -1519,7 +1520,7 @@
Label type_arguments_uninstantiated;
__ CompareClassId(instantiator_reg, kTypeArgumentsCid);
__ j(NOT_EQUAL, &type_arguments_uninstantiated, Assembler::kNearJump);
- Immediate arguments_length =
+ const Immediate& arguments_length =
Immediate(Smi::RawValue(type_arguments().Length()));
__ cmpq(FieldAddress(instantiator_reg, TypeArguments::length_offset()),
arguments_length);
@@ -1559,7 +1560,7 @@
Label done;
const intptr_t len = type_arguments().Length();
if (type_arguments().IsRawInstantiatedRaw(len)) {
- const Immediate raw_null =
+ const Immediate& raw_null =
Immediate(reinterpret_cast<intptr_t>(Object::null()));
Label instantiator_not_null;
__ cmpq(instantiator_reg, raw_null);
@@ -1585,7 +1586,7 @@
// instantiator will have the wrong class (Null instead of TypeArguments).
__ CompareClassId(instantiator_reg, kTypeArgumentsCid);
__ j(NOT_EQUAL, &done, Assembler::kNearJump);
- Immediate arguments_length =
+ const Immediate& arguments_length =
Immediate(Smi::RawValue(type_arguments().Length()));
__ cmpq(FieldAddress(instantiator_reg, TypeArguments::length_offset()),
arguments_length);
@@ -2045,7 +2046,7 @@
new LocationSummary(kNumInputs,
kNumTemps,
LocationSummary::kCallOnSlowPath);
- summary->set_in(0, Location::RequiresXmmRegister());
+ summary->set_in(0, Location::RequiresFpuRegister());
summary->set_out(Location::RequiresRegister());
return summary;
}
@@ -2088,7 +2089,7 @@
compiler->AddSlowPathCode(slow_path);
Register out_reg = locs()->out().reg();
- XmmRegister value = locs()->in(0).xmm_reg();
+ XmmRegister value = locs()->in(0).fpu_reg();
AssemblerMacros::TryAllocate(compiler->assembler(),
compiler->double_class(),
@@ -2107,7 +2108,7 @@
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
summary->set_in(0, Location::RequiresRegister());
if (CanDeoptimize()) summary->set_temp(0, Location::RequiresRegister());
- summary->set_out(Location::RequiresXmmRegister());
+ summary->set_out(Location::RequiresFpuRegister());
return summary;
}
@@ -2115,7 +2116,7 @@
void UnboxDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const intptr_t value_cid = value()->ResultCid();
const Register value = locs()->in(0).reg();
- const XmmRegister result = locs()->out().xmm_reg();
+ const XmmRegister result = locs()->out().fpu_reg();
if (value_cid == kDoubleCid) {
__ movsd(result, FieldAddress(value, Double::value_offset()));
@@ -2125,7 +2126,7 @@
__ SmiTag(value); // Restore input register.
} else {
Label* deopt = compiler->AddDeoptStub(deopt_id_, kDeoptBinaryDoubleOp);
- compiler->LoadDoubleOrSmiToXmm(result,
+ compiler->LoadDoubleOrSmiToFpu(result,
value,
locs()->temp(0).reg(),
deopt);
@@ -2138,18 +2139,18 @@
const intptr_t kNumTemps = 0;
LocationSummary* summary =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- summary->set_in(0, Location::RequiresXmmRegister());
- summary->set_in(1, Location::RequiresXmmRegister());
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_in(1, Location::RequiresFpuRegister());
summary->set_out(Location::SameAsFirstInput());
return summary;
}
void BinaryDoubleOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- XmmRegister left = locs()->in(0).xmm_reg();
- XmmRegister right = locs()->in(1).xmm_reg();
+ XmmRegister left = locs()->in(0).fpu_reg();
+ XmmRegister right = locs()->in(1).fpu_reg();
- ASSERT(locs()->out().xmm_reg() == left);
+ ASSERT(locs()->out().fpu_reg() == left);
switch (op_kind()) {
case Token::kADD: __ addsd(left, right); break;
@@ -2166,14 +2167,14 @@
const intptr_t kNumTemps = 0;
LocationSummary* summary =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- summary->set_in(0, Location::RequiresXmmRegister());
- summary->set_out(Location::RequiresXmmRegister());
+ summary->set_in(0, Location::RequiresFpuRegister());
+ summary->set_out(Location::RequiresFpuRegister());
return summary;
}
void MathSqrtInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- __ sqrtsd(locs()->out().xmm_reg(), locs()->in(0).xmm_reg());
+ __ sqrtsd(locs()->out().fpu_reg(), locs()->in(0).fpu_reg());
}
@@ -2298,7 +2299,7 @@
const intptr_t kNumTemps = 1;
LocationSummary* result = new LocationSummary(
kNumInputs, kNumTemps, LocationSummary::kNoCall);
- result->set_in(0, Location::RequiresXmmRegister());
+ result->set_in(0, Location::RequiresFpuRegister());
result->set_out(Location:: Location::RequiresRegister());
result->set_temp(0, Location::RequiresRegister());
return result;
@@ -2308,7 +2309,7 @@
void DoubleToSmiInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Label* deopt = compiler->AddDeoptStub(deopt_id(), kDeoptDoubleToSmi);
Register result = locs()->out().reg();
- XmmRegister value = locs()->in(0).xmm_reg();
+ XmmRegister value = locs()->in(0).fpu_reg();
Register temp = locs()->temp(0).reg();
__ cvttsd2siq(result, value);
@@ -2328,22 +2329,22 @@
(recognized_kind() == MethodRecognizer::kDoubleRound) ? 1 : 0;
LocationSummary* result =
new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
- result->set_in(0, Location::RequiresXmmRegister());
- result->set_out(Location::RequiresXmmRegister());
+ result->set_in(0, Location::RequiresFpuRegister());
+ result->set_out(Location::RequiresFpuRegister());
if (recognized_kind() == MethodRecognizer::kDoubleRound) {
- result->set_temp(0, Location::RequiresXmmRegister());
+ result->set_temp(0, Location::RequiresFpuRegister());
}
return result;
}
void DoubleToDoubleInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
- XmmRegister value = locs()->in(0).xmm_reg();
- XmmRegister result = locs()->out().xmm_reg();
+ XmmRegister value = locs()->in(0).fpu_reg();
+ XmmRegister result = locs()->out().fpu_reg();
if (recognized_kind() == MethodRecognizer::kDoubleTruncate) {
__ roundsd(result, value, Assembler::kRoundToZero);
} else {
- XmmRegister temp = locs()->temp(0).xmm_reg();
+ XmmRegister temp = locs()->temp(0).fpu_reg();
__ DoubleRound(result, value, temp);
}
}
@@ -2582,6 +2583,317 @@
UNIMPLEMENTED();
}
+
+LocationSummary* ThrowInstr::MakeLocationSummary() const {
+ return new LocationSummary(0, 0, LocationSummary::kCall);
+}
+
+
+void ThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ compiler->GenerateCallRuntime(token_pos(),
+ kThrowRuntimeEntry,
+ locs());
+ __ int3();
+}
+
+
+LocationSummary* ReThrowInstr::MakeLocationSummary() const {
+ return new LocationSummary(0, 0, LocationSummary::kCall);
+}
+
+
+void ReThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ compiler->GenerateCallRuntime(token_pos(),
+ kReThrowRuntimeEntry,
+ locs());
+ __ int3();
+}
+
+
+LocationSummary* GotoInstr::MakeLocationSummary() const {
+ return new LocationSummary(0, 0, LocationSummary::kNoCall);
+}
+
+
+void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ // Add deoptimization descriptor for deoptimizing instructions
+ // that may be inserted before this instruction.
+ if (!compiler->is_optimizing()) {
+ compiler->AddCurrentDescriptor(PcDescriptors::kDeoptBefore,
+ GetDeoptId(),
+ 0); // No token position.
+ }
+
+ if (HasParallelMove()) {
+ compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
+ }
+
+ // We can fall through if the successor is the next block in the list.
+ // Otherwise, we need a jump.
+ if (!compiler->IsNextBlock(successor())) {
+ __ jmp(compiler->GetBlockLabel(successor()));
+ }
+}
+
+
+static Condition NegateCondition(Condition condition) {
+ switch (condition) {
+ case EQUAL: return NOT_EQUAL;
+ case NOT_EQUAL: return EQUAL;
+ case LESS: return GREATER_EQUAL;
+ case LESS_EQUAL: return GREATER;
+ case GREATER: return LESS_EQUAL;
+ case GREATER_EQUAL: return LESS;
+ case BELOW: return ABOVE_EQUAL;
+ case BELOW_EQUAL: return ABOVE;
+ case ABOVE: return BELOW_EQUAL;
+ case ABOVE_EQUAL: return BELOW;
+ default:
+ OS::Print("Error %d\n", condition);
+ UNIMPLEMENTED();
+ return EQUAL;
+ }
+}
+
+
+void ControlInstruction::EmitBranchOnValue(FlowGraphCompiler* compiler,
+ bool value) {
+ if (value && compiler->IsNextBlock(false_successor())) {
+ __ jmp(compiler->GetBlockLabel(true_successor()));
+ } else if (!value && compiler->IsNextBlock(true_successor())) {
+ __ jmp(compiler->GetBlockLabel(false_successor()));
+ }
+}
+
+
+void ControlInstruction::EmitBranchOnCondition(FlowGraphCompiler* compiler,
+ Condition true_condition) {
+ if (compiler->IsNextBlock(false_successor())) {
+ // If the next block is the false successor we will fall through to it.
+ __ j(true_condition, compiler->GetBlockLabel(true_successor()));
+ } else {
+ // If the next block is the true successor we negate comparison and fall
+ // through to it.
+ ASSERT(compiler->IsNextBlock(true_successor()));
+ Condition false_condition = NegateCondition(true_condition);
+ __ j(false_condition, compiler->GetBlockLabel(false_successor()));
+ }
+}
+
+
+LocationSummary* CurrentContextInstr::MakeLocationSummary() const {
+ return LocationSummary::Make(0,
+ Location::RequiresRegister(),
+ LocationSummary::kNoCall);
+}
+
+
+void CurrentContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ __ MoveRegister(locs()->out().reg(), CTX);
+}
+
+
+LocationSummary* StrictCompareInstr::MakeLocationSummary() const {
+ const intptr_t kNumInputs = 2;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* locs =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ locs->set_in(0, Location::RegisterOrConstant(left()));
+ locs->set_in(1, Location::RegisterOrConstant(right()));
+ locs->set_out(Location::RequiresRegister());
+ return locs;
+}
+
+
+// Special code for numbers (compare values instead of references.)
+void StrictCompareInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT);
+ Location left = locs()->in(0);
+ Location right = locs()->in(1);
+ if (left.IsConstant() && right.IsConstant()) {
+ // TODO(vegorov): should be eliminated earlier by constant propagation.
+ const bool result = (kind() == Token::kEQ_STRICT) ?
+ left.constant().raw() == right.constant().raw() :
+ left.constant().raw() != right.constant().raw();
+ __ LoadObject(locs()->out().reg(), result ? Bool::True() : Bool::False());
+ return;
+ }
+ if (left.IsConstant()) {
+ compiler->EmitEqualityRegConstCompare(right.reg(),
+ left.constant(),
+ needs_number_check());
+ } else if (right.IsConstant()) {
+ compiler->EmitEqualityRegConstCompare(left.reg(),
+ right.constant(),
+ needs_number_check());
+ } else {
+ compiler->EmitEqualityRegRegCompare(left.reg(),
+ right.reg(),
+ needs_number_check());
+ }
+
+ Register result = locs()->out().reg();
+ Label load_true, done;
+ Condition true_condition = (kind() == Token::kEQ_STRICT) ? EQUAL : NOT_EQUAL;
+ __ j(true_condition, &load_true, Assembler::kNearJump);
+ __ LoadObject(result, Bool::False());
+ __ jmp(&done, Assembler::kNearJump);
+ __ Bind(&load_true);
+ __ LoadObject(result, Bool::True());
+ __ Bind(&done);
+}
+
+
+void StrictCompareInstr::EmitBranchCode(FlowGraphCompiler* compiler,
+ BranchInstr* branch) {
+ ASSERT(kind() == Token::kEQ_STRICT || kind() == Token::kNE_STRICT);
+ Location left = locs()->in(0);
+ Location right = locs()->in(1);
+ if (left.IsConstant() && right.IsConstant()) {
+ // TODO(vegorov): should be eliminated earlier by constant propagation.
+ const bool result = (kind() == Token::kEQ_STRICT) ?
+ left.constant().raw() == right.constant().raw() :
+ left.constant().raw() != right.constant().raw();
+ branch->EmitBranchOnValue(compiler, result);
+ return;
+ }
+ if (left.IsConstant()) {
+ compiler->EmitEqualityRegConstCompare(right.reg(),
+ left.constant(),
+ needs_number_check());
+ } else if (right.IsConstant()) {
+ compiler->EmitEqualityRegConstCompare(left.reg(),
+ right.constant(),
+ needs_number_check());
+ } else {
+ compiler->EmitEqualityRegRegCompare(left.reg(),
+ right.reg(),
+ needs_number_check());
+ }
+
+ Condition true_condition = (kind() == Token::kEQ_STRICT) ? EQUAL : NOT_EQUAL;
+ branch->EmitBranchOnCondition(compiler, true_condition);
+}
+
+
+void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ // The arguments to the stub include the closure, as does the arguments
+ // descriptor.
+ Register temp_reg = locs()->temp(0).reg();
+ int argument_count = ArgumentCount();
+ const Array& arguments_descriptor =
+ Array::ZoneHandle(ArgumentsDescriptor::New(argument_count,
+ argument_names()));
+ __ LoadObject(temp_reg, arguments_descriptor);
+ compiler->GenerateDartCall(deopt_id(),
+ token_pos(),
+ &StubCode::CallClosureFunctionLabel(),
+ PcDescriptors::kOther,
+ locs());
+ __ Drop(argument_count);
+}
+
+
+LocationSummary* BooleanNegateInstr::MakeLocationSummary() const {
+ return LocationSummary::Make(1,
+ Location::RequiresRegister(),
+ LocationSummary::kNoCall);
+}
+
+
+void BooleanNegateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ Register value = locs()->in(0).reg();
+ Register result = locs()->out().reg();
+
+ Label done;
+ __ LoadObject(result, Bool::True());
+ __ CompareRegisters(result, value);
+ __ j(NOT_EQUAL, &done, Assembler::kNearJump);
+ __ LoadObject(result, Bool::False());
+ __ Bind(&done);
+}
+
+
+LocationSummary* ChainContextInstr::MakeLocationSummary() const {
+ return LocationSummary::Make(1,
+ Location::NoLocation(),
+ LocationSummary::kNoCall);
+}
+
+
+void ChainContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ Register context_value = locs()->in(0).reg();
+
+ // Chain the new context in context_value to its parent in CTX.
+ __ StoreIntoObject(context_value,
+ FieldAddress(context_value, Context::parent_offset()),
+ CTX);
+ // Set new context as current context.
+ __ MoveRegister(CTX, context_value);
+}
+
+
+LocationSummary* StoreVMFieldInstr::MakeLocationSummary() const {
+ const intptr_t kNumInputs = 2;
+ const intptr_t kNumTemps = 0;
+ LocationSummary* locs =
+ new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+ locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister()
+ : Location::RequiresRegister());
+ locs->set_in(1, Location::RequiresRegister());
+ return locs;
+}
+
+
+void StoreVMFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ Register value_reg = locs()->in(0).reg();
+ Register dest_reg = locs()->in(1).reg();
+
+ if (value()->NeedsStoreBuffer()) {
+ __ StoreIntoObject(dest_reg, FieldAddress(dest_reg, offset_in_bytes()),
+ value_reg);
+ } else {
+ __ StoreIntoObjectNoBarrier(
+ dest_reg, FieldAddress(dest_reg, offset_in_bytes()), value_reg);
+ }
+}
+
+
+LocationSummary* AllocateObjectInstr::MakeLocationSummary() const {
+ return MakeCallSummary();
+}
+
+
+void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ const Class& cls = Class::ZoneHandle(constructor().Owner());
+ const Code& stub = Code::Handle(StubCode::GetAllocationStubForClass(cls));
+ const ExternalLabel label(cls.ToCString(), stub.EntryPoint());
+ compiler->GenerateCall(token_pos(),
+ &label,
+ PcDescriptors::kOther,
+ locs());
+ __ Drop(ArgumentCount()); // Discard arguments.
+}
+
+
+LocationSummary* CreateClosureInstr::MakeLocationSummary() const {
+ return MakeCallSummary();
+}
+
+
+void CreateClosureInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
+ const Function& closure_function = function();
+ ASSERT(!closure_function.IsImplicitStaticClosureFunction());
+ const Code& stub = Code::Handle(
+ StubCode::GetAllocationStubForClosure(closure_function));
+ const ExternalLabel label(closure_function.ToCString(), stub.EntryPoint());
+ compiler->GenerateCall(token_pos(),
+ &label,
+ PcDescriptors::kOther,
+ locs());
+ __ Drop(2); // Discard type arguments and receiver.
+}
+
} // namespace dart
#undef __
