Index: runtime/vm/intermediate_language_arm64.cc
===================================================================
--- runtime/vm/intermediate_language_arm64.cc (revision 35551)
+++ runtime/vm/intermediate_language_arm64.cc (working copy)
@@ -109,13 +109,54 @@
 LocationSummary* ClosureCallInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  return MakeCallSummary();
 }
 
 
 void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  // Load closure object (first argument) in R1.
+  int argument_count = ArgumentCount();
+  __ LoadFromOffset(R1, SP, (argument_count - 1) * kWordSize);
+
+  // Load arguments descriptor in R4.
+  const Array& arguments_descriptor =
+      Array::ZoneHandle(ArgumentsDescriptor::New(argument_count,
+                                                 argument_names()));
+  __ LoadObject(R4, arguments_descriptor, PP);
+
+  // Load the closure function into R0.
+  __ LoadFieldFromOffset(R0, R1, Closure::function_offset());
+
+  // Load closure context in CTX; note that CTX has already been preserved.
+  __ LoadFieldFromOffset(CTX, R1, Closure::context_offset());
+
+  // R4: Arguments descriptor.
+  // R0: Function.
+  __ LoadFieldFromOffset(R2, R0, Function::code_offset());
+
+  // R2: code.
+  // R5: Smi 0 (no IC data; the lazy-compile stub expects a GC-safe value).
+  __ LoadImmediate(R5, 0, PP);
+  __ LoadFieldFromOffset(R2, R2, Code::instructions_offset());
+  __ AddImmediate(R2, R2, Instructions::HeaderSize() - kHeapObjectTag, PP);
+  __ blr(R2);
+  compiler->AddCurrentDescriptor(PcDescriptors::kClosureCall,
+                                 deopt_id(),
+                                 token_pos());
+  compiler->RecordSafepoint(locs());
+  // Marks either the continuation point in unoptimized code or the
+  // deoptimization point in optimized code, after the call.
+  const intptr_t deopt_id_after = Isolate::ToDeoptAfter(deopt_id());
+  if (compiler->is_optimizing()) {
+    compiler->AddDeoptIndexAtCall(deopt_id_after, token_pos());
+  } else {
+    // Add deoptimization continuation point after the call and before the
+    // arguments are removed.
+    compiler->AddCurrentDescriptor(PcDescriptors::kDeopt,
+                                   deopt_id_after,
+                                   token_pos());
+  }
+  __ Drop(argument_count);
 }
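For reference, the stack layout assumed by the first load above: arguments are pushed left-to-right, so the closure (argument 0) sits deepest, (argument_count - 1) words above SP. A standalone sketch of that arithmetic, with illustrative constants rather than the VM's headers:

    #include <cstddef>

    constexpr size_t kWordSize = 8;  // 64-bit words on ARM64.

    // Byte offset from SP of argument i (0-based, pushed first) after all
    // argc arguments have been pushed.
    constexpr size_t ArgumentOffsetFromSp(size_t i, size_t argc) {
      return (argc - 1 - i) * kWordSize;
    }

    // The closure itself is argument 0, hence the deepest slot.
    static_assert(ArgumentOffsetFromSp(0, 3) == 16, "closure at SP + 16");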
@@ -164,8 +205,15 @@
 LocationSummary* AssertAssignableInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  const intptr_t kNumInputs = 3;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary =
+      new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
+  summary->set_in(0, Location::RegisterLocation(R0));  // Value.
+  summary->set_in(1, Location::RegisterLocation(R2));  // Instantiator.
+  summary->set_in(2, Location::RegisterLocation(R1));  // Type arguments.
+  summary->set_out(0, Location::RegisterLocation(R0));
+  return summary;
 }
@@ -365,13 +413,28 @@
 LocationSummary* StringInterpolateInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary =
+      new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
+  summary->set_in(0, Location::RegisterLocation(R0));
+  summary->set_out(0, Location::RegisterLocation(R0));
+  return summary;
 }
 
 
 void StringInterpolateInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  Register array = locs()->in(0).reg();
+  __ Push(array);
+  const int kNumberOfArguments = 1;
+  const Array& kNoArgumentNames = Object::null_array();
+  compiler->GenerateStaticCall(deopt_id(),
+                               token_pos(),
+                               CallFunction(),
+                               kNumberOfArguments,
+                               kNoArgumentNames,
+                               locs());
+  ASSERT(locs()->out(0).reg() == R0);
 }
@@ -387,13 +450,25 @@
 LocationSummary* LoadClassIdInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  const intptr_t kNumInputs = 1;
+  return LocationSummary::Make(kNumInputs,
+                               Location::RequiresRegister(),
+                               LocationSummary::kNoCall);
 }
 
 
 void LoadClassIdInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  Register object = locs()->in(0).reg();
+  Register result = locs()->out(0).reg();
+  Label load, done;
+  __ tsti(object, kSmiTagMask);
+  __ b(&load, NE);
+  __ LoadImmediate(result, Smi::RawValue(kSmiCid), PP);
+  __ b(&done);
+  __ Bind(&load);
+  __ LoadClassId(result, object);
+  __ SmiTag(result);
+  __ Bind(&done);
 }
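The branch structure above is the usual smi check: a word with a clear low tag bit is a smi and has no header to read, so its class id is implicit (the emitted code additionally smi-tags the result before returning it). A self-contained C++ sketch of the same classification, where the tag and cid values are assumptions for illustration rather than the VM's actual constants:

    #include <cstdint>

    constexpr uintptr_t kTagMask = 1;      // Assumed: low bit clear => smi.
    constexpr intptr_t kSmiClassId = 59;   // Placeholder class id.

    struct FakeHeader { intptr_t cid; };   // Stand-in for a heap object header.

    intptr_t ClassIdOfTagged(uintptr_t tagged) {
      if ((tagged & kTagMask) == 0) {
        return kSmiClassId;  // Smis carry no header; the cid is implicit.
      }
      // Clear the tag bit to recover the object pointer, then read the cid.
      return reinterpret_cast<const FakeHeader*>(tagged & ~kTagMask)->cid;
    }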
@@ -422,63 +497,756 @@
 Representation StoreIndexedInstr::RequiredInputRepresentation(
     intptr_t idx) const {
-  UNIMPLEMENTED();
-  return kTagged;
+  // Array can be a Dart object or a pointer to external data.
+  if (idx == 0) return kNoRepresentation;  // Flexible input representation.
+  if (idx == 1) return kTagged;  // Index is a smi.
+  ASSERT(idx == 2);
+  switch (class_id_) {
+    case kArrayCid:
+    case kOneByteStringCid:
+    case kTypedDataInt8ArrayCid:
+    case kTypedDataUint8ArrayCid:
+    case kExternalTypedDataUint8ArrayCid:
+    case kTypedDataUint8ClampedArrayCid:
+    case kExternalTypedDataUint8ClampedArrayCid:
+    case kTypedDataInt16ArrayCid:
+    case kTypedDataUint16ArrayCid:
+      return kTagged;
+    case kTypedDataInt32ArrayCid:
+    case kTypedDataUint32ArrayCid:
+      return value()->IsSmiValue() ? kTagged : kUnboxedMint;
+    case kTypedDataFloat32ArrayCid:
+    case kTypedDataFloat64ArrayCid:
+      return kUnboxedDouble;
+    case kTypedDataFloat32x4ArrayCid:
+      return kUnboxedFloat32x4;
+    case kTypedDataInt32x4ArrayCid:
+      return kUnboxedInt32x4;
+    case kTypedDataFloat64x2ArrayCid:
+      return kUnboxedFloat64x2;
+    default:
+      UNREACHABLE();
+      return kTagged;
+  }
 }
 
 
 LocationSummary* StoreIndexedInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  const intptr_t kNumInputs = 3;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs =
+      new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  locs->set_in(0, Location::RequiresRegister());
+  // The smi index is either untagged (element size == 1), or it is left smi
+  // tagged (for all element sizes > 1).
+  // TODO(regis): Revisit and see if the index can be immediate.
+  locs->set_in(1, Location::WritableRegister());
+  switch (class_id()) {
+    case kArrayCid:
+      locs->set_in(2, ShouldEmitStoreBarrier()
+                        ? Location::WritableRegister()
+                        : Location::RegisterOrConstant(value()));
+      break;
+    case kExternalTypedDataUint8ArrayCid:
+    case kExternalTypedDataUint8ClampedArrayCid:
+    case kTypedDataInt8ArrayCid:
+    case kTypedDataUint8ArrayCid:
+    case kTypedDataUint8ClampedArrayCid:
+    case kOneByteStringCid:
+    case kTypedDataInt16ArrayCid:
+    case kTypedDataUint16ArrayCid:
+      locs->set_in(2, Location::WritableRegister());
+      break;
+    case kTypedDataInt32ArrayCid:
+    case kTypedDataUint32ArrayCid:
+      // Mints are stored in Q registers. For smis, use a writable register
+      // because the value must be untagged before storing.
+      if (value()->IsSmiValue()) {
+        locs->set_in(2, Location::WritableRegister());
+      } else {
+        // TODO(zra): Implement when we add fpu loads and stores.
+        UNIMPLEMENTED();
+      }
+      break;
+    case kTypedDataFloat32ArrayCid:
+    case kTypedDataFloat64ArrayCid:  // TODO(srdjan): Support Float64 constants.
+    case kTypedDataInt32x4ArrayCid:
+    case kTypedDataFloat32x4ArrayCid:
+    case kTypedDataFloat64x2ArrayCid:
+      // TODO(zra): Implement when we add fpu loads and stores.
+      UNIMPLEMENTED();
+      break;
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+  return locs;
 }
 
 
 void StoreIndexedInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  Register array = locs()->in(0).reg();
+  Location index = locs()->in(1);
+
+  Address element_address(kNoRegister, 0);
+  ASSERT(index.IsRegister());  // TODO(regis): Revisit.
+  // Note that the index is expected to be smi-tagged (i.e., multiplied by 2)
+  // for all arrays with an index scale factor > 1. E.g., for Uint8Array and
+  // OneByteString the index is untagged before accessing.
+  ASSERT(kSmiTagShift == 1);
+  switch (index_scale()) {
+    case 1: {
+      __ SmiUntag(index.reg());
+      break;
+    }
+    case 2: {
+      break;
+    }
+    case 4: {
+      __ Lsl(index.reg(), index.reg(), 1);
+      break;
+    }
+    case 8: {
+      __ Lsl(index.reg(), index.reg(), 2);
+      break;
+    }
+    case 16: {
+      __ Lsl(index.reg(), index.reg(), 3);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+  if (!IsExternal()) {
+    ASSERT(this->array()->definition()->representation() == kTagged);
+    __ AddImmediate(index.reg(), index.reg(),
+        FlowGraphCompiler::DataOffsetFor(class_id()) - kHeapObjectTag, PP);
+  }
+  element_address = Address(array, index.reg(), UXTX, Address::Unscaled);
+
+  switch (class_id()) {
+    case kArrayCid:
+      if (ShouldEmitStoreBarrier()) {
+        Register value = locs()->in(2).reg();
+        __ StoreIntoObject(array, element_address, value);
+      } else if (locs()->in(2).IsConstant()) {
+        const Object& constant = locs()->in(2).constant();
+        __ StoreIntoObjectNoBarrier(array, element_address, constant);
+      } else {
+        Register value = locs()->in(2).reg();
+        __ StoreIntoObjectNoBarrier(array, element_address, value);
+      }
+      break;
+    case kTypedDataInt8ArrayCid:
+    case kTypedDataUint8ArrayCid:
+    case kExternalTypedDataUint8ArrayCid:
+    case kOneByteStringCid: {
+      if (locs()->in(2).IsConstant()) {
+        const Smi& constant = Smi::Cast(locs()->in(2).constant());
+        __ LoadImmediate(TMP, static_cast<int8_t>(constant.Value()), PP);
+        __ str(TMP, element_address, kUnsignedByte);
+      } else {
+        Register value = locs()->in(2).reg();
+        __ SmiUntag(value);
+        __ str(value, element_address, kUnsignedByte);
+      }
+      break;
+    }
+    case kTypedDataUint8ClampedArrayCid:
+    case kExternalTypedDataUint8ClampedArrayCid: {
+      if (locs()->in(2).IsConstant()) {
+        const Smi& constant = Smi::Cast(locs()->in(2).constant());
+        intptr_t value = constant.Value();
+        // Clamp to 0x00 or 0xFF, respectively.
+        if (value > 0xFF) {
+          value = 0xFF;
+        } else if (value < 0) {
+          value = 0;
+        }
+        __ LoadImmediate(TMP, static_cast<int8_t>(value), PP);
+        __ str(TMP, element_address, kUnsignedByte);
+      } else {
+        Register value = locs()->in(2).reg();
+        Label store_value;
+        __ SmiUntag(value);
+        __ CompareImmediate(value, 0xFF, PP);
+        // Clamp to 0x00 or 0xFF, respectively.
+        __ b(&store_value, LS);
+        __ LoadImmediate(TMP, 0x00, PP);
+        __ LoadImmediate(TMP2, 0xFF, PP);
+        __ csel(value, TMP, value, LE);
+        __ csel(value, TMP2, value, GT);
+        __ Bind(&store_value);
+        __ str(value, element_address, kUnsignedByte);
+      }
+      break;
+    }
+    case kTypedDataInt16ArrayCid:
+    case kTypedDataUint16ArrayCid: {
+      Register value = locs()->in(2).reg();
+      __ SmiUntag(value);
+      __ str(value, element_address, kUnsignedHalfword);
+      break;
+    }
+    case kTypedDataInt32ArrayCid:
+    case kTypedDataUint32ArrayCid: {
+      if (value()->IsSmiValue()) {
+        ASSERT(RequiredInputRepresentation(2) == kTagged);
+        Register value = locs()->in(2).reg();
+        __ SmiUntag(value);
+        __ str(value, element_address);
+      } else {
+        // TODO(zra): Implement when we add fpu loads and stores.
+        UNIMPLEMENTED();
+      }
+      break;
+    }
+    case kTypedDataFloat32ArrayCid:
+    case kTypedDataFloat64ArrayCid:
+    case kTypedDataFloat64x2ArrayCid:
+    case kTypedDataInt32x4ArrayCid:
+    case kTypedDataFloat32x4ArrayCid: {
+      // TODO(zra): Implement when we add fpu loads and stores.
+      UNIMPLEMENTED();
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
 }
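Two scalar details above are easy to miss: the index arrives smi-tagged (already multiplied by 2 when kSmiTagShift == 1), so scaling needs only the shifts shown, and Uint8Clamped stores saturate rather than truncate. A standalone sketch of both computations, independent of the VM's headers:

    #include <cstdint>

    constexpr intptr_t kSmiTagShift = 1;  // A smi-tagged index is index * 2.

    // Byte offset contributed by a smi-tagged index for a given element size.
    constexpr intptr_t ScaledOffset(intptr_t tagged_index, intptr_t scale) {
      return (scale == 1)  ? tagged_index >> kSmiTagShift  // Untag.
           : (scale == 2)  ? tagged_index                  // Tag is the x2.
           : (scale == 4)  ? tagged_index << 1
           : (scale == 8)  ? tagged_index << 2
           : (scale == 16) ? tagged_index << 3
                           : -1;  // Unreachable in the VM.
    }

    // Uint8Clamped semantics: negative values store 0, values above 0xFF
    // store 0xFF, exactly what the csel pair implements.
    constexpr uint8_t ClampToByte(intptr_t value) {
      return value < 0 ? 0 : value > 0xFF ? 0xFF : static_cast<uint8_t>(value);
    }

    static_assert(ScaledOffset(6, 4) == 12, "index 3, 4-byte elements");
    static_assert(ClampToByte(-5) == 0 && ClampToByte(300) == 0xFF, "saturate");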
+static void LoadValueCid(FlowGraphCompiler* compiler,
+                         Register value_cid_reg,
+                         Register value_reg,
+                         Label* value_is_smi = NULL) {
+  Label done;
+  if (value_is_smi == NULL) {
+    __ LoadImmediate(value_cid_reg, kSmiCid, PP);
+  }
+  __ tsti(value_reg, kSmiTagMask);
+  if (value_is_smi == NULL) {
+    __ b(&done, EQ);
+  } else {
+    __ b(value_is_smi, EQ);
+  }
+  __ LoadClassId(value_cid_reg, value_reg);
+  __ Bind(&done);
+}
+
+
 LocationSummary* GuardFieldInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  const intptr_t kNumInputs = 1;
+  LocationSummary* summary =
+      new LocationSummary(kNumInputs, 0, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  const bool field_has_length = field().needs_length_check();
+  summary->AddTemp(Location::RequiresRegister());
+  summary->AddTemp(Location::RequiresRegister());
+  const bool need_field_temp_reg =
+      field_has_length || (field().guarded_cid() == kIllegalCid);
+  if (need_field_temp_reg) {
+    summary->AddTemp(Location::RequiresRegister());
+  }
+  return summary;
 }
 
 void GuardFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  const intptr_t field_cid = field().guarded_cid();
+  const intptr_t nullability = field().is_nullable() ? kNullCid : kIllegalCid;
+  const intptr_t field_length = field().guarded_list_length();
+  const bool field_has_length = field().needs_length_check();
+  const bool needs_field_temp_reg =
+      field_has_length || (field().guarded_cid() == kIllegalCid);
+  if (field_has_length) {
+    // Currently, we should only see final fields that remember length.
+    ASSERT(field().is_final());
+  }
+
+  if (field_cid == kDynamicCid) {
+    ASSERT(!compiler->is_optimizing());
+    return;  // Nothing to emit.
+  }
+
+  const intptr_t value_cid = value()->Type()->ToCid();
+
+  Register value_reg = locs()->in(0).reg();
+
+  Register value_cid_reg = locs()->temp(0).reg();
+
+  Register temp_reg = locs()->temp(1).reg();
+
+  Register field_reg = needs_field_temp_reg ?
+      locs()->temp(locs()->temp_count() - 1).reg() : kNoRegister;
+
+  Label ok, fail_label;
+
+  Label* deopt = compiler->is_optimizing() ?
+      compiler->AddDeoptStub(deopt_id(), ICData::kDeoptGuardField) : NULL;
+
+  Label* fail = (deopt != NULL) ? deopt : &fail_label;
+
+  if (!compiler->is_optimizing() || (field_cid == kIllegalCid)) {
+    if (!compiler->is_optimizing() && (field_reg == kNoRegister)) {
+      // Currently we can't have different location summaries for optimized
+      // and non-optimized code. So instead we manually pick up a register
+      // that is known to be free because we know how the non-optimizing
+      // compiler allocates registers.
+      field_reg = R2;
+      ASSERT((field_reg != value_reg) && (field_reg != value_cid_reg));
+    }
+
+    __ LoadObject(field_reg, Field::ZoneHandle(field().raw()), PP);
+
+    FieldAddress field_cid_operand(field_reg, Field::guarded_cid_offset());
+    FieldAddress field_nullability_operand(
+        field_reg, Field::is_nullable_offset());
+    FieldAddress field_length_operand(
+        field_reg, Field::guarded_list_length_offset());
+
+    ASSERT(value_cid_reg != kNoRegister);
+    ASSERT((value_cid_reg != value_reg) && (field_reg != value_cid_reg));
+
+    if (value_cid == kDynamicCid) {
+      LoadValueCid(compiler, value_cid_reg, value_reg);
+      Label skip_length_check;
+      __ ldr(TMP, field_cid_operand);
+      __ CompareRegisters(value_cid_reg, TMP);
+      __ b(&skip_length_check, NE);
+      if (field_has_length) {
+        ASSERT(temp_reg != kNoRegister);
+        // The field guard may have remembered a list length; check it.
+        if ((field_cid == kArrayCid) || (field_cid == kImmutableArrayCid)) {
+          __ LoadFieldFromOffset(temp_reg, value_reg, Array::length_offset());
+          __ CompareImmediate(temp_reg, Smi::RawValue(field_length), PP);
+        } else if (RawObject::IsTypedDataClassId(field_cid)) {
+          __ LoadFieldFromOffset(
+              temp_reg, value_reg, TypedData::length_offset());
+          __ CompareImmediate(temp_reg, Smi::RawValue(field_length), PP);
+        } else {
+          ASSERT(field_cid == kIllegalCid);
+          ASSERT(field_length == Field::kUnknownFixedLength);
+          // At compile time we do not know the type of the field nor its
+          // length. At execution time we may have set the class id and
+          // list length, so we compare the guarded length with the list
+          // length here; without this check the list length could change
+          // without triggering a deoptimization.
+          Label check_array, length_compared, no_fixed_length;
+          // If length is negative, the length guard is either disabled or
+          // has not been initialized; either way it is safe to skip the
+          // length check.
+          __ ldr(TMP, field_length_operand);
+          __ CompareImmediate(TMP, 0, PP);
+          __ b(&skip_length_check, LT);
+          __ CompareImmediate(value_cid_reg, kNullCid, PP);
+          __ b(&no_fixed_length, EQ);
+          // Check for typed data array.
+          __ CompareImmediate(value_cid_reg, kTypedDataInt32x4ArrayCid, PP);
+          __ b(&no_fixed_length, GT);
+          __ CompareImmediate(value_cid_reg, kTypedDataInt8ArrayCid, PP);
+          // Could still be a regular array.
+          __ b(&check_array, LT);
+          __ LoadFieldFromOffset(
+              temp_reg, value_reg, TypedData::length_offset());
+          __ ldr(TMP, field_length_operand);
+          __ CompareRegisters(temp_reg, TMP);
+          __ b(&length_compared);
+          // Check for regular array.
+          __ Bind(&check_array);
+          __ CompareImmediate(value_cid_reg, kImmutableArrayCid, PP);
+          __ b(&no_fixed_length, GT);
+          __ CompareImmediate(value_cid_reg, kArrayCid, PP);
+          __ b(&no_fixed_length, LT);
+          __ LoadFieldFromOffset(temp_reg, value_reg, Array::length_offset());
+          __ ldr(TMP, field_length_operand);
+          __ CompareRegisters(temp_reg, TMP);
+          __ b(&length_compared);
+          __ Bind(&no_fixed_length);
+          __ b(fail);
+          __ Bind(&length_compared);
+          // Fall through to the branch below; it fails on a length mismatch.
+        }
+        __ b(fail, NE);
+      }
+      __ Bind(&skip_length_check);
+      __ ldr(TMP, field_nullability_operand);
+      __ CompareRegisters(value_cid_reg, TMP);
+    } else if (value_cid == kNullCid) {
+      __ ldr(value_cid_reg, field_nullability_operand);
+      __ CompareImmediate(value_cid_reg, value_cid, PP);
+    } else {
+      Label skip_length_check;
+      __ ldr(value_cid_reg, field_cid_operand);
+      __ CompareImmediate(value_cid_reg, value_cid, PP);
+      __ b(&skip_length_check, NE);
+      if (field_has_length) {
+        ASSERT(value_cid_reg != kNoRegister);
+        ASSERT(temp_reg != kNoRegister);
+        if ((value_cid == kArrayCid) || (value_cid == kImmutableArrayCid)) {
+          __ LoadFieldFromOffset(temp_reg, value_reg, Array::length_offset());
+          __ CompareImmediate(temp_reg, Smi::RawValue(field_length), PP);
+        } else if (RawObject::IsTypedDataClassId(value_cid)) {
+          __ LoadFieldFromOffset(
+              temp_reg, value_reg, TypedData::length_offset());
+          __ CompareImmediate(temp_reg, Smi::RawValue(field_length), PP);
+        } else if (field_cid != kIllegalCid) {
+          ASSERT(field_cid != value_cid);
+          ASSERT(field_length >= 0);
+          // Field has a known class id and length. At compile time it is
+          // known that the value's class id is not a fixed length list.
+          __ b(fail);
+        } else {
+          ASSERT(field_cid == kIllegalCid);
+          ASSERT(field_length == Field::kUnknownFixedLength);
+          // The following branch cannot be taken here; fall through.
+        }
+        __ b(fail, NE);
+      }
+      // Not identical, possibly null.
+      __ Bind(&skip_length_check);
+    }
+    __ b(&ok, EQ);
+
+    __ ldr(TMP, field_cid_operand);
+    __ CompareImmediate(TMP, kIllegalCid, PP);
+    __ b(fail, NE);
+
+    if (value_cid == kDynamicCid) {
+      __ str(value_cid_reg, field_cid_operand);
+      __ str(value_cid_reg, field_nullability_operand);
+      if (field_has_length) {
+        Label check_array, length_set, no_fixed_length;
+        __ CompareImmediate(value_cid_reg, kNullCid, PP);
+        __ b(&no_fixed_length, EQ);
+        // Check for typed data array.
+        __ CompareImmediate(value_cid_reg, kTypedDataInt32x4ArrayCid, PP);
+        __ b(&no_fixed_length, GT);
+        __ CompareImmediate(value_cid_reg, kTypedDataInt8ArrayCid, PP);
+        // Could still be a regular array.
+        __ b(&check_array, LT);
+        // Destroy value_cid_reg (safe because we are finished with it).
+        __ LoadFieldFromOffset(
+            value_cid_reg, value_reg, TypedData::length_offset());
+        __ str(value_cid_reg, field_length_operand);
+        __ b(&length_set);  // Updated field length for a typed data array.
+        // Check for regular array.
+        __ Bind(&check_array);
+        __ CompareImmediate(value_cid_reg, kImmutableArrayCid, PP);
+        __ b(&no_fixed_length, GT);
+        __ CompareImmediate(value_cid_reg, kArrayCid, PP);
+        __ b(&no_fixed_length, LT);
+        // Destroy value_cid_reg (safe because we are finished with it).
+        __ LoadFieldFromOffset(
+            value_cid_reg, value_reg, Array::length_offset());
+        __ str(value_cid_reg, field_length_operand);
+        // Updated field length for a regular array.
+        __ b(&length_set);
+        __ Bind(&no_fixed_length);
+        __ LoadImmediate(TMP, Smi::RawValue(Field::kNoFixedLength), PP);
+        __ str(TMP, field_length_operand);
+        __ Bind(&length_set);
+      }
+    } else {
+      __ LoadImmediate(TMP, value_cid, PP);
+      __ str(TMP, field_cid_operand);
+      __ str(TMP, field_nullability_operand);
+      if (field_has_length) {
+        if ((value_cid == kArrayCid) || (value_cid == kImmutableArrayCid)) {
+          // Destroy value_cid_reg (safe because we are finished with it).
+          __ LoadFieldFromOffset(
+              value_cid_reg, value_reg, Array::length_offset());
+          __ str(value_cid_reg, field_length_operand);
+        } else if (RawObject::IsTypedDataClassId(value_cid)) {
+          // Destroy value_cid_reg (safe because we are finished with it).
+          __ LoadFieldFromOffset(
+              value_cid_reg, value_reg, TypedData::length_offset());
+          __ str(value_cid_reg, field_length_operand);
+        } else {
+          __ LoadImmediate(TMP, Smi::RawValue(Field::kNoFixedLength), PP);
+          __ str(TMP, field_length_operand);
+        }
+      }
+    }
+
+    if (deopt == NULL) {
+      ASSERT(!compiler->is_optimizing());
+      __ b(&ok);
+      __ Bind(fail);
+
+      __ LoadFieldFromOffset(TMP, field_reg, Field::guarded_cid_offset());
+      __ CompareImmediate(TMP, kDynamicCid, PP);
+      __ b(&ok, EQ);
+
+      __ Push(field_reg);
+      __ Push(value_reg);
+      __ CallRuntime(kUpdateFieldCidRuntimeEntry, 2);
+      __ Drop(2);  // Drop the field and the value.
+    }
+  } else {
+    ASSERT(compiler->is_optimizing());
+    ASSERT(deopt != NULL);
+    // Field guard class has been initialized and is known.
+    if (field_reg != kNoRegister) {
+      __ LoadObject(field_reg, Field::ZoneHandle(field().raw()), PP);
+    }
+    if (value_cid == kDynamicCid) {
+      // Field's guarded class id is fixed, but the value's class id is not
+      // known.
+      __ tsti(value_reg, kSmiTagMask);
+
+      if (field_cid != kSmiCid) {
+        __ b(fail, EQ);
+        __ LoadClassId(value_cid_reg, value_reg);
+        __ CompareImmediate(value_cid_reg, field_cid, PP);
+      }
+
+      if (field_has_length) {
+        __ b(fail, NE);
+        // Classes are the same; perform the guarded list length check.
+        ASSERT(field_reg != kNoRegister);
+        ASSERT(value_cid_reg != kNoRegister);
+        FieldAddress field_length_operand(
+            field_reg, Field::guarded_list_length_offset());
+        if ((field_cid == kArrayCid) || (field_cid == kImmutableArrayCid)) {
+          // Destroy value_cid_reg (safe because we are finished with it).
+          __ LoadFieldFromOffset(
+              value_cid_reg, value_reg, Array::length_offset());
+        } else if (RawObject::IsTypedDataClassId(field_cid)) {
+          // Destroy value_cid_reg (safe because we are finished with it).
+          __ LoadFieldFromOffset(
+              value_cid_reg, value_reg, TypedData::length_offset());
+        }
+        __ ldr(TMP, field_length_operand);
+        __ CompareRegisters(value_cid_reg, TMP);
+      }
+
+      if (field().is_nullable() && (field_cid != kNullCid)) {
+        __ b(&ok, EQ);
+        __ CompareObject(value_reg, Object::null_object(), PP);
+      }
+      __ b(fail, NE);
+    } else {
+      // Both the value's and the field's class ids are known.
+      if ((value_cid != field_cid) && (value_cid != nullability)) {
+        __ b(fail);
+      } else if (field_has_length && (value_cid == field_cid)) {
+        ASSERT(value_cid_reg != kNoRegister);
+        if ((field_cid == kArrayCid) || (field_cid == kImmutableArrayCid)) {
+          // Destroy value_cid_reg (safe because we are finished with it).
+          __ LoadFieldFromOffset(
+              value_cid_reg, value_reg, Array::length_offset());
+        } else if (RawObject::IsTypedDataClassId(field_cid)) {
+          // Destroy value_cid_reg (safe because we are finished with it).
+          __ LoadFieldFromOffset(
+              value_cid_reg, value_reg, TypedData::length_offset());
+        }
+        __ CompareImmediate(value_cid_reg, field_length, PP);
+        __ b(fail, NE);
+      } else {
+        UNREACHABLE();
+      }
+    }
+  }
+  __ Bind(&ok);
 }
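A compact model of the guard state this code maintains may help: each field remembers a class id, nullability, and an optional fixed list length, widening to kDynamicCid once stores disagree. The following is a simplified sketch with made-up sentinel constants, not the VM's actual transition function (which lives on the runtime side, reached here through kUpdateFieldCidRuntimeEntry):

    #include <cstdint>

    // Placeholder sentinel values for the sketch.
    constexpr intptr_t kIllegalCid_ = -1;    // Guard not yet initialized.
    constexpr intptr_t kDynamicCid_ = -2;    // Guard permanently disabled.
    constexpr intptr_t kNoFixedLength_ = -3;

    struct FieldGuard {
      intptr_t guarded_cid = kIllegalCid_;
      bool is_nullable = false;
      intptr_t guarded_list_length = kNoFixedLength_;
    };

    // Returns true if the store satisfies the guard; otherwise widens it.
    // (Optimized code deoptimizes instead of widening, as above.)
    bool UpdateGuard(FieldGuard* g, intptr_t cid, bool is_null, intptr_t len) {
      if (g->guarded_cid == kDynamicCid_) return true;
      if (g->guarded_cid == kIllegalCid_) {  // First store: remember it all.
        g->guarded_cid = cid;
        g->is_nullable = is_null;
        g->guarded_list_length = len;
        return true;
      }
      const bool cid_ok =
          (cid == g->guarded_cid) || (is_null && g->is_nullable);
      const bool len_ok = (g->guarded_list_length == kNoFixedLength_) ||
                          (len == g->guarded_list_length);
      if (cid_ok && len_ok) return true;
      g->guarded_cid = kDynamicCid_;  // Conflict: stop guarding this field.
      return false;
    }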
+class StoreInstanceFieldSlowPath : public SlowPathCode {
+ public:
+  StoreInstanceFieldSlowPath(StoreInstanceFieldInstr* instruction,
+                             const Class& cls)
+      : instruction_(instruction), cls_(cls) { }
+
+  virtual void EmitNativeCode(FlowGraphCompiler* compiler) {
+    __ Comment("StoreInstanceFieldSlowPath");
+    __ Bind(entry_label());
+
+    const Code& stub =
+        Code::Handle(StubCode::GetAllocationStubForClass(cls_));
+    const ExternalLabel label(cls_.ToCString(), stub.EntryPoint());
+
+    LocationSummary* locs = instruction_->locs();
+    locs->live_registers()->Remove(locs->out(0));
+
+    compiler->SaveLiveRegisters(locs);
+    compiler->GenerateCall(Scanner::kNoSourcePos,  // No token position.
+                           &label,
+                           PcDescriptors::kOther,
+                           locs);
+    __ mov(locs->temp(0).reg(), R0);
+    compiler->RestoreLiveRegisters(locs);
+
+    __ b(exit_label());
+  }
+
+ private:
+  StoreInstanceFieldInstr* instruction_;
+  const Class& cls_;
+};
+
+
 LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary =
+      new LocationSummary(kNumInputs, kNumTemps,
+          !field().IsNull() &&
+          ((field().guarded_cid() == kIllegalCid) || is_initialization_)
+          ? LocationSummary::kCallOnSlowPath
+          : LocationSummary::kNoCall);
+
+  summary->set_in(0, Location::RequiresRegister());
+  if (IsUnboxedStore() && opt) {
+    // TODO(zra): Implement when we add fpu loads and stores.
+    UNIMPLEMENTED();
+  } else if (IsPotentialUnboxedStore()) {
+    summary->set_in(1, ShouldEmitStoreBarrier()
+                       ? Location::WritableRegister()
+                       : Location::RequiresRegister());
+    summary->AddTemp(Location::RequiresRegister());
+    summary->AddTemp(Location::RequiresRegister());
+  } else {
+    summary->set_in(1, ShouldEmitStoreBarrier()
+                       ? Location::WritableRegister()
+                       : Location::RegisterOrConstant(value()));
+  }
+  return summary;
 }
 
 
 void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  Label skip_store;
+
+  Register instance_reg = locs()->in(0).reg();
+
+  if (IsUnboxedStore() && compiler->is_optimizing()) {
+    UNIMPLEMENTED();
+  }
+
+  if (IsPotentialUnboxedStore()) {
+    const Register temp = locs()->temp(0).reg();
+    const Register temp2 = locs()->temp(1).reg();
+
+    Label store_pointer;
+    Label store_double;
+    Label store_float32x4;
+    Label store_float64x2;
+
+    __ LoadObject(temp, Field::ZoneHandle(field().raw()), PP);
+
+    __ LoadFieldFromOffset(temp2, temp, Field::is_nullable_offset());
+    __ CompareImmediate(temp2, kNullCid, PP);
+    __ b(&store_pointer, EQ);
+
+    __ LoadFromOffset(
+        temp2, temp, Field::kind_bits_offset() - kHeapObjectTag,
+        kUnsignedByte);
+    __ tsti(temp2, 1 << Field::kUnboxingCandidateBit);
+    __ b(&store_pointer, EQ);
+
+    __ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset());
+    __ CompareImmediate(temp2, kDoubleCid, PP);
+    __ b(&store_double, EQ);
+
+    __ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset());
+    __ CompareImmediate(temp2, kFloat32x4Cid, PP);
+    __ b(&store_float32x4, EQ);
+
+    __ LoadFieldFromOffset(temp2, temp, Field::guarded_cid_offset());
+    __ CompareImmediate(temp2, kFloat64x2Cid, PP);
+    __ b(&store_float64x2, EQ);
+
+    // Fall through.
+    __ b(&store_pointer);
+
+    if (!compiler->is_optimizing()) {
+      locs()->live_registers()->Add(locs()->in(0));
+      locs()->live_registers()->Add(locs()->in(1));
+    }
+
+    // TODO(zra): Implement these when we add fpu loads and stores.
+    {
+      __ Bind(&store_double);
+      __ hlt(0);  // Unimplemented.
+    }
+
+    {
+      __ Bind(&store_float32x4);
+      __ hlt(0);  // Unimplemented.
+    }
+
+    {
+      __ Bind(&store_float64x2);
+      __ hlt(0);  // Unimplemented.
+    }
+
+    __ Bind(&store_pointer);
+  }
+
+  if (ShouldEmitStoreBarrier()) {
+    Register value_reg = locs()->in(1).reg();
+    __ StoreIntoObject(instance_reg,
+                       FieldAddress(instance_reg, offset_in_bytes_),
+                       value_reg,
+                       CanValueBeSmi());
+  } else {
+    if (locs()->in(1).IsConstant()) {
+      __ StoreIntoObjectNoBarrier(
+          instance_reg,
+          FieldAddress(instance_reg, offset_in_bytes_),
+          locs()->in(1).constant());
+    } else {
+      Register value_reg = locs()->in(1).reg();
+      __ StoreIntoObjectNoBarrier(instance_reg,
+          FieldAddress(instance_reg, offset_in_bytes_), value_reg);
+    }
+  }
+  __ Bind(&skip_store);
 }
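The ShouldEmitStoreBarrier() split above is the usual generational-GC trade: a pointer store only needs recording when it might create an old-to-new reference the scavenger would otherwise miss. An illustrative model with invented names, not the VM's API:

    struct Obj { bool in_old_space; };

    struct StoreBuffer {
      void Record(Obj* target) { /* remember target for the next scavenge */ }
    };

    void StorePointer(StoreBuffer* sb, Obj* target, Obj** slot, Obj* value) {
      *slot = value;
      // StoreIntoObjectNoBarrier is used when the compiler proves statically
      // that this condition is false (e.g. the value is a smi or a constant).
      if (target->in_old_space && value != nullptr && !value->in_old_space) {
        sb->Record(target);
      }
    }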
 LocationSummary* LoadStaticFieldInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* summary =
+      new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kNoCall);
+  summary->set_in(0, Location::RequiresRegister());
+  summary->set_out(0, Location::RequiresRegister());
+  return summary;
 }
 
 
+// When the parser is building an implicit static getter for optimization,
+// it can generate a function body where deoptimization ids do not line up
+// with the unoptimized code.
+//
+// This is safe only so long as LoadStaticFieldInstr cannot deoptimize.
 void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  Register field = locs()->in(0).reg();
+  Register result = locs()->out(0).reg();
+  __ LoadFieldFromOffset(result, field, Field::value_offset());
 }
 
 
 LocationSummary* StoreStaticFieldInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  LocationSummary* locs = new LocationSummary(1, 1, LocationSummary::kNoCall);
+  locs->set_in(0, value()->NeedsStoreBuffer() ? Location::WritableRegister()
+                                              : Location::RequiresRegister());
+  locs->set_temp(0, Location::RequiresRegister());
+  return locs;
 }
 
 
 void StoreStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  Register value = locs()->in(0).reg();
+  Register temp = locs()->temp(0).reg();
+
+  __ LoadObject(temp, field(), PP);
+  if (this->value()->NeedsStoreBuffer()) {
+    __ StoreIntoObject(temp,
+        FieldAddress(temp, Field::value_offset()), value, CanValueBeSmi());
+  } else {
+    __ StoreIntoObjectNoBarrier(
+        temp, FieldAddress(temp, Field::value_offset()), value);
+  }
 }
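Both static-field instructions above treat the Field object itself as the box holding the value, which is why no instance is involved: the Field is materialized into a register and its value slot is read or written in place. A minimal model, with the layout invented for illustration:

    struct FieldObj {
      void* value;  // Models the slot at Field::value_offset().
    };

    void* LoadStaticField(const FieldObj& f) { return f.value; }

    void StoreStaticField(FieldObj* f, void* v) {
      // The real store adds a write barrier when the value may need one.
      f->value = v;
    }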
@@ -494,24 +1262,118 @@
 LocationSummary* CreateArrayInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  const intptr_t kNumInputs = 2;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs =
+      new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
+  locs->set_in(kElementTypePos, Location::RegisterLocation(R1));
+  locs->set_in(kLengthPos, Location::RegisterLocation(R2));
+  locs->set_out(0, Location::RegisterLocation(R0));
+  return locs;
 }
 
 
 void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  // Allocate the array. R2 = length, R1 = element type.
+  ASSERT(locs()->in(kElementTypePos).reg() == R1);
+  ASSERT(locs()->in(kLengthPos).reg() == R2);
+  compiler->GenerateCall(token_pos(),
+                         &StubCode::AllocateArrayLabel(),
+                         PcDescriptors::kOther,
+                         locs());
+  ASSERT(locs()->out(0).reg() == R0);
 }
 
 
 LocationSummary* LoadFieldInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  const intptr_t kNumInputs = 1;
+  const intptr_t kNumTemps = 0;
+  LocationSummary* locs =
+      new LocationSummary(
+          kNumInputs, kNumTemps,
+          (opt && !IsPotentialUnboxedLoad())
+              ? LocationSummary::kNoCall
+              : LocationSummary::kCallOnSlowPath);
+
+  locs->set_in(0, Location::RequiresRegister());
+
+  if (IsUnboxedLoad() && opt) {
+    // TODO(zra): Implement when we add fpu loads and stores.
+    UNIMPLEMENTED();
+  } else if (IsPotentialUnboxedLoad()) {
+    locs->AddTemp(Location::RequiresRegister());
+  }
+  locs->set_out(0, Location::RequiresRegister());
+  return locs;
 }
 
 
 void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  Register instance_reg = locs()->in(0).reg();
+  if (IsUnboxedLoad() && compiler->is_optimizing()) {
+    UNIMPLEMENTED();
+  }
+
+  Label done;
+  Register result_reg = locs()->out(0).reg();
+  if (IsPotentialUnboxedLoad()) {
+    const Register temp = locs()->temp(0).reg();
+
+    Label load_pointer;
+    Label load_double;
+    Label load_float32x4;
+    Label load_float64x2;
+
+    __ LoadObject(result_reg, Field::ZoneHandle(field()->raw()), PP);
+
+    FieldAddress field_cid_operand(result_reg, Field::guarded_cid_offset());
+    FieldAddress field_nullability_operand(result_reg,
+                                           Field::is_nullable_offset());
+
+    __ ldr(temp, field_nullability_operand);
+    __ CompareImmediate(temp, kNullCid, PP);
+    __ b(&load_pointer, EQ);
+
+    __ ldr(temp, field_cid_operand);
+    __ CompareImmediate(temp, kDoubleCid, PP);
+    __ b(&load_double, EQ);
+
+    __ ldr(temp, field_cid_operand);
+    __ CompareImmediate(temp, kFloat32x4Cid, PP);
+    __ b(&load_float32x4, EQ);
+
+    __ ldr(temp, field_cid_operand);
+    __ CompareImmediate(temp, kFloat64x2Cid, PP);
+    __ b(&load_float64x2, EQ);
+
+    // Fall through.
+    __ b(&load_pointer);
+
+    if (!compiler->is_optimizing()) {
+      locs()->live_registers()->Add(locs()->in(0));
+    }
+
+    // TODO(zra): Implement these when we add fpu loads and stores.
+    {
+      __ Bind(&load_double);
+      __ hlt(0);  // Unimplemented.
+    }
+
+    {
+      __ Bind(&load_float32x4);
+      __ hlt(0);  // Unimplemented.
+    }
+
+    {
+      __ Bind(&load_float64x2);
+      __ hlt(0);  // Unimplemented.
+    }
+
+    __ Bind(&load_pointer);
+  }
+  __ LoadFromOffset(
+      result_reg, instance_reg, offset_in_bytes() - kHeapObjectTag);
+  __ Bind(&done);
 }
@@ -540,13 +1402,27 @@
 LocationSummary* AllocateContextInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  const intptr_t kNumInputs = 0;
+  const intptr_t kNumTemps = 1;
+  LocationSummary* locs =
+      new LocationSummary(kNumInputs, kNumTemps, LocationSummary::kCall);
+  locs->set_temp(0, Location::RegisterLocation(R1));
+  locs->set_out(0, Location::RegisterLocation(R0));
+  return locs;
 }
 
 
 void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  ASSERT(locs()->temp(0).reg() == R1);
+  ASSERT(locs()->out(0).reg() == R0);
+
+  __ LoadImmediate(R1, num_context_variables(), PP);
+  const ExternalLabel label("alloc_context",
+                            StubCode::AllocateContextEntryPoint());
+  compiler->GenerateCall(token_pos(),
+                         &label,
+                         PcDescriptors::kOther,
+                         locs());
 }
@@ -562,13 +1438,39 @@
 LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
+  UNREACHABLE();
   return NULL;
 }
 
 
 void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  __ Bind(compiler->GetJumpLabel(this));
+  compiler->AddExceptionHandler(catch_try_index(),
+                                try_index(),
+                                compiler->assembler()->CodeSize(),
+                                catch_handler_types_,
+                                needs_stacktrace());
+
+  // Restore the pool pointer.
+  __ LoadPoolPointer(PP);
+
+  if (HasParallelMove()) {
+    compiler->parallel_move_resolver()->EmitNativeCode(parallel_move());
+  }
+
+  // Restore SP from FP as we are coming from a throw and the code for
+  // popping arguments has not been run.
+  const intptr_t fp_sp_dist =
+      (kFirstLocalSlotFromFp + 1 - compiler->StackSize()) * kWordSize;
+  ASSERT(fp_sp_dist <= 0);
+  __ AddImmediate(SP, FP, fp_sp_dist, PP);
+
+  // Initialize the two exception variables in the restored frame:
+  // the exception and the stack trace.
+  __ StoreToOffset(kExceptionObjectReg,
+                   FP, exception_var().index() * kWordSize);
+  __ StoreToOffset(kStackTraceObjectReg,
+                   FP, stacktrace_var().index() * kWordSize);
 }
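The SP restoration above is pure frame arithmetic: with kFirstLocalSlotFromFp negative (locals sit below FP), a frame of StackSize() slots puts SP at FP + (kFirstLocalSlotFromFp + 1 - StackSize()) * kWordSize. A worked example with an assumed slot constant:

    #include <cstdint>

    constexpr intptr_t kWordSize = 8;
    constexpr intptr_t kFirstLocalSlotFromFp = -2;  // Assumed for the sketch.

    constexpr intptr_t FpSpDist(intptr_t stack_size_in_slots) {
      return (kFirstLocalSlotFromFp + 1 - stack_size_in_slots) * kWordSize;
    }

    // A 3-slot frame leaves SP 32 bytes below FP, so the distance is <= 0,
    // matching the ASSERT in the emitted code.
    static_assert(FpSpDist(3) == -32, "SP restored below FP");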
@@ -1390,13 +2292,18 @@
 LocationSummary* ReThrowInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  return new LocationSummary(0, 0, LocationSummary::kCall);
 }
 
 
 void ReThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  compiler->SetNeedsStacktrace(catch_try_index());
+  compiler->GenerateRuntimeCall(token_pos(),
+                                deopt_id(),
+                                kReThrowRuntimeEntry,
+                                2,
+                                locs());
+  __ hlt(0);
 }
@@ -1455,13 +2362,14 @@
 LocationSummary* CurrentContextInstr::MakeLocationSummary(bool opt) const {
-  UNIMPLEMENTED();
-  return NULL;
+  return LocationSummary::Make(0,
+                               Location::RequiresRegister(),
+                               LocationSummary::kNoCall);
 }
 
 
 void CurrentContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
-  UNIMPLEMENTED();
+  __ mov(locs()->out(0).reg(), CTX);
 }