Index: src/compiler/change-lowering.cc
diff --git a/src/compiler/change-lowering.cc b/src/compiler/change-lowering.cc
index 8552cdf7921b6c5bd62e4d7bbd7091008d5e5eb8..213f7759648635771f9b9fd67f3f97be4723ca7d 100644
--- a/src/compiler/change-lowering.cc
+++ b/src/compiler/change-lowering.cc
@@ -4,12 +4,14 @@
 
 #include "src/compiler/change-lowering.h"
 
+#include "src/address-map.h"
 #include "src/code-factory.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/operator-properties.h"
+#include "src/compiler/simplified-operator.h"
 
 namespace v8 {
 namespace internal {
@@ -37,6 +39,16 @@ Reduction ChangeLowering::Reduce(Node* node) {
       return ChangeTaggedToUI32(node->InputAt(0), control, kUnsigned);
     case IrOpcode::kChangeUint32ToTagged:
       return ChangeUint32ToTagged(node->InputAt(0), control);
+    case IrOpcode::kLoadField:
+      return LoadField(node);
+    case IrOpcode::kStoreField:
+      return StoreField(node);
+    case IrOpcode::kLoadElement:
+      return LoadElement(node);
+    case IrOpcode::kStoreElement:
+      return StoreElement(node);
+    case IrOpcode::kAllocate:
+      return Allocate(node);
     default:
       return NoChange();
   }
@@ -407,6 +419,170 @@ Reduction ChangeLowering::ChangeUint32ToTagged(Node* value, Node* control) {
 }
 
 
+namespace {
+
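+// Picks the cheapest write barrier that is still safe, given the kind of
+// base address (tagged or untagged) and the field and input types.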
+WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
+                                         MachineType representation,
+                                         Type* field_type, Type* input_type) {
+  if (field_type->Is(Type::TaggedSigned()) ||
+      input_type->Is(Type::TaggedSigned())) {
+    // Write barriers are only for writes of heap objects.
+    return kNoWriteBarrier;
+  }
+  if (input_type->Is(Type::BooleanOrNullOrUndefined())) {
+    // Write barriers are not necessary when storing true, false, null or
+    // undefined, because these special oddballs are always in the root set.
+    return kNoWriteBarrier;
+  }
+  if (base_is_tagged == kTaggedBase &&
+      RepresentationOf(representation) == kRepTagged) {
+    if (input_type->IsConstant() &&
+        input_type->AsConstant()->Value()->IsHeapObject()) {
+      Handle<HeapObject> input =
+          Handle<HeapObject>::cast(input_type->AsConstant()->Value());
+      if (input->IsMap()) {
+        // Write barriers for storing maps are cheaper.
+        return kMapWriteBarrier;
+      }
+      Isolate* const isolate = input->GetIsolate();
+      RootIndexMap root_index_map(isolate);
+      int root_index = root_index_map.Lookup(*input);
+      if (root_index != RootIndexMap::kInvalidRootIndex &&
+          isolate->heap()->RootIsImmortalImmovable(root_index)) {
+        // Write barriers are unnecessary for immortal immovable roots.
+        return kNoWriteBarrier;
+      }
+    }
+    if (field_type->Is(Type::TaggedPointer()) ||
+        input_type->Is(Type::TaggedPointer())) {
+      // Write barriers for heap objects don't need a Smi check.
+      return kPointerWriteBarrier;
+    }
+    return kFullWriteBarrier;
+  }
+  // Write barriers are only for writes into heap objects (i.e. tagged base).
+  return kNoWriteBarrier;
+}
+
+}  // namespace
+
+
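+// Lowers a LoadField node to a raw machine Load, materializing the field
+// offset (minus the heap object tag, if any) as an explicit index input.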
+Reduction ChangeLowering::LoadField(Node* node) {
+  const FieldAccess& access = FieldAccessOf(node->op());
+  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+  node->InsertInput(graph()->zone(), 1, offset);
+  NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+  return Changed(node);
+}
+
+
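+// Lowers a StoreField node to a machine Store whose write barrier kind is
+// derived from the declared field type and the type of the stored value.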
+Reduction ChangeLowering::StoreField(Node* node) {
+  const FieldAccess& access = FieldAccessOf(node->op());
+  Type* type = NodeProperties::GetType(node->InputAt(1));
+  WriteBarrierKind kind = ComputeWriteBarrierKind(
+      access.base_is_tagged, access.machine_type, access.type, type);
+  Node* offset = jsgraph()->IntPtrConstant(access.offset - access.tag());
+  node->InsertInput(graph()->zone(), 1, offset);
+  NodeProperties::ChangeOp(
+      node, machine()->Store(StoreRepresentation(access.machine_type, kind)));
+  return Changed(node);
+}
+
+
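+// Computes the byte offset for an element access: the key is scaled by the
+// element size and then offset by header_size minus the tag.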
+Node* ChangeLowering::ComputeIndex(const ElementAccess& access,
+                                   Node* const key) {
+  Node* index = key;
+  const int element_size_shift = ElementSizeLog2Of(access.machine_type);
+  if (element_size_shift) {
+    index = graph()->NewNode(machine()->Word32Shl(), index,
+                             jsgraph()->Int32Constant(element_size_shift));
+  }
+  const int fixed_offset = access.header_size - access.tag();
+  if (fixed_offset) {
+    index = graph()->NewNode(machine()->Int32Add(), index,
+                             jsgraph()->Int32Constant(fixed_offset));
+  }
+  if (machine()->Is64()) {
+    // TODO(turbofan): This is probably only correct for typed arrays, and only
+    // if the typed arrays are at most 2GiB in size, which happens to match
+    // exactly our current situation.
+    index = graph()->NewNode(machine()->ChangeUint32ToUint64(), index);
+  }
+  return index;
+}
+
+
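+// Lowers a LoadElement node to a machine Load, replacing the key input by
+// the computed byte offset.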
+Reduction ChangeLowering::LoadElement(Node* node) {
+  const ElementAccess& access = ElementAccessOf(node->op());
+  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+  NodeProperties::ChangeOp(node, machine()->Load(access.machine_type));
+  return Changed(node);
+}
+
+
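+// Lowers a StoreElement node to a machine Store, combining the computed
+// byte offset with an appropriate write barrier kind.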
+Reduction ChangeLowering::StoreElement(Node* node) {
+  const ElementAccess& access = ElementAccessOf(node->op());
+  Type* type = NodeProperties::GetType(node->InputAt(2));
+  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+  NodeProperties::ChangeOp(
+      node,
+      machine()->Store(StoreRepresentation(
+          access.machine_type,
+          ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type,
+                                  access.type, type))));
+  return Changed(node);
+}
+
+
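+// Lowers an Allocate node to a stub call or a runtime call, depending on
+// whether the allocation is pretenured.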
+Reduction ChangeLowering::Allocate(Node* node) {
+  PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op());
+  if (pretenure == NOT_TENURED) {
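+    // Non-tenured allocations are handled by the AllocateInNewSpace stub.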
+    Callable callable = CodeFactory::AllocateInNewSpace(isolate());
+    Node* target = jsgraph()->HeapConstant(callable.code());
+    CallDescriptor* descriptor = Linkage::GetStubCallDescriptor(
+        isolate(), jsgraph()->zone(), callable.descriptor(), 0,
+        CallDescriptor::kNoFlags, Operator::kNoThrow);
+    const Operator* op = common()->Call(descriptor);
+    node->InsertInput(graph()->zone(), 0, target);
+    node->InsertInput(graph()->zone(), 2, jsgraph()->NoContextConstant());
+    NodeProperties::ChangeOp(node, op);
+  } else {
+    DCHECK_EQ(TENURED, pretenure);
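+    // Tenured allocations go through the kAllocateInTargetSpace runtime
+    // function, with the target space encoded in a Smi flags argument.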
+    AllocationSpace space = OLD_SPACE;
+    Runtime::FunctionId f = Runtime::kAllocateInTargetSpace;
+    Operator::Properties props = node->op()->properties();
+    CallDescriptor* desc =
+        Linkage::GetRuntimeCallDescriptor(jsgraph()->zone(), f, 2, props);
+    ExternalReference ref(f, jsgraph()->isolate());
+    int32_t flags = AllocateTargetSpace::encode(space);
+    node->InsertInput(graph()->zone(), 0, jsgraph()->CEntryStubConstant(1));
+    node->InsertInput(graph()->zone(), 2, jsgraph()->SmiConstant(flags));
+    node->InsertInput(graph()->zone(), 3, jsgraph()->ExternalConstant(ref));
+    node->InsertInput(graph()->zone(), 4, jsgraph()->Int32Constant(2));
+    node->InsertInput(graph()->zone(), 5, jsgraph()->NoContextConstant());
+    NodeProperties::ChangeOp(node, common()->Call(desc));
+  }
+  return Changed(node);
+}
+
+
 Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }
 
 
|