Index: src/compiler/memory-optimizer.cc |
diff --git a/src/compiler/memory-optimizer.cc b/src/compiler/memory-optimizer.cc |
index 3381b46958af3dc0d4758b82c5eacaa5b0f6b7e9..05fc6e75792b2bbe6db29b76a447aae47162c230 100644 |
--- a/src/compiler/memory-optimizer.cc |
+++ b/src/compiler/memory-optimizer.cc |
@@ -109,6 +109,37 @@ void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) { |
Node* control = node->InputAt(2); |
PretenureFlag pretenure = OpParameter<PretenureFlag>(node->op()); |
+ // Propagate tenuring from outer allocations to inner allocations, i.e. |
+ // when we allocate an object in old space and store a newly allocated |
+ // child object into the pretenured object, then the newly allocated |
+  // child object should also get pretenured to old space. |
+ if (pretenure == TENURED) { |
+ for (Edge const edge : node->use_edges()) { |
+ Node* const user = edge.from(); |
+ if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) { |
+ Node* const child = user->InputAt(1); |
+ if (child->opcode() == IrOpcode::kAllocate && |
+ OpParameter<PretenureFlag>(child) == NOT_TENURED) { |
[Review comment] Jarin — 2016/09/20 09:40:28:
Can we have PretenureFlagOf, please?
[Review reply] Benedikt Meurer — 2016/09/20 09:42:55:
Follow-up CL.
 |
+ NodeProperties::ChangeOp(child, node->op()); |
+ break; |
+ } |
+ } |
+ } |
+ } else { |
+ DCHECK_EQ(NOT_TENURED, pretenure); |
+ for (Edge const edge : node->use_edges()) { |
+ Node* const user = edge.from(); |
+ if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) { |
+ Node* const parent = user->InputAt(0); |
+ if (parent->opcode() == IrOpcode::kAllocate && |
+ OpParameter<PretenureFlag>(parent) == TENURED) { |
+ pretenure = TENURED; |
+ break; |
+ } |
+ } |
+ } |
+ } |
+ |
// Determine the top/limit addresses. |
Node* top_address = jsgraph()->ExternalConstant( |
pretenure == NOT_TENURED |