| OLD | NEW |
| 1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/compiler/memory-optimizer.h" | 5 #include "src/compiler/memory-optimizer.h" |
| 6 | 6 |
| 7 #include "src/compiler/js-graph.h" | 7 #include "src/compiler/js-graph.h" |
| 8 #include "src/compiler/linkage.h" | 8 #include "src/compiler/linkage.h" |
| 9 #include "src/compiler/node-matchers.h" | 9 #include "src/compiler/node-matchers.h" |
| 10 #include "src/compiler/node-properties.h" | 10 #include "src/compiler/node-properties.h" |
| 11 #include "src/compiler/node.h" | 11 #include "src/compiler/node.h" |
| 12 #include "src/compiler/simplified-operator.h" | 12 #include "src/compiler/simplified-operator.h" |
| 13 | 13 |
| 14 namespace v8 { | 14 namespace v8 { |
| 15 namespace internal { | 15 namespace internal { |
| 16 namespace compiler { | 16 namespace compiler { |
| 17 | 17 |
| 18 MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone) | 18 MemoryOptimizer::MemoryOptimizer(JSGraph* jsgraph, Zone* zone) |
| 19 : jsgraph_(jsgraph), | 19 : jsgraph_(jsgraph), |
| 20 empty_state_(AllocationState::Empty(zone)), | 20 empty_state_(AllocationState::Empty(zone)), |
| 21 pending_(zone), | 21 pending_(zone), |
| 22 tokens_(zone), | 22 tokens_(zone), |
| 23 zone_(zone) {} | 23 zone_(zone), |
| | 24 graph_assembler_(jsgraph, nullptr, nullptr, zone) {} |
| 24 | 25 |
| 25 void MemoryOptimizer::Optimize() { | 26 void MemoryOptimizer::Optimize() { |
| 26 EnqueueUses(graph()->start(), empty_state()); | 27 EnqueueUses(graph()->start(), empty_state()); |
| 27 while (!tokens_.empty()) { | 28 while (!tokens_.empty()) { |
| 28 Token const token = tokens_.front(); | 29 Token const token = tokens_.front(); |
| 29 tokens_.pop(); | 30 tokens_.pop(); |
| 30 VisitNode(token.node, token.state); | 31 VisitNode(token.node, token.state); |
| 31 } | 32 } |
| 32 DCHECK(pending_.empty()); | 33 DCHECK(pending_.empty()); |
| 33 DCHECK(tokens_.empty()); | 34 DCHECK(tokens_.empty()); |
| (...skipping 62 matching lines...) |
| 96 case IrOpcode::kProtectedStore: | 97 case IrOpcode::kProtectedStore: |
| 97 case IrOpcode::kRetain: | 98 case IrOpcode::kRetain: |
| 98 case IrOpcode::kUnsafePointerAdd: | 99 case IrOpcode::kUnsafePointerAdd: |
| 99 return VisitOtherEffect(node, state); | 100 return VisitOtherEffect(node, state); |
| 100 default: | 101 default: |
| 101 break; | 102 break; |
| 102 } | 103 } |
| 103 DCHECK_EQ(0, node->op()->EffectOutputCount()); | 104 DCHECK_EQ(0, node->op()->EffectOutputCount()); |
| 104 } | 105 } |
| 105 | 106 |
| | 107 #define __ gasm()-> |
| | 108 |
| 106 void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) { | 109 void MemoryOptimizer::VisitAllocate(Node* node, AllocationState const* state) { |
| 107 DCHECK_EQ(IrOpcode::kAllocate, node->opcode()); | 110 DCHECK_EQ(IrOpcode::kAllocate, node->opcode()); |
| 108 Node* value; | 111 Node* value; |
| 109 Node* size = node->InputAt(0); | 112 Node* size = node->InputAt(0); |
| 110 Node* effect = node->InputAt(1); | 113 Node* effect = node->InputAt(1); |
| 111 Node* control = node->InputAt(2); | 114 Node* control = node->InputAt(2); |
| | 115 |
| | 116 gasm()->Reset(effect, control); |
| | 117 |
| 112 PretenureFlag pretenure = PretenureFlagOf(node->op()); | 118 PretenureFlag pretenure = PretenureFlagOf(node->op()); |
| 113 | 119 |
| 114 // Propagate tenuring from outer allocations to inner allocations, i.e. | 120 // Propagate tenuring from outer allocations to inner allocations, i.e. |
| 115 // when we allocate an object in old space and store a newly allocated | 121 // when we allocate an object in old space and store a newly allocated |
| 116 // child object into the pretenured object, then the newly allocated | 122 // child object into the pretenured object, then the newly allocated |
| 117 // child object also should get pretenured to old space. | 123 // child object also should get pretenured to old space. |
| 118 if (pretenure == TENURED) { | 124 if (pretenure == TENURED) { |
| 119 for (Edge const edge : node->use_edges()) { | 125 for (Edge const edge : node->use_edges()) { |
| 120 Node* const user = edge.from(); | 126 Node* const user = edge.from(); |
| 121 if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) { | 127 if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) { |
| (...skipping 14 matching lines...) |
| 136 if (parent->opcode() == IrOpcode::kAllocate && | 142 if (parent->opcode() == IrOpcode::kAllocate && |
| 137 PretenureFlagOf(parent->op()) == TENURED) { | 143 PretenureFlagOf(parent->op()) == TENURED) { |
| 138 pretenure = TENURED; | 144 pretenure = TENURED; |
| 139 break; | 145 break; |
| 140 } | 146 } |
| 141 } | 147 } |
| 142 } | 148 } |
| 143 } | 149 } |
| 144 | 150 |
| 145 // Determine the top/limit addresses. | 151 // Determine the top/limit addresses. |
| 146 Node* top_address = jsgraph()->ExternalConstant( | 152 Node* top_address = __ ExternalConstant( |
| 147 pretenure == NOT_TENURED | 153 pretenure == NOT_TENURED |
| 148 ? ExternalReference::new_space_allocation_top_address(isolate()) | 154 ? ExternalReference::new_space_allocation_top_address(isolate()) |
| 149 : ExternalReference::old_space_allocation_top_address(isolate())); | 155 : ExternalReference::old_space_allocation_top_address(isolate())); |
| 150 Node* limit_address = jsgraph()->ExternalConstant( | 156 Node* limit_address = __ ExternalConstant( |
| 151 pretenure == NOT_TENURED | 157 pretenure == NOT_TENURED |
| 152 ? ExternalReference::new_space_allocation_limit_address(isolate()) | 158 ? ExternalReference::new_space_allocation_limit_address(isolate()) |
| 153 : ExternalReference::old_space_allocation_limit_address(isolate())); | 159 : ExternalReference::old_space_allocation_limit_address(isolate())); |
| 154 | 160 |
| 155 // Check if we can fold this allocation into a previous allocation represented | 161 // Check if we can fold this allocation into a previous allocation represented |
| 156 // by the incoming {state}. | 162 // by the incoming {state}. |
| 157 Int32Matcher m(size); | 163 Int32Matcher m(size); |
| 158 if (m.HasValue() && m.Value() < kMaxRegularHeapObjectSize) { | 164 if (m.HasValue() && m.Value() < kMaxRegularHeapObjectSize) { |
| 159 int32_t const object_size = m.Value(); | 165 int32_t const object_size = m.Value(); |
| 160 if (state->size() <= kMaxRegularHeapObjectSize - object_size && | 166 if (state->size() <= kMaxRegularHeapObjectSize - object_size && |
| 161 state->group()->pretenure() == pretenure) { | 167 state->group()->pretenure() == pretenure) { |
| 162 // We can fold this Allocate {node} into the allocation {group} | 168 // We can fold this Allocate {node} into the allocation {group} |
| 163 // represented by the given {state}. Compute the upper bound for | 169 // represented by the given {state}. Compute the upper bound for |
| 164 // the new {state}. | 170 // the new {state}. |
| 165 int32_t const state_size = state->size() + object_size; | 171 int32_t const state_size = state->size() + object_size; |
| 166 | 172 |
| 167 // Update the reservation check to the actual maximum upper bound. | 173 // Update the reservation check to the actual maximum upper bound. |
| 168 AllocationGroup* const group = state->group(); | 174 AllocationGroup* const group = state->group(); |
| 169 if (OpParameter<int32_t>(group->size()) < state_size) { | 175 if (OpParameter<int32_t>(group->size()) < state_size) { |
| 170 NodeProperties::ChangeOp(group->size(), | 176 NodeProperties::ChangeOp(group->size(), |
| 171 common()->Int32Constant(state_size)); | 177 common()->Int32Constant(state_size)); |
| 172 } | 178 } |
| 173 | 179 |
| 174 // Update the allocation top with the new object allocation. | 180 // Update the allocation top with the new object allocation. |
| 175 // TODO(bmeurer): Defer writing back top as much as possible. | 181 // TODO(bmeurer): Defer writing back top as much as possible. |
| 176 Node* top = graph()->NewNode(machine()->IntAdd(), state->top(), | 182 Node* top = __ IntAdd(state->top(), __ IntPtrConstant(object_size)); |
| 177 jsgraph()->IntPtrConstant(object_size)); | 183 __ Store(StoreRepresentation(MachineType::PointerRepresentation(), |
| 178 effect = graph()->NewNode( | 184 kNoWriteBarrier), |
| 179 machine()->Store(StoreRepresentation( | 185 top_address, __ IntPtrConstant(0), top); |
| 180 MachineType::PointerRepresentation(), kNoWriteBarrier)), | |
| 181 top_address, jsgraph()->IntPtrConstant(0), top, effect, control); | |
| 182 | 186 |
| 183 // Compute the effective inner allocated address. | 187 // Compute the effective inner allocated address. |
| 184 value = graph()->NewNode( | 188 value = __ BitcastWordToTagged( |
| 185 machine()->BitcastWordToTagged(), | 189 __ IntAdd(state->top(), __ IntPtrConstant(kHeapObjectTag))); |
| 186 graph()->NewNode(machine()->IntAdd(), state->top(), | |
| 187 jsgraph()->IntPtrConstant(kHeapObjectTag))); | |
| 188 | 190 |
| 189 // Extend the allocation {group}. | 191 // Extend the allocation {group}. |
| 190 group->Add(value); | 192 group->Add(value); |
| 191 state = AllocationState::Open(group, state_size, top, zone()); | 193 state = AllocationState::Open(group, state_size, top, zone()); |
| 192 } else { | 194 } else { |
| | 195 auto call_runtime = __ MakeDeferredLabel<1>(); |
| | 196 auto done = __ MakeLabel<2>(MachineType::PointerRepresentation()); |
| | 197 |
| 193 // Setup a mutable reservation size node; will be patched as we fold | 198 // Setup a mutable reservation size node; will be patched as we fold |
| 194 // additional allocations into this new group. | 199 // additional allocations into this new group. |
| 195 Node* size = graph()->NewNode(common()->Int32Constant(object_size)); | 200 Node* size = __ UniqueInt32Constant(object_size); |
| 196 | 201 |
| 197 // Load allocation top and limit. | 202 // Load allocation top and limit. |
| 198 Node* top = effect = | 203 Node* top = |
| 199 graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address, | 204 __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0)); |
| 200 jsgraph()->IntPtrConstant(0), effect, control); | 205 Node* limit = |
| 201 Node* limit = effect = graph()->NewNode( | 206 __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0)); |
| 202 machine()->Load(MachineType::Pointer()), limit_address, | |
| 203 jsgraph()->IntPtrConstant(0), effect, control); | |
| 204 | 207 |
| 205 // Check if we need to collect garbage before we can start bump pointer | 208 // Check if we need to collect garbage before we can start bump pointer |
| 206 // allocation (always done for folded allocations). | 209 // allocation (always done for folded allocations). |
| 207 Node* check = graph()->NewNode( | 210 Node* check = __ UintLessThan( |
| 208 machine()->UintLessThan(), | 211 __ IntAdd(top, |
| 209 graph()->NewNode( | 212 machine()->Is64() ? __ ChangeInt32ToInt64(size) : size), |
| 210 machine()->IntAdd(), top, | |
| 211 machine()->Is64() | |
| 212 ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size) | |
| 213 : size), | |
| 214 limit); | 213 limit); |
| 215 Node* branch = | |
| 216 graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control); | |
| 217 | 214 |
| 218 Node* if_true = graph()->NewNode(common()->IfTrue(), branch); | 215 __ GotoUnless(check, &call_runtime); |
| 219 Node* etrue = effect; | 216 __ Goto(&done, top); |
| 220 Node* vtrue = top; | |
| 221 | 217 |
| 222 Node* if_false = graph()->NewNode(common()->IfFalse(), branch); | 218 __ Bind(&call_runtime); |
| 223 Node* efalse = effect; | |
| 224 Node* vfalse; | |
| 225 { | 219 { |
| 226 Node* target = pretenure == NOT_TENURED | 220 Node* target = |
| 227 ? jsgraph()->AllocateInNewSpaceStubConstant() | 221 pretenure == NOT_TENURED ? __ AllocateInNewSpaceStubConstant() |
| 228 : jsgraph()->AllocateInOldSpaceStubConstant(); | 222 : __ |
| | 223 AllocateInOldSpaceStubConstant(); |
| 229 if (!allocate_operator_.is_set()) { | 224 if (!allocate_operator_.is_set()) { |
| 230 CallDescriptor* descriptor = | 225 CallDescriptor* descriptor = |
| 231 Linkage::GetAllocateCallDescriptor(graph()->zone()); | 226 Linkage::GetAllocateCallDescriptor(graph()->zone()); |
| 232 allocate_operator_.set(common()->Call(descriptor)); | 227 allocate_operator_.set(common()->Call(descriptor)); |
| 233 } | 228 } |
| 234 vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, | 229 Node* vfalse = __ Call(allocate_operator_.get(), target, size); |
| 235 size, efalse, if_false); | 230 vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag)); |
| 236 vfalse = graph()->NewNode(machine()->IntSub(), vfalse, | 231 __ Goto(&done, vfalse); |
| 237 jsgraph()->IntPtrConstant(kHeapObjectTag)); | |
| 238 } | 232 } |
| 239 | 233 |
| 240 control = graph()->NewNode(common()->Merge(2), if_true, if_false); | 234 __ Bind(&done); |
| 241 effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control); | |
| 242 value = graph()->NewNode( | |
| 243 common()->Phi(MachineType::PointerRepresentation(), 2), vtrue, vfalse, | |
| 244 control); | |
| 245 | 235 |
| 246 // Compute the new top and write it back. | 236 // Compute the new top and write it back. |
| 247 top = graph()->NewNode(machine()->IntAdd(), value, | 237 top = __ IntAdd(done.PhiAt(0), __ IntPtrConstant(object_size)); |
| 248 jsgraph()->IntPtrConstant(object_size)); | 238 __ Store(StoreRepresentation(MachineType::PointerRepresentation(), |
| 249 effect = graph()->NewNode( | 239 kNoWriteBarrier), |
| 250 machine()->Store(StoreRepresentation( | 240 top_address, __ IntPtrConstant(0), top); |
| 251 MachineType::PointerRepresentation(), kNoWriteBarrier)), | |
| 252 top_address, jsgraph()->IntPtrConstant(0), top, effect, control); | |
| 253 | 241 |
| 254 // Compute the initial object address. | 242 // Compute the initial object address. |
| 255 value = graph()->NewNode( | 243 value = __ BitcastWordToTagged( |
| 256 machine()->BitcastWordToTagged(), | 244 __ IntAdd(done.PhiAt(0), __ IntPtrConstant(kHeapObjectTag))); |
| 257 graph()->NewNode(machine()->IntAdd(), value, | |
| 258 jsgraph()->IntPtrConstant(kHeapObjectTag))); | |
| 259 | 245 |
| 260 // Start a new allocation group. | 246 // Start a new allocation group. |
| 261 AllocationGroup* group = | 247 AllocationGroup* group = |
| 262 new (zone()) AllocationGroup(value, pretenure, size, zone()); | 248 new (zone()) AllocationGroup(value, pretenure, size, zone()); |
| 263 state = AllocationState::Open(group, object_size, top, zone()); | 249 state = AllocationState::Open(group, object_size, top, zone()); |
| 264 } | 250 } |
| 265 } else { | 251 } else { |
| | 252 auto call_runtime = __ MakeDeferredLabel<1>(); |
| | 253 auto done = __ MakeLabel<2>(MachineRepresentation::kTaggedPointer); |
| | 254 |
| 266 // Load allocation top and limit. | 255 // Load allocation top and limit. |
| 267 Node* top = effect = | 256 Node* top = |
| 268 graph()->NewNode(machine()->Load(MachineType::Pointer()), top_address, | 257 __ Load(MachineType::Pointer(), top_address, __ IntPtrConstant(0)); |
| 269 jsgraph()->IntPtrConstant(0), effect, control); | 258 Node* limit = |
| 270 Node* limit = effect = | 259 __ Load(MachineType::Pointer(), limit_address, __ IntPtrConstant(0)); |
| 271 graph()->NewNode(machine()->Load(MachineType::Pointer()), limit_address, | |
| 272 jsgraph()->IntPtrConstant(0), effect, control); | |
| 273 | 260 |
| 274 // Compute the new top. | 261 // Compute the new top. |
| 275 Node* new_top = graph()->NewNode( | 262 Node* new_top = |
| 276 machine()->IntAdd(), top, | 263 __ IntAdd(top, machine()->Is64() ? __ ChangeInt32ToInt64(size) : size); |
| 277 machine()->Is64() | |
| 278 ? graph()->NewNode(machine()->ChangeInt32ToInt64(), size) | |
| 279 : size); | |
| 280 | 264 |
| 281 // Check if we can do bump pointer allocation here. | 265 // Check if we can do bump pointer allocation here. |
| 282 Node* check = graph()->NewNode(machine()->UintLessThan(), new_top, limit); | 266 Node* check = __ UintLessThan(new_top, limit); |
| 283 Node* branch = | 267 __ GotoUnless(check, &call_runtime); |
| 284 graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control); | 268 __ Store(StoreRepresentation(MachineType::PointerRepresentation(), |
| | 269 kNoWriteBarrier), |
| | 270 top_address, __ IntPtrConstant(0), new_top); |
| | 271 __ Goto(&done, __ BitcastWordToTagged( |
| | 272 __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag)))); |
| 285 | 273 |
| 286 Node* if_true = graph()->NewNode(common()->IfTrue(), branch); | 274 __ Bind(&call_runtime); |
| 287 Node* etrue = effect; | 275 Node* target = |
| 288 Node* vtrue; | 276 pretenure == NOT_TENURED ? __ AllocateInNewSpaceStubConstant() |
| 289 { | 277 : __ |
| 290 etrue = graph()->NewNode( | 278 AllocateInOldSpaceStubConstant(); |
| 291 machine()->Store(StoreRepresentation( | 279 if (!allocate_operator_.is_set()) { |
| 292 MachineType::PointerRepresentation(), kNoWriteBarrier)), | 280 CallDescriptor* descriptor = |
| 293 top_address, jsgraph()->IntPtrConstant(0), new_top, etrue, if_true); | 281 Linkage::GetAllocateCallDescriptor(graph()->zone()); |
| 294 vtrue = graph()->NewNode( | 282 allocate_operator_.set(common()->Call(descriptor)); |
| 295 machine()->BitcastWordToTagged(), | |
| 296 graph()->NewNode(machine()->IntAdd(), top, | |
| 297 jsgraph()->IntPtrConstant(kHeapObjectTag))); | |
| 298 } | 283 } |
| | 284 __ Goto(&done, __ Call(allocate_operator_.get(), target, size)); |
| 299 | 285 |
| 300 Node* if_false = graph()->NewNode(common()->IfFalse(), branch); | 286 __ Bind(&done); |
| 301 Node* efalse = effect; | 287 value = done.PhiAt(0); |
| 302 Node* vfalse; | |
| 303 { | |
| 304 Node* target = pretenure == NOT_TENURED | |
| 305 ? jsgraph()->AllocateInNewSpaceStubConstant() | |
| 306 : jsgraph()->AllocateInOldSpaceStubConstant(); | |
| 307 if (!allocate_operator_.is_set()) { | |
| 308 CallDescriptor* descriptor = | |
| 309 Linkage::GetAllocateCallDescriptor(graph()->zone()); | |
| 310 allocate_operator_.set(common()->Call(descriptor)); | |
| 311 } | |
| 312 vfalse = efalse = graph()->NewNode(allocate_operator_.get(), target, size, | |
| 313 efalse, if_false); | |
| 314 } | |
| 315 | |
| 316 control = graph()->NewNode(common()->Merge(2), if_true, if_false); | |
| 317 effect = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, control); | |
| 318 value = graph()->NewNode( | |
| 319 common()->Phi(MachineRepresentation::kTaggedPointer, 2), vtrue, vfalse, | |
| 320 control); | |
| 321 | 288 |
| 322 // Create an unfoldable allocation group. | 289 // Create an unfoldable allocation group. |
| 323 AllocationGroup* group = | 290 AllocationGroup* group = |
| 324 new (zone()) AllocationGroup(value, pretenure, zone()); | 291 new (zone()) AllocationGroup(value, pretenure, zone()); |
| 325 state = AllocationState::Closed(group, zone()); | 292 state = AllocationState::Closed(group, zone()); |
| 326 } | 293 } |
| 327 | 294 |
| | 295 effect = __ ExtractCurrentEffect(); |
| | 296 control = __ ExtractCurrentControl(); |
| | 297 USE(control); // Floating control, dropped on the floor. |
| | 298 |
| 328 // Replace all effect uses of {node} with the {effect}, enqueue the | 299 // Replace all effect uses of {node} with the {effect}, enqueue the |
| 329 // effect uses for further processing, and replace all value uses of | 300 // effect uses for further processing, and replace all value uses of |
| 330 // {node} with the {value}. | 301 // {node} with the {value}. |
| 331 for (Edge edge : node->use_edges()) { | 302 for (Edge edge : node->use_edges()) { |
| 332 if (NodeProperties::IsEffectEdge(edge)) { | 303 if (NodeProperties::IsEffectEdge(edge)) { |
| 333 EnqueueUse(edge.from(), edge.index(), state); | 304 EnqueueUse(edge.from(), edge.index(), state); |
| 334 edge.UpdateTo(effect); | 305 edge.UpdateTo(effect); |
| 335 } else { | 306 } else { |
| 336 DCHECK(NodeProperties::IsValueEdge(edge)); | 307 DCHECK(NodeProperties::IsValueEdge(edge)); |
| 337 edge.UpdateTo(value); | 308 edge.UpdateTo(value); |
| 338 } | 309 } |
| 339 } | 310 } |
| 340 | 311 |
| 341 // Kill the {node} to make sure we don't leave dangling dead uses. | 312 // Kill the {node} to make sure we don't leave dangling dead uses. |
| 342 node->Kill(); | 313 node->Kill(); |
| 343 } | 314 } |
| 344 | 315 |
| | 316 #undef __ |
| | 317 |
| 345 void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) { | 318 void MemoryOptimizer::VisitCall(Node* node, AllocationState const* state) { |
| 346 DCHECK_EQ(IrOpcode::kCall, node->opcode()); | 319 DCHECK_EQ(IrOpcode::kCall, node->opcode()); |
| 347 // If the call can allocate, we start with a fresh state. | 320 // If the call can allocate, we start with a fresh state. |
| 348 if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) { | 321 if (!(CallDescriptorOf(node->op())->flags() & CallDescriptor::kNoAllocate)) { |
| 349 state = empty_state(); | 322 state = empty_state(); |
| 350 } | 323 } |
| 351 EnqueueUses(node, state); | 324 EnqueueUses(node, state); |
| 352 } | 325 } |
| 353 | 326 |
| 354 void MemoryOptimizer::VisitLoadElement(Node* node, | 327 void MemoryOptimizer::VisitLoadElement(Node* node, |
| (...skipping 173 matching lines...) |
| 528 return jsgraph()->common(); | 501 return jsgraph()->common(); |
| 529 } | 502 } |
| 530 | 503 |
| 531 MachineOperatorBuilder* MemoryOptimizer::machine() const { | 504 MachineOperatorBuilder* MemoryOptimizer::machine() const { |
| 532 return jsgraph()->machine(); | 505 return jsgraph()->machine(); |
| 533 } | 506 } |
| 534 | 507 |
| 535 } // namespace compiler | 508 } // namespace compiler |
| 536 } // namespace internal | 509 } // namespace internal |
| 537 } // namespace v8 | 510 } // namespace v8 |
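
Note on the lowering shape: apart from the folding case, both allocation paths in the new code follow the same bump-pointer pattern — load the space's top and limit words, take the fast path when the bumped top is still below the limit, write the new top back, and otherwise branch to a deferred call to the Allocate stub. Below is a minimal standalone sketch of that runtime behaviour; the function and parameter names are illustrative (not V8 API), and kHeapObjectTag is assumed to have its usual value of 1.

```cpp
// Illustrative sketch only: mirrors the lowered fast/slow allocation path
// above in plain C++. All names here are hypothetical.
#include <cstddef>
#include <cstdint>

constexpr uintptr_t kHeapObjectTagSketch = 1;  // stands in for kHeapObjectTag

uintptr_t AllocateSketch(uintptr_t* top_address, uintptr_t* limit_address,
                         size_t size, uintptr_t (*call_runtime)(size_t)) {
  uintptr_t top = *top_address;             // __ Load(Pointer, top_address, 0)
  uintptr_t limit = *limit_address;         // __ Load(Pointer, limit_address, 0)
  uintptr_t new_top = top + size;           // __ IntAdd(top, size)
  if (new_top < limit) {                    // __ UintLessThan(new_top, limit)
    *top_address = new_top;                 // __ Store(..., top_address, 0, new_top)
    return top + kHeapObjectTagSketch;      // __ BitcastWordToTagged(top + kHeapObjectTag)
  }
  return call_runtime(size);                // deferred label: Allocate stub, may trigger GC
}
```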