Index: src/code-stubs.cc |
diff --git a/src/code-stubs.cc b/src/code-stubs.cc |
index 0fb866e9491049d7e165d43da0f7c7923845a657..79098a1ac9a291ef2b7c6f7eee253262b3373a83 100644 |
--- a/src/code-stubs.cc |
+++ b/src/code-stubs.cc |
@@ -4493,26 +4493,33 @@ void FastNewClosureStub::GenerateAssembly(CodeStubAssembler* assembler) const { |
void FastNewFunctionContextStub::GenerateAssembly( |
CodeStubAssembler* assembler) const { |
+ typedef CodeStubAssembler::Label Label; |
typedef compiler::Node Node; |
+ typedef CodeStubAssembler::Variable Variable; |
- int length = slots() + Context::MIN_CONTEXT_SLOTS; |
- int size = length * kPointerSize + FixedArray::kHeaderSize; |
- |
- // Get the function |
Node* function = |
assembler->Parameter(FastNewFunctionContextDescriptor::kFunctionIndex); |
+ Node* slots = |
+ assembler->Parameter(FastNewFunctionContextDescriptor::kSlotsIndex); |
Node* context = |
assembler->Parameter(FastNewFunctionContextDescriptor::kContextIndex); |
+ Node* min_context_slots = |
+ assembler->Int32Constant(Context::MIN_CONTEXT_SLOTS); |
klaasb
2016/07/26 17:04:52
I've used int32 throughout, as it was used before
|
+ Node* length = assembler->Int32Add(slots, min_context_slots); |
+ Node* size = assembler->Int32Add( |
+ assembler->Int32Mul(length, assembler->Int32Constant(kPointerSize)), |
rmcilroy
2016/07/27 11:11:38
You could probably shift by kPointerSizeLog2 instead
klaasb
2016/07/27 17:06:31
TF does it on the graph itself before instruction selection
rmcilroy
2016/07/28 15:04:27
I don't think this phase is run on code generated
klaasb
2016/07/28 15:34:47
Done.
|
+ assembler->Int32Constant(FixedArray::kHeaderSize)); |
+ |
// Create a new closure from the given function info in new space |
Node* function_context = assembler->Allocate(size); |
assembler->StoreMapNoWriteBarrier( |
function_context, |
assembler->HeapConstant(isolate()->factory()->function_context_map())); |
- assembler->StoreObjectFieldNoWriteBarrier( |
- function_context, Context::kLengthOffset, |
- assembler->SmiConstant(Smi::FromInt(length))); |
+ assembler->StoreObjectFieldNoWriteBarrier(function_context, |
+ Context::kLengthOffset, |
+ assembler->SmiFromWord32(length)); |
// Set up the fixed slots. |
assembler->StoreFixedArrayElement( |
@@ -4534,11 +4541,30 @@ void FastNewFunctionContextStub::GenerateAssembly( |
// Initialize the rest of the slots to undefined. |
Node* undefined = assembler->UndefinedConstant(); |
- for (int i = Context::MIN_CONTEXT_SLOTS; i < length; ++i) { |
- assembler->StoreFixedArrayElement(function_context, |
- assembler->Int32Constant(i), undefined, |
- SKIP_WRITE_BARRIER); |
+ Variable var_slot_index(assembler, MachineType::PointerRepresentation()); |
+ var_slot_index.Bind(min_context_slots); |
+ Label loop(assembler, &var_slot_index), after_loop(assembler); |
+ assembler->Goto(&loop); |
+ |
+ assembler->Bind(&loop); |
+ { |
+ Node* slot_index = var_slot_index.value(); |
+ Label initialize_slot(assembler); |
+ assembler->Branch(assembler->Int32LessThan(slot_index, length), |
+ &initialize_slot, &after_loop); |
rmcilroy
2016/07/27 11:11:38
You could do this check on the backward branch - y
klaasb
2016/07/27 17:06:31
Done.
|
+ |
+ assembler->Bind(&initialize_slot); |
+ { |
+ assembler->StoreFixedArrayElement(function_context, |
+ var_slot_index.value(), undefined, |
+ SKIP_WRITE_BARRIER); |
+ Node* one = assembler->Int32Constant(1); |
+ Node* next_index = assembler->Int32Add(var_slot_index.value(), one); |
+ var_slot_index.Bind(next_index); |
+ assembler->Goto(&loop); |
+ } |
} |
+ assembler->Bind(&after_loop); |
assembler->Return(function_context); |
} |