Index: src/ia32/codegen-ia32.cc
===================================================================
--- src/ia32/codegen-ia32.cc	(revision 3716)
+++ src/ia32/codegen-ia32.cc	(working copy)
@@ -1,4 +1,4 @@
-// Copyright 2006-2009 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -639,15 +639,22 @@
   return frame_->Pop();
 }
+//------------------------------------------------------------------------------
+// CodeGenerator implementation of variables, lookups, and stores.
-Reference::Reference(CodeGenerator* cgen, Expression* expression)
-    : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
+Reference::Reference(CodeGenerator* cgen,
+                     Expression* expression,
+                     bool persist_after_get)
+    : cgen_(cgen),
+      expression_(expression),
+      type_(ILLEGAL),
+      persist_after_get_(persist_after_get) {
   cgen->LoadReference(this);
 }
 Reference::~Reference() {
-  cgen_->UnloadReference(this);
+  ASSERT(is_unloaded() || is_illegal());
 }
@@ -697,6 +704,7 @@
   // Pop a reference from the stack while preserving TOS.
   Comment cmnt(masm_, "[ UnloadReference");
   frame_->Nip(ref->size());
+  ref->set_unloaded();
 }
@@ -2297,20 +2305,29 @@
 }
-void CodeGenerator::CallApplyLazy(Property* apply,
+void CodeGenerator::CallApplyLazy(Expression* applicand,
                                   Expression* receiver,
                                   VariableProxy* arguments,
                                   int position) {
+  // An optimized implementation of expressions of the form
+  // x.apply(y, arguments).
+  // If the arguments object of the scope has not been allocated,
+  // and x.apply is Function.prototype.apply, this optimization
+  // just copies y and the arguments of the current function on the
+  // stack, as receiver and arguments, and calls x.
+  // In the implementation comments, we call x the applicand
+  // and y the receiver.
   ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
   ASSERT(arguments->IsArguments());
-  JumpTarget slow, done;
-
-  // Load the apply function onto the stack. This will usually
+  // Load applicand.apply onto the stack. This will usually
   // give us a megamorphic load site. Not super, but it works.
-  Reference ref(this, apply);
-  ref.GetValue();
-  ASSERT(ref.type() == Reference::NAMED);
+  Load(applicand);
+  Handle<String> name = Factory::LookupAsciiSymbol("apply");
+  frame()->Push(name);
+  Result answer = frame()->CallLoadIC(RelocInfo::CODE_TARGET);
+  __ nop();
+  frame()->Push(&answer);
   // Load the receiver and the existing arguments object onto the
   // expression stack. Avoid allocating the arguments object here.
@@ -2320,6 +2337,11 @@
   // Emit the source position information after having loaded the
   // receiver and the arguments.
   CodeForSourcePosition(position);
+  // Contents of frame at this point:
+  // Frame[0]: arguments object of the current function or the hole.
+  // Frame[1]: receiver
+  // Frame[2]: applicand.apply
+  // Frame[3]: applicand.
   // Check if the arguments object has been lazily allocated
   // already. If so, just use that instead of copying the arguments
@@ -2327,143 +2349,151 @@
   // named 'arguments' has been introduced.
   frame_->Dup();
   Result probe = frame_->Pop();
-  bool try_lazy = true;
-  if (probe.is_constant()) {
-    try_lazy = probe.handle()->IsTheHole();
-  } else {
-    __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
-    probe.Unuse();
-    slow.Branch(not_equal);
-  }
+  { VirtualFrame::SpilledScope spilled_scope;
+    Label slow, done;
+    bool try_lazy = true;
+    if (probe.is_constant()) {
+      try_lazy = probe.handle()->IsTheHole();
+    } else {
+      __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+      probe.Unuse();
+      __ j(not_equal, &slow);
+    }
-  if (try_lazy) {
-    JumpTarget build_args;
+    if (try_lazy) {
+      Label build_args;
+      // Get rid of the arguments object probe.
+      frame_->Drop();  // Can be called on a spilled frame.
+      // Stack now has 3 elements on it.
+      // Contents of stack at this point:
+      // esp[0]: receiver
+      // esp[1]: applicand.apply
+      // esp[2]: applicand.
-    // Get rid of the arguments object probe.
-    frame_->Drop();
-
-    // Before messing with the execution stack, we sync all
-    // elements. This is bound to happen anyway because we're
-    // about to call a function.
-    frame_->SyncRange(0, frame_->element_count() - 1);
-
-    // Check that the receiver really is a JavaScript object.
-    { frame_->PushElementAt(0);
-      Result receiver = frame_->Pop();
-      receiver.ToRegister();
-      __ test(receiver.reg(), Immediate(kSmiTagMask));
-      build_args.Branch(zero);
-      Result tmp = allocator_->Allocate();
+      // Check that the receiver really is a JavaScript object.
+      __ mov(eax, Operand(esp, 0));
+      __ test(eax, Immediate(kSmiTagMask));
+      __ j(zero, &build_args);
       // We allow all JSObjects including JSFunctions. As long as
       // JS_FUNCTION_TYPE is the last instance type and it is right
       // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
       // bound.
       ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
       ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
-      __ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, tmp.reg());
-      build_args.Branch(less);
-    }
+      __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
+      __ j(below, &build_args);
-    // Verify that we're invoking Function.prototype.apply.
-    { frame_->PushElementAt(1);
-      Result apply = frame_->Pop();
-      apply.ToRegister();
-      __ test(apply.reg(), Immediate(kSmiTagMask));
-      build_args.Branch(zero);
-      Result tmp = allocator_->Allocate();
-      __ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
-      build_args.Branch(not_equal);
-      __ mov(tmp.reg(),
-             FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
+      // Check that applicand.apply is Function.prototype.apply.
+      __ mov(eax, Operand(esp, kPointerSize));
+      __ test(eax, Immediate(kSmiTagMask));
+      __ j(zero, &build_args);
+      __ CmpObjectType(eax, JS_FUNCTION_TYPE, ecx);
+      __ j(not_equal, &build_args);
+      __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
       Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
-      __ cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
+      __ cmp(FieldOperand(ecx, SharedFunctionInfo::kCodeOffset),
              Immediate(apply_code));
-      build_args.Branch(not_equal);
-    }
+      __ j(not_equal, &build_args);
-    // Get the function receiver from the stack. Check that it
-    // really is a function.
-    __ mov(edi, Operand(esp, 2 * kPointerSize));
-    __ test(edi, Immediate(kSmiTagMask));
-    build_args.Branch(zero);
-    __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
-    build_args.Branch(not_equal);
+      // Check that applicand is a function.
+      __ mov(edi, Operand(esp, 2 * kPointerSize));
+      __ test(edi, Immediate(kSmiTagMask));
+      __ j(zero, &build_args);
+      __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+      __ j(not_equal, &build_args);
-    // Copy the arguments to this function possibly from the
-    // adaptor frame below it.
-    Label invoke, adapted;
-    __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
-    __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
-    __ cmp(Operand(ecx),
-           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-    __ j(equal, &adapted);
+      // Copy the arguments to this function possibly from the
+      // adaptor frame below it.
+      Label invoke, adapted;
+      __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+      __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+      __ cmp(Operand(ecx),
+             Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+      __ j(equal, &adapted);
-    // No arguments adaptor frame. Copy fixed number of arguments.
-    __ mov(eax, Immediate(scope_->num_parameters()));
-    for (int i = 0; i < scope_->num_parameters(); i++) {
-      __ push(frame_->ParameterAt(i));
-    }
-    __ jmp(&invoke);
+      // No arguments adaptor frame. Copy fixed number of arguments.
+      __ mov(eax, Immediate(scope_->num_parameters()));
+      for (int i = 0; i < scope_->num_parameters(); i++) {
+        __ push(frame_->ParameterAt(i));
+      }
+      __ jmp(&invoke);
-    // Arguments adaptor frame present. Copy arguments from there, but
-    // avoid copying too many arguments to avoid stack overflows.
-    __ bind(&adapted);
-    static const uint32_t kArgumentsLimit = 1 * KB;
-    __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
-    __ SmiUntag(eax);
-    __ mov(ecx, Operand(eax));
-    __ cmp(eax, kArgumentsLimit);
-    build_args.Branch(above);
+      // Arguments adaptor frame present. Copy arguments from there, but
+      // avoid copying too many arguments to avoid stack overflows.
+      __ bind(&adapted);
+      static const uint32_t kArgumentsLimit = 1 * KB;
+      __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+      __ SmiUntag(eax);
+      __ mov(ecx, Operand(eax));
+      __ cmp(eax, kArgumentsLimit);
+      __ j(above, &build_args);
-    // Loop through the arguments pushing them onto the execution
-    // stack. We don't inform the virtual frame of the push, so we don't
-    // have to worry about getting rid of the elements from the virtual
-    // frame.
-    Label loop;
-    __ bind(&loop);
-    __ test(ecx, Operand(ecx));
-    __ j(zero, &invoke);
-    __ push(Operand(edx, ecx, times_4, 1 * kPointerSize));
-    __ dec(ecx);
-    __ jmp(&loop);
+      // Loop through the arguments pushing them onto the execution
+      // stack. We don't inform the virtual frame of the push, so we don't
+      // have to worry about getting rid of the elements from the virtual
+      // frame.
+      Label loop;
+      // ecx is a small non-negative integer, due to the test above.
+      __ test(ecx, Operand(ecx));
+      __ j(zero, &invoke);
+      __ bind(&loop);
+      __ push(Operand(edx, ecx, times_pointer_size, 1 * kPointerSize));
+      __ dec(ecx);
+      __ j(not_zero, &loop);
-    // Invoke the function. The virtual frame knows about the receiver
-    // so make sure to forget that explicitly.
-    __ bind(&invoke);
-    ParameterCount actual(eax);
-    __ InvokeFunction(edi, actual, CALL_FUNCTION);
-    frame_->Forget(1);
-    Result result = allocator()->Allocate(eax);
-    frame_->SetElementAt(0, &result);
-    done.Jump();
+      // Invoke the function.
+      __ bind(&invoke);
+      ParameterCount actual(eax);
+      __ InvokeFunction(edi, actual, CALL_FUNCTION);
+      // Drop applicand.apply and applicand from the stack, and push
+      // the result of the function call, but leave the spilled frame
+      // unchanged, with 3 elements, so it is correct when we compile the
+      // slow-case code.
+      __ add(Operand(esp), Immediate(2 * kPointerSize));
+      __ push(eax);
+      // Stack now has 1 element:
+      // esp[0]: result
+      __ jmp(&done);
-    // Slow-case: Allocate the arguments object since we know it isn't
-    // there, and fall-through to the slow-case where we call
-    // Function.prototype.apply.
-    build_args.Bind();
-    Result arguments_object = StoreArgumentsObject(false);
-    frame_->Push(&arguments_object);
-    slow.Bind();
-  }
+      // Slow-case: Allocate the arguments object since we know it isn't
+      // there, and fall-through to the slow-case where we call
+      // applicand.apply.
+      __ bind(&build_args);
+      // Stack now has 3 elements, because we have jumped from where:
+      // esp[0]: receiver
+      // esp[1]: applicand.apply
+      // esp[2]: applicand.
-  // Flip the apply function and the function to call on the stack, so
-  // the function looks like the receiver of the apply call. This way,
-  // the generic Function.prototype.apply implementation can deal with
-  // the call like it usually does.
-  Result a2 = frame_->Pop();
-  Result a1 = frame_->Pop();
-  Result ap = frame_->Pop();
-  Result fn = frame_->Pop();
-  frame_->Push(&ap);
-  frame_->Push(&fn);
-  frame_->Push(&a1);
-  frame_->Push(&a2);
-  CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
-  Result res = frame_->CallStub(&call_function, 3);
-  frame_->Push(&res);
+      // StoreArgumentsObject requires a correct frame, and may modify it.
+      Result arguments_object = StoreArgumentsObject(false);
+      frame_->SpillAll();
+      arguments_object.ToRegister();
+      frame_->EmitPush(arguments_object.reg());
+      arguments_object.Unuse();
+      // Stack and frame now have 4 elements.
+      __ bind(&slow);
+    }
-  // All done. Restore context register after call.
-  if (try_lazy) done.Bind();
+    // Generic computation of x.apply(y, args) with no special optimization.
+    // Flip applicand.apply and applicand on the stack, so
+    // applicand looks like the receiver of the applicand.apply call.
+    // Then process it as a normal function call.
+    __ mov(eax, Operand(esp, 3 * kPointerSize));
+    __ mov(ebx, Operand(esp, 2 * kPointerSize));
+    __ mov(Operand(esp, 2 * kPointerSize), eax);
+    __ mov(Operand(esp, 3 * kPointerSize), ebx);
+
+    CallFunctionStub call_function(2, NOT_IN_LOOP, NO_CALL_FUNCTION_FLAGS);
+    Result res = frame_->CallStub(&call_function, 3);
+    // The function and its two arguments have been dropped.
+    frame_->Drop(1);  // Drop the receiver as well.
+    res.ToRegister();
+    frame_->EmitPush(res.reg());
+    // Stack now has 1 element:
+    // esp[0]: result
+    if (try_lazy) __ bind(&done);
+  }  // End of spilled scope.
+  // Restore the context register after a call.
   frame_->RestoreContextRegister();
 }
@@ -3503,17 +3533,13 @@
     if (!each.is_illegal()) {
       if (each.size() > 0) {
         frame_->EmitPush(frame_->ElementAt(each.size()));
-      }
-      // If the reference was to a slot we rely on the convenient property
-      // that it doesn't matter whether a value (eg, ebx pushed above) is
-      // right on top of or right underneath a zero-sized reference.
-      each.SetValue(NOT_CONST_INIT);
-      if (each.size() > 0) {
-        // It's safe to pop the value lying on top of the reference before
-        // unloading the reference itself (which preserves the top of stack,
-        // ie, now the topmost value of the non-zero sized reference), since
-        // we will discard the top of stack after unloading the reference
-        // anyway.
+        each.SetValue(NOT_CONST_INIT);
+        frame_->Drop(2);
+      } else {
+        // If the reference was to a slot we rely on the convenient property
+        // that it doesn't matter whether a value (eg, ebx pushed above) is
+        // right on top of or right underneath a zero-sized reference.
+        each.SetValue(NOT_CONST_INIT);
        frame_->Drop();
       }
     }
@@ -3521,10 +3547,6 @@
   // Unloading a reference may leave the frame in an unspilled state.
   frame_->SpillAll();
-  // Discard the i'th entry pushed above or else the remainder of the
-  // reference, whichever is currently on top of the stack.
-  frame_->Drop();
-
   // Body.
   CheckStack();  // TODO(1222600): ignore if body contains calls.
   VisitAndSpill(node->body());
@@ -4574,9 +4596,12 @@
 void CodeGenerator::VisitAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame_->height();
+#endif
   Comment cmnt(masm_, "[ Assignment");
-  { Reference target(this, node->target());
+  { Reference target(this, node->target(), node->is_compound());
     if (target.is_illegal()) {
       // Fool the virtual frame into thinking that we left the assignment's
       // value on the frame.
@@ -4598,12 +4623,27 @@
       frame_->PushElementAt(target.size() - 1);
       Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
     }
+    if (node->ends_initialization_block()) {
+      // Add an extra copy of the receiver to the frame, so that it can be
+      // converted back to fast case after the assignment.
+      ASSERT(target.type() == Reference::NAMED ||
+             target.type() == Reference::KEYED);
+      if (target.type() == Reference::NAMED) {
+        frame_->Dup();
+        // Dup target receiver on stack.
+      } else {
+        ASSERT(target.type() == Reference::KEYED);
+        Result temp = frame_->Pop();
+        frame_->Dup();
+        frame_->Push(&temp);
+      }
+    }
     if (node->op() == Token::ASSIGN ||
         node->op() == Token::INIT_VAR ||
         node->op() == Token::INIT_CONST) {
       Load(node->value());
-    } else {
+    } else {  // Assignment is a compound assignment.
       Literal* literal = node->value()->AsLiteral();
       bool overwrite_value =
           (node->value()->AsBinaryOperation() != NULL &&
@@ -4629,6 +4669,7 @@
         var->mode() == Variable::CONST &&
         node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
       // Assignment ignored - leave the value on the stack.
+      UnloadReference(&target);
     } else {
       CodeForSourcePosition(node->position());
       if (node->op() == Token::INIT_CONST) {
@@ -4640,17 +4681,20 @@
         target.SetValue(NOT_CONST_INIT);
       }
       if (node->ends_initialization_block()) {
-        ASSERT(target.type() == Reference::NAMED ||
-               target.type() == Reference::KEYED);
+        ASSERT(target.type() == Reference::UNLOADED);
        // End of initialization block. Revert to fast case. The
-        // argument to the runtime call is the receiver, which is the
-        // first value pushed as part of the reference, which is below
-        // the lhs value.
-        frame_->PushElementAt(target.size());
+        // argument to the runtime call is the extra copy of the receiver,
+        // which is below the value of the assignment.
+        // Swap the receiver and the value of the assignment expression.
+        Result lhs = frame_->Pop();
+        Result receiver = frame_->Pop();
+        frame_->Push(&lhs);
+        frame_->Push(&receiver);
        Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
       }
     }
   }
+  ASSERT(frame_->height() == original_height + 1);
 }
@@ -4813,7 +4857,7 @@
           args->at(1)->AsVariableProxy()->IsArguments()) {
         // Use the optimized Function.prototype.apply that avoids
         // allocating lazily allocated arguments objects.
-        CallApplyLazy(property,
+        CallApplyLazy(property->obj(),
                       args->at(0),
                       args->at(1)->AsVariableProxy(),
                       node->position());
@@ -4846,16 +4890,21 @@
       // -------------------------------------------
       // Load the function to call from the property through a reference.
-      Reference ref(this, property);
-      ref.GetValue();
       // Pass receiver to called function.
       if (property->is_synthetic()) {
+        Reference ref(this, property);
+        ref.GetValue();
        // Use global object as receiver.
        LoadGlobalReceiver();
       } else {
-        // The reference's size is non-negative.
-        frame_->PushElementAt(ref.size());
+        Load(property->obj());
+        Load(property->key());
+        Result function = EmitKeyedLoad(false);
+        frame_->Drop();  // Key.
+        Result receiver = frame_->Pop();
+        frame_->Push(&function);
+        frame_->Push(&receiver);
       }
       // Call the function.
@@ -5766,7 +5815,9 @@
   // value will be in the frame to be spilled.
   if (is_postfix) frame_->Push(Smi::FromInt(0));
-  { Reference target(this, node->expression());
+  // A constant reference is not saved to, so a constant reference is not a
+  // compound assignment reference.
+  { Reference target(this, node->expression(), !is_const);
     if (target.is_illegal()) {
       // Spoof the virtual frame to have the expected height (one higher
       // than on entry).
@@ -6369,6 +6420,114 @@
 }
+Result CodeGenerator::EmitKeyedLoad(bool is_global) {
+  Comment cmnt(masm_, "[ Load from keyed Property");
+  // Inline array load code if inside of a loop. We do not know
+  // the receiver map yet, so we initially generate the code with
+  // a check against an invalid map. In the inline cache code, we
+  // patch the map check if appropriate.
+  if (loop_nesting() > 0) {
+    Comment cmnt(masm_, "[ Inlined load from keyed Property");
+
+    Result key = frame_->Pop();
+    Result receiver = frame_->Pop();
+    key.ToRegister();
+    receiver.ToRegister();
+
+    // Use a fresh temporary to load the elements without destroying
+    // the receiver which is needed for the deferred slow case.
+    Result elements = allocator()->Allocate();
+    ASSERT(elements.is_valid());
+
+    // Use a fresh temporary for the index and later the loaded
+    // value.
+    Result index = allocator()->Allocate();
+    ASSERT(index.is_valid());
+
+    DeferredReferenceGetKeyedValue* deferred =
+        new DeferredReferenceGetKeyedValue(index.reg(),
+                                           receiver.reg(),
+                                           key.reg(),
+                                           is_global);
+
+    // Check that the receiver is not a smi (only needed if this
+    // is not a load from the global context) and that it has the
+    // expected map.
+    if (!is_global) {
+      __ test(receiver.reg(), Immediate(kSmiTagMask));
+      deferred->Branch(zero);
+    }
+
+    // Initially, use an invalid map. The map is patched in the IC
+    // initialization code.
+    __ bind(deferred->patch_site());
+    // Use masm-> here instead of the double underscore macro since extra
+    // coverage code can interfere with the patching.
+    masm_->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+               Immediate(Factory::null_value()));
+    deferred->Branch(not_equal);
+
+    // Check that the key is a smi.
+    __ test(key.reg(), Immediate(kSmiTagMask));
+    deferred->Branch(not_zero);
+
+    // Get the elements array from the receiver and check that it
+    // is not a dictionary.
+    __ mov(elements.reg(),
+           FieldOperand(receiver.reg(), JSObject::kElementsOffset));
+    __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
+           Immediate(Factory::fixed_array_map()));
+    deferred->Branch(not_equal);
+
+    // Shift the key to get the actual index value and check that
+    // it is within bounds.
+    __ mov(index.reg(), key.reg());
+    __ SmiUntag(index.reg());
+    __ cmp(index.reg(),
+           FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+    deferred->Branch(above_equal);
+
+    // Load and check that the result is not the hole. We could
+    // reuse the index or elements register for the value.
+    //
+    // TODO(206): Consider whether it makes sense to try some
+    // heuristic about which register to reuse. For example, if
+    // one is eax, the we can reuse that one because the value
+    // coming from the deferred code will be in eax.
+    Result value = index;
+    __ mov(value.reg(), Operand(elements.reg(),
+                                index.reg(),
+                                times_4,
+                                FixedArray::kHeaderSize - kHeapObjectTag));
+    elements.Unuse();
+    index.Unuse();
+    __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
+    deferred->Branch(equal);
+    __ IncrementCounter(&Counters::keyed_load_inline, 1);
+
+    deferred->BindExit();
+    // Restore the receiver and key to the frame and push the
+    // result on top of it.
+    frame_->Push(&receiver);
+    frame_->Push(&key);
+    return value;
+  } else {
+    Comment cmnt(masm_, "[ Load from keyed Property");
+    RelocInfo::Mode mode = is_global
+        ? RelocInfo::CODE_TARGET_CONTEXT
+        : RelocInfo::CODE_TARGET;
+    Result answer = frame_->CallKeyedLoadIC(mode);
+    // Make sure that we do not have a test instruction after the
+    // call. A test instruction after the call is used to
+    // indicate that we have generated an inline version of the
+    // keyed load. The explicit nop instruction is here because
+    // the push that follows might be peep-hole optimized away.
+    __ nop();
+    return answer;
+  }
+}
+
+
 #undef __
 #define __ ACCESS_MASM(masm)
@@ -6481,121 +6640,21 @@
     }
     case KEYED: {
-      Comment cmnt(masm, "[ Load from keyed Property");
       Variable* var = expression_->AsVariableProxy()->AsVariable();
       bool is_global = var != NULL;
       ASSERT(!is_global || var->is_global());
-
-      // Inline array load code if inside of a loop. We do not know
-      // the receiver map yet, so we initially generate the code with
-      // a check against an invalid map. In the inline cache code, we
-      // patch the map check if appropriate.
-      if (cgen_->loop_nesting() > 0) {
-        Comment cmnt(masm, "[ Inlined load from keyed Property");
-
-        Result key = cgen_->frame()->Pop();
-        Result receiver = cgen_->frame()->Pop();
-        key.ToRegister();
-        receiver.ToRegister();
-
-        // Use a fresh temporary to load the elements without destroying
-        // the receiver which is needed for the deferred slow case.
-        Result elements = cgen_->allocator()->Allocate();
-        ASSERT(elements.is_valid());
-
-        // Use a fresh temporary for the index and later the loaded
-        // value.
-        Result index = cgen_->allocator()->Allocate();
-        ASSERT(index.is_valid());
-
-        DeferredReferenceGetKeyedValue* deferred =
-            new DeferredReferenceGetKeyedValue(index.reg(),
-                                               receiver.reg(),
-                                               key.reg(),
-                                               is_global);
-
-        // Check that the receiver is not a smi (only needed if this
-        // is not a load from the global context) and that it has the
-        // expected map.
-        if (!is_global) {
-          __ test(receiver.reg(), Immediate(kSmiTagMask));
-          deferred->Branch(zero);
-        }
-
-        // Initially, use an invalid map. The map is patched in the IC
-        // initialization code.
-        __ bind(deferred->patch_site());
-        // Use masm-> here instead of the double underscore macro since extra
-        // coverage code can interfere with the patching.
-        masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
-                  Immediate(Factory::null_value()));
-        deferred->Branch(not_equal);
-
-        // Check that the key is a smi.
-        __ test(key.reg(), Immediate(kSmiTagMask));
-        deferred->Branch(not_zero);
-
-        // Get the elements array from the receiver and check that it
-        // is not a dictionary.
-        __ mov(elements.reg(),
-               FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-        __ cmp(FieldOperand(elements.reg(), HeapObject::kMapOffset),
-               Immediate(Factory::fixed_array_map()));
-        deferred->Branch(not_equal);
-
-        // Shift the key to get the actual index value and check that
-        // it is within bounds.
-        __ mov(index.reg(), key.reg());
-        __ SmiUntag(index.reg());
-        __ cmp(index.reg(),
-               FieldOperand(elements.reg(), FixedArray::kLengthOffset));
-        deferred->Branch(above_equal);
-
-        // Load and check that the result is not the hole. We could
-        // reuse the index or elements register for the value.
-        //
-        // TODO(206): Consider whether it makes sense to try some
-        // heuristic about which register to reuse. For example, if
-        // one is eax, the we can reuse that one because the value
-        // coming from the deferred code will be in eax.
-        Result value = index;
-        __ mov(value.reg(), Operand(elements.reg(),
-                                    index.reg(),
-                                    times_4,
-                                    FixedArray::kHeaderSize - kHeapObjectTag));
-        elements.Unuse();
-        index.Unuse();
-        __ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
-        deferred->Branch(equal);
-        __ IncrementCounter(&Counters::keyed_load_inline, 1);
-
-        deferred->BindExit();
-        // Restore the receiver and key to the frame and push the
-        // result on top of it.
-        cgen_->frame()->Push(&receiver);
-        cgen_->frame()->Push(&key);
-        cgen_->frame()->Push(&value);
-
-      } else {
-        Comment cmnt(masm, "[ Load from keyed Property");
-        RelocInfo::Mode mode = is_global
-            ? RelocInfo::CODE_TARGET_CONTEXT
-            : RelocInfo::CODE_TARGET;
-        Result answer = cgen_->frame()->CallKeyedLoadIC(mode);
-        // Make sure that we do not have a test instruction after the
-        // call. A test instruction after the call is used to
-        // indicate that we have generated an inline version of the
-        // keyed load. The explicit nop instruction is here because
-        // the push that follows might be peep-hole optimized away.
-        __ nop();
-        cgen_->frame()->Push(&answer);
-      }
+      Result value = cgen_->EmitKeyedLoad(is_global);
+      cgen_->frame()->Push(&value);
       break;
     }
     default:
       UNREACHABLE();
   }
+
+  if (!persist_after_get_) {
+    cgen_->UnloadReference(this);
+  }
 }
@@ -6629,6 +6688,9 @@
     ASSERT(slot->type() == Slot::LOCAL);
     cgen_->frame()->TakeLocalAt(slot->index());
   }
+
+  ASSERT(persist_after_get_);
+  // Do not unload the reference, because it is used in SetValue.
 }
@@ -6758,6 +6820,7 @@
     default:
       UNREACHABLE();
   }
+  cgen_->UnloadReference(this);
 }