Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(10)

Unified Diff: src/codegen-ia32.cc

Issue 21446: Experimental: allow the register allocator to work for for...in... (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/toiger/
Patch Set: Created 11 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
Index: src/codegen-ia32.cc
===================================================================
--- src/codegen-ia32.cc (revision 1291)
+++ src/codegen-ia32.cc (working copy)
@@ -1488,6 +1488,7 @@
void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
for (int i = 0; has_valid_frame() && i < statements->length(); i++) {
Visit(statements->at(i));
}
@@ -1496,6 +1497,7 @@
void CodeGenerator::VisitBlock(Block* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ Block");
CodeForStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
@@ -1508,6 +1510,7 @@
void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+ ASSERT(HasValidEntryRegisters());
frame_->Push(pairs);
// Duplicate the context register.
@@ -1521,6 +1524,7 @@
void CodeGenerator::VisitDeclaration(Declaration* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ Declaration");
CodeForStatementPosition(node);
Variable* var = node->proxy()->var();
@@ -1585,6 +1589,7 @@
void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ ExpressionStatement");
CodeForStatementPosition(node);
Expression* expression = node->expression();
@@ -1597,6 +1602,7 @@
void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "// EmptyStatement");
CodeForStatementPosition(node);
// nothing to do
@@ -1605,6 +1611,7 @@
void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ IfStatement");
// Generate different code depending on which parts of the if statement
// are present or not.
@@ -1708,6 +1715,7 @@
void CodeGenerator::VisitContinueStatement(ContinueStatement* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ ContinueStatement");
CodeForStatementPosition(node);
CleanStack(break_stack_height_ - node->target()->break_stack_height());
@@ -1717,6 +1725,7 @@
void CodeGenerator::VisitBreakStatement(BreakStatement* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ BreakStatement");
CodeForStatementPosition(node);
CleanStack(break_stack_height_ - node->target()->break_stack_height());
@@ -1726,6 +1735,7 @@
void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ ReturnStatement");
if (function_return_is_shadowed_) {
@@ -1794,6 +1804,7 @@
void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ WithEnterStatement");
CodeForStatementPosition(node);
Load(node->expression());
@@ -1823,6 +1834,7 @@
void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ WithExitStatement");
CodeForStatementPosition(node);
// Pop context.
@@ -1959,6 +1971,7 @@
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ SwitchStatement");
CodeForStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
@@ -2100,6 +2113,7 @@
void CodeGenerator::VisitLoopStatement(LoopStatement* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ LoopStatement");
CodeForStatementPosition(node);
node->set_break_stack_height(break_stack_height_);
@@ -2314,7 +2328,7 @@
void CodeGenerator::VisitForInStatement(ForInStatement* node) {
ASSERT(!in_spilled_code());
- VirtualFrame::SpilledScope spilled_scope(this);
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ ForInStatement");
CodeForStatementPosition(node);
@@ -2327,172 +2341,233 @@
node->break_target()->Initialize(this);
node->continue_target()->Initialize(this);
+ // Stack layout in body (from the top of the frame downward).
+ // [iteration counter (smi)] <- element 0
+ // [length of array (smi)] <- element 1
+ // [FixedArray] <- element 2
+ // [Map or 0] <- element 3
+ // [Object being iterated] <- element 4
+
JumpTarget primitive(this);
JumpTarget jsobject(this);
JumpTarget fixed_array(this);
- JumpTarget entry(this, JumpTarget::BIDIRECTIONAL);
+ JumpTarget condition(this, JumpTarget::BIDIRECTIONAL);
JumpTarget end_del_check(this);
JumpTarget exit(this);
- // Get the object to enumerate over (converted to JSObject).
- LoadAndSpill(node->enumerable());
+ // Get the object to iterate over.
+ Load(node->enumerable());
// Both SpiderMonkey and kjs ignore null and undefined in contrast
// to the specification. 12.6.4 mandates a call to ToObject.
- frame_->EmitPop(eax);
-
- // eax: value to be iterated over
- __ cmp(eax, Factory::undefined_value());
+ Result object = frame_->Pop();
+ object.ToRegister();
+ __ cmp(object.reg(), Factory::undefined_value());
exit.Branch(equal);
- __ cmp(eax, Factory::null_value());
+ __ cmp(object.reg(), Factory::null_value());
exit.Branch(equal);
- // Stack layout in body:
- // [iteration counter (smi)] <- slot 0
- // [length of array] <- slot 1
- // [FixedArray] <- slot 2
- // [Map or 0] <- slot 3
- // [Object] <- slot 4
-
// Check if enumerable is already a JSObject
- // eax: value to be iterated over
- __ test(eax, Immediate(kSmiTagMask));
- primitive.Branch(zero);
- __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- jsobject.Branch(above_equal);
+ __ test(object.reg(), Immediate(kSmiTagMask));
+ primitive.Branch(zero, &object);
- primitive.Bind();
- frame_->EmitPush(eax);
- frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
- // function call returns the value in eax, which is where we want it below
+ // Use a temporary register to check the instance type.
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), FieldOperand(object.reg(), HeapObject::kMapOffset));
+ __ movzx_b(temp.reg(), FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+ __ cmp(temp.reg(), FIRST_JS_OBJECT_TYPE);
+ temp.Unuse();
+ jsobject.Branch(above_equal, &object);
- jsobject.Bind();
+ primitive.Bind(&object);
+ // Live results:
+ // object: the object being iterated over
+ frame_->Push(&object);
+ object = frame_->InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION, 1);
+
+ jsobject.Bind(&object);
+ // Live results:
+ // object: the object being iterated over
// Get the set of properties (as a FixedArray or Map).
- // eax: value to be iterated over
- frame_->EmitPush(eax); // push the object being iterated over (slot 4)
+ frame_->Push(&object); // Push the object being iterated over (slot 4).
- frame_->EmitPush(eax); // push the Object (slot 4) for the runtime call
- frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+ frame_->Dup(); // Duplicate it for the runtime call.
+ Result properties = frame_->CallRuntime(Runtime::kGetPropertyNamesFast, 1);
- // If we got a Map, we can do a fast modification check.
- // Otherwise, we got a FixedArray, and we have to do a slow check.
- // eax: map or fixed array (result from call to
- // Runtime::kGetPropertyNamesFast)
- __ mov(edx, Operand(eax));
- __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(ecx, Factory::meta_map());
- fixed_array.Branch(not_equal);
+ // If we got a Map, we can do a fast modification check. Otherwise,
+ // we got a FixedArray, and we have to do a slow check. Use a fresh
+ // temporary register to check the map.
+ temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), FieldOperand(properties.reg(), HeapObject::kMapOffset));
+ __ cmp(temp.reg(), Factory::meta_map());
+ // Do not unuse the temp register, we will need one after the branch.
+ fixed_array.Branch(not_equal, &properties);
- // Get enum cache
- // eax: map (result from call to Runtime::kGetPropertyNamesFast)
- __ mov(ecx, Operand(eax));
- __ mov(ecx, FieldOperand(ecx, Map::kInstanceDescriptorsOffset));
+ // Get enum cache. Properties is a map (from the call to
+ // Runtime::kGetPropertyNamesFast) and temp is a live register
+ // reference (distinct from properties).
+ __ mov(temp.reg(),
+ FieldOperand(properties.reg(), Map::kInstanceDescriptorsOffset));
// Get the bridge array held in the enumeration index field.
- __ mov(ecx, FieldOperand(ecx, DescriptorArray::kEnumerationIndexOffset));
+ __ mov(temp.reg(),
+ FieldOperand(temp.reg(), DescriptorArray::kEnumerationIndexOffset));
// Get the cache from the bridge array.
- __ mov(edx, FieldOperand(ecx, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ mov(temp.reg(),
+ FieldOperand(temp.reg(),
+ DescriptorArray::kEnumCacheBridgeCacheOffset));
- frame_->EmitPush(eax); // <- slot 3
- frame_->EmitPush(edx); // <- slot 2
- __ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
- __ shl(eax, kSmiTagSize);
- frame_->EmitPush(eax); // <- slot 1
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
- entry.Jump();
+ frame_->Push(&properties); // <- slot 3
+ // Duplicate the enum cache so we can fetch the length from it.
+ Result cache = temp;
+ frame_->Push(&temp); // <- slot 2
- fixed_array.Bind();
- // eax: fixed array (result from call to Runtime::kGetPropertyNamesFast)
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 3
- frame_->EmitPush(eax); // <- slot 2
+ // Use a fresh temp register to fetch the length.
+ temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(), FieldOperand(cache.reg(), FixedArray::kLengthOffset));
+ cache.Unuse();
+ __ shl(temp.reg(), kSmiTagSize);
+ frame_->Push(&temp); // <- slot 1
+ frame_->Push(Smi::FromInt(0)); // <- slot 0
+ condition.Jump();
- // Push the length of the array and the initial index onto the stack.
- __ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- __ shl(eax, kSmiTagSize);
- frame_->EmitPush(eax); // <- slot 1
- frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
+ fixed_array.Bind(&properties);
+ // Live results:
+ // properties: the set of properties as a fixed array (from the call
+ // to Runtime::kGetPropertyNamesFast).
+ frame_->Push(Smi::FromInt(0)); // <- slot 3
+ // Use a fresh temporary register to fetch the length from the
+ // properties array.
+ temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ properties.ToRegister();
+ __ mov(temp.reg(),
+ FieldOperand(properties.reg(), FixedArray::kLengthOffset));
+ __ shl(temp.reg(), kSmiTagSize);
- // Condition.
- entry.Bind();
- __ mov(eax, frame_->ElementAt(0)); // load the current count
- __ cmp(eax, frame_->ElementAt(1)); // compare to the array length
+ frame_->Push(&properties); // <- slot 2
+ frame_->Push(&temp); // <- slot 1
+ frame_->Push(Smi::FromInt(0)); // <- slot 0
+
+ condition.Bind();
+ // Live results: none.
+ // Compare the current count (frame element 0) to the array length
+ // (frame element 1).
+ frame_->Dup();
+ Result count = frame_->Pop();
+
+ frame_->PushElementAt(1);
+ Result length = frame_->Pop();
+
+ count.ToRegister();
+ if (length.is_register()) {
+ // Count and length can be the same register, which is OK here.
+ __ cmp(count.reg(), Operand(length.reg()));
+ } else {
+ ASSERT(length.is_constant());
+ __ cmp(count.reg(), length.handle());
+ }
+ length.Unuse();
node->break_target()->Branch(above_equal);
// Get the i'th entry of the array.
- __ mov(edx, frame_->ElementAt(2));
- __ mov(ebx, Operand(edx, eax, times_2,
- FixedArray::kHeaderSize - kHeapObjectTag));
+ frame_->PushElementAt(2);
+ Result entry = frame_->Pop();
+ entry.ToRegister();
+ frame_->Spill(entry.reg());
+ // Entry and count can be the same register. That's OK here because
+ // they will both be absent from the frame after spilling entry and
+ // count is not needed after being read (before entry is written).
+ __ mov(entry.reg(), Operand(entry.reg(),
+ count.reg(),
+ times_2,
+ FixedArray::kHeaderSize - kHeapObjectTag));
+ count.Unuse();
- // Get the expected map from the stack or a zero map in the
- // permanent slow case eax: current iteration count ebx: i'th entry
- // of the enum cache
- __ mov(edx, frame_->ElementAt(3));
// Check if the expected map still matches that of the enumerable.
// If not, we have to filter the key.
- // eax: current iteration count
- // ebx: i'th entry of the enum cache
- // edx: expected map value
- __ mov(ecx, frame_->ElementAt(4));
- __ mov(ecx, FieldOperand(ecx, HeapObject::kMapOffset));
- __ cmp(ecx, Operand(edx));
- end_del_check.Branch(equal);
+ // Begin with the enumerable and fetch its map.
+ frame_->PushElementAt(4);
+ temp = frame_->Pop();
+ temp.ToRegister();
+ // We will write to temp. If it is the same register as entry we
+ // will allocate a fresh register. Otherwise we can just spill it
+ // from the frame.
+ if (temp.reg().is(entry.reg())) {
William Hesse 2009/02/18 10:33:28 This cannot happen because entry.reg() is spilled
+ Result fresh = allocator_->Allocate();
+ ASSERT(fresh.is_valid());
+ __ mov(fresh.reg(), temp.reg());
+ temp = fresh;
+ } else {
+ frame_->Spill(temp.reg());
+ }
+ __ mov(temp.reg(), FieldOperand(temp.reg(), HeapObject::kMapOffset));
+
+ // Get the expected map from the stack or a zero map in the
+ // permanent slow case.
+ frame_->PushElementAt(3);
+ Result expected_map = frame_->Pop();
+ if (expected_map.is_register()) {
+ __ cmp(temp.reg(), Operand(expected_map.reg()));
+ } else {
+ ASSERT(expected_map.is_constant());
+ __ cmp(temp.reg(), expected_map.handle());
+ }
+ expected_map.Unuse();
+ temp.Unuse();
+ end_del_check.Branch(equal, &entry);
+
// Convert the entry to a string (or null if it isn't a property anymore).
- frame_->EmitPush(frame_->ElementAt(4)); // push enumerable
- frame_->EmitPush(ebx); // push entry
- frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
- __ mov(ebx, Operand(eax));
+ frame_->PushElementAt(4); // Duplicate the enumerable.
+ frame_->Push(&entry); // Push the entry.
+ entry = frame_->InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION, 2);
// If the property has been removed while iterating, we just skip it.
- __ cmp(ebx, Factory::null_value());
+ __ cmp(entry.reg(), Factory::null_value());
node->continue_target()->Branch(equal);
- end_del_check.Bind();
- // Store the entry in the 'each' expression and take another spin in the
- // loop. edx: i'th entry of the enum cache (or string thereof)
- frame_->EmitPush(ebx);
+ end_del_check.Bind(&entry);
+ // Live results:
+ // entry: the i'th entry of the enum cache.
+ // Store the entry in the 'each' expression and take another spin in
+ // the loop.
+ //
+ // Push the entry before loading the reference, because loading a
+ // reference can visit code (requiring all non-reserved registers to
+ // be held by the frame).
+ frame_->Push(&entry);
+ bool entry_was_duplicated = false;
{ Reference each(this, node->each());
- // Loading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
if (!each.is_illegal()) {
- if (each.size() > 0) {
- frame_->EmitPush(frame_->ElementAt(each.size()));
- }
- // If the reference was to a slot we rely on the convenient property
- // that it doesn't matter whether a value (eg, ebx pushed above) is
- // right on top of or right underneath a zero-sized reference.
+ // Duplicate the entry and store to the reference.
+ frame_->PushElementAt(each.size());
+ entry_was_duplicated = true;
each.SetValue(NOT_CONST_INIT);
- if (each.size() > 0) {
- // It's safe to pop the value lying on top of the reference before
- // unloading the reference itself (which preserves the top of stack,
- // ie, now the topmost value of the non-zero sized reference), since
- // we will discard the top of stack after unloading the reference
- // anyway.
- frame_->Drop();
- }
}
}
- // Unloading a reference may leave the frame in an unspilled state.
- frame_->SpillAll();
+ // Discard the i'th entry (and its duplicate if it was duplicated).
+ frame_->Drop(entry_was_duplicated ? 2 : 1);
- // Discard the i'th entry pushed above or else the remainder of the
- // reference, whichever is currently on top of the stack.
- frame_->Drop();
-
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
+ Visit(node->body());
// Next.
node->continue_target()->Bind();
- frame_->EmitPop(eax);
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
- frame_->EmitPush(eax);
- entry.Jump();
+ // Live results: none.
+ count = frame_->Pop();
+ count.ToRegister();
+ frame_->Spill(count.reg());
+ __ add(Operand(count.reg()), Immediate(Smi::FromInt(1)));
+ frame_->Push(&count);
+ condition.Jump();
// Cleanup.
node->break_target()->Bind();
+ // Live results: none.
frame_->Drop(5);
// Exit.
@@ -2504,6 +2579,7 @@
void CodeGenerator::VisitTryCatch(TryCatch* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ TryCatch");
CodeForStatementPosition(node);
@@ -2633,6 +2709,7 @@
void CodeGenerator::VisitTryFinally(TryFinally* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ TryFinally");
CodeForStatementPosition(node);
@@ -2802,6 +2879,7 @@
void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ DebuggerStatement");
CodeForStatementPosition(node);
// Spill everything, even constants, to the frame.
@@ -2825,6 +2903,7 @@
void CodeGenerator::VisitFunctionLiteral(FunctionLiteral* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ FunctionLiteral");
// Build the function boilerplate and instantiate it.
@@ -2837,12 +2916,14 @@
void CodeGenerator::VisitFunctionBoilerplateLiteral(
FunctionBoilerplateLiteral* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ FunctionBoilerplateLiteral");
InstantiateBoilerplate(node->boilerplate());
}
void CodeGenerator::VisitConditional(Conditional* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ Conditional");
JumpTarget then(this);
JumpTarget else_(this);
@@ -3024,12 +3105,14 @@
void CodeGenerator::VisitSlot(Slot* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ Slot");
LoadFromSlot(node, typeof_state());
}
void CodeGenerator::VisitVariableProxy(VariableProxy* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ VariableProxy");
Variable* var = node->var();
Expression* expr = var->rewrite();
@@ -3044,6 +3127,7 @@
void CodeGenerator::VisitLiteral(Literal* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ Literal");
if (node->handle()->IsSmi() && !IsInlineSmi(node)) {
// To prevent long attacker-controlled byte sequences in code, larger
@@ -3096,6 +3180,7 @@
void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ RegExp Literal");
DeferredRegExpLiteral* deferred = new DeferredRegExpLiteral(this, node);
@@ -3171,6 +3256,7 @@
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ ObjectLiteral");
DeferredObjectLiteral* deferred = new DeferredObjectLiteral(this, node);
@@ -3272,6 +3358,7 @@
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ ArrayLiteral");
// Call the runtime to create the array literal.
@@ -3330,6 +3417,7 @@
void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
ASSERT(!in_spilled_code());
+ ASSERT(HasValidEntryRegisters());
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
Comment cmnt(masm_, "[ CatchExtensionObject");
@@ -3349,6 +3437,7 @@
void CodeGenerator::VisitAssignment(Assignment* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ Assignment");
CodeForStatementPosition(node);
@@ -3407,6 +3496,7 @@
void CodeGenerator::VisitThrow(Throw* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ Throw");
CodeForStatementPosition(node);
@@ -3417,6 +3507,7 @@
void CodeGenerator::VisitProperty(Property* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ Property");
Reference property(this, node);
property.GetValue(typeof_state());
@@ -3424,6 +3515,7 @@
void CodeGenerator::VisitCall(Call* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ Call");
ZoneList<Expression*>* args = node->arguments();
@@ -3563,6 +3655,7 @@
void CodeGenerator::VisitCallNew(CallNew* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ CallNew");
CodeForStatementPosition(node);
@@ -3615,6 +3708,7 @@
void CodeGenerator::VisitCallEval(CallEval* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ CallEval");
// In a call to eval, we first call %ResolvePossiblyDirectEval to resolve
@@ -4002,6 +4096,7 @@
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
+ ASSERT(HasValidEntryRegisters());
if (CheckForInlineRuntimeCall(node)) {
return;
}
@@ -4047,6 +4142,7 @@
void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
+ ASSERT(HasValidEntryRegisters());
// Note that because of NOT and an optimization in comparison of a typeof
// expression to a literal string, this function can fail to leave a value
// on top of the frame or in the cc register.
@@ -4279,6 +4375,7 @@
void CodeGenerator::VisitCountOperation(CountOperation* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ CountOperation");
bool is_postfix = node->is_postfix();
@@ -4375,6 +4472,7 @@
void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
+ ASSERT(HasValidEntryRegisters());
// Note that due to an optimization in comparison operations (typeof
// compared to a string literal), we can evaluate a binary expression such
// as AND or OR and not leave a value on the frame or in the cc register.
@@ -4517,6 +4615,7 @@
void CodeGenerator::VisitThisFunction(ThisFunction* node) {
+ ASSERT(HasValidEntryRegisters());
frame_->PushFunction();
}
@@ -4534,6 +4633,7 @@
void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
+ ASSERT(HasValidEntryRegisters());
Comment cmnt(masm_, "[ CompareOperation");
// Get the expressions from the node.
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698