Chromium Code Reviews

Unified Diff: src/codegen-ia32.cc

Issue 6075: Move code for code generator static member functions, code generation... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 12 years, 3 months ago
Index: src/codegen-ia32.cc
===================================================================
--- src/codegen-ia32.cc (revision 408)
+++ src/codegen-ia32.cc (working copy)
@@ -732,69 +732,6 @@
}
-#undef __
-#define __ masm->
-
-Operand Ia32CodeGenerator::SlotOperand(CodeGenerator* cgen,
- Slot* slot,
- Register tmp) {
- // Currently, this assertion will fail if we try to assign to
- // a constant variable that is constant because it is read-only
- // (such as the variable referring to a named function expression).
- // We need to implement assignments to read-only variables.
- // Ideally, we should do this during AST generation (by converting
- // such assignments into expression statements); however, in general
- // we may not be able to make the decision until past AST generation,
- // that is, when the entire program is known.
- ASSERT(slot != NULL);
- int index = slot->index();
- switch (slot->type()) {
- case Slot::PARAMETER: return ParameterOperand(cgen, index);
-
- case Slot::LOCAL: {
- ASSERT(0 <= index && index < cgen->scope()->num_stack_slots());
- const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
- return Operand(ebp, kLocal0Offset - index * kPointerSize);
- }
-
- case Slot::CONTEXT: {
- MacroAssembler* masm = cgen->masm();
- // Follow the context chain if necessary.
- ASSERT(!tmp.is(esi)); // do not overwrite context register
- Register context = esi;
- int chain_length =
- cgen->scope()->ContextChainLength(slot->var()->scope());
- for (int i = chain_length; i-- > 0;) {
- // Load the closure.
- // (All contexts, even 'with' contexts, have a closure,
- // and it is the same for all contexts inside a function.
- // There is no need to go to the function context first.)
- __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
- context = tmp;
- }
- // We may have a 'with' context now. Get the function context.
- // (In fact this mov may never be needed, since the scope analysis
- // may not permit a direct context access in this case and thus we are
- // always at a function context. However it is safe to dereference be-
- // cause the function context of a function context is itself. Before
- // deleting this mov we should try to create a counter-example first,
- // though...)
- __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
- return ContextOperand(tmp, index);
- }
-
- default:
- UNREACHABLE();
- return Operand(eax);
- }
-}
-
-
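Note: the Slot::CONTEXT case above walks a statically known number of links; each step loads the current context's closure and then that closure's function context, so 'with' contexts are skipped for free. A minimal standalone C++ sketch of the same walk (Context and Closure here are invented stand-ins, not V8's real object layout):

struct Context;
struct Closure { Context* context; };   // analogue of JSFunction::kContextOffset
struct Context {
  Closure* closure;                     // analogue of Context::CLOSURE_INDEX
  Context* fcontext;                    // analogue of Context::FCONTEXT_INDEX
};

Context* ResolveContext(Context* current, int chain_length) {
  for (int i = chain_length; i-- > 0;) {
    // Load the closure, then its (function) context, exactly as the
    // emitted mov/mov pair does.
    current = current->closure->context;
  }
  // Final dereference; safe even when already at a function context,
  // because the function context of a function context is itself.
  return current->fcontext;
}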
-#undef __
-#define __ masm_->
-
// Loads a value on TOS. If it is a boolean value, the result may have been
// (partially) translated into branches, or it may have set the condition code
// register. If force_cc is set, the value is forced to set the condition code
@@ -959,115 +896,6 @@
}
-#undef __
-#define __ masm->
-
-void Property::GenerateStoreCode(CodeGenerator* cgen,
- Reference* ref,
- InitState init_state) {
- MacroAssembler* masm = cgen->masm();
- Comment cmnt(masm, "[ Store to Property");
- __ RecordPosition(position());
- Ia32CodeGenerator::SetReferenceProperty(cgen, ref, key());
-}
-
-
-void VariableProxy::GenerateStoreCode(CodeGenerator* cgen,
- Reference* ref,
- InitState init_state) {
- MacroAssembler* masm = cgen->masm();
- Comment cmnt(masm, "[ Store to VariableProxy");
- Variable* node = var();
-
- Expression* expr = node->rewrite();
- if (expr != NULL) {
- expr->GenerateStoreCode(cgen, ref, init_state);
- } else {
- ASSERT(node->is_global());
- if (node->AsProperty() != NULL) {
- __ RecordPosition(node->AsProperty()->position());
- }
- Expression* key = new Literal(node->name());
- Ia32CodeGenerator::SetReferenceProperty(cgen, ref, key);
- }
-}
-
-
-void Slot::GenerateStoreCode(CodeGenerator* cgen,
- Reference* ref,
- InitState init_state) {
- MacroAssembler* masm = cgen->masm();
- Comment cmnt(masm, "[ Store to Slot");
-
- if (type() == Slot::LOOKUP) {
- ASSERT(var()->mode() == Variable::DYNAMIC);
-
- // For now, just do a runtime call.
- __ push(esi);
- __ push(Immediate(var()->name()));
-
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize const
- // properties (introduced via eval("const foo = (some expr);")). Also,
- // uses the current function context instead of the top context.
- //
- // Note that we must declare the foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the same
- // time, because the const declaration may be at the end of the eval
- // code (sigh...) and the const variable may have been used before
- // (where its value is 'undefined'). Thus, we can only do the
- // initialization when we actually encounter the expression and when
- // the expression operands are defined and valid, and thus we need the
- // split into 2 operations: declaration of the context slot followed
- // by initialization.
- __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- __ CallRuntime(Runtime::kStoreContextSlot, 3);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling assignment expressions.
- __ push(eax);
-
- } else {
- ASSERT(var()->mode() != Variable::DYNAMIC);
-
- Label exit;
- if (init_state == CONST_INIT) {
- ASSERT(var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is executed,
- // the code is identical to a normal store (see below).
- Comment cmnt(masm, "[ Init const");
- __ mov(eax, Ia32CodeGenerator::SlotOperand(cgen, this, ecx));
- __ cmp(eax, Factory::the_hole_value());
- __ j(not_equal, &exit);
- }
-
- // We must execute the store.
- // Storing a variable must keep the (new) value on the stack. This is
- // necessary for compiling assignment expressions. ecx may be loaded
- // with context; used below in RecordWrite.
- //
- // Note: We will reach here even with node->var()->mode() ==
- // Variable::CONST because of const declarations which will initialize
- // consts to 'the hole' value and by doing so, end up calling this
- // code.
- __ pop(eax);
- __ mov(Ia32CodeGenerator::SlotOperand(cgen, this, ecx), eax);
- __ push(eax); // RecordWrite may destroy the value in eax.
- if (type() == Slot::CONTEXT) {
- // ecx is loaded with context when calling SlotOperand above.
- int offset = FixedArray::kHeaderSize + index() * kPointerSize;
- __ RecordWrite(ecx, offset, eax, ebx);
- }
- // If we definitely did not jump over the assignment, we do not need to
- // bind the exit label. Doing so can defeat peephole optimization.
- if (init_state == CONST_INIT) __ bind(&exit);
- }
-}
-
-
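Note: the CONST_INIT branch works because a const slot is pre-filled with the hole sentinel at declaration time, so only the first store may go through. A minimal sketch of that guard, with Value and the hole flag as stand-ins for V8's tagged values:

struct Value { bool is_hole; double payload; };

void InitConst(Value* slot, Value v) {
  // cmp(eax, Factory::the_hole_value()) / j(not_equal, &exit):
  // a later re-initialization is skipped...
  if (!slot->is_hole) return;
  *slot = v;  // ...otherwise the store is identical to a normal store
}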
class ToBooleanStub: public CodeStub {
public:
ToBooleanStub() { }
@@ -1090,66 +918,6 @@
};
-// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
-void ToBooleanStub::Generate(MacroAssembler* masm) {
- Label false_result, true_result, not_string;
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // 'null' => false.
- __ cmp(eax, Factory::null_value());
- __ j(equal, &false_result);
-
- // Get the map and type of the heap object.
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
-
- // Undetectable => false.
- __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
- __ and_(ebx, 1 << Map::kIsUndetectable);
- __ j(not_zero, &false_result);
-
- // JavaScript object => true.
- __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
- __ j(above_equal, &true_result);
-
- // String value => false iff empty.
- __ cmp(ecx, FIRST_NONSTRING_TYPE);
- __ j(above_equal, &not_string);
- __ and_(ecx, kStringSizeMask);
- __ cmp(ecx, kShortStringTag);
- __ j(not_equal, &true_result); // Empty string is always short.
- __ mov(edx, FieldOperand(eax, String::kLengthOffset));
- __ shr(edx, String::kShortLengthShift);
- __ j(zero, &false_result);
- __ jmp(&true_result);
-
- __ bind(&not_string);
- // HeapNumber => false iff +0, -0, or NaN.
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &true_result);
- __ fldz();
- __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ fucompp();
- __ push(eax);
- __ fnstsw_ax();
- __ sahf();
- __ pop(eax);
- __ j(zero, &false_result);
- // Fall through to |true_result|.
-
- // Return 1/0 for true/false in eax.
- __ bind(&true_result);
- __ mov(eax, 1);
- __ ret(1 * kPointerSize);
- __ bind(&false_result);
- __ mov(eax, 0);
- __ ret(1 * kPointerSize);
-}
-
-
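Note: restated in plain C++, the stub's decision procedure looks as follows. ObjKind and the fields are invented for the sketch, and smis, booleans, and undefined are assumed to be handled inline by the caller, per the NOTE above:

#include <cmath>

enum class ObjKind { Null, JSObject, String, HeapNumber, Other };
struct Obj {
  ObjKind kind;
  bool undetectable;
  int string_length;  // valid when kind == String
  double number;      // valid when kind == HeapNumber
};

bool ToBoolean(const Obj& o) {
  if (o.kind == ObjKind::Null) return false;     // 'null' => false
  if (o.undetectable) return false;              // undetectable => false
  if (o.kind == ObjKind::JSObject) return true;  // JavaScript object => true
  if (o.kind == ObjKind::String) return o.string_length != 0;
  if (o.kind == ObjKind::HeapNumber)             // false iff +0, -0, or NaN
    return !std::isnan(o.number) && o.number != 0.0;
  return true;                                   // everything else => true
}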
-#undef __
-#define __ masm_->
-
// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
// convert it to a boolean in the condition code register or jump to
// 'false_target'/'true_target' as appropriate.
@@ -1234,41 +1002,6 @@
}
-#undef __
-#define __ masm->
-
-void Ia32CodeGenerator::SetReferenceProperty(CodeGenerator* cgen,
- Reference* ref,
- Expression* key) {
- ASSERT(!ref->is_illegal());
- MacroAssembler* masm = cgen->masm();
-
- if (ref->type() == Reference::NAMED) {
- // Compute the name of the property.
- Literal* literal = key->AsLiteral();
- Handle<String> name(String::cast(*literal->handle()));
-
- // Call the appropriate IC code.
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- // TODO(1222589): Make the IC grab the values from the stack.
- __ pop(eax);
- // Set up the name register.
- __ Set(ecx, Immediate(name));
- __ call(ic, RelocInfo::CODE_TARGET);
- } else {
- // Access keyed property.
- ASSERT(ref->type() == Reference::KEYED);
-
- // Call IC code.
- Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
- // TODO(1222589): Make the IC grab the values from the stack.
- __ pop(eax);
- __ call(ic, RelocInfo::CODE_TARGET);
- }
- __ push(eax); // IC call leaves the result in eax; push it onto the stack
-}
-
-
class FloatingPointHelper : public AllStatic {
public:
// Code pattern for loading floating point values. Input values must
@@ -1341,499 +1074,6 @@
}
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
- __ mov(eax, Operand(esp, 1 * kPointerSize)); // Get y.
- __ mov(edx, Operand(esp, 2 * kPointerSize)); // Get x.
-
- // 1. Smi case.
- switch (op_) {
- case Token::ADD: {
- // eax: y.
- // edx: x.
- Label revert;
- __ mov(ecx, Operand(eax));
- __ or_(ecx, Operand(edx)); // ecx = x | y.
- __ add(eax, Operand(edx)); // Add y optimistically.
- // Go slow-path in case of overflow.
- __ j(overflow, &revert, not_taken);
- // Go slow-path in case of non-smi operands.
- ASSERT(kSmiTag == 0); // adjust code below
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &revert, not_taken);
- __ ret(2 * kPointerSize); // Remove all operands.
-
- // Revert optimistic add.
- __ bind(&revert);
- __ sub(eax, Operand(edx));
- break;
- }
- case Token::SUB: {
- // eax: y.
- // edx: x.
- Label revert;
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax)); // ecx = x | y.
- __ sub(edx, Operand(eax)); // Subtract y optimistically.
- // Go slow-path in case of overflow.
- __ j(overflow, &revert, not_taken);
- // Go slow-path in case of non-smi operands.
- ASSERT(kSmiTag == 0); // adjust code below
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &revert, not_taken);
- __ mov(eax, Operand(edx));
- __ ret(2 * kPointerSize); // Remove all operands.
-
- // Revert optimistic sub.
- __ bind(&revert);
- __ add(edx, Operand(eax));
- break;
- }
- case Token::MUL: {
- // eax: y
- // edx: x
- // a) both operands smi and result fits into a smi -> return.
- // b) at least one of operands non-smi -> non_smi_operands.
- // c) result does not fit in a smi -> non_smi_result.
- Label non_smi_operands, non_smi_result;
- // Tag check.
- __ mov(ecx, Operand(edx));
- __ or_(ecx, Operand(eax)); // ecx = x | y.
- ASSERT(kSmiTag == 0); // Adjust code below.
- __ test(ecx, Immediate(kSmiTagMask));
- // Jump if not both smi; check if float numbers.
- __ j(not_zero, &non_smi_operands, not_taken);
-
- // Get copies of operands.
- __ mov(ebx, Operand(eax));
- __ mov(ecx, Operand(edx));
- // If the smi tag is 0 we can just leave the tag on one operand.
- ASSERT(kSmiTag == 0); // adjust code below
- // Remove tag from one of the operands (but keep sign).
- __ sar(ecx, kSmiTagSize);
- // Do multiplication.
- __ imul(eax, Operand(ecx)); // Multiplication of Smis; result in eax.
- // Go slow on overflows.
- __ j(overflow, &non_smi_result, not_taken);
- // ...but operands OK for float arithmetic.
-
- // If the result is +0 we may need to check if the result should
- // really be -0. Welcome to the -0 fan club.
- __ NegativeZeroTest(eax, ebx, edx, ecx, &non_smi_result);
-
- __ ret(2 * kPointerSize);
-
- __ bind(&non_smi_result);
- // TODO(1243132): Do not check float operands here.
- __ bind(&non_smi_operands);
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- break;
- }
- case Token::DIV: {
- // eax: y
- // edx: x
- Label non_smi_operands, non_smi_result, division_by_zero;
- __ mov(ebx, Operand(eax)); // Get y
- __ mov(eax, Operand(edx)); // Get x
-
- __ cdq(); // Sign extend eax into edx:eax.
- // Tag check.
- __ mov(ecx, Operand(ebx));
- __ or_(ecx, Operand(eax)); // ecx = x | y.
- ASSERT(kSmiTag == 0); // Adjust code below.
- __ test(ecx, Immediate(kSmiTagMask));
- // Jump if not both smi; check if float numbers.
- __ j(not_zero, &non_smi_operands, not_taken);
- __ test(ebx, Operand(ebx)); // Check for 0 divisor.
- __ j(zero, &division_by_zero, not_taken);
-
- __ idiv(ebx);
- // Check for the corner case of dividing the most negative smi by -1.
- // (We cannot use the overflow flag, since it is not set by idiv.)
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
- __ cmp(eax, 0x40000000);
- __ j(equal, &non_smi_result);
- // If the result is +0 we may need to check if the result should
- // really be -0. Welcome to the -0 fan club.
- __ NegativeZeroTest(eax, ecx, &non_smi_result); // Use ecx = x | y.
- __ test(edx, Operand(edx));
- // Use floats if there's a remainder.
- __ j(not_zero, &non_smi_result, not_taken);
- __ shl(eax, kSmiTagSize);
- __ ret(2 * kPointerSize); // Remove all operands.
-
- __ bind(&division_by_zero);
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ jmp(&call_runtime); // Division by zero must go through runtime.
-
- __ bind(&non_smi_result);
- // TODO(1243132): Do not check float operands here.
- __ bind(&non_smi_operands);
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- break;
- }
- case Token::MOD: {
- Label slow;
- __ mov(ebx, Operand(eax)); // get y
- __ mov(eax, Operand(edx)); // get x
- __ cdq(); // sign extend eax into edx:eax
- // tag check
- __ mov(ecx, Operand(ebx));
- __ or_(ecx, Operand(eax)); // ecx = x | y;
- ASSERT(kSmiTag == 0); // adjust code below
- __ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
- __ test(ebx, Operand(ebx)); // test for y == 0
- __ j(zero, &slow);
-
- // Fast case: Do integer division and use remainder.
- __ idiv(ebx);
- __ NegativeZeroTest(edx, ecx, &slow); // use ecx = x | y
- __ mov(eax, Operand(edx));
- __ ret(2 * kPointerSize);
-
- // Slow case: Call runtime operator implementation.
- __ bind(&slow);
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- // Fall through to |call_runtime|.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- // Smi-case for bitops should already have been inlined.
- break;
- }
- default: {
- UNREACHABLE();
- }
- }
-
- // 2. Floating point case.
- switch (op_) {
- case Token::ADD:
- case Token::SUB:
- case Token::MUL:
- case Token::DIV: {
- // eax: y
- // edx: x
- FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
- // Fast-case: Both operands are numbers.
- // Allocate a heap number, if needed.
- Label skip_allocation;
- switch (mode_) {
- case OVERWRITE_LEFT:
- __ mov(eax, Operand(edx));
- // Fall through!
- case OVERWRITE_RIGHT:
- // If the argument in eax is already an object, we skip the
- // allocation of a heap number.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- FloatingPointHelper::AllocateHeapNumber(masm,
- &call_runtime,
- ecx,
- edx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- FloatingPointHelper::LoadFloatOperands(masm, ecx);
-
- switch (op_) {
- case Token::ADD: __ faddp(1); break;
- case Token::SUB: __ fsubp(1); break;
- case Token::MUL: __ fmulp(1); break;
- case Token::DIV: __ fdivp(1); break;
- default: UNREACHABLE();
- }
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(2 * kPointerSize);
- }
- case Token::MOD: {
- // For MOD we go directly to runtime in the non-smi case.
- break;
- }
- case Token::BIT_OR:
- case Token::BIT_AND:
- case Token::BIT_XOR:
- case Token::SAR:
- case Token::SHL:
- case Token::SHR: {
- FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
- FloatingPointHelper::LoadFloatOperands(masm, ecx);
-
- Label non_int32_operands, non_smi_result, skip_allocation;
- // Reserve space for converted numbers.
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
-
- // Check if right operand is int32.
- __ fist_s(Operand(esp, 1 * kPointerSize));
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fucompp();
- __ fnstsw_ax();
- __ sahf();
- __ j(not_zero, &non_int32_operands);
- __ j(parity_even, &non_int32_operands);
-
- // Check if left operand is int32.
- __ fist_s(Operand(esp, 0 * kPointerSize));
- __ fild_s(Operand(esp, 0 * kPointerSize));
- __ fucompp();
- __ fnstsw_ax();
- __ sahf();
- __ j(not_zero, &non_int32_operands);
- __ j(parity_even, &non_int32_operands);
-
- // Get int32 operands and perform bitop.
- __ pop(eax);
- __ pop(ecx);
- switch (op_) {
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
- case Token::SAR: __ sar(eax); break;
- case Token::SHL: __ shl(eax); break;
- case Token::SHR: __ shr(eax); break;
- default: UNREACHABLE();
- }
-
- // Check if result is non-negative and fits in a smi.
- __ test(eax, Immediate(0xc0000000));
- __ j(not_zero, &non_smi_result);
-
- // Tag smi result and return.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(eax, Operand(eax, times_2, kSmiTag));
- __ ret(2 * kPointerSize);
-
- // All ops except SHR return a signed int32 that we load in a HeapNumber.
- if (op_ != Token::SHR) {
- __ bind(&non_smi_result);
- // Allocate a heap number if needed.
- __ mov(ebx, Operand(eax)); // ebx: result
- switch (mode_) {
- case OVERWRITE_LEFT:
- case OVERWRITE_RIGHT:
- // If the operand was an object, we skip the
- // allocation of a heap number.
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
- 1 * kPointerSize : 2 * kPointerSize));
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &skip_allocation, not_taken);
- // Fall through!
- case NO_OVERWRITE:
- FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
- ecx, edx);
- __ bind(&skip_allocation);
- break;
- default: UNREACHABLE();
- }
- // Store the result in the HeapNumber and return.
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
- __ ret(2 * kPointerSize);
- }
- __ bind(&non_int32_operands);
- // Restore stacks and operands before calling runtime.
- __ ffree(0);
- __ add(Operand(esp), Immediate(2 * kPointerSize));
-
- // SHR should return uint32 - go to runtime for non-smi/negative result.
- if (op_ == Token::SHR) __ bind(&non_smi_result);
- __ mov(eax, Operand(esp, 1 * kPointerSize));
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- break;
- }
- default: UNREACHABLE(); break;
- }
-
- // 3. If all else fails, use the runtime system to get the correct result.
- __ bind(&call_runtime);
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
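Note: the smi fast paths above all lean on kSmiTag == 0: or-ing the two tagged words lets one mask test reject the pair if either operand is a pointer, and ADD/SUB can run directly on the tagged values and be reverted on overflow. A standalone model of the Token::ADD path, assuming the same 31-bit-payload smi encoding (the overflow builtin is GCC/Clang-specific):

#include <cstdint>
#include <optional>

constexpr int kSmiTagSize = 1;
constexpr int32_t kSmiTag = 0;
constexpr int32_t kSmiTagMask = (1 << kSmiTagSize) - 1;

inline int32_t SmiFromInt(int32_t v) { return v << kSmiTagSize; }

std::optional<int32_t> SmiAdd(int32_t x, int32_t y) {
  // One combined tag check: ecx = x | y; test(ecx, kSmiTagMask).
  // (The stub adds first and tests tags after; the result is the same.)
  if (((x | y) & kSmiTagMask) != kSmiTag) return std::nullopt;
  // Optimistic add on the tagged values; the two zero tag bits add to zero.
  int32_t result;
  if (__builtin_add_overflow(x, y, &result)) return std::nullopt;  // revert path
  return result;  // already a correctly tagged smi
}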
-void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
- Label* need_gc,
- Register scratch1,
- Register scratch2) {
- ExternalReference allocation_top =
- ExternalReference::new_space_allocation_top_address();
- ExternalReference allocation_limit =
- ExternalReference::new_space_allocation_limit_address();
- __ mov(Operand(scratch1), Immediate(allocation_top));
- __ mov(eax, Operand(scratch1, 0));
- __ lea(scratch2, Operand(eax, HeapNumber::kSize)); // scratch2: new top
- __ cmp(scratch2, Operand::StaticVariable(allocation_limit));
- __ j(above, need_gc, not_taken);
-
- __ mov(Operand(scratch1, 0), scratch2); // store new top
- __ mov(Operand(eax, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- // Tag old top and use as result.
- __ add(Operand(eax), Immediate(kHeapObjectTag));
-}
-
-
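Note: AllocateHeapNumber is a plain bump-pointer allocation against a limit. A C++ model of the same shape, with globals standing in for the new-space external references and the object size assumed to be a 4-byte map word plus an 8-byte double:

#include <cstdint>

uintptr_t allocation_top = 0;    // stand-in for new_space_allocation_top_address
uintptr_t allocation_limit = 0;  // stand-in for new_space_allocation_limit_address

constexpr uintptr_t kHeapNumberSize = 12;  // assumed: map word + double payload
constexpr uintptr_t kHeapObjectTag = 1;

// Returns a tagged pointer, or 0 where the stub would jump to need_gc.
uintptr_t AllocateHeapNumber() {
  uintptr_t result = allocation_top;
  uintptr_t new_top = result + kHeapNumberSize;
  if (new_top > allocation_limit) return 0;  // j(above, need_gc)
  allocation_top = new_top;                  // store new top
  // (The stub also stores the heap-number map at offset 0 here.)
  return result + kHeapObjectTag;            // tag old top and use as result
}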
-void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
- Register scratch) {
- Label load_smi_1, load_smi_2, done_load_1, done;
- __ mov(scratch, Operand(esp, 2 * kPointerSize));
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_1, not_taken);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ bind(&done_load_1);
-
- __ mov(scratch, Operand(esp, 1 * kPointerSize));
- __ test(scratch, Immediate(kSmiTagMask));
- __ j(zero, &load_smi_2, not_taken);
- __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
- __ jmp(&done);
-
- __ bind(&load_smi_1);
- __ sar(scratch, kSmiTagSize);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
- __ jmp(&done_load_1);
-
- __ bind(&load_smi_2);
- __ sar(scratch, kSmiTagSize);
- __ push(scratch);
- __ fild_s(Operand(esp, 0));
- __ pop(scratch);
-
- __ bind(&done);
-}
-
-
-void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
- Label* non_float,
- Register scratch) {
- Label test_other, done;
- // Test if both operands are numbers (heap numbers or smis);
- // jump to non_float otherwise.
- __ test(edx, Immediate(kSmiTagMask));
- __ j(zero, &test_other, not_taken); // argument in edx is OK
- __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
- __ j(not_equal, non_float); // argument in edx is not a number -> NaN
-
- __ bind(&test_other);
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done); // argument in eax is OK
- __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
- __ j(not_equal, non_float); // argument in eax is not a number -> NaN
-
- // Fall-through: Both operands are numbers.
- __ bind(&done);
-}
-
-
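Note: taken together, CheckFloatOperands and LoadFloatOperands classify each operand as smi or heap number and produce a double either way. What each stack slot contributes, as a one-function sketch (tag constants as above; not V8 API):

#include <cstdint>

double LoadOperand(int32_t tagged, const double* heap_number_payload) {
  if ((tagged & 1) == 0)                      // smi: untag (sar) and convert,
    return static_cast<double>(tagged >> 1);  //   i.e. fild_s on the raw value
  return *heap_number_payload;                // heap number: fld_d of kValueOffset
}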
-void UnarySubStub::Generate(MacroAssembler* masm) {
- Label undo;
- Label slow;
- Label done;
- Label try_float;
-
- // Check whether the value is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &try_float, not_taken);
-
- // Enter runtime system if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ test(eax, Operand(eax));
- __ j(zero, &slow, not_taken);
-
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- __ mov(edx, Operand(eax));
- __ Set(eax, Immediate(0));
- __ sub(eax, Operand(edx));
- __ j(overflow, &undo, not_taken);
-
- // If result is a smi we are done.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done, taken);
-
- // Restore eax and enter runtime system.
- __ bind(&undo);
- __ mov(eax, Operand(edx));
-
- // Enter runtime system.
- __ bind(&slow);
- __ pop(ecx); // pop return address
- __ push(eax);
- __ push(ecx); // push return address
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
-
- // Try floating point case.
- __ bind(&try_float);
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &slow);
- __ mov(edx, Operand(eax));
- // edx: operand
- FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx);
- // eax: allocated 'empty' number
- __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
- __ fchs();
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-
- __ bind(&done);
-
- __ StubReturn(1);
-}
-
-
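Note: the optimistic '0 - value' in UnarySubStub can fail in exactly one way: negating the most negative representable value overflows, and a zero input must go to the runtime so that -0 comes out right. A minimal model (the overflow builtin is GCC/Clang-specific):

#include <cstdint>
#include <optional>

std::optional<int32_t> SmiNegate(int32_t tagged) {
  if ((tagged & 1) != 0) return std::nullopt;  // not a smi: try the float path
  if (tagged == 0) return std::nullopt;        // 0: runtime, to produce -0
  int32_t result;
  if (__builtin_sub_overflow(0, tagged, &result))
    return std::nullopt;                       // undo and enter the runtime
  return result;                               // still a valid tagged smi
}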
class ArgumentsAccessStub: public CodeStub {
public:
explicit ArgumentsAccessStub(bool is_length) : is_length_(is_length) { }
@@ -1856,81 +1096,6 @@
};
-void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
- // Check that the key is a smi for non-length access.
- Label slow;
- if (!is_length_) {
- __ mov(ebx, Operand(esp, 1 * kPointerSize)); // skip return address
- __ test(ebx, Immediate(kSmiTagMask));
- __ j(not_zero, &slow, not_taken);
- }
-
- // Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
- __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
- __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
- __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
- __ j(equal, &adaptor);
-
- // The displacement is used for skipping the return address on the
- // stack. It is the offset of the last parameter (if any) relative
- // to the frame pointer.
- static const int kDisplacement = 1 * kPointerSize;
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
-
- if (is_length_) {
- // Do nothing. The length is already in register eax.
- } else {
- // Check index against formal parameters count limit passed in
- // through register eax. Use unsigned comparison to get negative
- // check for free.
- __ cmp(ebx, Operand(eax));
- __ j(above_equal, &slow, not_taken);
-
- // Read the argument from the stack.
- __ lea(edx, Operand(ebp, eax, times_2, 0));
- __ neg(ebx);
- __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
- }
-
- // Return the length or the argument.
- __ ret(0);
-
- // Arguments adaptor case: Find the length or the actual argument in
- // the calling frame.
- __ bind(&adaptor);
- if (is_length_) {
- // Read the arguments length from the adaptor frame.
- __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- } else {
- // Check index against actual arguments limit found in the
- // arguments adaptor frame. Use unsigned comparison to get
- // negative check for free.
- __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ cmp(ebx, Operand(ecx));
- __ j(above_equal, &slow, not_taken);
-
- // Read the argument from the stack.
- __ lea(edx, Operand(edx, ecx, times_2, 0));
- __ neg(ebx);
- __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
- }
-
- // Return the length or the argument.
- __ ret(0);
-
- // Slow-case: Handle non-smi or out-of-bounds access to arguments
- // by calling the runtime system.
- if (!is_length_) {
- __ bind(&slow);
- __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
- }
-}
-
-
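Note: the argument read above is pure address arithmetic on smi-tagged values: with a smi being value << 1, scaling a smi by times_2 yields value * 4, i.e. whole pointers on ia32. The address it computes, spelled out (a sketch, not V8 code):

#include <cstdint>

uintptr_t ArgumentAddress(uintptr_t fp, int32_t count_smi, int32_t index_smi) {
  const int kPointerSize = 4;                  // ia32
  const int kDisplacement = 1 * kPointerSize;  // skip the return address
  // lea(edx, Operand(ebp, eax, times_2, 0)): smi * 2 == count * 4.
  uintptr_t base = fp + count_smi * 2;
  // neg(ebx); mov(eax, Operand(edx, ebx, times_2, kDisplacement)):
  // final address is fp + 4 * (count - index) + kDisplacement.
  return base - index_smi * 2 + kDisplacement;
}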
-#undef __
-#define __ masm_->
-
void Ia32CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode) {
Comment cmnt(masm_, "[ BinaryOperation");
@@ -2366,9 +1531,6 @@
}
-#undef __
-#define __ masm->
-
class CompareStub: public CodeStub {
public:
CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
@@ -2399,92 +1561,6 @@
};
-void CompareStub::Generate(MacroAssembler* masm) {
- Label call_builtin, done;
- // Save the return address (and get it off the stack).
- __ pop(ecx);
-
- // Push arguments.
- __ push(eax);
- __ push(edx);
- __ push(ecx);
-
- // Inlined floating point compare.
- // Call builtin if operands are not floating point or smi.
- FloatingPointHelper::CheckFloatOperands(masm, &call_builtin, ebx);
- FloatingPointHelper::LoadFloatOperands(masm, ecx);
- __ FCmp();
-
- // Jump to builtin for NaN.
- __ j(parity_even, &call_builtin, not_taken);
-
- // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
- Label below_lbl, above_lbl;
- // use edx, eax to convert unsigned to signed comparison
- __ j(below, &below_lbl, not_taken);
- __ j(above, &above_lbl, not_taken);
-
- __ xor_(eax, Operand(eax)); // equal
- __ ret(2 * kPointerSize);
-
- __ bind(&below_lbl);
- __ mov(eax, -1);
- __ ret(2 * kPointerSize);
-
- __ bind(&above_lbl);
- __ mov(eax, 1);
- __ ret(2 * kPointerSize); // eax, edx were pushed
-
- __ bind(&call_builtin);
- // must swap argument order
- __ pop(ecx);
- __ pop(edx);
- __ pop(eax);
- __ push(edx);
- __ push(eax);
-
- // Figure out which native to call and setup the arguments.
- Builtins::JavaScript builtin;
- if (cc_ == equal) {
- builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
- } else {
- builtin = Builtins::COMPARE;
- int ncr; // NaN compare result
- if (cc_ == less || cc_ == less_equal) {
- ncr = GREATER;
- } else {
- ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
- ncr = LESS;
- }
- __ push(Immediate(Smi::FromInt(ncr)));
- }
-
- // Restore return address on the stack.
- __ push(ecx);
-
- // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
- // tagged as a small integer.
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
-}
-
-
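Note: the ncr value pushed above is what makes NaN comparisons come out false: for < and <= the builtin is told to report GREATER when either operand is NaN, and for > and >= to report LESS, so the consuming test always fails. The consumer's side, as a sketch:

enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

// x < y given the builtin's result; ncr == GREATER on NaN forces false.
bool LessThan(CompareResult r) { return r == LESS; }
// x <= y; ncr == GREATER on NaN again forces false.
bool LessEqual(CompareResult r) { return r != GREATER; }
// Symmetrically, > and >= are used with ncr == LESS.
bool GreaterThan(CompareResult r) { return r == GREATER; }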
-void StackCheckStub::Generate(MacroAssembler* masm) {
- // Because builtins always remove the receiver from the stack, we
- // have to fake one to avoid underflowing the stack. The receiver
- // must be inserted below the return address on the stack so we
- // temporarily store that in a register.
- __ pop(eax);
- __ push(Immediate(Smi::FromInt(0)));
- __ push(eax);
-
- // Do tail-call to runtime routine.
- __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
-}
-
-
-#undef __
-#define __ masm_->
-
void Ia32CodeGenerator::Comparison(Condition cc, bool strict) {
// Strict only makes sense for equality comparisons.
ASSERT(!strict || cc == equal);
@@ -2593,42 +1669,6 @@
};
-#undef __
-#define __ masm->
-
-void CallFunctionStub::Generate(MacroAssembler* masm) {
- Label slow;
-
- // Get the function to call from the stack.
- // +2 ~ receiver, return address
- __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
-
- // Check that the function really is a JavaScript function.
- __ test(edi, Immediate(kSmiTagMask));
- __ j(zero, &slow, not_taken);
- // Get the map.
- __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
- __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
- __ cmp(ecx, JS_FUNCTION_TYPE);
- __ j(not_equal, &slow, not_taken);
-
- // Fast-case: Just invoke the function.
- ParameterCount actual(argc_);
- __ InvokeFunction(edi, actual, JUMP_FUNCTION);
-
- // Slow-case: Non-function called.
- __ bind(&slow);
- __ Set(eax, Immediate(argc_));
- __ Set(ebx, Immediate(0));
- __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
- Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
- __ jmp(adaptor, RelocInfo::CODE_TARGET);
-}
-
-
-#undef __
-#define __ masm_->
-
// Call the function just below TOS on the stack with the given
// arguments. The receiver is the TOS.
void Ia32CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
@@ -4634,9 +3674,6 @@
};
-#undef __
-#define __ masm->
-
class RevertToNumberStub: public CodeStub {
public:
explicit RevertToNumberStub(bool is_increment)
@@ -4660,22 +3697,6 @@
};
-void RevertToNumberStub::Generate(MacroAssembler* masm) {
- // Revert optimistic increment/decrement.
- if (is_increment_) {
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
- } else {
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
- }
-
- __ pop(ecx);
- __ push(eax);
- __ push(ecx);
- __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
- // Code never returns due to JUMP_FUNCTION.
-}
-
-
class CounterOpStub: public CodeStub {
public:
CounterOpStub(int result_offset, bool is_postfix, bool is_increment)
@@ -4710,37 +3731,6 @@
};
-void CounterOpStub::Generate(MacroAssembler* masm) {
- // Store to the result on the stack (skip return address) before
- // performing the count operation.
- if (is_postfix_) {
- __ mov(Operand(esp, result_offset_ + kPointerSize), eax);
- }
-
- // Revert optimistic increment/decrement but only for prefix
- // counts. For postfix counts it has already been reverted before
- // the conversion to numbers.
- if (!is_postfix_) {
- if (is_increment_) {
- __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
- } else {
- __ add(Operand(eax), Immediate(Smi::FromInt(1)));
- }
- }
-
- // Compute the new value by calling the right JavaScript native.
- __ pop(ecx);
- __ push(eax);
- __ push(ecx);
- Builtins::JavaScript builtin = is_increment_ ? Builtins::INC : Builtins::DEC;
- __ InvokeBuiltin(builtin, JUMP_FUNCTION);
- // Code never returns due to JUMP_FUNCTION.
-}
-
-
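Note: both stubs above revert an optimistic count operation that was applied directly to the tagged value: with kSmiTag == 0 and a one-bit tag, Smi::FromInt(1) is just the constant 2, so increment and revert are a plain add/sub on the tagged word. A sketch:

#include <cstdint>

constexpr int32_t kTaggedOne = 1 << 1;  // Smi::FromInt(1) on ia32

int32_t OptimisticIncrement(int32_t tagged) { return tagged + kTaggedOne; }
int32_t RevertIncrement(int32_t tagged)     { return tagged - kTaggedOne; }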
-#undef __
-#define __ masm_->
-
void CountOperationDeferred::Generate() {
if (is_postfix_) {
RevertToNumberStub to_number_stub(is_increment_);
@@ -5178,6 +4168,980 @@
#undef __
#define __ masm->
+Operand Ia32CodeGenerator::SlotOperand(CodeGenerator* cgen,
+ Slot* slot,
+ Register tmp) {
+ // Currently, this assertion will fail if we try to assign to
+ // a constant variable that is constant because it is read-only
+ // (such as the variable referring to a named function expression).
+ // We need to implement assignments to read-only variables.
+ // Ideally, we should do this during AST generation (by converting
+ // such assignments into expression statements); however, in general
+ // we may not be able to make the decision until past AST generation,
+ // that is, when the entire program is known.
+ ASSERT(slot != NULL);
+ int index = slot->index();
+ switch (slot->type()) {
+ case Slot::PARAMETER: return ParameterOperand(cgen, index);
+
+ case Slot::LOCAL: {
+ ASSERT(0 <= index && index < cgen->scope()->num_stack_slots());
+ const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
+ return Operand(ebp, kLocal0Offset - index * kPointerSize);
+ }
+
+ case Slot::CONTEXT: {
+ MacroAssembler* masm = cgen->masm();
+ // Follow the context chain if necessary.
+ ASSERT(!tmp.is(esi)); // do not overwrite context register
+ Register context = esi;
+ int chain_length =
+ cgen->scope()->ContextChainLength(slot->var()->scope());
+ for (int i = chain_length; i-- > 0;) {
+ // Load the closure.
+ // (All contexts, even 'with' contexts, have a closure,
+ // and it is the same for all contexts inside a function.
+ // There is no need to go to the function context first.)
+ __ mov(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
+ // Load the function context (which is the incoming, outer context).
+ __ mov(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
+ context = tmp;
+ }
+ // We may have a 'with' context now. Get the function context.
+ // (In fact this mov may never be needed, since the scope analysis
+ // may not permit a direct context access in this case and thus we are
+ // always at a function context. However it is safe to dereference be-
+ // cause the function context of a function context is itself. Before
+ // deleting this mov we should try to create a counter-example first,
+ // though...)
+ __ mov(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
+ return ContextOperand(tmp, index);
+ }
+
+ default:
+ UNREACHABLE();
+ return Operand(eax);
+ }
+}
+
+
+void Property::GenerateStoreCode(CodeGenerator* cgen,
+ Reference* ref,
+ InitState init_state) {
+ MacroAssembler* masm = cgen->masm();
+ Comment cmnt(masm, "[ Store to Property");
+ __ RecordPosition(position());
+ Ia32CodeGenerator::SetReferenceProperty(cgen, ref, key());
+}
+
+
+void VariableProxy::GenerateStoreCode(CodeGenerator* cgen,
+ Reference* ref,
+ InitState init_state) {
+ MacroAssembler* masm = cgen->masm();
+ Comment cmnt(masm, "[ Store to VariableProxy");
+ Variable* node = var();
+
+ Expression* expr = node->rewrite();
+ if (expr != NULL) {
+ expr->GenerateStoreCode(cgen, ref, init_state);
+ } else {
+ ASSERT(node->is_global());
+ if (node->AsProperty() != NULL) {
+ __ RecordPosition(node->AsProperty()->position());
+ }
+ Expression* key = new Literal(node->name());
+ Ia32CodeGenerator::SetReferenceProperty(cgen, ref, key);
+ }
+}
+
+
+void Slot::GenerateStoreCode(CodeGenerator* cgen,
+ Reference* ref,
+ InitState init_state) {
+ MacroAssembler* masm = cgen->masm();
+ Comment cmnt(masm, "[ Store to Slot");
+
+ if (type() == Slot::LOOKUP) {
+ ASSERT(var()->mode() == Variable::DYNAMIC);
+
+ // For now, just do a runtime call.
+ __ push(esi);
+ __ push(Immediate(var()->name()));
+
+ if (init_state == CONST_INIT) {
+ // Same as the case for a normal store, but ignores attribute
+ // (e.g. READ_ONLY) of context slot so that we can initialize const
+ // properties (introduced via eval("const foo = (some expr);")). Also,
+ // uses the current function context instead of the top context.
+ //
+ // Note that we must declare the foo upon entry of eval(), via a
+ // context slot declaration, but we cannot initialize it at the same
+ // time, because the const declaration may be at the end of the eval
+ // code (sigh...) and the const variable may have been used before
+ // (where its value is 'undefined'). Thus, we can only do the
+ // initialization when we actually encounter the expression and when
+ // the expression operands are defined and valid, and thus we need the
+ // split into 2 operations: declaration of the context slot followed
+ // by initialization.
+ __ CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ __ CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
+ // Storing a variable must keep the (new) value on the expression
+ // stack. This is necessary for compiling assignment expressions.
+ __ push(eax);
+
+ } else {
+ ASSERT(var()->mode() != Variable::DYNAMIC);
+
+ Label exit;
+ if (init_state == CONST_INIT) {
+ ASSERT(var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is executed,
+ // the code is identical to a normal store (see below).
+ Comment cmnt(masm, "[ Init const");
+ __ mov(eax, Ia32CodeGenerator::SlotOperand(cgen, this, ecx));
+ __ cmp(eax, Factory::the_hole_value());
+ __ j(not_equal, &exit);
+ }
+
+ // We must execute the store.
+ // Storing a variable must keep the (new) value on the stack. This is
+ // necessary for compiling assignment expressions. ecx may be loaded
+ // with context; used below in RecordWrite.
+ //
+ // Note: We will reach here even with node->var()->mode() ==
+ // Variable::CONST because of const declarations which will initialize
+ // consts to 'the hole' value and by doing so, end up calling this
+ // code.
+ __ pop(eax);
+ __ mov(Ia32CodeGenerator::SlotOperand(cgen, this, ecx), eax);
+ __ push(eax); // RecordWrite may destroy the value in eax.
+ if (type() == Slot::CONTEXT) {
+ // ecx is loaded with context when calling SlotOperand above.
+ int offset = FixedArray::kHeaderSize + index() * kPointerSize;
+ __ RecordWrite(ecx, offset, eax, ebx);
+ }
+ // If we definitely did not jump over the assignment, we do not need to
+ // bind the exit label. Doing so can defeat peephole optimization.
+ if (init_state == CONST_INIT) __ bind(&exit);
+ }
+}
+
+
+// NOTE: The stub does not handle the inlined cases (Smis, Booleans, undefined).
+void ToBooleanStub::Generate(MacroAssembler* masm) {
+ Label false_result, true_result, not_string;
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+
+ // 'null' => false.
+ __ cmp(eax, Factory::null_value());
+ __ j(equal, &false_result);
+
+ // Get the map and type of the heap object.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(edx, Map::kInstanceTypeOffset));
+
+ // Undetectable => false.
+ __ movzx_b(ebx, FieldOperand(edx, Map::kBitFieldOffset));
+ __ and_(ebx, 1 << Map::kIsUndetectable);
+ __ j(not_zero, &false_result);
+
+ // JavaScript object => true.
+ __ cmp(ecx, FIRST_JS_OBJECT_TYPE);
+ __ j(above_equal, &true_result);
+
+ // String value => false iff empty.
+ __ cmp(ecx, FIRST_NONSTRING_TYPE);
+ __ j(above_equal, &not_string);
+ __ and_(ecx, kStringSizeMask);
+ __ cmp(ecx, kShortStringTag);
+ __ j(not_equal, &true_result); // Empty string is always short.
+ __ mov(edx, FieldOperand(eax, String::kLengthOffset));
+ __ shr(edx, String::kShortLengthShift);
+ __ j(zero, &false_result);
+ __ jmp(&true_result);
+
+ __ bind(&not_string);
+ // HeapNumber => false iff +0, -0, or NaN.
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &true_result);
+ __ fldz();
+ __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ fucompp();
+ __ push(eax);
+ __ fnstsw_ax();
+ __ sahf();
+ __ pop(eax);
+ __ j(zero, &false_result);
+ // Fall through to |true_result|.
+
+ // Return 1/0 for true/false in eax.
+ __ bind(&true_result);
+ __ mov(eax, 1);
+ __ ret(1 * kPointerSize);
+ __ bind(&false_result);
+ __ mov(eax, 0);
+ __ ret(1 * kPointerSize);
+}
+
+
+void Ia32CodeGenerator::SetReferenceProperty(CodeGenerator* cgen,
+ Reference* ref,
+ Expression* key) {
+ ASSERT(!ref->is_illegal());
+ MacroAssembler* masm = cgen->masm();
+
+ if (ref->type() == Reference::NAMED) {
+ // Compute the name of the property.
+ Literal* literal = key->AsLiteral();
+ Handle<String> name(String::cast(*literal->handle()));
+
+ // Call the appropriate IC code.
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ // TODO(1222589): Make the IC grab the values from the stack.
+ __ pop(eax);
+ // Set up the name register.
+ __ Set(ecx, Immediate(name));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ } else {
+ // Access keyed property.
+ ASSERT(ref->type() == Reference::KEYED);
+
+ // Call IC code.
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ // TODO(1222589): Make the IC grab the values from the stack.
+ __ pop(eax);
+ __ call(ic, RelocInfo::CODE_TARGET);
+ }
+ __ push(eax); // IC call leaves the result in eax; push it onto the stack
+}
+
+
+void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
+ Label call_runtime;
+ __ mov(eax, Operand(esp, 1 * kPointerSize)); // Get y.
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // Get x.
+
+ // 1. Smi case.
+ switch (op_) {
+ case Token::ADD: {
+ // eax: y.
+ // edx: x.
+ Label revert;
+ __ mov(ecx, Operand(eax));
+ __ or_(ecx, Operand(edx)); // ecx = x | y.
+ __ add(eax, Operand(edx)); // Add y optimistically.
+ // Go slow-path in case of overflow.
+ __ j(overflow, &revert, not_taken);
+ // Go slow-path in case of non-smi operands.
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &revert, not_taken);
+ __ ret(2 * kPointerSize); // Remove all operands.
+
+ // Revert optimistic add.
+ __ bind(&revert);
+ __ sub(eax, Operand(edx));
+ break;
+ }
+ case Token::SUB: {
+ // eax: y.
+ // edx: x.
+ Label revert;
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax)); // ecx = x | y.
+ __ sub(edx, Operand(eax)); // Subtract y optimistically.
+ // Go slow-path in case of overflow.
+ __ j(overflow, &revert, not_taken);
+ // Go slow-path in case of non-smi operands.
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &revert, not_taken);
+ __ mov(eax, Operand(edx));
+ __ ret(2 * kPointerSize); // Remove all operands.
+
+ // Revert optimistic sub.
+ __ bind(&revert);
+ __ add(edx, Operand(eax));
+ break;
+ }
+ case Token::MUL: {
+ // eax: y
+ // edx: x
+ // a) both operands smi and result fits into a smi -> return.
+ // b) at least one of operands non-smi -> non_smi_operands.
+ // c) result does not fit in a smi -> non_smi_result.
+ Label non_smi_operands, non_smi_result;
+ // Tag check.
+ __ mov(ecx, Operand(edx));
+ __ or_(ecx, Operand(eax)); // ecx = x | y.
+ ASSERT(kSmiTag == 0); // Adjust code below.
+ __ test(ecx, Immediate(kSmiTagMask));
+ // Jump if not both smi; check if float numbers.
+ __ j(not_zero, &non_smi_operands, not_taken);
+
+ // Get copies of operands.
+ __ mov(ebx, Operand(eax));
+ __ mov(ecx, Operand(edx));
+ // If the smi tag is 0 we can just leave the tag on one operand.
+ ASSERT(kSmiTag == 0); // adjust code below
+ // Remove tag from one of the operands (but keep sign).
+ __ sar(ecx, kSmiTagSize);
+ // Do multiplication.
+ __ imul(eax, Operand(ecx)); // Multiplication of Smis; result in eax.
+ // Go slow on overflows.
+ __ j(overflow, &non_smi_result, not_taken);
+ // ...but operands OK for float arithmetic.
+
+ // If the result is +0 we may need to check if the result should
+ // really be -0. Welcome to the -0 fan club.
+ __ NegativeZeroTest(eax, ebx, edx, ecx, &non_smi_result);
+
+ __ ret(2 * kPointerSize);
+
+ __ bind(&non_smi_result);
+ // TODO(1243132): Do not check float operands here.
+ __ bind(&non_smi_operands);
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ break;
+ }
+ case Token::DIV: {
+ // eax: y
+ // edx: x
+ Label non_smi_operands, non_smi_result, division_by_zero;
+ __ mov(ebx, Operand(eax)); // Get y
+ __ mov(eax, Operand(edx)); // Get x
+
+ __ cdq(); // Sign extend eax into edx:eax.
+ // Tag check.
+ __ mov(ecx, Operand(ebx));
+ __ or_(ecx, Operand(eax)); // ecx = x | y.
+ ASSERT(kSmiTag == 0); // Adjust code below.
+ __ test(ecx, Immediate(kSmiTagMask));
+ // Jump if not both smi; check if float numbers.
+ __ j(not_zero, &non_smi_operands, not_taken);
+ __ test(ebx, Operand(ebx)); // Check for 0 divisor.
+ __ j(zero, &division_by_zero, not_taken);
+
+ __ idiv(ebx);
+ // Check for the corner case of dividing the most negative smi by -1.
+ // (We cannot use the overflow flag, since it is not set by idiv.)
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ cmp(eax, 0x40000000);
+ __ j(equal, &non_smi_result);
+ // If the result is +0 we may need to check if the result should
+ // really be -0. Welcome to the -0 fan club.
+ __ NegativeZeroTest(eax, ecx, &non_smi_result); // Use ecx = x | y.
+ __ test(edx, Operand(edx));
+ // Use floats if there's a remainder.
+ __ j(not_zero, &non_smi_result, not_taken);
+ __ shl(eax, kSmiTagSize);
+ __ ret(2 * kPointerSize); // Remove all operands.
+
+ __ bind(&division_by_zero);
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ __ jmp(&call_runtime); // Division by zero must go through runtime.
+
+ __ bind(&non_smi_result);
+ // TODO(1243132): Do not check float operands here.
+ __ bind(&non_smi_operands);
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ break;
+ }
+ case Token::MOD: {
+ Label slow;
+ __ mov(ebx, Operand(eax)); // get y
+ __ mov(eax, Operand(edx)); // get x
+ __ cdq(); // sign extend eax into edx:eax
+ // tag check
+ __ mov(ecx, Operand(ebx));
+ __ or_(ecx, Operand(eax)); // ecx = x | y;
+ ASSERT(kSmiTag == 0); // adjust code below
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow, not_taken);
+ __ test(ebx, Operand(ebx)); // test for y == 0
+ __ j(zero, &slow);
+
+ // Fast case: Do integer division and use remainder.
+ __ idiv(ebx);
+ __ NegativeZeroTest(edx, ecx, &slow); // use ecx = x | y
+ __ mov(eax, Operand(edx));
+ __ ret(2 * kPointerSize);
+
+ // Slow case: Call runtime operator implementation.
+ __ bind(&slow);
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ // Fall through to |call_runtime|.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ // Smi-case for bitops should already have been inlined.
+ break;
+ }
+ default: {
+ UNREACHABLE();
+ }
+ }
+
+ // 2. Floating point case.
+ switch (op_) {
+ case Token::ADD:
+ case Token::SUB:
+ case Token::MUL:
+ case Token::DIV: {
+ // eax: y
+ // edx: x
+ FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+ // Fast-case: Both operands are numbers.
+ // Allocate a heap number, if needed.
+ Label skip_allocation;
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ __ mov(eax, Operand(edx));
+ // Fall through!
+ case OVERWRITE_RIGHT:
+ // If the argument in eax is already an object, we skip the
+ // allocation of a heap number.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ FloatingPointHelper::AllocateHeapNumber(masm,
+ &call_runtime,
+ ecx,
+ edx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ FloatingPointHelper::LoadFloatOperands(masm, ecx);
+
+ switch (op_) {
+ case Token::ADD: __ faddp(1); break;
+ case Token::SUB: __ fsubp(1); break;
+ case Token::MUL: __ fmulp(1); break;
+ case Token::DIV: __ fdivp(1); break;
+ default: UNREACHABLE();
+ }
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(2 * kPointerSize);
+ }
+ case Token::MOD: {
+ // For MOD we go directly to runtime in the non-smi case.
+ break;
+ }
+ case Token::BIT_OR:
+ case Token::BIT_AND:
+ case Token::BIT_XOR:
+ case Token::SAR:
+ case Token::SHL:
+ case Token::SHR: {
+ FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+ FloatingPointHelper::LoadFloatOperands(masm, ecx);
+
+ Label non_int32_operands, non_smi_result, skip_allocation;
+ // Reserve space for converted numbers.
+ __ sub(Operand(esp), Immediate(2 * kPointerSize));
+
+ // Check if right operand is int32.
+ __ fist_s(Operand(esp, 1 * kPointerSize));
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fucompp();
+ __ fnstsw_ax();
+ __ sahf();
+ __ j(not_zero, &non_int32_operands);
+ __ j(parity_even, &non_int32_operands);
+
+ // Check if left operand is int32.
+ __ fist_s(Operand(esp, 0 * kPointerSize));
+ __ fild_s(Operand(esp, 0 * kPointerSize));
+ __ fucompp();
+ __ fnstsw_ax();
+ __ sahf();
+ __ j(not_zero, &non_int32_operands);
+ __ j(parity_even, &non_int32_operands);
+
+ // Get int32 operands and perform bitop.
+ __ pop(eax);
+ __ pop(ecx);
+ switch (op_) {
+ case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
+ case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+ case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+ case Token::SAR: __ sar(eax); break;
+ case Token::SHL: __ shl(eax); break;
+ case Token::SHR: __ shr(eax); break;
+ default: UNREACHABLE();
+ }
+
+ // Check if result is non-negative and fits in a smi.
+ __ test(eax, Immediate(0xc0000000));
+ __ j(not_zero, &non_smi_result);
+
+ // Tag smi result and return.
+ ASSERT(kSmiTagSize == times_2); // adjust code if not the case
+ __ lea(eax, Operand(eax, times_2, kSmiTag));
+ __ ret(2 * kPointerSize);
+
+ // All ops except SHR return a signed int32 that we load in a HeapNumber.
+ if (op_ != Token::SHR) {
+ __ bind(&non_smi_result);
+ // Allocate a heap number if needed.
+ __ mov(ebx, Operand(eax)); // ebx: result
+ switch (mode_) {
+ case OVERWRITE_LEFT:
+ case OVERWRITE_RIGHT:
+ // If the operand was an object, we skip the
+ // allocation of a heap number.
+ __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+ 1 * kPointerSize : 2 * kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &skip_allocation, not_taken);
+ // Fall through!
+ case NO_OVERWRITE:
+ FloatingPointHelper::AllocateHeapNumber(masm, &call_runtime,
+ ecx, edx);
+ __ bind(&skip_allocation);
+ break;
+ default: UNREACHABLE();
+ }
+ // Store the result in the HeapNumber and return.
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ __ ret(2 * kPointerSize);
+ }
+ __ bind(&non_int32_operands);
+ // Restore stacks and operands before calling runtime.
+ __ ffree(0);
+ __ add(Operand(esp), Immediate(2 * kPointerSize));
+
+ // SHR should return uint32 - go to runtime for non-smi/negative result.
+ if (op_ == Token::SHR) __ bind(&non_smi_result);
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(edx, Operand(esp, 2 * kPointerSize));
+ break;
+ }
+ default: UNREACHABLE(); break;
+ }
+
+ // 3. If all else fails, use the runtime system to get the correct result.
+ __ bind(&call_runtime);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void FloatingPointHelper::AllocateHeapNumber(MacroAssembler* masm,
+ Label* need_gc,
+ Register scratch1,
+ Register scratch2) {
+ ExternalReference allocation_top =
+ ExternalReference::new_space_allocation_top_address();
+ ExternalReference allocation_limit =
+ ExternalReference::new_space_allocation_limit_address();
+ __ mov(Operand(scratch1), Immediate(allocation_top));
+ __ mov(eax, Operand(scratch1, 0));
+ __ lea(scratch2, Operand(eax, HeapNumber::kSize)); // scratch2: new top
+ __ cmp(scratch2, Operand::StaticVariable(allocation_limit));
+ __ j(above, need_gc, not_taken);
+
+ __ mov(Operand(scratch1, 0), scratch2); // store new top
+ __ mov(Operand(eax, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ // Tag old top and use as result.
+ __ add(Operand(eax), Immediate(kHeapObjectTag));
+}
+
+
+void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
+ Register scratch) {
+ Label load_smi_1, load_smi_2, done_load_1, done;
+ __ mov(scratch, Operand(esp, 2 * kPointerSize));
+ __ test(scratch, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_1, not_taken);
+ __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+ __ bind(&done_load_1);
+
+ __ mov(scratch, Operand(esp, 1 * kPointerSize));
+ __ test(scratch, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi_2, not_taken);
+ __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
+ __ jmp(&done);
+
+ __ bind(&load_smi_1);
+ __ sar(scratch, kSmiTagSize);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+ __ jmp(&done_load_1);
+
+ __ bind(&load_smi_2);
+ __ sar(scratch, kSmiTagSize);
+ __ push(scratch);
+ __ fild_s(Operand(esp, 0));
+ __ pop(scratch);
+
+ __ bind(&done);
+}
+
+
+void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
+ Label* non_float,
+ Register scratch) {
+ Label test_other, done;
+ // Test if both operands are numbers (heap numbers or smis);
+ // jump to non_float otherwise.
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &test_other, not_taken); // argument in edx is OK
+ __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(scratch, Factory::heap_number_map());
+ __ j(not_equal, non_float); // argument in edx is not a number -> NaN
+
+ __ bind(&test_other);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done); // argument in eax is OK
+ __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(scratch, Factory::heap_number_map());
+ __ j(not_equal, non_float); // argument in eax is not a number -> NaN
+
+ // Fall-through: Both operands are numbers.
+ __ bind(&done);
+}
+
+
+void UnarySubStub::Generate(MacroAssembler* masm) {
+ Label undo;
+ Label slow;
+ Label done;
+ Label try_float;
+
+ // Check whether the value is a smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &try_float, not_taken);
+
+ // Enter runtime system if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ test(eax, Operand(eax));
+ __ j(zero, &slow, not_taken);
+
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
+ __ mov(edx, Operand(eax));
+ __ Set(eax, Immediate(0));
+ __ sub(eax, Operand(edx));
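+ // The subtraction overflows only for the most negative smi, whose
+ // negation cannot be represented as a smi.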
+ __ j(overflow, &undo, not_taken);
+
+ // If result is a smi we are done.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done, taken);
+
+ // Restore eax and enter runtime system.
+ __ bind(&undo);
+ __ mov(eax, Operand(edx));
+
+ // Enter runtime system.
+ __ bind(&slow);
+ __ pop(ecx); // pop return address
+ __ push(eax);
+ __ push(ecx); // push return address
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &slow);
+ __ mov(edx, Operand(eax));
+ // edx: operand
+ FloatingPointHelper::AllocateHeapNumber(masm, &undo, ebx, ecx);
+ // eax: allocated 'empty' number
+ __ fld_d(FieldOperand(edx, HeapNumber::kValueOffset));
+ __ fchs();
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+
+ __ bind(&done);
+
+ __ StubReturn(1);
+}
+
+
+void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
+ // Check that the key is a smi for non-length access.
+ Label slow;
+ if (!is_length_) {
+ __ mov(ebx, Operand(esp, 1 * kPointerSize)); // skip return address
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(not_zero, &slow, not_taken);
+ }
+
+ // Check if the calling frame is an arguments adaptor frame.
+ Label adaptor;
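+ // Adaptor frames store a sentinel value instead of a context in
+ // the standard context slot of the frame.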
+ __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+ __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
+ __ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
+ __ j(equal, &adaptor);
+
+ // The displacement is used for skipping the return address on the
+ // stack. It is the offset of the last parameter (if any) relative
+ // to the frame pointer.
+ static const int kDisplacement = 1 * kPointerSize;
+ ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
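+ // The key in ebx (for non-length access) and the argument counts
+ // are tagged smis, i.e. already shifted left by one, so the times_2
+ // scale factor below computes byte offsets of index * kPointerSize.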
+
+ if (is_length_) {
+ // Do nothing. The length is already in register eax.
+ } else {
+ // Check the index against the formal parameter count passed in
+ // register eax. The unsigned comparison rejects negative keys
+ // for free.
+ __ cmp(ebx, Operand(eax));
+ __ j(above_equal, &slow, not_taken);
+
+ // Read the argument from the stack.
+ __ lea(edx, Operand(ebp, eax, times_2, 0));
+ __ neg(ebx);
+ __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
+ }
+
+ // Return the length or the argument.
+ __ ret(0);
+
+ // Arguments adaptor case: Find the length or the actual argument in
+ // the calling frame.
+ __ bind(&adaptor);
+ if (is_length_) {
+ // Read the arguments length from the adaptor frame.
+ __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ } else {
+ // Check the index against the actual argument count stored in
+ // the arguments adaptor frame. The unsigned comparison rejects
+ // negative keys for free.
+ __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ cmp(ebx, Operand(ecx));
+ __ j(above_equal, &slow, not_taken);
+
+ // Read the argument from the stack.
+ __ lea(edx, Operand(edx, ecx, times_2, 0));
+ __ neg(ebx);
+ __ mov(eax, Operand(edx, ebx, times_2, kDisplacement));
+ }
+
+ // Return the length or the argument.
+ __ ret(0);
+
+ // Slow-case: Handle non-smi or out-of-bounds access to arguments
+ // by calling the runtime system.
+ if (!is_length_) {
+ __ bind(&slow);
+ __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1);
+ }
+}
+
+
+void CompareStub::Generate(MacroAssembler* masm) {
+ Label call_builtin, done;
+ // Save the return address (and get it off the stack).
+ __ pop(ecx);
+
+ // Push arguments.
+ __ push(eax);
+ __ push(edx);
+ __ push(ecx);
+
+ // Inlined floating point compare.
+ // Call the builtin if either operand is neither a smi nor a heap
+ // number.
+ FloatingPointHelper::CheckFloatOperands(masm, &call_builtin, ebx);
+ FloatingPointHelper::LoadFloatOperands(masm, ecx);
+ __ FCmp();
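+ // FCmp sets the parity flag if the comparison was unordered, that
+ // is, if at least one of the operands is NaN.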
+
+ // Jump to builtin for NaN.
+ __ j(parity_even, &call_builtin, not_taken);
+
+ // TODO(1243847): Use cmov below once CpuFeatures are properly hooked up.
+ Label below_lbl, above_lbl;
+ // Convert the unsigned below/above condition codes from FCmp into
+ // the signed -1/0/1 result expected by the caller.
+ __ j(below, &below_lbl, not_taken);
+ __ j(above, &above_lbl, not_taken);
+
+ __ xor_(eax, Operand(eax)); // equal
+ __ ret(2 * kPointerSize);
+
+ __ bind(&below_lbl);
+ __ mov(eax, -1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&above_lbl);
+ __ mov(eax, 1);
+ __ ret(2 * kPointerSize); // eax, edx were pushed
+
+ __ bind(&call_builtin);
+ // The inline compare pushed eax before edx; the builtins expect
+ // the arguments in the opposite order, so re-push them swapped.
+ __ pop(ecx);
+ __ pop(edx);
+ __ pop(eax);
+ __ push(edx);
+ __ push(eax);
+
+ // Figure out which native to call and setup the arguments.
+ Builtins::JavaScript builtin;
+ if (cc_ == equal) {
+ builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ } else {
+ builtin = Builtins::COMPARE;
+ int ncr; // NaN compare result
+ if (cc_ == less || cc_ == less_equal) {
+ ncr = GREATER;
+ } else {
+ ASSERT(cc_ == greater || cc_ == greater_equal); // remaining cases
+ ncr = LESS;
+ }
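+ // Push the value the COMPARE builtin should produce for an
+ // unordered (NaN) comparison; it is chosen so that the comparison
+ // evaluates to false.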
+ __ push(Immediate(Smi::FromInt(ncr)));
+ }
+
+ // Restore return address on the stack.
+ __ push(ecx);
+
+ // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
+
+
+void StackCheckStub::Generate(MacroAssembler* masm) {
+ // Because builtins always remove the receiver from the stack, we
+ // have to fake one to avoid underflowing the stack. The receiver
+ // must be inserted below the return address, so we temporarily pop
+ // the return address into a register.
+ __ pop(eax);
+ __ push(Immediate(Smi::FromInt(0)));
+ __ push(eax);
+
+ // Do tail-call to runtime routine.
+ __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+ Label slow;
+
+ // Get the function to call from the stack.
+ // +2 ~ receiver, return address
+ __ mov(edi, Operand(esp, (argc_ + 2) * kPointerSize));
+
+ // Check that the function really is a JavaScript function.
+ __ test(edi, Immediate(kSmiTagMask));
+ __ j(zero, &slow, not_taken);
+ // Get the map.
+ __ mov(ecx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ cmp(ecx, JS_FUNCTION_TYPE);
+ __ j(not_equal, &slow, not_taken);
+
+ // Fast-case: Just invoke the function.
+ ParameterCount actual(argc_);
+ __ InvokeFunction(edi, actual, JUMP_FUNCTION);
+
+ // Slow-case: Non-function called.
+ __ bind(&slow);
+ __ Set(eax, Immediate(argc_));
+ __ Set(ebx, Immediate(0));
+ __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
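+ // Jump through the arguments adaptor so that the actual argument
+ // count in eax is adapted to the zero formal parameters the
+ // non-function builtin expects (ebx).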
+ Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
+ __ jmp(adaptor, RelocInfo::CODE_TARGET);
+}
+
+
+void RevertToNumberStub::Generate(MacroAssembler* masm) {
+ // Revert optimistic increment/decrement.
+ if (is_increment_) {
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ } else {
+ __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ }
+
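+ // Move eax below the return address so that it becomes the
+ // argument to the TO_NUMBER builtin.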
+ __ pop(ecx);
+ __ push(eax);
+ __ push(ecx);
+ __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+ // Code never returns due to JUMP_FUNCTION.
+}
+
+
+void CounterOpStub::Generate(MacroAssembler* masm) {
+ // For postfix counts, store the value into the result slot on the
+ // stack (skipping the return address) before performing the count
+ // operation.
+ if (is_postfix_) {
+ __ mov(Operand(esp, result_offset_ + kPointerSize), eax);
+ }
+
+ // Revert optimistic increment/decrement but only for prefix
+ // counts. For postfix counts it has already been reverted before
+ // the conversion to numbers.
+ if (!is_postfix_) {
+ if (is_increment_) {
+ __ sub(Operand(eax), Immediate(Smi::FromInt(1)));
+ } else {
+ __ add(Operand(eax), Immediate(Smi::FromInt(1)));
+ }
+ }
+
+ // Compute the new value by calling the right JavaScript native.
+ __ pop(ecx);
+ __ push(eax);
+ __ push(ecx);
+ Builtins::JavaScript builtin = is_increment_ ? Builtins::INC : Builtins::DEC;
+ __ InvokeBuiltin(builtin, JUMP_FUNCTION);
+ // Code never returns due to JUMP_FUNCTION.
+}
+
+
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
ASSERT(StackHandlerConstants::kSize == 6 * kPointerSize); // adjust this code
ExternalReference handler_address(Top::k_handler_address);
@@ -5465,7 +5429,6 @@
#undef __
-
// -----------------------------------------------------------------------------
// CodeGenerator interfaces