| Index: src/x64/codegen-x64.cc
|
| ===================================================================
|
| --- src/x64/codegen-x64.cc (revision 5039)
|
| +++ src/x64/codegen-x64.cc (working copy)
|
| @@ -139,149 +139,6 @@
|
| }
|
|
|
|
|
| -// -------------------------------------------------------------------------
|
| -// Deferred code objects
|
| -//
|
| -// These subclasses of DeferredCode add pieces of code to the end of generated
|
| -// code. They are branched to from the generated code, and
|
| -// keep some slower code out of the main body of the generated code.
|
| -// Many of them call a code stub or a runtime function.
|
| -
|
| -class DeferredInlineSmiAdd: public DeferredCode {
|
| - public:
|
| - DeferredInlineSmiAdd(Register dst,
|
| - Smi* value,
|
| - OverwriteMode overwrite_mode)
|
| - : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
|
| - set_comment("[ DeferredInlineSmiAdd");
|
| - }
|
| -
|
| - virtual void Generate();
|
| -
|
| - private:
|
| - Register dst_;
|
| - Smi* value_;
|
| - OverwriteMode overwrite_mode_;
|
| -};
|
| -
|
| -
|
| -// The result of value + src is in dst. It either overflowed or was not
|
| -// smi tagged. Undo the speculative addition and call the appropriate
|
| -// specialized stub for add. The result is left in dst.
|
| -class DeferredInlineSmiAddReversed: public DeferredCode {
|
| - public:
|
| - DeferredInlineSmiAddReversed(Register dst,
|
| - Smi* value,
|
| - OverwriteMode overwrite_mode)
|
| - : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
|
| - set_comment("[ DeferredInlineSmiAddReversed");
|
| - }
|
| -
|
| - virtual void Generate();
|
| -
|
| - private:
|
| - Register dst_;
|
| - Smi* value_;
|
| - OverwriteMode overwrite_mode_;
|
| -};
|
| -
|
| -
|
| -class DeferredInlineSmiSub: public DeferredCode {
|
| - public:
|
| - DeferredInlineSmiSub(Register dst,
|
| - Smi* value,
|
| - OverwriteMode overwrite_mode)
|
| - : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
|
| - set_comment("[ DeferredInlineSmiSub");
|
| - }
|
| -
|
| - virtual void Generate();
|
| -
|
| - private:
|
| - Register dst_;
|
| - Smi* value_;
|
| - OverwriteMode overwrite_mode_;
|
| -};
|
| -
|
| -
|
| -// Call the appropriate binary operation stub to compute src op value
|
| -// and leave the result in dst.
|
| -class DeferredInlineSmiOperation: public DeferredCode {
|
| - public:
|
| - DeferredInlineSmiOperation(Token::Value op,
|
| - Register dst,
|
| - Register src,
|
| - Smi* value,
|
| - OverwriteMode overwrite_mode)
|
| - : op_(op),
|
| - dst_(dst),
|
| - src_(src),
|
| - value_(value),
|
| - overwrite_mode_(overwrite_mode) {
|
| - set_comment("[ DeferredInlineSmiOperation");
|
| - }
|
| -
|
| - virtual void Generate();
|
| -
|
| - private:
|
| - Token::Value op_;
|
| - Register dst_;
|
| - Register src_;
|
| - Smi* value_;
|
| - OverwriteMode overwrite_mode_;
|
| -};
|
| -
|
| -
|
| -// Call the appropriate binary operation stub to compute value op src
|
| -// and leave the result in dst.
|
| -class DeferredInlineSmiOperationReversed: public DeferredCode {
|
| - public:
|
| - DeferredInlineSmiOperationReversed(Token::Value op,
|
| - Register dst,
|
| - Smi* value,
|
| - Register src,
|
| - OverwriteMode overwrite_mode)
|
| - : op_(op),
|
| - dst_(dst),
|
| - value_(value),
|
| - src_(src),
|
| - overwrite_mode_(overwrite_mode) {
|
| - set_comment("[ DeferredInlineSmiOperationReversed");
|
| - }
|
| -
|
| - virtual void Generate();
|
| -
|
| - private:
|
| - Token::Value op_;
|
| - Register dst_;
|
| - Smi* value_;
|
| - Register src_;
|
| - OverwriteMode overwrite_mode_;
|
| -};
|
| -
|
| -
|
| -class FloatingPointHelper : public AllStatic {
|
| - public:
|
| - // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
|
| - // If the operands are not both numbers, jump to not_numbers.
|
| - // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
|
| - // NumberOperands assumes both are smis or heap numbers.
|
| - static void LoadSSE2SmiOperands(MacroAssembler* masm);
|
| - static void LoadSSE2NumberOperands(MacroAssembler* masm);
|
| - static void LoadSSE2UnknownOperands(MacroAssembler* masm,
|
| - Label* not_numbers);
|
| -
|
| - // Takes the operands in rdx and rax and loads them as integers in rax
|
| - // and rcx.
|
| - static void LoadAsIntegers(MacroAssembler* masm,
|
| - Label* operand_conversion_failure,
|
| - Register heap_number_map);
|
| - // As above, but we know the operands to be numbers. In that case,
|
| - // conversion can't fail.
|
| - static void LoadNumbersAsIntegers(MacroAssembler* masm);
|
| -};
|
| -
|
| -
|
| // -----------------------------------------------------------------------------
|
| // CodeGenerator implementation.
|
|
|
| @@ -298,21 +155,6 @@
|
| }
|
|
|
|
|
| -void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
|
| - // Call the runtime to declare the globals. The inevitable call
|
| - // will sync frame elements to memory anyway, so we do it eagerly to
|
| - // allow us to push the arguments directly into place.
|
| - frame_->SyncRange(0, frame_->element_count() - 1);
|
| -
|
| - __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
|
| - frame_->EmitPush(rsi); // The context is the first argument.
|
| - frame_->EmitPush(kScratchRegister);
|
| - frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
|
| - Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
|
| - // Return value is ignored.
|
| -}
|
| -
|
| -
|
| void CodeGenerator::Generate(CompilationInfo* info) {
|
| // Record the position for debugging purposes.
|
| CodeForFunctionPosition(info->function());
|
| @@ -543,212 +385,2080 @@
|
| allocator_ = NULL;
|
| }
|
|
|
| -void CodeGenerator::GenerateReturnSequence(Result* return_value) {
|
| - // The return value is a live (but not currently reference counted)
|
| - // reference to rax. This is safe because the current frame does not
|
| - // contain a reference to rax (it is prepared for the return by spilling
|
| - // all registers).
|
| - if (FLAG_trace) {
|
| - frame_->Push(return_value);
|
| - *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
|
| +
|
| +Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
|
| + // Currently, this assertion will fail if we try to assign to
|
| + // a constant variable that is constant because it is read-only
|
| + // (such as the variable referring to a named function expression).
|
| + // We need to implement assignments to read-only variables.
|
| + // Ideally, we should do this during AST generation (by converting
|
| + // such assignments into expression statements); however, in general
|
| + // we may not be able to make the decision until past AST generation,
|
| + // that is when the entire program is known.
|
| + ASSERT(slot != NULL);
|
| + int index = slot->index();
|
| + switch (slot->type()) {
|
| + case Slot::PARAMETER:
|
| + return frame_->ParameterAt(index);
|
| +
|
| + case Slot::LOCAL:
|
| + return frame_->LocalAt(index);
|
| +
|
| + case Slot::CONTEXT: {
|
| + // Follow the context chain if necessary.
|
| + ASSERT(!tmp.is(rsi)); // do not overwrite context register
|
| + Register context = rsi;
|
| + int chain_length = scope()->ContextChainLength(slot->var()->scope());
|
| + for (int i = 0; i < chain_length; i++) {
|
| + // Load the closure.
|
| + // (All contexts, even 'with' contexts, have a closure,
|
| + // and it is the same for all contexts inside a function.
|
| + // There is no need to go to the function context first.)
|
| + __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
|
| + // Load the function context (which is the incoming, outer context).
|
| + __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
|
| + context = tmp;
|
| + }
|
| + // We may have a 'with' context now. Get the function context.
|
| +      // (In fact this mov may never be needed, since the scope analysis
|
| +      // may not permit a direct context access in this case and thus we
|
| +      // are always at a function context. However, it is safe to
|
| +      // dereference because the function context of a function context
|
| +      // is itself. Before deleting this mov we should try to create a
|
| +      // counter-example first, though...)
|
| + __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
|
| + return ContextOperand(tmp, index);
|
| + }
|
| +
|
| + default:
|
| + UNREACHABLE();
|
| + return Operand(rsp, 0);
|
| }
|
| - return_value->ToRegister(rax);
|
| +}
|
|
|
| - // Add a label for checking the size of the code used for returning.
|
| +
|
| +Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
|
| + Result tmp,
|
| + JumpTarget* slow) {
|
| + ASSERT(slot->type() == Slot::CONTEXT);
|
| + ASSERT(tmp.is_register());
|
| + Register context = rsi;
|
| +
|
| + for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
|
| + if (s->num_heap_slots() > 0) {
|
| + if (s->calls_eval()) {
|
| + // Check that extension is NULL.
|
| + __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
|
| + Immediate(0));
|
| + slow->Branch(not_equal, not_taken);
|
| + }
|
| + __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
|
| + __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
|
| + context = tmp.reg();
|
| + }
|
| + }
|
| + // Check that last extension is NULL.
|
| + __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
|
| + slow->Branch(not_equal, not_taken);
|
| + __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
|
| + return ContextOperand(tmp.reg(), slot->index());
|
| +}
|
| +
|
| +
|
| +// Emit code to load the value of an expression to the top of the
|
| +// frame. If the expression is boolean-valued it may be compiled (or
|
| +// partially compiled) into control flow to the control destination.
|
| +// If force_control is true, control flow is forced.
|
| +void CodeGenerator::LoadCondition(Expression* x,
|
| + ControlDestination* dest,
|
| + bool force_control) {
|
| + ASSERT(!in_spilled_code());
|
| + int original_height = frame_->height();
|
| +
|
| + { CodeGenState new_state(this, dest);
|
| + Visit(x);
|
| +
|
| + // If we hit a stack overflow, we may not have actually visited
|
| + // the expression. In that case, we ensure that we have a
|
| + // valid-looking frame state because we will continue to generate
|
| + // code as we unwind the C++ stack.
|
| + //
|
| + // It's possible to have both a stack overflow and a valid frame
|
| +    // state (e.g., a subexpression overflowed, visiting it returned
|
| + // with a dummied frame state, and visiting this expression
|
| + // returned with a normal-looking state).
|
| + if (HasStackOverflow() &&
|
| + !dest->is_used() &&
|
| + frame_->height() == original_height) {
|
| + dest->Goto(true);
|
| + }
|
| + }
|
| +
|
| + if (force_control && !dest->is_used()) {
|
| + // Convert the TOS value into flow to the control destination.
|
| + // TODO(X64): Make control flow to control destinations work.
|
| + ToBoolean(dest);
|
| + }
|
| +
|
| + ASSERT(!(force_control && !dest->is_used()));
|
| + ASSERT(dest->is_used() || frame_->height() == original_height + 1);
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::LoadAndSpill(Expression* expression) {
|
| + // TODO(x64): No architecture specific code. Move to shared location.
|
| + ASSERT(in_spilled_code());
|
| + set_in_spilled_code(false);
|
| + Load(expression);
|
| + frame_->SpillAll();
|
| + set_in_spilled_code(true);
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::Load(Expression* expr) {
|
| #ifdef DEBUG
|
| - Label check_exit_codesize;
|
| - masm_->bind(&check_exit_codesize);
|
| + int original_height = frame_->height();
|
| #endif
|
| + ASSERT(!in_spilled_code());
|
| + JumpTarget true_target;
|
| + JumpTarget false_target;
|
| + ControlDestination dest(&true_target, &false_target, true);
|
| + LoadCondition(expr, &dest, false);
|
|
|
| - // Leave the frame and return popping the arguments and the
|
| - // receiver.
|
| - frame_->Exit();
|
| - masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
|
| -#ifdef ENABLE_DEBUGGER_SUPPORT
|
| - // Add padding that will be overwritten by a debugger breakpoint.
|
| - // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
|
| - // with length 7 (3 + 1 + 3).
|
| - const int kPadding = Assembler::kJSReturnSequenceLength - 7;
|
| - for (int i = 0; i < kPadding; ++i) {
|
| - masm_->int3();
|
| + if (dest.false_was_fall_through()) {
|
| + // The false target was just bound.
|
| + JumpTarget loaded;
|
| + frame_->Push(Factory::false_value());
|
| + // There may be dangling jumps to the true target.
|
| + if (true_target.is_linked()) {
|
| + loaded.Jump();
|
| + true_target.Bind();
|
| + frame_->Push(Factory::true_value());
|
| + loaded.Bind();
|
| + }
|
| +
|
| + } else if (dest.is_used()) {
|
| + // There is true, and possibly false, control flow (with true as
|
| + // the fall through).
|
| + JumpTarget loaded;
|
| + frame_->Push(Factory::true_value());
|
| + if (false_target.is_linked()) {
|
| + loaded.Jump();
|
| + false_target.Bind();
|
| + frame_->Push(Factory::false_value());
|
| + loaded.Bind();
|
| + }
|
| +
|
| + } else {
|
| + // We have a valid value on top of the frame, but we still may
|
| + // have dangling jumps to the true and false targets from nested
|
| +    // subexpressions (e.g., the left subexpressions of the
|
| + // short-circuited boolean operators).
|
| + ASSERT(has_valid_frame());
|
| + if (true_target.is_linked() || false_target.is_linked()) {
|
| + JumpTarget loaded;
|
| + loaded.Jump(); // Don't lose the current TOS.
|
| + if (true_target.is_linked()) {
|
| + true_target.Bind();
|
| + frame_->Push(Factory::true_value());
|
| + if (false_target.is_linked()) {
|
| + loaded.Jump();
|
| + }
|
| + }
|
| + if (false_target.is_linked()) {
|
| + false_target.Bind();
|
| + frame_->Push(Factory::false_value());
|
| + }
|
| + loaded.Bind();
|
| + }
|
| }
|
| - // Check that the size of the code used for returning matches what is
|
| - // expected by the debugger.
|
| - ASSERT_EQ(Assembler::kJSReturnSequenceLength,
|
| - masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
|
| -#endif
|
| - DeleteFrame();
|
| +
|
| + ASSERT(has_valid_frame());
|
| + ASSERT(frame_->height() == original_height + 1);
|
| }
|
|
|
|
|
| -#ifdef DEBUG
|
| -bool CodeGenerator::HasValidEntryRegisters() {
|
| - return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
|
| - && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
|
| - && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
|
| - && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
|
| - && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
|
| - && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
|
| - && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
|
| - && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
|
| - && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
|
| - && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
|
| +void CodeGenerator::LoadGlobal() {
|
| + if (in_spilled_code()) {
|
| + frame_->EmitPush(GlobalObject());
|
| + } else {
|
| + Result temp = allocator_->Allocate();
|
| + __ movq(temp.reg(), GlobalObject());
|
| + frame_->Push(&temp);
|
| + }
|
| }
|
| -#endif
|
|
|
|
|
| -class DeferredReferenceGetKeyedValue: public DeferredCode {
|
| +void CodeGenerator::LoadGlobalReceiver() {
|
| + Result temp = allocator_->Allocate();
|
| + Register reg = temp.reg();
|
| + __ movq(reg, GlobalObject());
|
| + __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
|
| + frame_->Push(&temp);
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::LoadTypeofExpression(Expression* expr) {
|
| + // Special handling of identifiers as subexpressions of typeof.
|
| + Variable* variable = expr->AsVariableProxy()->AsVariable();
|
| + if (variable != NULL && !variable->is_this() && variable->is_global()) {
|
| + // For a global variable we build the property reference
|
| + // <global>.<variable> and perform a (regular non-contextual) property
|
| + // load to make sure we do not get reference errors.
|
| + Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
|
| + Literal key(variable->name());
|
| + Property property(&global, &key, RelocInfo::kNoPosition);
|
| + Reference ref(this, &property);
|
| + ref.GetValue();
|
| + } else if (variable != NULL && variable->slot() != NULL) {
|
| + // For a variable that rewrites to a slot, we signal it is the immediate
|
| + // subexpression of a typeof.
|
| + LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
|
| + } else {
|
| + // Anything else can be handled normally.
|
| + Load(expr);
|
| + }
|
| +}
|
| +
|
| +
|
| +ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
|
| + if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
|
| + ASSERT(scope()->arguments_shadow() != NULL);
|
| + // We don't want to do lazy arguments allocation for functions that
|
| +  // have heap-allocated contexts, because it interferes with the
|
| + // uninitialized const tracking in the context objects.
|
| + return (scope()->num_heap_slots() > 0)
|
| + ? EAGER_ARGUMENTS_ALLOCATION
|
| + : LAZY_ARGUMENTS_ALLOCATION;
|
| +}
|
| +
|
| +
|
| +Result CodeGenerator::StoreArgumentsObject(bool initial) {
|
| + ArgumentsAllocationMode mode = ArgumentsMode();
|
| + ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
|
| +
|
| + Comment cmnt(masm_, "[ store arguments object");
|
| + if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
|
| + // When using lazy arguments allocation, we store the hole value
|
| + // as a sentinel indicating that the arguments object hasn't been
|
| + // allocated yet.
|
| + frame_->Push(Factory::the_hole_value());
|
| + } else {
|
| + ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
|
| + frame_->PushFunction();
|
| + frame_->PushReceiverSlotAddress();
|
| + frame_->Push(Smi::FromInt(scope()->num_parameters()));
|
| + Result result = frame_->CallStub(&stub, 3);
|
| + frame_->Push(&result);
|
| + }
|
| +
|
| + Variable* arguments = scope()->arguments()->var();
|
| + Variable* shadow = scope()->arguments_shadow()->var();
|
| + ASSERT(arguments != NULL && arguments->slot() != NULL);
|
| + ASSERT(shadow != NULL && shadow->slot() != NULL);
|
| + JumpTarget done;
|
| + bool skip_arguments = false;
|
| + if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
|
| + // We have to skip storing into the arguments slot if it has
|
| +    // already been written to. This can happen if a function
|
| + // has a local variable named 'arguments'.
|
| + LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
|
| + Result probe = frame_->Pop();
|
| + if (probe.is_constant()) {
|
| + // We have to skip updating the arguments object if it has been
|
| + // assigned a proper value.
|
| + skip_arguments = !probe.handle()->IsTheHole();
|
| + } else {
|
| + __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
|
| + probe.Unuse();
|
| + done.Branch(not_equal);
|
| + }
|
| + }
|
| + if (!skip_arguments) {
|
| + StoreToSlot(arguments->slot(), NOT_CONST_INIT);
|
| + if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
|
| + }
|
| + StoreToSlot(shadow->slot(), NOT_CONST_INIT);
|
| + return frame_->Pop();
|
| +}
|
| +
|
| +//------------------------------------------------------------------------------
|
| +// CodeGenerator implementation of variables, lookups, and stores.
|
| +
|
| +Reference::Reference(CodeGenerator* cgen,
|
| + Expression* expression,
|
| + bool persist_after_get)
|
| + : cgen_(cgen),
|
| + expression_(expression),
|
| + type_(ILLEGAL),
|
| + persist_after_get_(persist_after_get) {
|
| + cgen->LoadReference(this);
|
| +}
|
| +
|
| +
|
| +Reference::~Reference() {
|
| + ASSERT(is_unloaded() || is_illegal());
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::LoadReference(Reference* ref) {
|
| + // References are loaded from both spilled and unspilled code. Set the
|
| + // state to unspilled to allow that (and explicitly spill after
|
| + // construction at the construction sites).
|
| + bool was_in_spilled_code = in_spilled_code_;
|
| + in_spilled_code_ = false;
|
| +
|
| + Comment cmnt(masm_, "[ LoadReference");
|
| + Expression* e = ref->expression();
|
| + Property* property = e->AsProperty();
|
| + Variable* var = e->AsVariableProxy()->AsVariable();
|
| +
|
| + if (property != NULL) {
|
| + // The expression is either a property or a variable proxy that rewrites
|
| + // to a property.
|
| + Load(property->obj());
|
| + if (property->key()->IsPropertyName()) {
|
| + ref->set_type(Reference::NAMED);
|
| + } else {
|
| + Load(property->key());
|
| + ref->set_type(Reference::KEYED);
|
| + }
|
| + } else if (var != NULL) {
|
| + // The expression is a variable proxy that does not rewrite to a
|
| + // property. Global variables are treated as named property references.
|
| + if (var->is_global()) {
|
| + // If rax is free, the register allocator prefers it. Thus the code
|
| + // generator will load the global object into rax, which is where
|
| + // LoadIC wants it. Most uses of Reference call LoadIC directly
|
| + // after the reference is created.
|
| + frame_->Spill(rax);
|
| + LoadGlobal();
|
| + ref->set_type(Reference::NAMED);
|
| + } else {
|
| + ASSERT(var->slot() != NULL);
|
| + ref->set_type(Reference::SLOT);
|
| + }
|
| + } else {
|
| + // Anything else is a runtime error.
|
| + Load(e);
|
| + frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
|
| + }
|
| +
|
| + in_spilled_code_ = was_in_spilled_code;
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::UnloadReference(Reference* ref) {
|
| + // Pop a reference from the stack while preserving TOS.
|
| + Comment cmnt(masm_, "[ UnloadReference");
|
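| +  // Nip drops ref->size() elements just below the top of the virtual
|
| +  // frame, leaving the current top-of-stack value in place.
|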
| + frame_->Nip(ref->size());
|
| + ref->set_unloaded();
|
| +}
|
| +
|
| +
|
| +// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
|
| +// convert it to a boolean in the condition code register or jump to
|
| +// 'false_target'/'true_target' as appropriate.
|
| +void CodeGenerator::ToBoolean(ControlDestination* dest) {
|
| + Comment cmnt(masm_, "[ ToBoolean");
|
| +
|
| + // The value to convert should be popped from the frame.
|
| + Result value = frame_->Pop();
|
| + value.ToRegister();
|
| +
|
| + if (value.is_number()) {
|
| + // Fast case if TypeInfo indicates only numbers.
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotNumber(value.reg());
|
| + }
|
| + // Smi => false iff zero.
|
| + __ SmiCompare(value.reg(), Smi::FromInt(0));
|
| + if (value.is_smi()) {
|
| + value.Unuse();
|
| + dest->Split(not_zero);
|
| + } else {
|
| + dest->false_target()->Branch(equal);
|
| + Condition is_smi = masm_->CheckSmi(value.reg());
|
| + dest->true_target()->Branch(is_smi);
|
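| +      // Compare the heap number against +0.0. ucomisd sets ZF both for
|
| +      // +/-0.0 and for NaN (unordered), so NaN also maps to false here.
|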
| + __ xorpd(xmm0, xmm0);
|
| + __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
|
| + value.Unuse();
|
| + dest->Split(not_zero);
|
| + }
|
| + } else {
|
| + // Fast case checks.
|
| + // 'false' => false.
|
| + __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
|
| + dest->false_target()->Branch(equal);
|
| +
|
| + // 'true' => true.
|
| + __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
|
| + dest->true_target()->Branch(equal);
|
| +
|
| + // 'undefined' => false.
|
| + __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
|
| + dest->false_target()->Branch(equal);
|
| +
|
| + // Smi => false iff zero.
|
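| +    // This comparison is safe before the smi check below: a tagged heap
|
| +    // object pointer can never equal the smi encoding of zero.
|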
| + __ SmiCompare(value.reg(), Smi::FromInt(0));
|
| + dest->false_target()->Branch(equal);
|
| + Condition is_smi = masm_->CheckSmi(value.reg());
|
| + dest->true_target()->Branch(is_smi);
|
| +
|
| + // Call the stub for all other cases.
|
| + frame_->Push(&value); // Undo the Pop() from above.
|
| + ToBooleanStub stub;
|
| + Result temp = frame_->CallStub(&stub, 1);
|
| + // Convert the result to a condition code.
|
| + __ testq(temp.reg(), temp.reg());
|
| + temp.Unuse();
|
| + dest->Split(not_equal);
|
| + }
|
| +}
|
| +
|
| +
|
| +class FloatingPointHelper : public AllStatic {
|
| public:
|
| - explicit DeferredReferenceGetKeyedValue(Register dst,
|
| - Register receiver,
|
| - Register key)
|
| - : dst_(dst), receiver_(receiver), key_(key) {
|
| - set_comment("[ DeferredReferenceGetKeyedValue");
|
| + // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
|
| + // If the operands are not both numbers, jump to not_numbers.
|
| + // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
|
| + // NumberOperands assumes both are smis or heap numbers.
|
| + static void LoadSSE2SmiOperands(MacroAssembler* masm);
|
| + static void LoadSSE2NumberOperands(MacroAssembler* masm);
|
| + static void LoadSSE2UnknownOperands(MacroAssembler* masm,
|
| + Label* not_numbers);
|
| +
|
| + // Takes the operands in rdx and rax and loads them as integers in rax
|
| + // and rcx.
|
| + static void LoadAsIntegers(MacroAssembler* masm,
|
| + Label* operand_conversion_failure,
|
| + Register heap_number_map);
|
| + // As above, but we know the operands to be numbers. In that case,
|
| + // conversion can't fail.
|
| + static void LoadNumbersAsIntegers(MacroAssembler* masm);
|
| +};
|
| +
|
| +
|
| +const char* GenericBinaryOpStub::GetName() {
|
| + if (name_ != NULL) return name_;
|
| + const int len = 100;
|
| + name_ = Bootstrapper::AllocateAutoDeletedArray(len);
|
| + if (name_ == NULL) return "OOM";
|
| + const char* op_name = Token::Name(op_);
|
| + const char* overwrite_name;
|
| + switch (mode_) {
|
| + case NO_OVERWRITE: overwrite_name = "Alloc"; break;
|
| + case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
|
| + case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
|
| + default: overwrite_name = "UnknownOverwrite"; break;
|
| }
|
|
|
| - virtual void Generate();
|
| + OS::SNPrintF(Vector<char>(name_, len),
|
| + "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
|
| + op_name,
|
| + overwrite_name,
|
| + (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
|
| + args_in_registers_ ? "RegArgs" : "StackArgs",
|
| + args_reversed_ ? "_R" : "",
|
| + static_operands_type_.ToString(),
|
| + BinaryOpIC::GetName(runtime_operands_type_));
|
| + return name_;
|
| +}
|
|
|
| - Label* patch_site() { return &patch_site_; }
|
|
|
| +// Call the specialized stub for a binary operation.
|
| +class DeferredInlineBinaryOperation: public DeferredCode {
|
| + public:
|
| + DeferredInlineBinaryOperation(Token::Value op,
|
| + Register dst,
|
| + Register left,
|
| + Register right,
|
| + OverwriteMode mode)
|
| + : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
|
| + set_comment("[ DeferredInlineBinaryOperation");
|
| + }
|
| +
|
| + virtual void Generate();
|
| +
|
| private:
|
| - Label patch_site_;
|
| + Token::Value op_;
|
| Register dst_;
|
| - Register receiver_;
|
| - Register key_;
|
| + Register left_;
|
| + Register right_;
|
| + OverwriteMode mode_;
|
| };
|
|
|
|
|
| -void DeferredReferenceGetKeyedValue::Generate() {
|
| - if (receiver_.is(rdx)) {
|
| - if (!key_.is(rax)) {
|
| - __ movq(rax, key_);
|
| - } // else do nothing.
|
| - } else if (receiver_.is(rax)) {
|
| - if (key_.is(rdx)) {
|
| - __ xchg(rax, rdx);
|
| - } else if (key_.is(rax)) {
|
| - __ movq(rdx, receiver_);
|
| - } else {
|
| - __ movq(rdx, receiver_);
|
| - __ movq(rax, key_);
|
| +void DeferredInlineBinaryOperation::Generate() {
|
| + Label done;
|
| + if ((op_ == Token::ADD)
|
| + || (op_ == Token::SUB)
|
| + || (op_ == Token::MUL)
|
| + || (op_ == Token::DIV)) {
|
| + Label call_runtime;
|
| + Label left_smi, right_smi, load_right, do_op;
|
| + __ JumpIfSmi(left_, &left_smi);
|
| + __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
|
| + Heap::kHeapNumberMapRootIndex);
|
| + __ j(not_equal, &call_runtime);
|
| + __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
|
| + if (mode_ == OVERWRITE_LEFT) {
|
| + __ movq(dst_, left_);
|
| }
|
| - } else if (key_.is(rax)) {
|
| - __ movq(rdx, receiver_);
|
| + __ jmp(&load_right);
|
| +
|
| + __ bind(&left_smi);
|
| + __ SmiToInteger32(left_, left_);
|
| + __ cvtlsi2sd(xmm0, left_);
|
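| +    // Re-tag left_ as a smi so the runtime fallback below still sees the
|
| +    // original operand.
|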
| + __ Integer32ToSmi(left_, left_);
|
| + if (mode_ == OVERWRITE_LEFT) {
|
| + Label alloc_failure;
|
| + __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
|
| + }
|
| +
|
| + __ bind(&load_right);
|
| + __ JumpIfSmi(right_, &right_smi);
|
| + __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
|
| + Heap::kHeapNumberMapRootIndex);
|
| + __ j(not_equal, &call_runtime);
|
| + __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
|
| + if (mode_ == OVERWRITE_RIGHT) {
|
| + __ movq(dst_, right_);
|
| + } else if (mode_ == NO_OVERWRITE) {
|
| + Label alloc_failure;
|
| + __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
|
| + }
|
| + __ jmp(&do_op);
|
| +
|
| + __ bind(&right_smi);
|
| + __ SmiToInteger32(right_, right_);
|
| + __ cvtlsi2sd(xmm1, right_);
|
| + __ Integer32ToSmi(right_, right_);
|
| + if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
|
| + Label alloc_failure;
|
| + __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
|
| + }
|
| +
|
| + __ bind(&do_op);
|
| + switch (op_) {
|
| + case Token::ADD: __ addsd(xmm0, xmm1); break;
|
| + case Token::SUB: __ subsd(xmm0, xmm1); break;
|
| + case Token::MUL: __ mulsd(xmm0, xmm1); break;
|
| + case Token::DIV: __ divsd(xmm0, xmm1); break;
|
| + default: UNREACHABLE();
|
| + }
|
| + __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
|
| + __ jmp(&done);
|
| +
|
| + __ bind(&call_runtime);
|
| + }
|
| + GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
|
| + stub.GenerateCall(masm_, left_, right_);
|
| + if (!dst_.is(rax)) __ movq(dst_, rax);
|
| + __ bind(&done);
|
| +}
|
| +
|
| +
|
| +static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
|
| + Token::Value op,
|
| + const Result& right,
|
| + const Result& left) {
|
| + // Set TypeInfo of result according to the operation performed.
|
| + // We rely on the fact that smis have a 32 bit payload on x64.
|
| + STATIC_ASSERT(kSmiValueSize == 32);
|
| + switch (op) {
|
| + case Token::COMMA:
|
| + return right.type_info();
|
| + case Token::OR:
|
| + case Token::AND:
|
| + // Result type can be either of the two input types.
|
| + return operands_type;
|
| + case Token::BIT_OR:
|
| + case Token::BIT_XOR:
|
| + case Token::BIT_AND:
|
| + // Result is always a smi.
|
| + return TypeInfo::Smi();
|
| + case Token::SAR:
|
| + case Token::SHL:
|
| + // Result is always a smi.
|
| + return TypeInfo::Smi();
|
| + case Token::SHR:
|
| + // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
|
| + return (right.is_constant() && right.handle()->IsSmi()
|
| + && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
|
| + ? TypeInfo::Smi()
|
| + : TypeInfo::Number();
|
| + case Token::ADD:
|
| + if (operands_type.IsNumber()) {
|
| + return TypeInfo::Number();
|
| + } else if (left.type_info().IsString() || right.type_info().IsString()) {
|
| + return TypeInfo::String();
|
| + } else {
|
| + return TypeInfo::Unknown();
|
| + }
|
| + case Token::SUB:
|
| + case Token::MUL:
|
| + case Token::DIV:
|
| + case Token::MOD:
|
| + // Result is always a number.
|
| + return TypeInfo::Number();
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| + UNREACHABLE();
|
| + return TypeInfo::Unknown();
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
|
| + OverwriteMode overwrite_mode) {
|
| + Comment cmnt(masm_, "[ BinaryOperation");
|
| + Token::Value op = expr->op();
|
| + Comment cmnt_token(masm_, Token::String(op));
|
| +
|
| + if (op == Token::COMMA) {
|
| + // Simply discard left value.
|
| + frame_->Nip(1);
|
| + return;
|
| + }
|
| +
|
| + Result right = frame_->Pop();
|
| + Result left = frame_->Pop();
|
| +
|
| + if (op == Token::ADD) {
|
| + const bool left_is_string = left.type_info().IsString();
|
| + const bool right_is_string = right.type_info().IsString();
|
| + // Make sure constant strings have string type info.
|
| + ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
|
| + left_is_string);
|
| + ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
|
| + right_is_string);
|
| + if (left_is_string || right_is_string) {
|
| + frame_->Push(&left);
|
| + frame_->Push(&right);
|
| + Result answer;
|
| + if (left_is_string) {
|
| + if (right_is_string) {
|
| + StringAddStub stub(NO_STRING_CHECK_IN_STUB);
|
| + answer = frame_->CallStub(&stub, 2);
|
| + } else {
|
| + answer =
|
| + frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
|
| + }
|
| + } else if (right_is_string) {
|
| + answer =
|
| + frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
|
| + }
|
| + answer.set_type_info(TypeInfo::String());
|
| + frame_->Push(&answer);
|
| + return;
|
| + }
|
| + // Neither operand is known to be a string.
|
| + }
|
| +
|
| + bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
|
| + bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
|
| + bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
|
| + bool right_is_non_smi_constant =
|
| + right.is_constant() && !right.handle()->IsSmi();
|
| +
|
| + if (left_is_smi_constant && right_is_smi_constant) {
|
| + // Compute the constant result at compile time, and leave it on the frame.
|
| + int left_int = Smi::cast(*left.handle())->value();
|
| + int right_int = Smi::cast(*right.handle())->value();
|
| + if (FoldConstantSmis(op, left_int, right_int)) return;
|
| + }
|
| +
|
| + // Get number type of left and right sub-expressions.
|
| + TypeInfo operands_type =
|
| + TypeInfo::Combine(left.type_info(), right.type_info());
|
| +
|
| + TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
|
| +
|
| + Result answer;
|
| + if (left_is_non_smi_constant || right_is_non_smi_constant) {
|
| + // Go straight to the slow case, with no smi code.
|
| + GenericBinaryOpStub stub(op,
|
| + overwrite_mode,
|
| + NO_SMI_CODE_IN_STUB,
|
| + operands_type);
|
| + answer = stub.GenerateCall(masm_, frame_, &left, &right);
|
| + } else if (right_is_smi_constant) {
|
| + answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
|
| + false, overwrite_mode);
|
| + } else if (left_is_smi_constant) {
|
| + answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
|
| + true, overwrite_mode);
|
| } else {
|
| - __ movq(rax, key_);
|
| - __ movq(rdx, receiver_);
|
| + // Set the flags based on the operation, type and loop nesting level.
|
| + // Bit operations always assume they likely operate on Smis. Still only
|
| + // generate the inline Smi check code if this operation is part of a loop.
|
| + // For all other operations only inline the Smi check code for likely smis
|
| + // if the operation is part of a loop.
|
| + if (loop_nesting() > 0 &&
|
| + (Token::IsBitOp(op) ||
|
| + operands_type.IsInteger32() ||
|
| + expr->type()->IsLikelySmi())) {
|
| + answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
|
| + } else {
|
| + GenericBinaryOpStub stub(op,
|
| + overwrite_mode,
|
| + NO_GENERIC_BINARY_FLAGS,
|
| + operands_type);
|
| + answer = stub.GenerateCall(masm_, frame_, &left, &right);
|
| + }
|
| }
|
| - // Calculate the delta from the IC call instruction to the map check
|
| - // movq instruction in the inlined version. This delta is stored in
|
| - // a test(rax, delta) instruction after the call so that we can find
|
| - // it in the IC initialization code and patch the movq instruction.
|
| - // This means that we cannot allow test instructions after calls to
|
| - // KeyedLoadIC stubs in other places.
|
| - Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
|
| - __ Call(ic, RelocInfo::CODE_TARGET);
|
| - // The delta from the start of the map-compare instruction to the
|
| - // test instruction. We use masm_-> directly here instead of the __
|
| - // macro because the macro sometimes uses macro expansion to turn
|
| - // into something that can't return a value. This is encountered
|
| - // when doing generated code coverage tests.
|
| - int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
|
| - // Here we use masm_-> instead of the __ macro because this is the
|
| - // instruction that gets patched and coverage code gets in the way.
|
| - // TODO(X64): Consider whether it's worth switching the test to a
|
| - // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
|
| - // be generated normally.
|
| - masm_->testl(rax, Immediate(-delta_to_patch_site));
|
| - __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
|
|
|
| + answer.set_type_info(result_type);
|
| + frame_->Push(&answer);
|
| +}
|
| +
|
| +
|
| +bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
|
| + Object* answer_object = Heap::undefined_value();
|
| + switch (op) {
|
| + case Token::ADD:
|
| + // Use intptr_t to detect overflow of 32-bit int.
|
| + if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
|
| + answer_object = Smi::FromInt(left + right);
|
| + }
|
| + break;
|
| + case Token::SUB:
|
| + // Use intptr_t to detect overflow of 32-bit int.
|
| + if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
|
| + answer_object = Smi::FromInt(left - right);
|
| + }
|
| + break;
|
| + case Token::MUL: {
|
| + double answer = static_cast<double>(left) * right;
|
| + if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
|
| + // If the product is zero and the non-zero factor is negative,
|
| + // the spec requires us to return floating point negative zero.
|
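| +        // When the product is zero, left + right equals the non-zero
|
| +        // factor (or zero), so its sign decides between 0 and -0.
|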
| + if (answer != 0 || (left + right) >= 0) {
|
| + answer_object = Smi::FromInt(static_cast<int>(answer));
|
| + }
|
| + }
|
| + }
|
| + break;
|
| + case Token::DIV:
|
| + case Token::MOD:
|
| + break;
|
| + case Token::BIT_OR:
|
| + answer_object = Smi::FromInt(left | right);
|
| + break;
|
| + case Token::BIT_AND:
|
| + answer_object = Smi::FromInt(left & right);
|
| + break;
|
| + case Token::BIT_XOR:
|
| + answer_object = Smi::FromInt(left ^ right);
|
| + break;
|
| +
|
| + case Token::SHL: {
|
| + int shift_amount = right & 0x1F;
|
| + if (Smi::IsValid(left << shift_amount)) {
|
| + answer_object = Smi::FromInt(left << shift_amount);
|
| + }
|
| + break;
|
| + }
|
| + case Token::SHR: {
|
| + int shift_amount = right & 0x1F;
|
| + unsigned int unsigned_left = left;
|
| + unsigned_left >>= shift_amount;
|
| + if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
|
| + answer_object = Smi::FromInt(unsigned_left);
|
| + }
|
| + break;
|
| + }
|
| + case Token::SAR: {
|
| + int shift_amount = right & 0x1F;
|
| + unsigned int unsigned_left = left;
|
| + if (left < 0) {
|
| + // Perform arithmetic shift of a negative number by
|
| + // complementing number, logical shifting, complementing again.
|
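| +        // (~(~x >> n) equals the arithmetic shift x >> n, and using an
|
| +        // unsigned shift avoids implementation-defined behavior in C++.)
|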
| + unsigned_left = ~unsigned_left;
|
| + unsigned_left >>= shift_amount;
|
| + unsigned_left = ~unsigned_left;
|
| + } else {
|
| + unsigned_left >>= shift_amount;
|
| + }
|
| + ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
|
| + answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
|
| + break;
|
| + }
|
| + default:
|
| + UNREACHABLE();
|
| + break;
|
| + }
|
| + if (answer_object == Heap::undefined_value()) {
|
| + return false;
|
| + }
|
| + frame_->Push(Handle<Object>(answer_object));
|
| + return true;
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
|
| + TypeInfo type,
|
| + DeferredCode* deferred) {
|
| + if (!type.IsSmi()) {
|
| + __ JumpIfNotSmi(reg, deferred->entry_label());
|
| + }
|
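| +  // If TypeInfo already proves the value is a smi, the dynamic check is
|
| +  // elided; debug builds still verify the static type information.
|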
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotSmi(reg);
|
| + }
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
|
| + Register right,
|
| + TypeInfo left_info,
|
| + TypeInfo right_info,
|
| + DeferredCode* deferred) {
|
| + if (!left_info.IsSmi() && !right_info.IsSmi()) {
|
| + __ JumpIfNotBothSmi(left, right, deferred->entry_label());
|
| + } else if (!left_info.IsSmi()) {
|
| + __ JumpIfNotSmi(left, deferred->entry_label());
|
| + } else if (!right_info.IsSmi()) {
|
| + __ JumpIfNotSmi(right, deferred->entry_label());
|
| + }
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotSmi(left);
|
| + __ AbortIfNotSmi(right);
|
| + }
|
| +}
|
| +
|
| +
|
| +// Implements a binary operation using a deferred code object and some
|
| +// inline code to operate on smis quickly.
|
| +Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
|
| + Result* left,
|
| + Result* right,
|
| + OverwriteMode overwrite_mode) {
|
| + // Copy the type info because left and right may be overwritten.
|
| + TypeInfo left_type_info = left->type_info();
|
| + TypeInfo right_type_info = right->type_info();
|
| + Token::Value op = expr->op();
|
| + Result answer;
|
| + // Special handling of div and mod because they use fixed registers.
|
| + if (op == Token::DIV || op == Token::MOD) {
|
| + // We need rax as the quotient register, rdx as the remainder
|
| + // register, neither left nor right in rax or rdx, and left copied
|
| + // to rax.
|
| + Result quotient;
|
| + Result remainder;
|
| + bool left_is_in_rax = false;
|
| + // Step 1: get rax for quotient.
|
| + if ((left->is_register() && left->reg().is(rax)) ||
|
| + (right->is_register() && right->reg().is(rax))) {
|
| + // One or both is in rax. Use a fresh non-rdx register for
|
| + // them.
|
| + Result fresh = allocator_->Allocate();
|
| + ASSERT(fresh.is_valid());
|
| + if (fresh.reg().is(rdx)) {
|
| + remainder = fresh;
|
| + fresh = allocator_->Allocate();
|
| + ASSERT(fresh.is_valid());
|
| + }
|
| + if (left->is_register() && left->reg().is(rax)) {
|
| + quotient = *left;
|
| + *left = fresh;
|
| + left_is_in_rax = true;
|
| + }
|
| + if (right->is_register() && right->reg().is(rax)) {
|
| + quotient = *right;
|
| + *right = fresh;
|
| + }
|
| + __ movq(fresh.reg(), rax);
|
| + } else {
|
| + // Neither left nor right is in rax.
|
| + quotient = allocator_->Allocate(rax);
|
| + }
|
| + ASSERT(quotient.is_register() && quotient.reg().is(rax));
|
| + ASSERT(!(left->is_register() && left->reg().is(rax)));
|
| + ASSERT(!(right->is_register() && right->reg().is(rax)));
|
| +
|
| + // Step 2: get rdx for remainder if necessary.
|
| + if (!remainder.is_valid()) {
|
| + if ((left->is_register() && left->reg().is(rdx)) ||
|
| + (right->is_register() && right->reg().is(rdx))) {
|
| + Result fresh = allocator_->Allocate();
|
| + ASSERT(fresh.is_valid());
|
| + if (left->is_register() && left->reg().is(rdx)) {
|
| + remainder = *left;
|
| + *left = fresh;
|
| + }
|
| + if (right->is_register() && right->reg().is(rdx)) {
|
| + remainder = *right;
|
| + *right = fresh;
|
| + }
|
| + __ movq(fresh.reg(), rdx);
|
| + } else {
|
| + // Neither left nor right is in rdx.
|
| + remainder = allocator_->Allocate(rdx);
|
| + }
|
| + }
|
| + ASSERT(remainder.is_register() && remainder.reg().is(rdx));
|
| + ASSERT(!(left->is_register() && left->reg().is(rdx)));
|
| + ASSERT(!(right->is_register() && right->reg().is(rdx)));
|
| +
|
| + left->ToRegister();
|
| + right->ToRegister();
|
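| +    // The division instruction uses the fixed rdx:rax register pair, so
|
| +    // both registers must be spilled before the inline smi code runs.
|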
| + frame_->Spill(rax);
|
| + frame_->Spill(rdx);
|
| +
|
| + // Check that left and right are smi tagged.
|
| + DeferredInlineBinaryOperation* deferred =
|
| + new DeferredInlineBinaryOperation(op,
|
| + (op == Token::DIV) ? rax : rdx,
|
| + left->reg(),
|
| + right->reg(),
|
| + overwrite_mode);
|
| + JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
|
| + left_type_info, right_type_info, deferred);
|
| +
|
| + if (op == Token::DIV) {
|
| + __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
|
| + deferred->BindExit();
|
| + left->Unuse();
|
| + right->Unuse();
|
| + answer = quotient;
|
| + } else {
|
| + ASSERT(op == Token::MOD);
|
| + __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
|
| + deferred->BindExit();
|
| + left->Unuse();
|
| + right->Unuse();
|
| + answer = remainder;
|
| + }
|
| + ASSERT(answer.is_valid());
|
| + return answer;
|
| + }
|
| +
|
| + // Special handling of shift operations because they use fixed
|
| + // registers.
|
| + if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
|
| + // Move left out of rcx if necessary.
|
| + if (left->is_register() && left->reg().is(rcx)) {
|
| + *left = allocator_->Allocate();
|
| + ASSERT(left->is_valid());
|
| + __ movq(left->reg(), rcx);
|
| + }
|
| + right->ToRegister(rcx);
|
| + left->ToRegister();
|
| + ASSERT(left->is_register() && !left->reg().is(rcx));
|
| + ASSERT(right->is_register() && right->reg().is(rcx));
|
| +
|
| + // We will modify right, it must be spilled.
|
| + frame_->Spill(rcx);
|
| +
|
| + // Use a fresh answer register to avoid spilling the left operand.
|
| + answer = allocator_->Allocate();
|
| + ASSERT(answer.is_valid());
|
| + // Check that both operands are smis using the answer register as a
|
| + // temporary.
|
| + DeferredInlineBinaryOperation* deferred =
|
| + new DeferredInlineBinaryOperation(op,
|
| + answer.reg(),
|
| + left->reg(),
|
| + rcx,
|
| + overwrite_mode);
|
| +
|
| + Label do_op;
|
| + if (right_type_info.IsSmi()) {
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotSmi(right->reg());
|
| + }
|
| + __ movq(answer.reg(), left->reg());
|
| + // If left is not known to be a smi, check if it is.
|
| + // If left is not known to be a number, and it isn't a smi, check if
|
| + // it is a HeapNumber.
|
| + if (!left_type_info.IsSmi()) {
|
| + __ JumpIfSmi(answer.reg(), &do_op);
|
| + if (!left_type_info.IsNumber()) {
|
| + // Branch if not a heapnumber.
|
| + __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
|
| + Factory::heap_number_map());
|
| + deferred->Branch(not_equal);
|
| + }
|
| + // Load integer value into answer register using truncation.
|
| + __ cvttsd2si(answer.reg(),
|
| + FieldOperand(answer.reg(), HeapNumber::kValueOffset));
|
| + // Branch if we might have overflowed.
|
| + // (False negative for Smi::kMinValue)
|
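| +      // cvttsd2si produces 0x80000000 (the "integer indefinite" value)
|
| +      // when the double does not fit in an int32.
|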
| + __ cmpq(answer.reg(), Immediate(0x80000000));
|
| + deferred->Branch(equal);
|
| + // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
|
| + __ Integer32ToSmi(answer.reg(), answer.reg());
|
| + } else {
|
| + // Fast case - both are actually smis.
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotSmi(left->reg());
|
| + }
|
| + }
|
| + } else {
|
| + JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
|
| + left_type_info, right_type_info, deferred);
|
| + }
|
| + __ bind(&do_op);
|
| +
|
| + // Perform the operation.
|
| + switch (op) {
|
| + case Token::SAR:
|
| + __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
|
| + break;
|
| + case Token::SHR: {
|
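| +        // x >>> y can leave the smi range (e.g. a negative smi shifted
|
| +        // by zero), so this may bail out to the deferred code.
|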
| + __ SmiShiftLogicalRight(answer.reg(),
|
| + left->reg(),
|
| + rcx,
|
| + deferred->entry_label());
|
| + break;
|
| + }
|
| + case Token::SHL: {
|
| + __ SmiShiftLeft(answer.reg(),
|
| + left->reg(),
|
| + rcx);
|
| + break;
|
| + }
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| + deferred->BindExit();
|
| + left->Unuse();
|
| + right->Unuse();
|
| + ASSERT(answer.is_valid());
|
| + return answer;
|
| + }
|
| +
|
| + // Handle the other binary operations.
|
| + left->ToRegister();
|
| + right->ToRegister();
|
| + // A newly allocated register answer is used to hold the answer. The
|
| + // registers containing left and right are not modified so they don't
|
| + // need to be spilled in the fast case.
|
| + answer = allocator_->Allocate();
|
| + ASSERT(answer.is_valid());
|
| +
|
| + // Perform the smi tag check.
|
| + DeferredInlineBinaryOperation* deferred =
|
| + new DeferredInlineBinaryOperation(op,
|
| + answer.reg(),
|
| + left->reg(),
|
| + right->reg(),
|
| + overwrite_mode);
|
| + JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
|
| + left_type_info, right_type_info, deferred);
|
| +
|
| + switch (op) {
|
| + case Token::ADD:
|
| + __ SmiAdd(answer.reg(),
|
| + left->reg(),
|
| + right->reg(),
|
| + deferred->entry_label());
|
| + break;
|
| +
|
| + case Token::SUB:
|
| + __ SmiSub(answer.reg(),
|
| + left->reg(),
|
| + right->reg(),
|
| + deferred->entry_label());
|
| + break;
|
| +
|
| + case Token::MUL: {
|
| + __ SmiMul(answer.reg(),
|
| + left->reg(),
|
| + right->reg(),
|
| + deferred->entry_label());
|
| + break;
|
| + }
|
| +
|
| + case Token::BIT_OR:
|
| + __ SmiOr(answer.reg(), left->reg(), right->reg());
|
| + break;
|
| +
|
| + case Token::BIT_AND:
|
| + __ SmiAnd(answer.reg(), left->reg(), right->reg());
|
| + break;
|
| +
|
| + case Token::BIT_XOR:
|
| + __ SmiXor(answer.reg(), left->reg(), right->reg());
|
| + break;
|
| +
|
| + default:
|
| + UNREACHABLE();
|
| + break;
|
| + }
|
| + deferred->BindExit();
|
| + left->Unuse();
|
| + right->Unuse();
|
| + ASSERT(answer.is_valid());
|
| + return answer;
|
| +}
|
| +
|
| +
|
| +// Call the appropriate binary operation stub to compute src op value
|
| +// and leave the result in dst.
|
| +class DeferredInlineSmiOperation: public DeferredCode {
|
| + public:
|
| + DeferredInlineSmiOperation(Token::Value op,
|
| + Register dst,
|
| + Register src,
|
| + Smi* value,
|
| + OverwriteMode overwrite_mode)
|
| + : op_(op),
|
| + dst_(dst),
|
| + src_(src),
|
| + value_(value),
|
| + overwrite_mode_(overwrite_mode) {
|
| + set_comment("[ DeferredInlineSmiOperation");
|
| + }
|
| +
|
| + virtual void Generate();
|
| +
|
| + private:
|
| + Token::Value op_;
|
| + Register dst_;
|
| + Register src_;
|
| + Smi* value_;
|
| + OverwriteMode overwrite_mode_;
|
| +};
|
| +
|
| +
|
| +void DeferredInlineSmiOperation::Generate() {
|
| + // For mod we don't generate all the Smi code inline.
|
| + GenericBinaryOpStub stub(
|
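| +  // The stub therefore keeps its smi path for MOD; the other operations
|
| +  // were already handled inline, so their stub can omit it.
|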
| + op_,
|
| + overwrite_mode_,
|
| + (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
|
| + stub.GenerateCall(masm_, src_, value_);
|
| if (!dst_.is(rax)) __ movq(dst_, rax);
|
| }
|
|
|
|
|
| -class DeferredReferenceSetKeyedValue: public DeferredCode {
|
| +// Call the appropriate binary operation stub to compute value op src
|
| +// and leave the result in dst.
|
| +class DeferredInlineSmiOperationReversed: public DeferredCode {
|
| public:
|
| - DeferredReferenceSetKeyedValue(Register value,
|
| - Register key,
|
| - Register receiver)
|
| - : value_(value), key_(key), receiver_(receiver) {
|
| - set_comment("[ DeferredReferenceSetKeyedValue");
|
| + DeferredInlineSmiOperationReversed(Token::Value op,
|
| + Register dst,
|
| + Smi* value,
|
| + Register src,
|
| + OverwriteMode overwrite_mode)
|
| + : op_(op),
|
| + dst_(dst),
|
| + value_(value),
|
| + src_(src),
|
| + overwrite_mode_(overwrite_mode) {
|
| + set_comment("[ DeferredInlineSmiOperationReversed");
|
| }
|
|
|
| virtual void Generate();
|
|
|
| - Label* patch_site() { return &patch_site_; }
|
| + private:
|
| + Token::Value op_;
|
| + Register dst_;
|
| + Smi* value_;
|
| + Register src_;
|
| + OverwriteMode overwrite_mode_;
|
| +};
|
|
|
| +
|
| +void DeferredInlineSmiOperationReversed::Generate() {
|
| + GenericBinaryOpStub stub(
|
| + op_,
|
| + overwrite_mode_,
|
| + NO_SMI_CODE_IN_STUB);
|
| + stub.GenerateCall(masm_, value_, src_);
|
| + if (!dst_.is(rax)) __ movq(dst_, rax);
|
| +}
|
| +
|
| +
|
| +class DeferredInlineSmiAdd: public DeferredCode {
|
| + public:
|
| + DeferredInlineSmiAdd(Register dst,
|
| + Smi* value,
|
| + OverwriteMode overwrite_mode)
|
| + : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
|
| + set_comment("[ DeferredInlineSmiAdd");
|
| + }
|
| +
|
| + virtual void Generate();
|
| +
|
| private:
|
| - Register value_;
|
| - Register key_;
|
| - Register receiver_;
|
| - Label patch_site_;
|
| + Register dst_;
|
| + Smi* value_;
|
| + OverwriteMode overwrite_mode_;
|
| };
|
|
|
|
|
| -void DeferredReferenceSetKeyedValue::Generate() {
|
| - __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
|
| - // Move value, receiver, and key to registers rax, rdx, and rcx, as
|
| - // the IC stub expects.
|
| - // Move value to rax, using xchg if the receiver or key is in rax.
|
| - if (!value_.is(rax)) {
|
| - if (!receiver_.is(rax) && !key_.is(rax)) {
|
| - __ movq(rax, value_);
|
| +void DeferredInlineSmiAdd::Generate() {
|
| + GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
|
| + igostub.GenerateCall(masm_, dst_, value_);
|
| + if (!dst_.is(rax)) __ movq(dst_, rax);
|
| +}
|
| +
|
| +
|
| +// The result of value + src is in dst. It either overflowed or was not
|
| +// smi tagged. Undo the speculative addition and call the appropriate
|
| +// specialized stub for add. The result is left in dst.
|
| +class DeferredInlineSmiAddReversed: public DeferredCode {
|
| + public:
|
| + DeferredInlineSmiAddReversed(Register dst,
|
| + Smi* value,
|
| + OverwriteMode overwrite_mode)
|
| + : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
|
| + set_comment("[ DeferredInlineSmiAddReversed");
|
| + }
|
| +
|
| + virtual void Generate();
|
| +
|
| + private:
|
| + Register dst_;
|
| + Smi* value_;
|
| + OverwriteMode overwrite_mode_;
|
| +};
|
| +
|
| +
|
| +void DeferredInlineSmiAddReversed::Generate() {
|
| + GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
|
| + igostub.GenerateCall(masm_, value_, dst_);
|
| + if (!dst_.is(rax)) __ movq(dst_, rax);
|
| +}
|
| +
|
| +
|
| +class DeferredInlineSmiSub: public DeferredCode {
|
| + public:
|
| + DeferredInlineSmiSub(Register dst,
|
| + Smi* value,
|
| + OverwriteMode overwrite_mode)
|
| + : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
|
| + set_comment("[ DeferredInlineSmiSub");
|
| + }
|
| +
|
| + virtual void Generate();
|
| +
|
| + private:
|
| + Register dst_;
|
| + Smi* value_;
|
| + OverwriteMode overwrite_mode_;
|
| +};
|
| +
|
| +
|
| +void DeferredInlineSmiSub::Generate() {
|
| + GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
|
| + igostub.GenerateCall(masm_, dst_, value_);
|
| + if (!dst_.is(rax)) __ movq(dst_, rax);
|
| +}
|
| +
|
| +
|
| +Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
|
| + Result* operand,
|
| + Handle<Object> value,
|
| + bool reversed,
|
| + OverwriteMode overwrite_mode) {
|
| + // Generate inline code for a binary operation when one of the
|
| + // operands is a constant smi. Consumes the argument "operand".
|
| + if (IsUnsafeSmi(value)) {
|
| + Result unsafe_operand(value);
|
| + if (reversed) {
|
| + return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
|
| + overwrite_mode);
|
| } else {
|
| - __ xchg(rax, value_);
|
| - // Update receiver_ and key_ if they are affected by the swap.
|
| - if (receiver_.is(rax)) {
|
| - receiver_ = value_;
|
| - } else if (receiver_.is(value_)) {
|
| - receiver_ = rax;
|
| + return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
|
| + overwrite_mode);
|
| + }
|
| + }
|
| +
|
| + // Get the literal value.
|
| + Smi* smi_value = Smi::cast(*value);
|
| + int int_value = smi_value->value();
|
| +
|
| + Token::Value op = expr->op();
|
| + Result answer;
|
| + switch (op) {
|
| + case Token::ADD: {
|
| + operand->ToRegister();
|
| + frame_->Spill(operand->reg());
|
| + DeferredCode* deferred = NULL;
|
| + if (reversed) {
|
| + deferred = new DeferredInlineSmiAddReversed(operand->reg(),
|
| + smi_value,
|
| + overwrite_mode);
|
| + } else {
|
| + deferred = new DeferredInlineSmiAdd(operand->reg(),
|
| + smi_value,
|
| + overwrite_mode);
|
| }
|
| - if (key_.is(rax)) {
|
| - key_ = value_;
|
| - } else if (key_.is(value_)) {
|
| - key_ = rax;
|
| + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| + deferred);
|
| + __ SmiAddConstant(operand->reg(),
|
| + operand->reg(),
|
| + smi_value,
|
| + deferred->entry_label());
|
| + deferred->BindExit();
|
| + answer = *operand;
|
| + break;
|
| + }
|
| +
|
| + case Token::SUB: {
|
| + if (reversed) {
|
| + Result constant_operand(value);
|
| + answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
|
| + overwrite_mode);
|
| + } else {
|
| + operand->ToRegister();
|
| + frame_->Spill(operand->reg());
|
| + DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
|
| + smi_value,
|
| + overwrite_mode);
|
| + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| + deferred);
|
| + // A smi currently fits in a 32-bit Immediate.
|
| + __ SmiSubConstant(operand->reg(),
|
| + operand->reg(),
|
| + smi_value,
|
| + deferred->entry_label());
|
| + deferred->BindExit();
|
| + answer = *operand;
|
| }
|
| + break;
|
| }
|
| +
|
| + case Token::SAR:
|
| + if (reversed) {
|
| + Result constant_operand(value);
|
| + answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
|
| + overwrite_mode);
|
| + } else {
|
| + // Only the least significant 5 bits of the shift value are used.
|
| + // In the slow case, this masking is done inside the runtime call.
|
| + int shift_value = int_value & 0x1f;
|
| + operand->ToRegister();
|
| + frame_->Spill(operand->reg());
|
| + DeferredInlineSmiOperation* deferred =
|
| + new DeferredInlineSmiOperation(op,
|
| + operand->reg(),
|
| + operand->reg(),
|
| + smi_value,
|
| + overwrite_mode);
|
| + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| + deferred);
|
| + __ SmiShiftArithmeticRightConstant(operand->reg(),
|
| + operand->reg(),
|
| + shift_value);
|
| + deferred->BindExit();
|
| + answer = *operand;
|
| + }
|
| + break;
|
| +
|
| + case Token::SHR:
|
| + if (reversed) {
|
| + Result constant_operand(value);
|
| + answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
|
| + overwrite_mode);
|
| + } else {
|
| + // Only the least significant 5 bits of the shift value are used.
|
| + // In the slow case, this masking is done inside the runtime call.
|
| + int shift_value = int_value & 0x1f;
|
| + operand->ToRegister();
|
| + answer = allocator()->Allocate();
|
| + ASSERT(answer.is_valid());
|
| + DeferredInlineSmiOperation* deferred =
|
| + new DeferredInlineSmiOperation(op,
|
| + answer.reg(),
|
| + operand->reg(),
|
| + smi_value,
|
| + overwrite_mode);
|
| + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| + deferred);
|
| + __ SmiShiftLogicalRightConstant(answer.reg(),
|
| + operand->reg(),
|
| + shift_value,
|
| + deferred->entry_label());
|
| + deferred->BindExit();
|
| + operand->Unuse();
|
| + }
|
| + break;
|
| +
|
| + case Token::SHL:
|
| + if (reversed) {
|
| + operand->ToRegister();
|
| +
|
| + // We need rcx to be available to hold operand, and to be spilled.
|
| + // SmiShiftLeft implicitly modifies rcx.
|
| + if (operand->reg().is(rcx)) {
|
| + frame_->Spill(operand->reg());
|
| + answer = allocator()->Allocate();
|
| + } else {
|
| + Result rcx_reg = allocator()->Allocate(rcx);
|
| + // answer must not be rcx.
|
| + answer = allocator()->Allocate();
|
| + // rcx_reg goes out of scope.
|
| + }
|
| +
|
| + DeferredInlineSmiOperationReversed* deferred =
|
| + new DeferredInlineSmiOperationReversed(op,
|
| + answer.reg(),
|
| + smi_value,
|
| + operand->reg(),
|
| + overwrite_mode);
|
| + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| + deferred);
|
| +
|
| + __ Move(answer.reg(), smi_value);
|
| + __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
|
| + operand->Unuse();
|
| +
|
| + deferred->BindExit();
|
| + } else {
|
| + // Only the least significant 5 bits of the shift value are used.
|
| + // In the slow case, this masking is done inside the runtime call.
|
| + int shift_value = int_value & 0x1f;
|
| + operand->ToRegister();
|
| + if (shift_value == 0) {
|
| + // Spill operand so it can be overwritten in the slow case.
|
| + frame_->Spill(operand->reg());
|
| + DeferredInlineSmiOperation* deferred =
|
| + new DeferredInlineSmiOperation(op,
|
| + operand->reg(),
|
| + operand->reg(),
|
| + smi_value,
|
| + overwrite_mode);
|
| + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| + deferred);
|
| + deferred->BindExit();
|
| + answer = *operand;
|
| + } else {
|
| + // Use a fresh temporary for nonzero shift values.
|
| + answer = allocator()->Allocate();
|
| + ASSERT(answer.is_valid());
|
| + DeferredInlineSmiOperation* deferred =
|
| + new DeferredInlineSmiOperation(op,
|
| + answer.reg(),
|
| + operand->reg(),
|
| + smi_value,
|
| + overwrite_mode);
|
| + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| + deferred);
|
| + __ SmiShiftLeftConstant(answer.reg(),
|
| + operand->reg(),
|
| + shift_value);
|
| + deferred->BindExit();
|
| + operand->Unuse();
|
| + }
|
| + }
|
| + break;
|
| +
|
| + case Token::BIT_OR:
|
| + case Token::BIT_XOR:
|
| + case Token::BIT_AND: {
|
| + operand->ToRegister();
|
| + frame_->Spill(operand->reg());
|
| + if (reversed) {
|
| + // Bit operations with a constant smi are commutative.
|
| + // We can swap left and right operands with no problem.
|
| + // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
|
| + overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
|
| + }
|
| + DeferredCode* deferred = new DeferredInlineSmiOperation(op,
|
| + operand->reg(),
|
| + operand->reg(),
|
| + smi_value,
|
| + overwrite_mode);
|
| + JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| + deferred);
|
| + if (op == Token::BIT_AND) {
|
| + __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
|
| + } else if (op == Token::BIT_XOR) {
|
| + if (int_value != 0) {
|
| + __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
|
| + }
|
| + } else {
|
| + ASSERT(op == Token::BIT_OR);
|
| + if (int_value != 0) {
|
| + __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
|
| + }
|
| + }
|
| + deferred->BindExit();
|
| + answer = *operand;
|
| + break;
|
| + }
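
The 0 -> 0, 1 -> 2, 2 -> 1 swap above is easy to convince yourself of in
isolation. A minimal standalone sketch, assuming the usual enum order
NO_OVERWRITE = 0, OVERWRITE_LEFT = 1, OVERWRITE_RIGHT = 2 (inferred from
the comment, not quoted from the header):

  // Sanity check: (2 * m) % 3 swaps the two overwrite sides.
  #include <cassert>

  enum OverwriteMode { NO_OVERWRITE = 0, OVERWRITE_LEFT = 1, OVERWRITE_RIGHT = 2 };

  int main() {
    assert((2 * NO_OVERWRITE) % 3 == NO_OVERWRITE);
    assert((2 * OVERWRITE_LEFT) % 3 == OVERWRITE_RIGHT);
    assert((2 * OVERWRITE_RIGHT) % 3 == OVERWRITE_LEFT);
    return 0;
  }
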
|
| +
|
| + // Generate inline code for mod of powers of 2 and negative powers of 2.
|
| + case Token::MOD:
|
| + if (!reversed &&
|
| + int_value != 0 &&
|
| + (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
|
| + operand->ToRegister();
|
| + frame_->Spill(operand->reg());
|
| + DeferredCode* deferred =
|
| + new DeferredInlineSmiOperation(op,
|
| + operand->reg(),
|
| + operand->reg(),
|
| + smi_value,
|
| + overwrite_mode);
|
| + // Check for negative or non-Smi left hand side.
|
| + __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
|
| + if (int_value < 0) int_value = -int_value;
|
| + if (int_value == 1) {
|
| + __ Move(operand->reg(), Smi::FromInt(0));
|
| + } else {
|
| + __ SmiAndConstant(operand->reg(),
|
| + operand->reg(),
|
| + Smi::FromInt(int_value - 1));
|
| + }
|
| + deferred->BindExit();
|
| + answer = *operand;
|
| + break; // This break only applies if we generated code for MOD.
|
| + }
|
| + // Fall through if we did not find a power of 2 on the right hand side!
|
| + // The next case must be the default.
|
| +
|
| + default: {
|
| + Result constant_operand(value);
|
| + if (reversed) {
|
| + answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
|
| + overwrite_mode);
|
| + } else {
|
| + answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
|
| + overwrite_mode);
|
| + }
|
| + break;
|
| + }
|
| }
|
| - // Value is now in rax. Its original location is remembered in value_,
|
| - // and the value is restored to value_ before returning.
|
| - // The variables receiver_ and key_ are not preserved.
|
| - // Move receiver and key to rdx and rcx, swapping if necessary.
|
| - if (receiver_.is(rdx)) {
|
| - if (!key_.is(rcx)) {
|
| - __ movq(rcx, key_);
|
| - } // Else everything is already in the right place.
|
| - } else if (receiver_.is(rcx)) {
|
| - if (key_.is(rdx)) {
|
| - __ xchg(rcx, rdx);
|
| - } else if (key_.is(rcx)) {
|
| - __ movq(rdx, receiver_);
|
| + ASSERT(answer.is_valid());
|
| + return answer;
|
| +}
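
The MOD fast path above relies on the identity x % d == x & (d - 1) for
non-negative x and |d| a power of two; a negative divisor can be folded to
its absolute value because the result of % takes the sign of the dividend,
and the JumpIfNotPositiveSmi guard keeps the dividend non-negative. A
standalone check of the identity:

  // x % d == x & (d - 1) for x >= 0 and power-of-two |d|; the sign of
  // the divisor is irrelevant because % follows the dividend's sign.
  #include <cassert>

  int main() {
    for (int x = 0; x <= 64; ++x) {
      for (int d = 2; d <= 32; d *= 2) {
        assert(x % d == (x & (d - 1)));
        assert(x % -d == (x & (d - 1)));
      }
      assert(x % 1 == 0);  // The int_value == 1 case stores Smi 0.
    }
    return 0;
  }
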
|
| +
|
| +static bool CouldBeNaN(const Result& result) {
|
| + if (result.type_info().IsSmi()) return false;
|
| + if (result.type_info().IsInteger32()) return false;
|
| + if (!result.is_constant()) return true;
|
| + if (!result.handle()->IsHeapNumber()) return false;
|
| + return isnan(HeapNumber::cast(*result.handle())->value());
|
| +}
|
| +
|
| +
|
| +// Convert from signed to unsigned comparison to match the way EFLAGS are set
|
| +// by FPU and XMM compare instructions.
|
| +static Condition DoubleCondition(Condition cc) {
|
| + switch (cc) {
|
| + case less: return below;
|
| + case equal: return equal;
|
| + case less_equal: return below_equal;
|
| + case greater: return above;
|
| + case greater_equal: return above_equal;
|
| + default: UNREACHABLE();
|
| + }
|
| + UNREACHABLE();
|
| + return equal;
|
| +}
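
The mapping in DoubleCondition follows from how ucomisd reports its
result: ZF, CF, and PF come back as if an unsigned integer compare had
run, with PF flagging an unordered (NaN) operand. A standalone model of
those flag semantics, independent of V8:

  // After ucomisd a, b:  unordered -> ZF=CF=PF=1;  a<b -> CF=1 ("below");
  // a==b -> ZF=1;  a>b -> all clear ("above").
  #include <cassert>
  #include <cmath>

  struct Flags { bool zf, cf, pf; };

  Flags Ucomisd(double a, double b) {
    if (std::isnan(a) || std::isnan(b)) return Flags{true, true, true};
    if (a == b) return Flags{true, false, false};
    if (a < b) return Flags{false, true, false};
    return Flags{false, false, false};
  }

  int main() {
    assert(Ucomisd(1.0, 2.0).cf);   // less maps to below.
    assert(Ucomisd(2.0, 2.0).zf);   // equal stays equal.
    assert(Ucomisd(NAN, 2.0).pf);   // NaN takes the parity_even path.
    return 0;
  }
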
|
| +
|
| +
|
| +void CodeGenerator::Comparison(AstNode* node,
|
| + Condition cc,
|
| + bool strict,
|
| + ControlDestination* dest) {
|
| + // Strict only makes sense for equality comparisons.
|
| + ASSERT(!strict || cc == equal);
|
| +
|
| + Result left_side;
|
| + Result right_side;
|
| + // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
|
| + if (cc == greater || cc == less_equal) {
|
| + cc = ReverseCondition(cc);
|
| + left_side = frame_->Pop();
|
| + right_side = frame_->Pop();
|
| + } else {
|
| + right_side = frame_->Pop();
|
| + left_side = frame_->Pop();
|
| + }
|
| + ASSERT(cc == less || cc == equal || cc == greater_equal);
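
The reversal is sound because flipping the condition while swapping the
operands is an identity, and popping in the opposite order is what
preserves the left-to-right conversion order. A trivial standalone check
that greater and less_equal reduce to the three supported conditions:

  #include <cassert>

  int main() {
    for (int a = -2; a <= 2; ++a) {
      for (int b = -2; b <= 2; ++b) {
        assert((a > b) == (b < a));    // greater -> reversed less.
        assert((a <= b) == (b >= a));  // less_equal -> reversed greater_equal.
      }
    }
    return 0;
  }
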
|
| +
|
| + // If either side is a constant smi, optimize the comparison.
|
| + bool left_side_constant_smi = false;
|
| + bool left_side_constant_null = false;
|
| + bool left_side_constant_1_char_string = false;
|
| + if (left_side.is_constant()) {
|
| + left_side_constant_smi = left_side.handle()->IsSmi();
|
| + left_side_constant_null = left_side.handle()->IsNull();
|
| + left_side_constant_1_char_string =
|
| + (left_side.handle()->IsString() &&
|
| + String::cast(*left_side.handle())->length() == 1 &&
|
| + String::cast(*left_side.handle())->IsAsciiRepresentation());
|
| + }
|
| + bool right_side_constant_smi = false;
|
| + bool right_side_constant_null = false;
|
| + bool right_side_constant_1_char_string = false;
|
| + if (right_side.is_constant()) {
|
| + right_side_constant_smi = right_side.handle()->IsSmi();
|
| + right_side_constant_null = right_side.handle()->IsNull();
|
| + right_side_constant_1_char_string =
|
| + (right_side.handle()->IsString() &&
|
| + String::cast(*right_side.handle())->length() == 1 &&
|
| + String::cast(*right_side.handle())->IsAsciiRepresentation());
|
| + }
|
| +
|
| + if (left_side_constant_smi || right_side_constant_smi) {
|
| + if (left_side_constant_smi && right_side_constant_smi) {
|
| + // Trivial case, comparing two constants.
|
| + int left_value = Smi::cast(*left_side.handle())->value();
|
| + int right_value = Smi::cast(*right_side.handle())->value();
|
| + switch (cc) {
|
| + case less:
|
| + dest->Goto(left_value < right_value);
|
| + break;
|
| + case equal:
|
| + dest->Goto(left_value == right_value);
|
| + break;
|
| + case greater_equal:
|
| + dest->Goto(left_value >= right_value);
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| } else {
|
| - __ movq(rdx, receiver_);
|
| - __ movq(rcx, key_);
|
| + // Only one side is a constant Smi.
|
| + // If left side is a constant Smi, reverse the operands.
|
| + // Since one side is a constant Smi, conversion order does not matter.
|
| + if (left_side_constant_smi) {
|
| + Result temp = left_side;
|
| + left_side = right_side;
|
| + right_side = temp;
|
| + cc = ReverseCondition(cc);
|
| + // This may re-introduce greater or less_equal as the value of cc.
|
| + // CompareStub and the inline code both support all values of cc.
|
| + }
|
| + // Implement comparison against a constant Smi, inlining the case
|
| + // where both sides are Smis.
|
| + left_side.ToRegister();
|
| + Register left_reg = left_side.reg();
|
| + Handle<Object> right_val = right_side.handle();
|
| +
|
| + // Here we split control flow to the stub call and inlined cases
|
| + // before finally splitting it to the control destination. We use
|
| + // a jump target and branching to duplicate the virtual frame at
|
| + // the first split. We manually handle the off-frame references
|
| + // by reconstituting them on the non-fall-through path.
|
| + JumpTarget is_smi;
|
| +
|
| + if (left_side.is_smi()) {
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotSmi(left_side.reg());
|
| + }
|
| + } else {
|
| + Condition left_is_smi = masm_->CheckSmi(left_side.reg());
|
| + is_smi.Branch(left_is_smi);
|
| +
|
| + bool is_loop_condition = (node->AsExpression() != NULL) &&
|
| + node->AsExpression()->is_loop_condition();
|
| + if (!is_loop_condition && right_val->IsSmi()) {
|
| + // Right side is a constant smi and left side has been checked
|
| + // not to be a smi.
|
| + JumpTarget not_number;
|
| + __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
|
| + Factory::heap_number_map());
|
| + not_number.Branch(not_equal, &left_side);
|
| + __ movsd(xmm1,
|
| + FieldOperand(left_reg, HeapNumber::kValueOffset));
|
| + int value = Smi::cast(*right_val)->value();
|
| + if (value == 0) {
|
| + __ xorpd(xmm0, xmm0);
|
| + } else {
|
| + Result temp = allocator()->Allocate();
|
| + __ movl(temp.reg(), Immediate(value));
|
| + __ cvtlsi2sd(xmm0, temp.reg());
|
| + temp.Unuse();
|
| + }
|
| + __ ucomisd(xmm1, xmm0);
|
| + // Jump to builtin for NaN.
|
| + not_number.Branch(parity_even, &left_side);
|
| + left_side.Unuse();
|
| + dest->true_target()->Branch(DoubleCondition(cc));
|
| + dest->false_target()->Jump();
|
| + not_number.Bind(&left_side);
|
| + }
|
| +
|
| +      // Set up and call the compare stub.
|
| + CompareStub stub(cc, strict, kCantBothBeNaN);
|
| + Result result = frame_->CallStub(&stub, &left_side, &right_side);
|
| + result.ToRegister();
|
| + __ testq(result.reg(), result.reg());
|
| + result.Unuse();
|
| + dest->true_target()->Branch(cc);
|
| + dest->false_target()->Jump();
|
| +
|
| + is_smi.Bind();
|
| + }
|
| +
|
| + left_side = Result(left_reg);
|
| + right_side = Result(right_val);
|
| +      // Test smi equality and ordering by signed integer comparison.
|
| + // Both sides are smis, so we can use an Immediate.
|
| + __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
|
| + left_side.Unuse();
|
| + right_side.Unuse();
|
| + dest->Split(cc);
|
| }
|
| - } else if (key_.is(rcx)) {
|
| - __ movq(rdx, receiver_);
|
| + } else if (cc == equal &&
|
| + (left_side_constant_null || right_side_constant_null)) {
|
| + // To make null checks efficient, we check if either the left side or
|
| + // the right side is the constant 'null'.
|
| + // If so, we optimize the code by inlining a null check instead of
|
| + // calling the (very) general runtime routine for checking equality.
|
| + Result operand = left_side_constant_null ? right_side : left_side;
|
| + right_side.Unuse();
|
| + left_side.Unuse();
|
| + operand.ToRegister();
|
| + __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
|
| + if (strict) {
|
| + operand.Unuse();
|
| + dest->Split(equal);
|
| + } else {
|
| + // The 'null' value is only equal to 'undefined' if using non-strict
|
| + // comparisons.
|
| + dest->true_target()->Branch(equal);
|
| + __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
|
| + dest->true_target()->Branch(equal);
|
| + Condition is_smi = masm_->CheckSmi(operand.reg());
|
| + dest->false_target()->Branch(is_smi);
|
| +
|
| + // It can be an undetectable object.
|
| + // Use a scratch register in preference to spilling operand.reg().
|
| + Result temp = allocator()->Allocate();
|
| + ASSERT(temp.is_valid());
|
| + __ movq(temp.reg(),
|
| + FieldOperand(operand.reg(), HeapObject::kMapOffset));
|
| + __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
|
| + Immediate(1 << Map::kIsUndetectable));
|
| + temp.Unuse();
|
| + operand.Unuse();
|
| + dest->Split(not_zero);
|
| + }
|
| + } else if (left_side_constant_1_char_string ||
|
| + right_side_constant_1_char_string) {
|
| + if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
|
| + // Trivial case, comparing two constants.
|
| + int left_value = String::cast(*left_side.handle())->Get(0);
|
| + int right_value = String::cast(*right_side.handle())->Get(0);
|
| + switch (cc) {
|
| + case less:
|
| + dest->Goto(left_value < right_value);
|
| + break;
|
| + case equal:
|
| + dest->Goto(left_value == right_value);
|
| + break;
|
| + case greater_equal:
|
| + dest->Goto(left_value >= right_value);
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| + } else {
|
| + // Only one side is a constant 1 character string.
|
| + // If left side is a constant 1-character string, reverse the operands.
|
| + // Since one side is a constant string, conversion order does not matter.
|
| + if (left_side_constant_1_char_string) {
|
| + Result temp = left_side;
|
| + left_side = right_side;
|
| + right_side = temp;
|
| + cc = ReverseCondition(cc);
|
| + // This may reintroduce greater or less_equal as the value of cc.
|
| + // CompareStub and the inline code both support all values of cc.
|
| + }
|
| + // Implement comparison against a constant string, inlining the case
|
| + // where both sides are strings.
|
| + left_side.ToRegister();
|
| +
|
| + // Here we split control flow to the stub call and inlined cases
|
| + // before finally splitting it to the control destination. We use
|
| + // a jump target and branching to duplicate the virtual frame at
|
| + // the first split. We manually handle the off-frame references
|
| + // by reconstituting them on the non-fall-through path.
|
| + JumpTarget is_not_string, is_string;
|
| + Register left_reg = left_side.reg();
|
| + Handle<Object> right_val = right_side.handle();
|
| + ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
|
| + Condition is_smi = masm()->CheckSmi(left_reg);
|
| + is_not_string.Branch(is_smi, &left_side);
|
| + Result temp = allocator_->Allocate();
|
| + ASSERT(temp.is_valid());
|
| + __ movq(temp.reg(),
|
| + FieldOperand(left_reg, HeapObject::kMapOffset));
|
| + __ movzxbl(temp.reg(),
|
| + FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
|
| + // If we are testing for equality then make use of the symbol shortcut.
|
| + // Check if the left hand side has the same type as the right hand
|
| + // side (which is always a symbol).
|
| + if (cc == equal) {
|
| + Label not_a_symbol;
|
| + ASSERT(kSymbolTag != 0);
|
| + // Ensure that no non-strings have the symbol bit set.
|
| + ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
|
| + __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
|
| + __ j(zero, ¬_a_symbol);
|
| + // They are symbols, so do identity compare.
|
| + __ Cmp(left_reg, right_side.handle());
|
| + dest->true_target()->Branch(equal);
|
| + dest->false_target()->Branch(not_equal);
|
| + __ bind(¬_a_symbol);
|
| + }
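
The symbol shortcut just above is sound because symbols are interned:
each distinct string content has exactly one symbol object, so content
equality collapses to pointer identity. A standalone sketch of the
argument, using an explicit intern table:

  #include <cassert>
  #include <set>
  #include <string>

  const std::string* Intern(const std::string& s) {
    static std::set<std::string> table;
    return &*table.insert(s).first;  // One object per distinct content.
  }

  int main() {
    assert(Intern("foo") == Intern("foo"));  // Identity compare suffices.
    assert(Intern("foo") != Intern("bar"));
    return 0;
  }
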
|
| + // Call the compare stub if the left side is not a flat ascii string.
|
| + __ andb(temp.reg(),
|
| + Immediate(kIsNotStringMask |
|
| + kStringRepresentationMask |
|
| + kStringEncodingMask));
|
| + __ cmpb(temp.reg(),
|
| + Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
|
| + temp.Unuse();
|
| + is_string.Branch(equal, &left_side);
|
| +
|
| +      // Set up and call the compare stub.
|
| + is_not_string.Bind(&left_side);
|
| + CompareStub stub(cc, strict, kCantBothBeNaN);
|
| + Result result = frame_->CallStub(&stub, &left_side, &right_side);
|
| + result.ToRegister();
|
| + __ testq(result.reg(), result.reg());
|
| + result.Unuse();
|
| + dest->true_target()->Branch(cc);
|
| + dest->false_target()->Jump();
|
| +
|
| + is_string.Bind(&left_side);
|
| + // left_side is a sequential ASCII string.
|
| + ASSERT(left_side.reg().is(left_reg));
|
| + right_side = Result(right_val);
|
| + Result temp2 = allocator_->Allocate();
|
| + ASSERT(temp2.is_valid());
|
| + // Test string equality and comparison.
|
| + if (cc == equal) {
|
| + Label comparison_done;
|
| + __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
|
| + Smi::FromInt(1));
|
| + __ j(not_equal, &comparison_done);
|
| + uint8_t char_value =
|
| + static_cast<uint8_t>(String::cast(*right_val)->Get(0));
|
| + __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
|
| + Immediate(char_value));
|
| + __ bind(&comparison_done);
|
| + } else {
|
| + __ movq(temp2.reg(),
|
| + FieldOperand(left_side.reg(), String::kLengthOffset));
|
| + __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
|
| + Label comparison;
|
| + // If the length is 0 then the subtraction gave -1 which compares less
|
| + // than any character.
|
| + __ j(negative, &comparison);
|
| + // Otherwise load the first character.
|
| + __ movzxbl(temp2.reg(),
|
| + FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
|
| + __ bind(&comparison);
|
| + // Compare the first character of the string with the
|
| + // constant 1-character string.
|
| + uint8_t char_value =
|
| + static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
|
| + __ cmpb(temp2.reg(), Immediate(char_value));
|
| + Label characters_were_different;
|
| + __ j(not_equal, &characters_were_different);
|
| + // If the first character is the same then the long string sorts after
|
| + // the short one.
|
| + __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
|
| + Smi::FromInt(1));
|
| + __ bind(&characters_were_different);
|
| + }
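
The ordering code above compares only the first character and then falls
back to the length: a differing first character decides the result, and
on a tie anything longer than one character sorts after the one-character
constant (the -1 trick makes the empty string compare below every
character). A standalone model of that logic:

  #include <cassert>
  #include <string>

  int CompareToOneChar(const std::string& left, char right) {
    int first = left.empty() ? -1 : static_cast<unsigned char>(left[0]);
    if (first != right) return first < right ? -1 : 1;
    // First characters tie: only an exact one-character string is equal;
    // anything longer sorts after the constant.
    return left.size() == 1 ? 0 : 1;
  }

  int main() {
    assert(CompareToOneChar("", 'a') < 0);
    assert(CompareToOneChar("a", 'a') == 0);
    assert(CompareToOneChar("ab", 'a') > 0);
    assert(CompareToOneChar("b", 'a') > 0);
    return 0;
  }
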
|
| + temp2.Unuse();
|
| + left_side.Unuse();
|
| + right_side.Unuse();
|
| + dest->Split(cc);
|
| + }
|
| } else {
|
| - __ movq(rcx, key_);
|
| - __ movq(rdx, receiver_);
|
| + // Neither side is a constant Smi, constant 1-char string, or constant null.
|
| + // If either side is a non-smi constant, skip the smi check.
|
| + bool known_non_smi =
|
| + (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
|
| + (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
|
| + left_side.type_info().IsDouble() ||
|
| + right_side.type_info().IsDouble();
|
| +
|
| + NaNInformation nan_info =
|
| + (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
|
| + kBothCouldBeNaN :
|
| + kCantBothBeNaN;
|
| +
|
| +    // Inline the number comparison, handling any combination of smis and
|
| +    // heap numbers, if all of the following hold:
|
| +    //   the code is in a loop,
|
| +    //   the compare operation is different from equal, and
|
| +    //   the compare is not a for-loop condition.
|
| +    // The reason for excluding equal is that it will most likely be done
|
| +    // with smis (not heap numbers), and the code for comparing smis is
|
| +    // inlined separately. The same reason applies to for-loop conditions,
|
| +    // which will also most likely be smi comparisons.
|
| + bool is_loop_condition = (node->AsExpression() != NULL)
|
| + && node->AsExpression()->is_loop_condition();
|
| + bool inline_number_compare =
|
| + loop_nesting() > 0 && cc != equal && !is_loop_condition;
|
| +
|
| + left_side.ToRegister();
|
| + right_side.ToRegister();
|
| +
|
| + if (known_non_smi) {
|
| + // Inlined equality check:
|
| + // If at least one of the objects is not NaN, then if the objects
|
| + // are identical, they are equal.
|
| + if (nan_info == kCantBothBeNaN && cc == equal) {
|
| + __ cmpq(left_side.reg(), right_side.reg());
|
| + dest->true_target()->Branch(equal);
|
| + }
|
| +
|
| + // Inlined number comparison:
|
| + if (inline_number_compare) {
|
| + GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
|
| + }
|
| +
|
| + CompareStub stub(cc, strict, nan_info, !inline_number_compare);
|
| + Result answer = frame_->CallStub(&stub, &left_side, &right_side);
|
| +      __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flags.
|
| + answer.Unuse();
|
| + dest->Split(cc);
|
| + } else {
|
| + // Here we split control flow to the stub call and inlined cases
|
| + // before finally splitting it to the control destination. We use
|
| + // a jump target and branching to duplicate the virtual frame at
|
| + // the first split. We manually handle the off-frame references
|
| + // by reconstituting them on the non-fall-through path.
|
| + JumpTarget is_smi;
|
| + Register left_reg = left_side.reg();
|
| + Register right_reg = right_side.reg();
|
| +
|
| + Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
|
| + is_smi.Branch(both_smi);
|
| +
|
| + // Inline the equality check if both operands can't be a NaN. If both
|
| + // objects are the same they are equal.
|
| + if (nan_info == kCantBothBeNaN && cc == equal) {
|
| + __ cmpq(left_side.reg(), right_side.reg());
|
| + dest->true_target()->Branch(equal);
|
| + }
|
| +
|
| + // Inlined number comparison:
|
| + if (inline_number_compare) {
|
| + GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
|
| + }
|
| +
|
| + CompareStub stub(cc, strict, nan_info, !inline_number_compare);
|
| + Result answer = frame_->CallStub(&stub, &left_side, &right_side);
|
| + __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
|
| + answer.Unuse();
|
| + dest->true_target()->Branch(cc);
|
| + dest->false_target()->Jump();
|
| +
|
| + is_smi.Bind();
|
| + left_side = Result(left_reg);
|
| + right_side = Result(right_reg);
|
| + __ SmiCompare(left_side.reg(), right_side.reg());
|
| + right_side.Unuse();
|
| + left_side.Unuse();
|
| + dest->Split(cc);
|
| + }
|
| }
|
| +}
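
For the non-strict null comparison inside Comparison, the values accepted
as == null are exactly null, undefined, and "undetectable" heap objects,
whose maps carry the bit tested via Map::kIsUndetectable. A standalone
sketch of that predicate; the concrete bit position below is illustrative,
not V8's actual value:

  #include <cassert>
  #include <cstdint>

  const int kIsUndetectable = 4;  // Illustrative bit position only.

  bool LooseEqualsNull(bool is_null, bool is_undefined, bool is_smi,
                       uint8_t map_bit_field) {
    if (is_null || is_undefined) return true;
    if (is_smi) return false;  // The CheckSmi branch to false above.
    return (map_bit_field & (1 << kIsUndetectable)) != 0;
  }

  int main() {
    assert(LooseEqualsNull(true, false, false, 0));
    assert(LooseEqualsNull(false, true, false, 0));
    assert(!LooseEqualsNull(false, false, true, 0));
    assert(LooseEqualsNull(false, false, false, 1 << kIsUndetectable));
    return 0;
  }
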
|
|
|
| - // Call the IC stub.
|
| - Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
|
| - __ Call(ic, RelocInfo::CODE_TARGET);
|
| - // The delta from the start of the map-compare instructions (initial movq)
|
| - // to the test instruction. We use masm_-> directly here instead of the
|
| - // __ macro because the macro sometimes uses macro expansion to turn
|
| - // into something that can't return a value. This is encountered
|
| - // when doing generated code coverage tests.
|
| - int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
|
| - // Here we use masm_-> instead of the __ macro because this is the
|
| - // instruction that gets patched and coverage code gets in the way.
|
| - masm_->testl(rax, Immediate(-delta_to_patch_site));
|
| - // Restore value (returned from store IC).
|
| - if (!value_.is(rax)) __ movq(value_, rax);
|
| +
|
| +// Load a comparison operand into an XMM register. Jump to the not_numbers
|
| +// jump target, passing the left and right results, if the operand is not
|
| +// a number.
|
| +static void LoadComparisonOperand(MacroAssembler* masm_,
|
| + Result* operand,
|
| + XMMRegister xmm_reg,
|
| + Result* left_side,
|
| + Result* right_side,
|
| + JumpTarget* not_numbers) {
|
| + Label done;
|
| + if (operand->type_info().IsDouble()) {
|
| + // Operand is known to be a heap number, just load it.
|
| + __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
|
| + } else if (operand->type_info().IsSmi()) {
|
| + // Operand is known to be a smi. Convert it to double and keep the original
|
| + // smi.
|
| + __ SmiToInteger32(kScratchRegister, operand->reg());
|
| + __ cvtlsi2sd(xmm_reg, kScratchRegister);
|
| + } else {
|
| + // Operand type not known, check for smi or heap number.
|
| + Label smi;
|
| + __ JumpIfSmi(operand->reg(), &smi);
|
| + if (!operand->type_info().IsNumber()) {
|
| + __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
|
| + __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
|
| + kScratchRegister);
|
| + not_numbers->Branch(not_equal, left_side, right_side, taken);
|
| + }
|
| + __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
|
| + __ jmp(&done);
|
| +
|
| + __ bind(&smi);
|
| +    // Convert smi to double and keep the original smi.
|
| + __ SmiToInteger32(kScratchRegister, operand->reg());
|
| + __ cvtlsi2sd(xmm_reg, kScratchRegister);
|
| + __ jmp(&done);
|
| + }
|
| + __ bind(&done);
|
| }
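
Both smi branches above untag with SmiToInteger32 before converting with
cvtlsi2sd. A standalone sketch of the tag/untag arithmetic, assuming the
x64 smi layout of this era, a 32-bit payload held in the upper half of
the 64-bit word (the shift amount is inferred, not quoted):

  #include <cassert>
  #include <cstdint>

  const int kSmiShift = 32;  // Assumed x64 smi shift.

  int64_t SmiFromInt(int32_t value) {
    return static_cast<int64_t>(value) << kSmiShift;
  }

  int32_t SmiToInteger32(int64_t smi) {
    return static_cast<int32_t>(smi >> kSmiShift);
  }

  int main() {
    assert(SmiToInteger32(SmiFromInt(42)) == 42);
    assert(SmiToInteger32(SmiFromInt(-7)) == -7);
    // The untagged 32-bit value is what cvtlsi2sd converts to double.
    assert(static_cast<double>(SmiToInteger32(SmiFromInt(42))) == 42.0);
    return 0;
  }
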
|
|
|
|
|
| +void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
|
| + Result* right_side,
|
| + Condition cc,
|
| + ControlDestination* dest) {
|
| + ASSERT(left_side->is_register());
|
| + ASSERT(right_side->is_register());
|
| +
|
| + JumpTarget not_numbers;
|
| + // Load left and right operand into registers xmm0 and xmm1 and compare.
|
| + LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
|
| + ¬_numbers);
|
| + LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
|
| + ¬_numbers);
|
| + __ ucomisd(xmm0, xmm1);
|
| + // Bail out if a NaN is involved.
|
| + not_numbers.Branch(parity_even, left_side, right_side);
|
| +
|
| + // Split to destination targets based on comparison.
|
| + left_side->Unuse();
|
| + right_side->Unuse();
|
| + dest->true_target()->Branch(DoubleCondition(cc));
|
| + dest->false_target()->Jump();
|
| +
|
| + not_numbers.Bind(left_side, right_side);
|
| +}
|
| +
|
| +
|
| +// Call the function just below TOS on the stack with the given
|
| +// arguments. The receiver is the TOS.
|
| +void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
|
| + CallFunctionFlags flags,
|
| + int position) {
|
| + // Push the arguments ("left-to-right") on the stack.
|
| + int arg_count = args->length();
|
| + for (int i = 0; i < arg_count; i++) {
|
| + Load(args->at(i));
|
| + frame_->SpillTop();
|
| + }
|
| +
|
| + // Record the position for debugging purposes.
|
| + CodeForSourcePosition(position);
|
| +
|
| + // Use the shared code stub to call the function.
|
| + InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
|
| + CallFunctionStub call_function(arg_count, in_loop, flags);
|
| + Result answer = frame_->CallStub(&call_function, arg_count + 1);
|
| + // Restore context and replace function on the stack with the
|
| + // result of the stub invocation.
|
| + frame_->RestoreContextRegister();
|
| + frame_->SetElementAt(0, &answer);
|
| +}
|
| +
|
| +
|
| void CodeGenerator::CallApplyLazy(Expression* applicand,
|
| Expression* receiver,
|
| VariableProxy* arguments,
|
| @@ -1010,6 +2720,21 @@
|
| }
|
|
|
|
|
| +void CodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
|
| + // Call the runtime to declare the globals. The inevitable call
|
| + // will sync frame elements to memory anyway, so we do it eagerly to
|
| + // allow us to push the arguments directly into place.
|
| + frame_->SyncRange(0, frame_->element_count() - 1);
|
| +
|
| + __ movq(kScratchRegister, pairs, RelocInfo::EMBEDDED_OBJECT);
|
| + frame_->EmitPush(rsi); // The context is the first argument.
|
| + frame_->EmitPush(kScratchRegister);
|
| + frame_->EmitPush(Smi::FromInt(is_eval() ? 1 : 0));
|
| + Result ignored = frame_->CallRuntime(Runtime::kDeclareGlobals, 3);
|
| + // Return value is ignored.
|
| +}
|
| +
|
| +
|
| void CodeGenerator::VisitDeclaration(Declaration* node) {
|
| Comment cmnt(masm_, "[ Declaration");
|
| Variable* var = node->proxy()->var();
|
| @@ -1230,6 +2955,44 @@
|
| }
|
|
|
|
|
| +void CodeGenerator::GenerateReturnSequence(Result* return_value) {
|
| + // The return value is a live (but not currently reference counted)
|
| + // reference to rax. This is safe because the current frame does not
|
| + // contain a reference to rax (it is prepared for the return by spilling
|
| + // all registers).
|
| + if (FLAG_trace) {
|
| + frame_->Push(return_value);
|
| + *return_value = frame_->CallRuntime(Runtime::kTraceExit, 1);
|
| + }
|
| + return_value->ToRegister(rax);
|
| +
|
| + // Add a label for checking the size of the code used for returning.
|
| +#ifdef DEBUG
|
| + Label check_exit_codesize;
|
| + masm_->bind(&check_exit_codesize);
|
| +#endif
|
| +
|
| + // Leave the frame and return popping the arguments and the
|
| + // receiver.
|
| + frame_->Exit();
|
| + masm_->ret((scope()->num_parameters() + 1) * kPointerSize);
|
| +#ifdef ENABLE_DEBUGGER_SUPPORT
|
| + // Add padding that will be overwritten by a debugger breakpoint.
|
| + // frame_->Exit() generates "movq rsp, rbp; pop rbp; ret k"
|
| + // with length 7 (3 + 1 + 3).
|
| + const int kPadding = Assembler::kJSReturnSequenceLength - 7;
|
| + for (int i = 0; i < kPadding; ++i) {
|
| + masm_->int3();
|
| + }
|
| + // Check that the size of the code used for returning matches what is
|
| + // expected by the debugger.
|
| + ASSERT_EQ(Assembler::kJSReturnSequenceLength,
|
| + masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
|
| +#endif
|
| + DeleteFrame();
|
| +}
|
| +
|
| +
|
| void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
|
| ASSERT(!in_spilled_code());
|
| Comment cmnt(masm_, "[ WithEnterStatement");
|
| @@ -2531,6 +4294,349 @@
|
| }
|
|
|
|
|
| +void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
|
| + if (slot->type() == Slot::LOOKUP) {
|
| + ASSERT(slot->var()->is_dynamic());
|
| +
|
| + JumpTarget slow;
|
| + JumpTarget done;
|
| + Result value;
|
| +
|
| + // Generate fast case for loading from slots that correspond to
|
| + // local/global variables or arguments unless they are shadowed by
|
| + // eval-introduced bindings.
|
| + EmitDynamicLoadFromSlotFastCase(slot,
|
| + typeof_state,
|
| + &value,
|
| + &slow,
|
| + &done);
|
| +
|
| + slow.Bind();
|
| + // A runtime call is inevitable. We eagerly sync frame elements
|
| + // to memory so that we can push the arguments directly into place
|
| + // on top of the frame.
|
| + frame_->SyncRange(0, frame_->element_count() - 1);
|
| + frame_->EmitPush(rsi);
|
| + __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
|
| + frame_->EmitPush(kScratchRegister);
|
| + if (typeof_state == INSIDE_TYPEOF) {
|
| + value =
|
| + frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
|
| + } else {
|
| + value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
|
| + }
|
| +
|
| + done.Bind(&value);
|
| + frame_->Push(&value);
|
| +
|
| + } else if (slot->var()->mode() == Variable::CONST) {
|
| + // Const slots may contain 'the hole' value (the constant hasn't been
|
| + // initialized yet) which needs to be converted into the 'undefined'
|
| + // value.
|
| + //
|
| + // We currently spill the virtual frame because constants use the
|
| + // potentially unsafe direct-frame access of SlotOperand.
|
| + VirtualFrame::SpilledScope spilled_scope;
|
| + Comment cmnt(masm_, "[ Load const");
|
| + JumpTarget exit;
|
| + __ movq(rcx, SlotOperand(slot, rcx));
|
| + __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
|
| + exit.Branch(not_equal);
|
| + __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
|
| + exit.Bind();
|
| + frame_->EmitPush(rcx);
|
| +
|
| + } else if (slot->type() == Slot::PARAMETER) {
|
| + frame_->PushParameterAt(slot->index());
|
| +
|
| + } else if (slot->type() == Slot::LOCAL) {
|
| + frame_->PushLocalAt(slot->index());
|
| +
|
| + } else {
|
| + // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
|
| + // here.
|
| + //
|
| + // The use of SlotOperand below is safe for an unspilled frame
|
| + // because it will always be a context slot.
|
| + ASSERT(slot->type() == Slot::CONTEXT);
|
| + Result temp = allocator_->Allocate();
|
| + ASSERT(temp.is_valid());
|
| + __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
|
| + frame_->Push(&temp);
|
| + }
|
| +}
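
The const path above converts the "hole" sentinel to undefined on every
read, since a const may be read before its initializer has run. A
standalone model of that conversion (the value names are illustrative):

  #include <cassert>

  enum Value { kUndefined, kTheHole, kSomeNumber };

  Value LoadConstSlot(Value slot) {
    // Mirrors the CompareRoot / LoadRoot pair in the generated code.
    return slot == kTheHole ? kUndefined : slot;
  }

  int main() {
    assert(LoadConstSlot(kTheHole) == kUndefined);      // Before init.
    assert(LoadConstSlot(kSomeNumber) == kSomeNumber);  // After init.
    return 0;
  }
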
|
| +
|
| +
|
| +void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
|
| + TypeofState state) {
|
| + LoadFromSlot(slot, state);
|
| +
|
| + // Bail out quickly if we're not using lazy arguments allocation.
|
| + if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
|
| +
|
| + // ... or if the slot isn't a non-parameter arguments slot.
|
| + if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
|
| +
|
| + // Pop the loaded value from the stack.
|
| + Result value = frame_->Pop();
|
| +
|
| +  // If the loaded value is a constant, we know whether the arguments
|
| +  // object has been lazily loaded yet.
|
| + if (value.is_constant()) {
|
| + if (value.handle()->IsTheHole()) {
|
| + Result arguments = StoreArgumentsObject(false);
|
| + frame_->Push(&arguments);
|
| + } else {
|
| + frame_->Push(&value);
|
| + }
|
| + return;
|
| + }
|
| +
|
| + // The loaded value is in a register. If it is the sentinel that
|
| + // indicates that we haven't loaded the arguments object yet, we
|
| + // need to do it now.
|
| + JumpTarget exit;
|
| + __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
|
| + frame_->Push(&value);
|
| + exit.Branch(not_equal);
|
| + Result arguments = StoreArgumentsObject(false);
|
| + frame_->SetElementAt(0, &arguments);
|
| + exit.Bind();
|
| +}
|
| +
|
| +
|
| +Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
|
| + Slot* slot,
|
| + TypeofState typeof_state,
|
| + JumpTarget* slow) {
|
| + // Check that no extension objects have been created by calls to
|
| + // eval from the current scope to the global scope.
|
| + Register context = rsi;
|
| + Result tmp = allocator_->Allocate();
|
| + ASSERT(tmp.is_valid()); // All non-reserved registers were available.
|
| +
|
| + Scope* s = scope();
|
| + while (s != NULL) {
|
| + if (s->num_heap_slots() > 0) {
|
| + if (s->calls_eval()) {
|
| + // Check that extension is NULL.
|
| + __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
|
| + Immediate(0));
|
| + slow->Branch(not_equal, not_taken);
|
| + }
|
| + // Load next context in chain.
|
| + __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
|
| + __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
|
| + context = tmp.reg();
|
| + }
|
| + // If no outer scope calls eval, we do not need to check more
|
| + // context extensions. If we have reached an eval scope, we check
|
| + // all extensions from this point.
|
| + if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
|
| + s = s->outer_scope();
|
| + }
|
| +
|
| + if (s->is_eval_scope()) {
|
| + // Loop up the context chain. There is no frame effect so it is
|
| + // safe to use raw labels here.
|
| + Label next, fast;
|
| + if (!context.is(tmp.reg())) {
|
| + __ movq(tmp.reg(), context);
|
| + }
|
| + // Load map for comparison into register, outside loop.
|
| + __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
|
| + __ bind(&next);
|
| + // Terminate at global context.
|
| + __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
|
| + __ j(equal, &fast);
|
| + // Check that extension is NULL.
|
| + __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
|
| + slow->Branch(not_equal);
|
| + // Load next context in chain.
|
| + __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
|
| + __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
|
| + __ jmp(&next);
|
| + __ bind(&fast);
|
| + }
|
| + tmp.Unuse();
|
| +
|
| + // All extension objects were empty and it is safe to use a global
|
| + // load IC call.
|
| + LoadGlobal();
|
| + frame_->Push(slot->var()->name());
|
| + RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
|
| + ? RelocInfo::CODE_TARGET
|
| + : RelocInfo::CODE_TARGET_CONTEXT;
|
| + Result answer = frame_->CallLoadIC(mode);
|
| + // A test rax instruction following the call signals that the inobject
|
| + // property case was inlined. Ensure that there is not a test rax
|
| + // instruction here.
|
| + masm_->nop();
|
| + return answer;
|
| +}
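
The emitted loop above is the runtime analogue of the compile-time scope
walk: follow closure links outward, and bail to the slow path as soon as
any context carries an extension object, because an eval-introduced
binding there could shadow the global. A standalone sketch (the struct is
illustrative, not V8's context layout):

  #include <cassert>
  #include <cstddef>

  struct Context {
    Context* previous;  // Next context outward, reached via the closure.
    void* extension;    // Non-null if eval introduced bindings here.
    bool is_global;
  };

  bool CanUseGlobalLoadIC(const Context* ctx) {
    for (; ctx != NULL; ctx = ctx->previous) {
      if (ctx->extension != NULL) return false;  // slow->Branch(...).
      if (ctx->is_global) return true;           // Terminate at global.
    }
    return false;
  }

  int main() {
    Context global = { NULL, NULL, true };
    Context inner = { &global, NULL, false };
    Context with_eval = { &global, &inner, false };  // Any non-null extension.
    assert(CanUseGlobalLoadIC(&inner));
    assert(!CanUseGlobalLoadIC(&with_eval));
    return 0;
  }
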
|
| +
|
| +
|
| +void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
|
| + TypeofState typeof_state,
|
| + Result* result,
|
| + JumpTarget* slow,
|
| + JumpTarget* done) {
|
| + // Generate fast-case code for variables that might be shadowed by
|
| + // eval-introduced variables. Eval is used a lot without
|
| + // introducing variables. In those cases, we do not want to
|
| + // perform a runtime call for all variables in the scope
|
| + // containing the eval.
|
| + if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
|
| + *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
|
| + done->Jump(result);
|
| +
|
| + } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
|
| + Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
|
| + Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
|
| + if (potential_slot != NULL) {
|
| + // Generate fast case for locals that rewrite to slots.
|
| + // Allocate a fresh register to use as a temp in
|
| + // ContextSlotOperandCheckExtensions and to hold the result
|
| + // value.
|
| + *result = allocator_->Allocate();
|
| + ASSERT(result->is_valid());
|
| + __ movq(result->reg(),
|
| + ContextSlotOperandCheckExtensions(potential_slot,
|
| + *result,
|
| + slow));
|
| + if (potential_slot->var()->mode() == Variable::CONST) {
|
| + __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
|
| + done->Branch(not_equal, result);
|
| + __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
|
| + }
|
| + done->Jump(result);
|
| + } else if (rewrite != NULL) {
|
| + // Generate fast case for argument loads.
|
| + Property* property = rewrite->AsProperty();
|
| + if (property != NULL) {
|
| + VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
|
| + Literal* key_literal = property->key()->AsLiteral();
|
| + if (obj_proxy != NULL &&
|
| + key_literal != NULL &&
|
| + obj_proxy->IsArguments() &&
|
| + key_literal->handle()->IsSmi()) {
|
| + // Load arguments object if there are no eval-introduced
|
| + // variables. Then load the argument from the arguments
|
| + // object using keyed load.
|
| + Result arguments = allocator()->Allocate();
|
| + ASSERT(arguments.is_valid());
|
| + __ movq(arguments.reg(),
|
| + ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
|
| + arguments,
|
| + slow));
|
| + frame_->Push(&arguments);
|
| + frame_->Push(key_literal->handle());
|
| + *result = EmitKeyedLoad();
|
| + done->Jump(result);
|
| + }
|
| + }
|
| + }
|
| + }
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
|
| + if (slot->type() == Slot::LOOKUP) {
|
| + ASSERT(slot->var()->is_dynamic());
|
| +
|
| + // For now, just do a runtime call. Since the call is inevitable,
|
| + // we eagerly sync the virtual frame so we can directly push the
|
| + // arguments into place.
|
| + frame_->SyncRange(0, frame_->element_count() - 1);
|
| +
|
| + frame_->EmitPush(rsi);
|
| + frame_->EmitPush(slot->var()->name());
|
| +
|
| + Result value;
|
| + if (init_state == CONST_INIT) {
|
| + // Same as the case for a normal store, but ignores attribute
|
| + // (e.g. READ_ONLY) of context slot so that we can initialize const
|
| + // properties (introduced via eval("const foo = (some expr);")). Also,
|
| + // uses the current function context instead of the top context.
|
| + //
|
| + // Note that we must declare the foo upon entry of eval(), via a
|
| + // context slot declaration, but we cannot initialize it at the same
|
| + // time, because the const declaration may be at the end of the eval
|
| + // code (sigh...) and the const variable may have been used before
|
| + // (where its value is 'undefined'). Thus, we can only do the
|
| + // initialization when we actually encounter the expression and when
|
| + // the expression operands are defined and valid, and thus we need the
|
| + // split into 2 operations: declaration of the context slot followed
|
| + // by initialization.
|
| + value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
|
| + } else {
|
| + value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
|
| + }
|
| + // Storing a variable must keep the (new) value on the expression
|
| + // stack. This is necessary for compiling chained assignment
|
| + // expressions.
|
| + frame_->Push(&value);
|
| + } else {
|
| + ASSERT(!slot->var()->is_dynamic());
|
| +
|
| + JumpTarget exit;
|
| + if (init_state == CONST_INIT) {
|
| + ASSERT(slot->var()->mode() == Variable::CONST);
|
| + // Only the first const initialization must be executed (the slot
|
| + // still contains 'the hole' value). When the assignment is executed,
|
| + // the code is identical to a normal store (see below).
|
| + //
|
| + // We spill the frame in the code below because the direct-frame
|
| + // access of SlotOperand is potentially unsafe with an unspilled
|
| + // frame.
|
| + VirtualFrame::SpilledScope spilled_scope;
|
| + Comment cmnt(masm_, "[ Init const");
|
| + __ movq(rcx, SlotOperand(slot, rcx));
|
| + __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
|
| + exit.Branch(not_equal);
|
| + }
|
| +
|
| + // We must execute the store. Storing a variable must keep the (new)
|
| + // value on the stack. This is necessary for compiling assignment
|
| + // expressions.
|
| + //
|
| + // Note: We will reach here even with slot->var()->mode() ==
|
| + // Variable::CONST because of const declarations which will initialize
|
| + // consts to 'the hole' value and by doing so, end up calling this code.
|
| + if (slot->type() == Slot::PARAMETER) {
|
| + frame_->StoreToParameterAt(slot->index());
|
| + } else if (slot->type() == Slot::LOCAL) {
|
| + frame_->StoreToLocalAt(slot->index());
|
| + } else {
|
| + // The other slot types (LOOKUP and GLOBAL) cannot reach here.
|
| + //
|
| + // The use of SlotOperand below is safe for an unspilled frame
|
| + // because the slot is a context slot.
|
| + ASSERT(slot->type() == Slot::CONTEXT);
|
| + frame_->Dup();
|
| + Result value = frame_->Pop();
|
| + value.ToRegister();
|
| + Result start = allocator_->Allocate();
|
| + ASSERT(start.is_valid());
|
| + __ movq(SlotOperand(slot, start.reg()), value.reg());
|
| + // RecordWrite may destroy the value registers.
|
| + //
|
| + // TODO(204): Avoid actually spilling when the value is not
|
| + // needed (probably the common case).
|
| + frame_->Spill(value.reg());
|
| + int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
|
| + Result temp = allocator_->Allocate();
|
| + ASSERT(temp.is_valid());
|
| + __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
|
| +      // The results start, value, and temp are released by going out of
|
| +      // scope.
|
| + }
|
| +
|
| + exit.Bind();
|
| + }
|
| +}
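
The RecordWrite call above is the write barrier: after a pointer is
stored into a heap object, the slot has to be remembered so a
generational collector can find old-to-new pointers without scanning the
whole old space. A schematic remembered-set sketch, not V8's actual
scheme:

  #include <cassert>
  #include <cstdint>
  #include <set>

  std::set<uintptr_t> remembered_set;

  void RecordWrite(const void* object, int offset) {
    // Remember the exact slot written; the GC rescans only these.
    remembered_set.insert(reinterpret_cast<uintptr_t>(object) + offset);
  }

  int main() {
    int64_t old_object[4] = { 0 };
    old_object[2] = 0x1234;                        // The store...
    RecordWrite(old_object, 2 * sizeof(int64_t));  // ...then the barrier.
    assert(remembered_set.size() == 1);
    return 0;
  }
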
|
| +
|
| +
|
| void CodeGenerator::VisitSlot(Slot* node) {
|
| Comment cmnt(masm_, "[ Slot");
|
| LoadFromSlotCheckForArguments(node, NOT_INSIDE_TYPEOF);
|
| @@ -2557,6 +4663,17 @@
|
| }
|
|
|
|
|
| +void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
|
| + UNIMPLEMENTED();
|
| + // TODO(X64): Implement security policy for loads of smis.
|
| +}
|
| +
|
| +
|
| +bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
|
| + return false;
|
| +}
|
| +
|
| +
|
| // Materialize the regexp literal 'node' in the literals array
|
| // 'literals' of the function. Leave the regexp boilerplate in
|
| // 'boilerplate'.
|
| @@ -3245,908 +5362,51 @@
|
| }
|
|
|
|
|
| -void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
|
| - if (CheckForInlineRuntimeCall(node)) {
|
| - return;
|
| - }
|
| -
|
| - ZoneList<Expression*>* args = node->arguments();
|
| - Comment cmnt(masm_, "[ CallRuntime");
|
| - Runtime::Function* function = node->function();
|
| -
|
| - if (function == NULL) {
|
| - // Push the builtins object found in the current global object.
|
| - Result temp = allocator()->Allocate();
|
| - ASSERT(temp.is_valid());
|
| - __ movq(temp.reg(), GlobalObject());
|
| - __ movq(temp.reg(),
|
| - FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
|
| - frame_->Push(&temp);
|
| - }
|
| -
|
| - // Push the arguments ("left-to-right").
|
| - int arg_count = args->length();
|
| - for (int i = 0; i < arg_count; i++) {
|
| - Load(args->at(i));
|
| - }
|
| -
|
| - if (function == NULL) {
|
| - // Call the JS runtime function.
|
| - frame_->Push(node->name());
|
| - Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
|
| - arg_count,
|
| - loop_nesting_);
|
| - frame_->RestoreContextRegister();
|
| - frame_->Push(&answer);
|
| - } else {
|
| - // Call the C runtime function.
|
| - Result answer = frame_->CallRuntime(function, arg_count);
|
| - frame_->Push(&answer);
|
| - }
|
| -}
|
| -
|
| -
|
| -void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
|
| - Comment cmnt(masm_, "[ UnaryOperation");
|
| -
|
| - Token::Value op = node->op();
|
| -
|
| - if (op == Token::NOT) {
|
| - // Swap the true and false targets but keep the same actual label
|
| - // as the fall through.
|
| - destination()->Invert();
|
| - LoadCondition(node->expression(), destination(), true);
|
| - // Swap the labels back.
|
| - destination()->Invert();
|
| -
|
| - } else if (op == Token::DELETE) {
|
| - Property* property = node->expression()->AsProperty();
|
| - if (property != NULL) {
|
| - Load(property->obj());
|
| - Load(property->key());
|
| - Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
|
| - frame_->Push(&answer);
|
| - return;
|
| - }
|
| -
|
| - Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
|
| - if (variable != NULL) {
|
| - Slot* slot = variable->slot();
|
| - if (variable->is_global()) {
|
| - LoadGlobal();
|
| - frame_->Push(variable->name());
|
| - Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
|
| - CALL_FUNCTION, 2);
|
| - frame_->Push(&answer);
|
| - return;
|
| -
|
| - } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
|
| - // Call the runtime to look up the context holding the named
|
| - // variable. Sync the virtual frame eagerly so we can push the
|
| - // arguments directly into place.
|
| - frame_->SyncRange(0, frame_->element_count() - 1);
|
| - frame_->EmitPush(rsi);
|
| - frame_->EmitPush(variable->name());
|
| - Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
|
| - ASSERT(context.is_register());
|
| - frame_->EmitPush(context.reg());
|
| - context.Unuse();
|
| - frame_->EmitPush(variable->name());
|
| - Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
|
| - CALL_FUNCTION, 2);
|
| - frame_->Push(&answer);
|
| - return;
|
| - }
|
| -
|
| - // Default: Result of deleting non-global, not dynamically
|
| - // introduced variables is false.
|
| - frame_->Push(Factory::false_value());
|
| -
|
| - } else {
|
| - // Default: Result of deleting expressions is true.
|
| - Load(node->expression()); // may have side-effects
|
| - frame_->SetElementAt(0, Factory::true_value());
|
| - }
|
| -
|
| - } else if (op == Token::TYPEOF) {
|
| - // Special case for loading the typeof expression; see comment on
|
| - // LoadTypeofExpression().
|
| - LoadTypeofExpression(node->expression());
|
| - Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
|
| - frame_->Push(&answer);
|
| -
|
| - } else if (op == Token::VOID) {
|
| - Expression* expression = node->expression();
|
| - if (expression && expression->AsLiteral() && (
|
| - expression->AsLiteral()->IsTrue() ||
|
| - expression->AsLiteral()->IsFalse() ||
|
| - expression->AsLiteral()->handle()->IsNumber() ||
|
| - expression->AsLiteral()->handle()->IsString() ||
|
| - expression->AsLiteral()->handle()->IsJSRegExp() ||
|
| - expression->AsLiteral()->IsNull())) {
|
| - // Omit evaluating the value of the primitive literal.
|
| - // It will be discarded anyway, and can have no side effect.
|
| - frame_->Push(Factory::undefined_value());
|
| - } else {
|
| - Load(node->expression());
|
| - frame_->SetElementAt(0, Factory::undefined_value());
|
| - }
|
| -
|
| - } else {
|
| - bool can_overwrite =
|
| - (node->expression()->AsBinaryOperation() != NULL &&
|
| - node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
|
| - UnaryOverwriteMode overwrite =
|
| - can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
|
| - bool no_negative_zero = node->expression()->no_negative_zero();
|
| - Load(node->expression());
|
| - switch (op) {
|
| - case Token::NOT:
|
| - case Token::DELETE:
|
| - case Token::TYPEOF:
|
| - UNREACHABLE(); // handled above
|
| - break;
|
| -
|
| - case Token::SUB: {
|
| - GenericUnaryOpStub stub(
|
| - Token::SUB,
|
| - overwrite,
|
| - no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
|
| - Result operand = frame_->Pop();
|
| - Result answer = frame_->CallStub(&stub, &operand);
|
| - answer.set_type_info(TypeInfo::Number());
|
| - frame_->Push(&answer);
|
| - break;
|
| - }
|
| -
|
| - case Token::BIT_NOT: {
|
| - // Smi check.
|
| - JumpTarget smi_label;
|
| - JumpTarget continue_label;
|
| - Result operand = frame_->Pop();
|
| - operand.ToRegister();
|
| -
|
| - Condition is_smi = masm_->CheckSmi(operand.reg());
|
| - smi_label.Branch(is_smi, &operand);
|
| -
|
| - GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
|
| - Result answer = frame_->CallStub(&stub, &operand);
|
| - continue_label.Jump(&answer);
|
| -
|
| - smi_label.Bind(&answer);
|
| - answer.ToRegister();
|
| - frame_->Spill(answer.reg());
|
| - __ SmiNot(answer.reg(), answer.reg());
|
| - continue_label.Bind(&answer);
|
| - answer.set_type_info(TypeInfo::Smi());
|
| - frame_->Push(&answer);
|
| - break;
|
| - }
|
| -
|
| - case Token::ADD: {
|
| - // Smi check.
|
| - JumpTarget continue_label;
|
| - Result operand = frame_->Pop();
|
| - TypeInfo operand_info = operand.type_info();
|
| - operand.ToRegister();
|
| - Condition is_smi = masm_->CheckSmi(operand.reg());
|
| - continue_label.Branch(is_smi, &operand);
|
| - frame_->Push(&operand);
|
| - Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
|
| - CALL_FUNCTION, 1);
|
| -
|
| - continue_label.Bind(&answer);
|
| - if (operand_info.IsSmi()) {
|
| - answer.set_type_info(TypeInfo::Smi());
|
| - } else if (operand_info.IsInteger32()) {
|
| - answer.set_type_info(TypeInfo::Integer32());
|
| - } else {
|
| - answer.set_type_info(TypeInfo::Number());
|
| - }
|
| - frame_->Push(&answer);
|
| - break;
|
| - }
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| - }
|
| -}
|
| -
|
| -
|
| -// The value in dst was optimistically incremented or decremented.
|
| -// The result overflowed or was not smi tagged. Call into the runtime
|
| -// to convert the argument to a number, and call the specialized add
|
| -// or subtract stub. The result is left in dst.
|
| -class DeferredPrefixCountOperation: public DeferredCode {
|
| - public:
|
| - DeferredPrefixCountOperation(Register dst,
|
| - bool is_increment,
|
| - TypeInfo input_type)
|
| - : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
|
| - set_comment("[ DeferredCountOperation");
|
| - }
|
| -
|
| - virtual void Generate();
|
| -
|
| - private:
|
| - Register dst_;
|
| - bool is_increment_;
|
| - TypeInfo input_type_;
|
| -};
|
| -
|
| -
|
| -void DeferredPrefixCountOperation::Generate() {
|
| - Register left;
|
| - if (input_type_.IsNumber()) {
|
| - left = dst_;
|
| - } else {
|
| - __ push(dst_);
|
| - __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
|
| - left = rax;
|
| - }
|
| -
|
| - GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
|
| - NO_OVERWRITE,
|
| - NO_GENERIC_BINARY_FLAGS,
|
| - TypeInfo::Number());
|
| - stub.GenerateCall(masm_, left, Smi::FromInt(1));
|
| -
|
| - if (!dst_.is(rax)) __ movq(dst_, rax);
|
| -}
|
| -
|
| -
|
| -// The value in dst was optimistically incremented or decremented.
|
| -// The result overflowed or was not smi tagged. Call into the runtime
|
| -// to convert the argument to a number. Update the original value in
|
| -// old. Call the specialized add or subtract stub. The result is
|
| -// left in dst.
|
| -class DeferredPostfixCountOperation: public DeferredCode {
|
| - public:
|
| - DeferredPostfixCountOperation(Register dst,
|
| - Register old,
|
| - bool is_increment,
|
| - TypeInfo input_type)
|
| - : dst_(dst),
|
| - old_(old),
|
| - is_increment_(is_increment),
|
| - input_type_(input_type) {
|
| - set_comment("[ DeferredCountOperation");
|
| - }
|
| -
|
| - virtual void Generate();
|
| -
|
| - private:
|
| - Register dst_;
|
| - Register old_;
|
| - bool is_increment_;
|
| - TypeInfo input_type_;
|
| -};
|
| -
|
| -
|
| -void DeferredPostfixCountOperation::Generate() {
|
| - Register left;
|
| - if (input_type_.IsNumber()) {
|
| - __ push(dst_); // Save the input to use as the old value.
|
| - left = dst_;
|
| - } else {
|
| - __ push(dst_);
|
| - __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
|
| - __ push(rax); // Save the result of ToNumber to use as the old value.
|
| - left = rax;
|
| - }
|
| -
|
| - GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
|
| - NO_OVERWRITE,
|
| - NO_GENERIC_BINARY_FLAGS,
|
| - TypeInfo::Number());
|
| - stub.GenerateCall(masm_, left, Smi::FromInt(1));
|
| -
|
| - if (!dst_.is(rax)) __ movq(dst_, rax);
|
| - __ pop(old_);
|
| -}
|
| -
|
| -
|
| -void CodeGenerator::VisitCountOperation(CountOperation* node) {
|
| - Comment cmnt(masm_, "[ CountOperation");
|
| -
|
| - bool is_postfix = node->is_postfix();
|
| - bool is_increment = node->op() == Token::INC;
|
| -
|
| - Variable* var = node->expression()->AsVariableProxy()->AsVariable();
|
| - bool is_const = (var != NULL && var->mode() == Variable::CONST);
|
| -
|
| - // Postfix operations need a stack slot under the reference to hold
|
| - // the old value while the new value is being stored. This is so that
|
| - // in the case that storing the new value requires a call, the old
|
| - // value will be in the frame to be spilled.
|
| - if (is_postfix) frame_->Push(Smi::FromInt(0));
|
| -
|
| - // A constant reference is not saved to, so the reference is not a
|
| - // compound assignment reference.
|
| - { Reference target(this, node->expression(), !is_const);
|
| - if (target.is_illegal()) {
|
| - // Spoof the virtual frame to have the expected height (one higher
|
| - // than on entry).
|
| - if (!is_postfix) frame_->Push(Smi::FromInt(0));
|
| - return;
|
| - }
|
| - target.TakeValue();
|
| -
|
| - Result new_value = frame_->Pop();
|
| - new_value.ToRegister();
|
| -
|
| - Result old_value; // Only allocated in the postfix case.
|
| - if (is_postfix) {
|
| - // Allocate a temporary to preserve the old value.
|
| - old_value = allocator_->Allocate();
|
| - ASSERT(old_value.is_valid());
|
| - __ movq(old_value.reg(), new_value.reg());
|
| -
|
| - // The return value for postfix operations is ToNumber(input).
|
| - // Keep more precise type info if the input is some kind of
|
| - // number already. If the input is not a number we have to wait
|
| - // for the deferred code to convert it.
|
| - if (new_value.type_info().IsNumber()) {
|
| - old_value.set_type_info(new_value.type_info());
|
| - }
|
| - }
|
| - // Ensure the new value is writable.
|
| - frame_->Spill(new_value.reg());
|
| -
|
| - DeferredCode* deferred = NULL;
|
| - if (is_postfix) {
|
| - deferred = new DeferredPostfixCountOperation(new_value.reg(),
|
| - old_value.reg(),
|
| - is_increment,
|
| - new_value.type_info());
|
| - } else {
|
| - deferred = new DeferredPrefixCountOperation(new_value.reg(),
|
| - is_increment,
|
| - new_value.type_info());
|
| - }
|
| -
|
| - if (new_value.is_smi()) {
|
| - if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
|
| - } else {
|
| - __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
|
| - }
|
| - if (is_increment) {
|
| - __ SmiAddConstant(new_value.reg(),
|
| - new_value.reg(),
|
| - Smi::FromInt(1),
|
| - deferred->entry_label());
|
| - } else {
|
| - __ SmiSubConstant(new_value.reg(),
|
| - new_value.reg(),
|
| - Smi::FromInt(1),
|
| - deferred->entry_label());
|
| - }
|
| - deferred->BindExit();
|
| -
|
| - // Postfix count operations return their input converted to
|
| - // number. The case when the input is already a number is covered
|
| - // above in the allocation code for old_value.
|
| - if (is_postfix && !new_value.type_info().IsNumber()) {
|
| - old_value.set_type_info(TypeInfo::Number());
|
| - }
|
| -
|
| - new_value.set_type_info(TypeInfo::Number());
|
| -
|
| - // Postfix: store the old value in the allocated slot under the
|
| - // reference.
|
| - if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
|
| -
|
| - frame_->Push(&new_value);
|
| - // Non-constant: update the reference.
|
| - if (!is_const) target.SetValue(NOT_CONST_INIT);
|
| - }
|
| -
|
| - // Postfix: drop the new value and use the old.
|
| - if (is_postfix) frame_->Drop();
|
| -}
|
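The fast path above is an optimistic smi add: SmiAddConstant performs the addition
and branches to the deferred entry label on overflow (or the JumpIfNotSmi guard fires
first), so only the uncommon cases pay for a stub call. A rough C++ sketch of the
guard, assuming an untagged 32-bit payload (V8's actual smi tagging differs):

    #include <cstdint>

    // Returns true and writes the result when the add stays in range;
    // returns false to signal the deferred slow path.
    bool TryFastIncrement(int32_t value, int32_t* result) {
      return !__builtin_add_overflow(value, 1, result);  // GCC/Clang builtin
    }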
| -
|
| -
|
| -void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
|
| - // According to ECMA-262 section 11.11, page 58, the binary logical
|
| - // operators must yield the result of one of the two expressions
|
| - // before any ToBoolean() conversions. This means that the value
|
| - // produced by a && or || operator is not necessarily a boolean.
|
| -
|
| - // NOTE: If the left hand side produces a materialized value (not
|
| - // control flow), we force the right hand side to do the same. This
|
| - // is necessary because we assume that if we get control flow on the
|
| - // last path out of an expression we got it on all paths.
|
| - if (node->op() == Token::AND) {
|
| - JumpTarget is_true;
|
| - ControlDestination dest(&is_true, destination()->false_target(), true);
|
| - LoadCondition(node->left(), &dest, false);
|
| -
|
| - if (dest.false_was_fall_through()) {
|
| - // The current false target was used as the fall-through. If
|
| - // there are no dangling jumps to is_true then the left
|
| - // subexpression was unconditionally false. Otherwise we have
|
| - // paths where we do have to evaluate the right subexpression.
|
| - if (is_true.is_linked()) {
|
| - // We need to compile the right subexpression. If the jump to
|
| - // the current false target was a forward jump then we have a
|
| - // valid frame, we have just bound the false target, and we
|
| - // have to jump around the code for the right subexpression.
|
| - if (has_valid_frame()) {
|
| - destination()->false_target()->Unuse();
|
| - destination()->false_target()->Jump();
|
| - }
|
| - is_true.Bind();
|
| - // The left subexpression compiled to control flow, so the
|
| - // right one is free to do so as well.
|
| - LoadCondition(node->right(), destination(), false);
|
| - } else {
|
| - // We have actually just jumped to or bound the current false
|
| - // target but the current control destination is not marked as
|
| - // used.
|
| - destination()->Use(false);
|
| - }
|
| -
|
| - } else if (dest.is_used()) {
|
| - // The left subexpression compiled to control flow (and is_true
|
| - // was just bound), so the right is free to do so as well.
|
| - LoadCondition(node->right(), destination(), false);
|
| -
|
| - } else {
|
| - // We have a materialized value on the frame, so we exit with
|
| - // one on all paths. There are possibly also jumps to is_true
|
| - // from nested subexpressions.
|
| - JumpTarget pop_and_continue;
|
| - JumpTarget exit;
|
| -
|
| - // Avoid popping the result if it converts to 'false' using the
|
| - // standard ToBoolean() conversion as described in ECMA-262,
|
| - // section 9.2, page 30.
|
| - //
|
| - // Duplicate the TOS value. The duplicate will be popped by
|
| - // ToBoolean.
|
| - frame_->Dup();
|
| - ControlDestination dest(&pop_and_continue, &exit, true);
|
| - ToBoolean(&dest);
|
| -
|
| - // Pop the result of evaluating the first part.
|
| - frame_->Drop();
|
| -
|
| - // Compile right side expression.
|
| - is_true.Bind();
|
| - Load(node->right());
|
| -
|
| - // Exit (always with a materialized value).
|
| - exit.Bind();
|
| - }
|
| -
|
| - } else {
|
| - ASSERT(node->op() == Token::OR);
|
| - JumpTarget is_false;
|
| - ControlDestination dest(destination()->true_target(), &is_false, false);
|
| - LoadCondition(node->left(), &dest, false);
|
| -
|
| - if (dest.true_was_fall_through()) {
|
| - // The current true target was used as the fall-through. If
|
| - // there are no dangling jumps to is_false then the left
|
| - // subexpression was unconditionally true. Otherwise we have
|
| - // paths where we do have to evaluate the right subexpression.
|
| - if (is_false.is_linked()) {
|
| - // We need to compile the right subexpression. If the jump to
|
| - // the current true target was a forward jump then we have a
|
| - // valid frame, we have just bound the true target, and we
|
| - // have to jump around the code for the right subexpression.
|
| - if (has_valid_frame()) {
|
| - destination()->true_target()->Unuse();
|
| - destination()->true_target()->Jump();
|
| - }
|
| - is_false.Bind();
|
| - // The left subexpression compiled to control flow, so the
|
| - // right one is free to do so as well.
|
| - LoadCondition(node->right(), destination(), false);
|
| - } else {
|
| - // We have just jumped to or bound the current true target but
|
| - // the current control destination is not marked as used.
|
| - destination()->Use(true);
|
| - }
|
| -
|
| - } else if (dest.is_used()) {
|
| - // The left subexpression compiled to control flow (and is_false
|
| - // was just bound), so the right is free to do so as well.
|
| - LoadCondition(node->right(), destination(), false);
|
| -
|
| - } else {
|
| - // We have a materialized value on the frame, so we exit with
|
| - // one on all paths. There are possibly also jumps to is_false
|
| - // from nested subexpressions.
|
| - JumpTarget pop_and_continue;
|
| - JumpTarget exit;
|
| -
|
| - // Avoid popping the result if it converts to 'true' using the
|
| - // standard ToBoolean() conversion as described in ECMA-262,
|
| - // section 9.2, page 30.
|
| - //
|
| - // Duplicate the TOS value. The duplicate will be popped by
|
| - // ToBoolean.
|
| - frame_->Dup();
|
| - ControlDestination dest(&exit, &pop_and_continue, false);
|
| - ToBoolean(&dest);
|
| -
|
| - // Pop the result of evaluating the first part.
|
| - frame_->Drop();
|
| -
|
| - // Compile right side expression.
|
| - is_false.Bind();
|
| - Load(node->right());
|
| -
|
| - // Exit (always with a materialized value).
|
| - exit.Bind();
|
| - }
|
| - }
|
| -}
|
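Since && and || yield one of the operand values rather than a boolean, the generated
code has to keep the left value alive until ToBoolean decides. A small C++ sketch of
the value-producing form, using doubles where 0 and NaN are the falsy cases:

    #include <functional>

    // `a && b`: return a itself when it is falsy, otherwise evaluate b.
    double LogicalAnd(double a, const std::function<double()>& load_b) {
      const bool truthy = (a != 0.0) && (a == a);  // a == a filters out NaN
      return truthy ? load_b() : a;                // right side stays lazy
    }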
| -
|
| -void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
|
| - Comment cmnt(masm_, "[ BinaryOperation");
|
| -
|
| - if (node->op() == Token::AND || node->op() == Token::OR) {
|
| - GenerateLogicalBooleanOperation(node);
|
| - } else {
|
| - // NOTE: The code below assumes that the slow cases (calls to runtime)
|
| - // never return a constant/immutable object.
|
| - OverwriteMode overwrite_mode = NO_OVERWRITE;
|
| - if (node->left()->AsBinaryOperation() != NULL &&
|
| - node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
|
| - overwrite_mode = OVERWRITE_LEFT;
|
| - } else if (node->right()->AsBinaryOperation() != NULL &&
|
| - node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
|
| - overwrite_mode = OVERWRITE_RIGHT;
|
| - }
|
| -
|
| - if (node->left()->IsTrivial()) {
|
| - Load(node->right());
|
| - Result right = frame_->Pop();
|
| - frame_->Push(node->left());
|
| - frame_->Push(&right);
|
| - } else {
|
| - Load(node->left());
|
| - Load(node->right());
|
| - }
|
| - GenericBinaryOperation(node, overwrite_mode);
|
| - }
|
| -}
|
| -
|
| -
|
| -
|
| -void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
|
| - Comment cmnt(masm_, "[ CompareOperation");
|
| -
|
| - // Get the expressions from the node.
|
| - Expression* left = node->left();
|
| - Expression* right = node->right();
|
| - Token::Value op = node->op();
|
| - // To make typeof testing for natives implemented in JavaScript really
|
| - // efficient, we generate special code for expressions of the form:
|
| - // 'typeof <expression> == <string>'.
|
| - UnaryOperation* operation = left->AsUnaryOperation();
|
| - if ((op == Token::EQ || op == Token::EQ_STRICT) &&
|
| - (operation != NULL && operation->op() == Token::TYPEOF) &&
|
| - (right->AsLiteral() != NULL &&
|
| - right->AsLiteral()->handle()->IsString())) {
|
| - Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
|
| -
|
| - // Load the operand and move it to a register.
|
| - LoadTypeofExpression(operation->expression());
|
| - Result answer = frame_->Pop();
|
| - answer.ToRegister();
|
| -
|
| - if (check->Equals(Heap::number_symbol())) {
|
| - Condition is_smi = masm_->CheckSmi(answer.reg());
|
| - destination()->true_target()->Branch(is_smi);
|
| - frame_->Spill(answer.reg());
|
| - __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
|
| - __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
|
| - answer.Unuse();
|
| - destination()->Split(equal);
|
| -
|
| - } else if (check->Equals(Heap::string_symbol())) {
|
| - Condition is_smi = masm_->CheckSmi(answer.reg());
|
| - destination()->false_target()->Branch(is_smi);
|
| -
|
| - // It can be an undetectable string object.
|
| - __ movq(kScratchRegister,
|
| - FieldOperand(answer.reg(), HeapObject::kMapOffset));
|
| - __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
|
| - Immediate(1 << Map::kIsUndetectable));
|
| - destination()->false_target()->Branch(not_zero);
|
| - __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
|
| - answer.Unuse();
|
| - destination()->Split(below); // Unsigned byte comparison needed.
|
| -
|
| - } else if (check->Equals(Heap::boolean_symbol())) {
|
| - __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
|
| - destination()->true_target()->Branch(equal);
|
| - __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
|
| - answer.Unuse();
|
| - destination()->Split(equal);
|
| -
|
| - } else if (check->Equals(Heap::undefined_symbol())) {
|
| - __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
|
| - destination()->true_target()->Branch(equal);
|
| -
|
| - Condition is_smi = masm_->CheckSmi(answer.reg());
|
| - destination()->false_target()->Branch(is_smi);
|
| -
|
| - // It can be an undetectable object.
|
| - __ movq(kScratchRegister,
|
| - FieldOperand(answer.reg(), HeapObject::kMapOffset));
|
| - __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
|
| - Immediate(1 << Map::kIsUndetectable));
|
| - answer.Unuse();
|
| - destination()->Split(not_zero);
|
| -
|
| - } else if (check->Equals(Heap::function_symbol())) {
|
| - Condition is_smi = masm_->CheckSmi(answer.reg());
|
| - destination()->false_target()->Branch(is_smi);
|
| - frame_->Spill(answer.reg());
|
| - __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
|
| - destination()->true_target()->Branch(equal);
|
| - // Regular expressions are callable so typeof == 'function'.
|
| - __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
|
| - answer.Unuse();
|
| - destination()->Split(equal);
|
| -
|
| - } else if (check->Equals(Heap::object_symbol())) {
|
| - Condition is_smi = masm_->CheckSmi(answer.reg());
|
| - destination()->false_target()->Branch(is_smi);
|
| - __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
|
| - destination()->true_target()->Branch(equal);
|
| -
|
| - // Regular expressions are typeof == 'function', not 'object'.
|
| - __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
|
| - destination()->false_target()->Branch(equal);
|
| -
|
| - // It can be an undetectable object.
|
| - __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
|
| - Immediate(1 << Map::kIsUndetectable));
|
| - destination()->false_target()->Branch(not_zero);
|
| - __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
|
| - destination()->false_target()->Branch(below);
|
| - __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
|
| - answer.Unuse();
|
| - destination()->Split(below_equal);
|
| - } else {
|
| - // Uncommon case: typeof testing against a string literal that is
|
| - // never returned from the typeof operator.
|
| - answer.Unuse();
|
| - destination()->Goto(false);
|
| - }
|
| - return;
|
| - }
|
| -
|
| - Condition cc = no_condition;
|
| - bool strict = false;
|
| - switch (op) {
|
| - case Token::EQ_STRICT:
|
| - strict = true;
|
| - // Fall through
|
| - case Token::EQ:
|
| - cc = equal;
|
| - break;
|
| - case Token::LT:
|
| - cc = less;
|
| - break;
|
| - case Token::GT:
|
| - cc = greater;
|
| - break;
|
| - case Token::LTE:
|
| - cc = less_equal;
|
| - break;
|
| - case Token::GTE:
|
| - cc = greater_equal;
|
| - break;
|
| - case Token::IN: {
|
| - Load(left);
|
| - Load(right);
|
| - Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
|
| - frame_->Push(&answer); // Push the result.
|
| - return;
|
| - }
|
| - case Token::INSTANCEOF: {
|
| - Load(left);
|
| - Load(right);
|
| - InstanceofStub stub;
|
| - Result answer = frame_->CallStub(&stub, 2);
|
| - answer.ToRegister();
|
| - __ testq(answer.reg(), answer.reg());
|
| - answer.Unuse();
|
| - destination()->Split(zero);
|
| - return;
|
| - }
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| -
|
| - if (left->IsTrivial()) {
|
| - Load(right);
|
| - Result right_result = frame_->Pop();
|
| - frame_->Push(left);
|
| - frame_->Push(&right_result);
|
| - } else {
|
| - Load(left);
|
| - Load(right);
|
| - }
|
| -
|
| - Comparison(node, cc, strict, destination());
|
| -}
|
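One subtlety in the typeof fast paths above: 'undefined' must also be reported for
undetectable objects, and smis must be rejected before any map load. A compact C++
sketch of that branch order, with the flags standing in for the smi check and the
map bit test:

    enum class Typeof { kUndefined, kOther };

    Typeof ClassifyUndefined(bool is_undefined_value, bool is_smi,
                             bool map_is_undetectable) {
      if (is_undefined_value) return Typeof::kUndefined;
      if (is_smi) return Typeof::kOther;          // smis are numbers
      return map_is_undetectable ? Typeof::kUndefined : Typeof::kOther;
    }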
| -
|
| -
|
| -void CodeGenerator::VisitThisFunction(ThisFunction* node) {
|
| - frame_->PushFunction();
|
| -}
|
| -
|
| -
|
| -void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
|
| +void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
|
| ASSERT(args->length() == 1);
|
| -
|
| - // ArgumentsAccessStub expects the key in rdx and the formal
|
| - // parameter count in rax.
|
| Load(args->at(0));
|
| - Result key = frame_->Pop();
|
| - // Explicitly create a constant result.
|
| - Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
|
| - // Call the shared stub to get to arguments[key].
|
| - ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
|
| - Result result = frame_->CallStub(&stub, &key, &count);
|
| - frame_->Push(&result);
|
| -}
|
| -
|
| -
|
| -void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
|
| - ASSERT(args->length() == 1);
|
| - Load(args->at(0));
|
| Result value = frame_->Pop();
|
| value.ToRegister();
|
| ASSERT(value.is_valid());
|
| Condition is_smi = masm_->CheckSmi(value.reg());
|
| - destination()->false_target()->Branch(is_smi);
|
| - // It is a heap object - get map.
|
| - // Check if the object is a JS array or not.
|
| - __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
|
| value.Unuse();
|
| - destination()->Split(equal);
|
| + destination()->Split(is_smi);
|
| }
|
|
|
|
|
| -void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
|
| +void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
|
| + // Conditionally generate a log call.
|
| + // Args:
|
| + // 0 (literal string): The type of logging (corresponds to the flags).
|
| + // This is used to determine whether or not to generate the log call.
|
| + // 1 (string): Format string. Access the string at argument index 2
|
| + // with '%2s' (see Logger::LogRuntime for all the formats).
|
| + // 2 (array): Arguments to the format string.
|
| + ASSERT_EQ(args->length(), 3);
|
| +#ifdef ENABLE_LOGGING_AND_PROFILING
|
| + if (ShouldGenerateLog(args->at(0))) {
|
| + Load(args->at(1));
|
| + Load(args->at(2));
|
| + frame_->CallRuntime(Runtime::kLog, 2);
|
| + }
|
| +#endif
|
| + // Finally, we're expected to leave a value on the top of the stack.
|
| + frame_->Push(Factory::undefined_value());
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
|
| ASSERT(args->length() == 1);
|
| Load(args->at(0));
|
| Result value = frame_->Pop();
|
| value.ToRegister();
|
| ASSERT(value.is_valid());
|
| - Condition is_smi = masm_->CheckSmi(value.reg());
|
| - destination()->false_target()->Branch(is_smi);
|
| - // It is a heap object - get map.
|
| - // Check if the object is a regexp.
|
| - __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
|
| + Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
|
| value.Unuse();
|
| - destination()->Split(equal);
|
| + destination()->Split(positive_smi);
|
| }
|
|
|
|
|
| -void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
|
| - // This generates a fast version of:
|
| - // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
|
| - ASSERT(args->length() == 1);
|
| - Load(args->at(0));
|
| - Result obj = frame_->Pop();
|
| - obj.ToRegister();
|
| - Condition is_smi = masm_->CheckSmi(obj.reg());
|
| - destination()->false_target()->Branch(is_smi);
|
| -
|
| - __ Move(kScratchRegister, Factory::null_value());
|
| - __ cmpq(obj.reg(), kScratchRegister);
|
| - destination()->true_target()->Branch(equal);
|
| -
|
| - __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
|
| - // Undetectable objects behave like undefined when tested with typeof.
|
| - __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
|
| - Immediate(1 << Map::kIsUndetectable));
|
| - destination()->false_target()->Branch(not_zero);
|
| - __ movzxbq(kScratchRegister,
|
| - FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
|
| - __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
|
| - destination()->false_target()->Branch(below);
|
| - __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
|
| - obj.Unuse();
|
| - destination()->Split(below_equal);
|
| -}
|
| -
|
| -
|
| -void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
|
| - // This generates a fast version of:
|
| - // (%_ClassOf(arg) === 'Function')
|
| - ASSERT(args->length() == 1);
|
| - Load(args->at(0));
|
| - Result obj = frame_->Pop();
|
| - obj.ToRegister();
|
| - Condition is_smi = masm_->CheckSmi(obj.reg());
|
| - destination()->false_target()->Branch(is_smi);
|
| - __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
|
| - obj.Unuse();
|
| - destination()->Split(equal);
|
| -}
|
| -
|
| -
|
| -void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
|
| - ASSERT(args->length() == 1);
|
| - Load(args->at(0));
|
| - Result obj = frame_->Pop();
|
| - obj.ToRegister();
|
| - Condition is_smi = masm_->CheckSmi(obj.reg());
|
| - destination()->false_target()->Branch(is_smi);
|
| - __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
|
| - __ movzxbl(kScratchRegister,
|
| - FieldOperand(kScratchRegister, Map::kBitFieldOffset));
|
| - __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
|
| - obj.Unuse();
|
| - destination()->Split(not_zero);
|
| -}
|
| -
|
| -
|
| -void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
|
| - ASSERT(args->length() == 0);
|
| -
|
| - // Get the frame pointer for the calling frame.
|
| - Result fp = allocator()->Allocate();
|
| - __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
|
| -
|
| - // Skip the arguments adaptor frame if it exists.
|
| - Label check_frame_marker;
|
| - __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
|
| - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
|
| - __ j(not_equal, &check_frame_marker);
|
| - __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
|
| -
|
| - // Check the marker in the calling frame.
|
| - __ bind(&check_frame_marker);
|
| - __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
|
| - Smi::FromInt(StackFrame::CONSTRUCT));
|
| - fp.Unuse();
|
| - destination()->Split(equal);
|
| -}
|
| -
|
| -
|
| -void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
|
| - ASSERT(args->length() == 0);
|
| -
|
| - Result fp = allocator_->Allocate();
|
| - Result result = allocator_->Allocate();
|
| - ASSERT(fp.is_valid() && result.is_valid());
|
| -
|
| - Label exit;
|
| -
|
| - // Get the number of formal parameters.
|
| - __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
|
| -
|
| - // Check if the calling frame is an arguments adaptor frame.
|
| - __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
|
| - __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
|
| - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
|
| - __ j(not_equal, &exit);
|
| -
|
| - // Arguments adaptor case: Read the arguments length from the
|
| - // adaptor frame.
|
| - __ movq(result.reg(),
|
| - Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| -
|
| - __ bind(&exit);
|
| - result.set_type_info(TypeInfo::Smi());
|
| - if (FLAG_debug_code) {
|
| - __ AbortIfNotSmi(result.reg());
|
| - }
|
| - frame_->Push(&result);
|
| -}
|
| -
|
| -
|
| class DeferredStringCharCodeAt : public DeferredCode {
|
| public:
|
| DeferredStringCharCodeAt(Register object,
|
| @@ -4352,275 +5612,293 @@
|
| }
|
|
|
|
|
| -void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
|
| +void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
|
| ASSERT(args->length() == 1);
|
| Load(args->at(0));
|
| Result value = frame_->Pop();
|
| value.ToRegister();
|
| ASSERT(value.is_valid());
|
| - Condition positive_smi = masm_->CheckPositiveSmi(value.reg());
|
| + Condition is_smi = masm_->CheckSmi(value.reg());
|
| + destination()->false_target()->Branch(is_smi);
|
| + // It is a heap object - get map.
|
| + // Check if the object is a JS array or not.
|
| + __ CmpObjectType(value.reg(), JS_ARRAY_TYPE, kScratchRegister);
|
| value.Unuse();
|
| - destination()->Split(positive_smi);
|
| + destination()->Split(equal);
|
| }
|
|
|
|
|
| -// Generates the Math.pow method. Only handles special cases and
|
| -// branches to the runtime system for everything else. Please note
|
| -// that this function assumes that the callsite has executed ToNumber
|
| -// on both arguments.
|
| -void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
|
| - ASSERT(args->length() == 2);
|
| +void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
|
| + ASSERT(args->length() == 1);
|
| Load(args->at(0));
|
| - Load(args->at(1));
|
| + Result value = frame_->Pop();
|
| + value.ToRegister();
|
| + ASSERT(value.is_valid());
|
| + Condition is_smi = masm_->CheckSmi(value.reg());
|
| + destination()->false_target()->Branch(is_smi);
|
| + // It is a heap object - get map.
|
| + // Check if the object is a regexp.
|
| + __ CmpObjectType(value.reg(), JS_REGEXP_TYPE, kScratchRegister);
|
| + value.Unuse();
|
| + destination()->Split(equal);
|
| +}
|
|
|
| - Label allocate_return;
|
| - // Load the two operands while leaving the values on the frame.
|
| - frame()->Dup();
|
| - Result exponent = frame()->Pop();
|
| - exponent.ToRegister();
|
| - frame()->Spill(exponent.reg());
|
| - frame()->PushElementAt(1);
|
| - Result base = frame()->Pop();
|
| - base.ToRegister();
|
| - frame()->Spill(base.reg());
|
|
|
| - Result answer = allocator()->Allocate();
|
| - ASSERT(answer.is_valid());
|
| - ASSERT(!exponent.reg().is(base.reg()));
|
| - JumpTarget call_runtime;
|
| +void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
|
| + // This generates a fast version of:
|
| + // (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
|
| + ASSERT(args->length() == 1);
|
| + Load(args->at(0));
|
| + Result obj = frame_->Pop();
|
| + obj.ToRegister();
|
| + Condition is_smi = masm_->CheckSmi(obj.reg());
|
| + destination()->false_target()->Branch(is_smi);
|
|
|
| - // Save 1 in xmm3 - we need this several times later on.
|
| - __ movl(answer.reg(), Immediate(1));
|
| - __ cvtlsi2sd(xmm3, answer.reg());
|
| + __ Move(kScratchRegister, Factory::null_value());
|
| + __ cmpq(obj.reg(), kScratchRegister);
|
| + destination()->true_target()->Branch(equal);
|
|
|
| - Label exponent_nonsmi;
|
| - Label base_nonsmi;
|
| - // If the exponent is a heap number go to that specific case.
|
| - __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
|
| - __ JumpIfNotSmi(base.reg(), &base_nonsmi);
|
| + __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
|
| + // Undetectable objects behave like undefined when tested with typeof.
|
| + __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
|
| + Immediate(1 << Map::kIsUndetectable));
|
| + destination()->false_target()->Branch(not_zero);
|
| + __ movzxbq(kScratchRegister,
|
| + FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
|
| + __ cmpq(kScratchRegister, Immediate(FIRST_JS_OBJECT_TYPE));
|
| + destination()->false_target()->Branch(below);
|
| + __ cmpq(kScratchRegister, Immediate(LAST_JS_OBJECT_TYPE));
|
| + obj.Unuse();
|
| + destination()->Split(below_equal);
|
| +}
|
|
|
| - // Optimized version when y is an integer.
|
| - Label powi;
|
| - __ SmiToInteger32(base.reg(), base.reg());
|
| - __ cvtlsi2sd(xmm0, base.reg());
|
| - __ jmp(&powi);
|
| - // The exponent is a smi and the base is a heap number.
|
| - __ bind(&base_nonsmi);
|
| - __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
|
| - Heap::kHeapNumberMapRootIndex);
|
| - call_runtime.Branch(not_equal);
|
|
|
| - __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
|
| +void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
|
| + // This generates a fast version of:
|
| + // (%_ClassOf(arg) === 'Function')
|
| + ASSERT(args->length() == 1);
|
| + Load(args->at(0));
|
| + Result obj = frame_->Pop();
|
| + obj.ToRegister();
|
| + Condition is_smi = masm_->CheckSmi(obj.reg());
|
| + destination()->false_target()->Branch(is_smi);
|
| + __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
|
| + obj.Unuse();
|
| + destination()->Split(equal);
|
| +}
|
|
|
| - // Optimized version of pow if y is an integer.
|
| - __ bind(&powi);
|
| - __ SmiToInteger32(exponent.reg(), exponent.reg());
|
|
|
| - // Save exponent in base as we need to check if exponent is negative later.
|
| - // We know that base and exponent are in different registers.
|
| - __ movl(base.reg(), exponent.reg());
|
| +void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
|
| + ASSERT(args->length() == 1);
|
| + Load(args->at(0));
|
| + Result obj = frame_->Pop();
|
| + obj.ToRegister();
|
| + Condition is_smi = masm_->CheckSmi(obj.reg());
|
| + destination()->false_target()->Branch(is_smi);
|
| + __ movq(kScratchRegister, FieldOperand(obj.reg(), HeapObject::kMapOffset));
|
| + __ movzxbl(kScratchRegister,
|
| + FieldOperand(kScratchRegister, Map::kBitFieldOffset));
|
| + __ testl(kScratchRegister, Immediate(1 << Map::kIsUndetectable));
|
| + obj.Unuse();
|
| + destination()->Split(not_zero);
|
| +}
|
|
|
| - // Get absolute value of exponent.
|
| - Label no_neg;
|
| - __ cmpl(exponent.reg(), Immediate(0));
|
| - __ j(greater_equal, &no_neg);
|
| - __ negl(exponent.reg());
|
| - __ bind(&no_neg);
|
|
|
| - // Load xmm1 with 1.
|
| - __ movsd(xmm1, xmm3);
|
| - Label while_true;
|
| - Label no_multiply;
|
| +void CodeGenerator::GenerateIsConstructCall(ZoneList<Expression*>* args) {
|
| + ASSERT(args->length() == 0);
|
|
|
| - __ bind(&while_true);
|
| - __ shrl(exponent.reg(), Immediate(1));
|
| - __ j(not_carry, &no_multiply);
|
| - __ mulsd(xmm1, xmm0);
|
| - __ bind(&no_multiply);
|
| - __ testl(exponent.reg(), exponent.reg());
|
| - __ mulsd(xmm0, xmm0);
|
| - __ j(not_zero, &while_true);
|
| + // Get the frame pointer for the calling frame.
|
| + Result fp = allocator()->Allocate();
|
| + __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
|
|
|
| - // base now holds the original exponent - if it is negative, return 1/result.
|
| - __ testl(base.reg(), base.reg());
|
| - __ j(positive, &allocate_return);
|
| - // Special case if xmm1 has reached infinity.
|
| - __ movl(answer.reg(), Immediate(0x7FB00000));
|
| - __ movd(xmm0, answer.reg());
|
| - __ cvtss2sd(xmm0, xmm0);
|
| - __ ucomisd(xmm0, xmm1);
|
| - call_runtime.Branch(equal);
|
| - __ divsd(xmm3, xmm1);
|
| - __ movsd(xmm1, xmm3);
|
| - __ jmp(&allocate_return);
|
| + // Skip the arguments adaptor frame if it exists.
|
| + Label check_frame_marker;
|
| + __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
|
| + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
|
| + __ j(not_equal, &check_frame_marker);
|
| + __ movq(fp.reg(), Operand(fp.reg(), StandardFrameConstants::kCallerFPOffset));
|
|
|
| - // The exponent (or both operands) is a heap number - either way we now
|
| - // work on doubles.
|
| - __ bind(&exponent_nonsmi);
|
| - __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
|
| - Heap::kHeapNumberMapRootIndex);
|
| - call_runtime.Branch(not_equal);
|
| - __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
|
| - // Test if the exponent is NaN.
|
| - __ ucomisd(xmm1, xmm1);
|
| - call_runtime.Branch(parity_even);
|
| + // Check the marker in the calling frame.
|
| + __ bind(&check_frame_marker);
|
| + __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kMarkerOffset),
|
| + Smi::FromInt(StackFrame::CONSTRUCT));
|
| + fp.Unuse();
|
| + destination()->Split(equal);
|
| +}
|
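The frame walk above chases one caller frame pointer and skips an arguments adaptor
frame when the context slot holds the adaptor sentinel. A pointer-chasing sketch,
with the field layout and sentinels simplified for illustration:

    #include <cstdint>

    struct Frame {
      Frame* caller_fp;   // StandardFrameConstants::kCallerFPOffset
      intptr_t context;   // adaptor frames store a sentinel smi here
      intptr_t marker;    // construct frames store CONSTRUCT here
    };

    bool IsConstructCall(const Frame* fp, intptr_t adaptor_sentinel,
                         intptr_t construct_marker) {
      const Frame* caller = fp->caller_fp;
      if (caller->context == adaptor_sentinel)
        caller = caller->caller_fp;              // skip the adaptor frame
      return caller->marker == construct_marker;
    }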
|
|
| - Label base_not_smi;
|
| - Label handle_special_cases;
|
| - __ JumpIfNotSmi(base.reg(), &base_not_smi);
|
| - __ SmiToInteger32(base.reg(), base.reg());
|
| - __ cvtlsi2sd(xmm0, base.reg());
|
| - __ jmp(&handle_special_cases);
|
| - __ bind(&base_not_smi);
|
| - __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
|
| - Heap::kHeapNumberMapRootIndex);
|
| - call_runtime.Branch(not_equal);
|
| - __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
|
| - __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
|
| - __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
|
| - // base is NaN or +/-Infinity
|
| - call_runtime.Branch(greater_equal);
|
| - __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
|
|
|
| - // base is in xmm0 and exponent is in xmm1.
|
| - __ bind(&handle_special_cases);
|
| - Label not_minus_half;
|
| - // Test for -0.5.
|
| - // Load xmm2 with -0.5.
|
| - __ movl(answer.reg(), Immediate(0xBF000000));
|
| - __ movd(xmm2, answer.reg());
|
| - __ cvtss2sd(xmm2, xmm2);
|
| - // xmm2 now has -0.5.
|
| - __ ucomisd(xmm2, xmm1);
|
| - __ j(not_equal, ¬_minus_half);
|
| +void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
|
| + ASSERT(args->length() == 0);
|
|
|
| - // Calculates reciprocal of square root.
|
| - // Note that 1/sqrt(x) = sqrt(1/x).
|
| - __ divsd(xmm3, xmm0);
|
| - __ movsd(xmm1, xmm3);
|
| - __ sqrtsd(xmm1, xmm1);
|
| - __ jmp(&allocate_return);
|
| + Result fp = allocator_->Allocate();
|
| + Result result = allocator_->Allocate();
|
| + ASSERT(fp.is_valid() && result.is_valid());
|
|
|
| - // Test for 0.5.
|
| - __ bind(¬_minus_half);
|
| - // Load xmm2 with 0.5.
|
| - // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
|
| - __ addsd(xmm2, xmm3);
|
| - // xmm2 now has 0.5.
|
| - __ ucomisd(xmm2, xmm1);
|
| - call_runtime.Branch(not_equal);
|
| + Label exit;
|
|
|
| - // Calculates square root.
|
| - __ movsd(xmm1, xmm0);
|
| - __ sqrtsd(xmm1, xmm1);
|
| + // Get the number of formal parameters.
|
| + __ Move(result.reg(), Smi::FromInt(scope()->num_parameters()));
|
|
|
| - JumpTarget done;
|
| - Label failure, success;
|
| - __ bind(&allocate_return);
|
| - // Make a copy of the frame to enable us to handle allocation
|
| - // failure after the JumpTarget jump.
|
| - VirtualFrame* clone = new VirtualFrame(frame());
|
| - __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
|
| - __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
|
| - // Remove the two original values from the frame - we only need those
|
| - // in the case where we branch to runtime.
|
| - frame()->Drop(2);
|
| - exponent.Unuse();
|
| - base.Unuse();
|
| - done.Jump(&answer);
|
| - // Use the copy of the original frame as our current frame.
|
| - RegisterFile empty_regs;
|
| - SetFrame(clone, &empty_regs);
|
| - // If we experience an allocation failure we branch to runtime.
|
| - __ bind(&failure);
|
| - call_runtime.Bind();
|
| - answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
|
| + // Check if the calling frame is an arguments adaptor frame.
|
| + __ movq(fp.reg(), Operand(rbp, StandardFrameConstants::kCallerFPOffset));
|
| + __ SmiCompare(Operand(fp.reg(), StandardFrameConstants::kContextOffset),
|
| + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
|
| + __ j(not_equal, &exit);
|
|
|
| - done.Bind(&answer);
|
| - frame()->Push(&answer);
|
| + // Arguments adaptor case: Read the arguments length from the
|
| + // adaptor frame.
|
| + __ movq(result.reg(),
|
| + Operand(fp.reg(), ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| +
|
| + __ bind(&exit);
|
| + result.set_type_info(TypeInfo::Smi());
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotSmi(result.reg());
|
| + }
|
| + frame_->Push(&result);
|
| }
|
|
|
|
|
| -// Generates the Math.sqrt method. Please note - this function assumes that
|
| -// the callsite has executed ToNumber on the argument.
|
| -void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
|
| +void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
|
| ASSERT(args->length() == 1);
|
| - Load(args->at(0));
|
| + JumpTarget leave, null, function, non_function_constructor;
|
| + Load(args->at(0)); // Load the object.
|
| + Result obj = frame_->Pop();
|
| + obj.ToRegister();
|
| + frame_->Spill(obj.reg());
|
|
|
| - // Leave original value on the frame if we need to call runtime.
|
| - frame()->Dup();
|
| - Result result = frame()->Pop();
|
| - result.ToRegister();
|
| - frame()->Spill(result.reg());
|
| - Label runtime;
|
| - Label non_smi;
|
| - Label load_done;
|
| - JumpTarget end;
|
| + // If the object is a smi, we return null.
|
| + Condition is_smi = masm_->CheckSmi(obj.reg());
|
| + null.Branch(is_smi);
|
|
|
| - __ JumpIfNotSmi(result.reg(), &non_smi);
|
| - __ SmiToInteger32(result.reg(), result.reg());
|
| - __ cvtlsi2sd(xmm0, result.reg());
|
| - __ jmp(&load_done);
|
| - __ bind(&non_smi);
|
| - __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
|
| - Heap::kHeapNumberMapRootIndex);
|
| - __ j(not_equal, &runtime);
|
| - __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
|
| + // Check that the object is a JS object but take special care of JS
|
| + // functions to make sure they have 'Function' as their class.
|
|
|
| - __ bind(&load_done);
|
| - __ sqrtsd(xmm0, xmm0);
|
| - // A copy of the virtual frame to allow us to go to runtime after the
|
| - // JumpTarget jump.
|
| - Result scratch = allocator()->Allocate();
|
| - VirtualFrame* clone = new VirtualFrame(frame());
|
| - __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
|
| + __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
|
| + null.Branch(below);
|
|
|
| - __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
|
| - frame()->Drop(1);
|
| - scratch.Unuse();
|
| - end.Jump(&result);
|
| - // We only branch to runtime if we have an allocation error.
|
| - // Use the copy of the original frame as our current frame.
|
| - RegisterFile empty_regs;
|
| - SetFrame(clone, &empty_regs);
|
| - __ bind(&runtime);
|
| - result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
|
| + // As long as JS_FUNCTION_TYPE is the last instance type and it is
|
| + // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
|
| + // LAST_JS_OBJECT_TYPE.
|
| + ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
|
| + ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
|
| + __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
|
| + function.Branch(equal);
|
|
|
| - end.Bind(&result);
|
| - frame()->Push(&result);
|
| + // Check if the constructor in the map is a function.
|
| + __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
|
| + __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
|
| + non_function_constructor.Branch(not_equal);
|
| +
|
| + // The obj register now contains the constructor function. Grab the
|
| + // instance class name from there.
|
| + __ movq(obj.reg(),
|
| + FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
|
| + __ movq(obj.reg(),
|
| + FieldOperand(obj.reg(),
|
| + SharedFunctionInfo::kInstanceClassNameOffset));
|
| + frame_->Push(&obj);
|
| + leave.Jump();
|
| +
|
| + // Functions have class 'Function'.
|
| + function.Bind();
|
| + frame_->Push(Factory::function_class_symbol());
|
| + leave.Jump();
|
| +
|
| + // Objects with a non-function constructor have class 'Object'.
|
| + non_function_constructor.Bind();
|
| + frame_->Push(Factory::Object_symbol());
|
| + leave.Jump();
|
| +
|
| + // Non-JS objects have class null.
|
| + null.Bind();
|
| + frame_->Push(Factory::null_value());
|
| +
|
| + // All done.
|
| + leave.Bind();
|
| }
|
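The class-of logic reduces to a small decision table: smis and non-JS-objects get
class null, functions get 'Function', and other objects report their constructor's
instance class name or fall back to 'Object'. Sketch (the flags stand in for the
type and constructor checks performed above):

    const char* ClassOf(bool is_smi, bool is_js_object, bool is_function,
                        bool constructor_is_function, const char* class_name) {
      if (is_smi || !is_js_object) return nullptr;  // class null
      if (is_function) return "Function";
      return constructor_is_function ? class_name : "Object";
    }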
|
|
|
|
| -void CodeGenerator::GenerateIsSmi(ZoneList<Expression*>* args) {
|
| +void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
|
| ASSERT(args->length() == 1);
|
| - Load(args->at(0));
|
| + JumpTarget leave;
|
| + Load(args->at(0)); // Load the object.
|
| + frame_->Dup();
|
| + Result object = frame_->Pop();
|
| + object.ToRegister();
|
| + ASSERT(object.is_valid());
|
| + // if (object->IsSmi()) return object.
|
| + Condition is_smi = masm_->CheckSmi(object.reg());
|
| + leave.Branch(is_smi);
|
| + // It is a heap object - get map.
|
| + Result temp = allocator()->Allocate();
|
| + ASSERT(temp.is_valid());
|
| + // if (!object->IsJSValue()) return object.
|
| + __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
|
| + leave.Branch(not_equal);
|
| + __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
|
| + object.Unuse();
|
| + frame_->SetElementAt(0, &temp);
|
| + leave.Bind();
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
|
| + ASSERT(args->length() == 2);
|
| + JumpTarget leave;
|
| + Load(args->at(0)); // Load the object.
|
| + Load(args->at(1)); // Load the value.
|
| Result value = frame_->Pop();
|
| + Result object = frame_->Pop();
|
| value.ToRegister();
|
| - ASSERT(value.is_valid());
|
| - Condition is_smi = masm_->CheckSmi(value.reg());
|
| - value.Unuse();
|
| - destination()->Split(is_smi);
|
| + object.ToRegister();
|
| +
|
| + // if (object->IsSmi()) return value.
|
| + Condition is_smi = masm_->CheckSmi(object.reg());
|
| + leave.Branch(is_smi, &value);
|
| +
|
| + // It is a heap object - get its map.
|
| + Result scratch = allocator_->Allocate();
|
| + ASSERT(scratch.is_valid());
|
| + // if (!object->IsJSValue()) return value.
|
| + __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
|
| + leave.Branch(not_equal, &value);
|
| +
|
| + // Store the value.
|
| + __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
|
| + // Update the write barrier. Save the value as it will be
|
| + // overwritten by the write barrier code and is needed afterward.
|
| + Result duplicate_value = allocator_->Allocate();
|
| + ASSERT(duplicate_value.is_valid());
|
| + __ movq(duplicate_value.reg(), value.reg());
|
| + // The object register is also overwritten by the write barrier and
|
| + // possibly aliased in the frame.
|
| + frame_->Spill(object.reg());
|
| + __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
|
| + scratch.reg());
|
| + object.Unuse();
|
| + scratch.Unuse();
|
| + duplicate_value.Unuse();
|
| +
|
| + // Leave.
|
| + leave.Bind(&value);
|
| + frame_->Push(&value);
|
| }
|
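The register shuffling before RecordWrite is deliberate: the barrier may clobber
both the object and value registers, so the value is copied into duplicate_value
and the object is spilled out of the frame first. The barrier itself only needs to
remember the written slot; a toy store-buffer sketch of that idea (nothing like
V8's real barrier):

    #include <cstdint>
    #include <vector>

    std::vector<uintptr_t> store_buffer;  // stand-in for the remembered set

    void RecordWriteSketch(uintptr_t object, int offset) {
      // Remember the slot so the GC can later find old->new pointers.
      store_buffer.push_back(object + static_cast<uintptr_t>(offset));
    }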
|
|
|
|
| -void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
|
| - // Conditionally generate a log call.
|
| - // Args:
|
| - // 0 (literal string): The type of logging (corresponds to the flags).
|
| - // This is used to determine whether or not to generate the log call.
|
| - // 1 (string): Format string. Access the string at argument index 2
|
| - // with '%2s' (see Logger::LogRuntime for all the formats).
|
| - // 2 (array): Arguments to the format string.
|
| - ASSERT_EQ(args->length(), 3);
|
| -#ifdef ENABLE_LOGGING_AND_PROFILING
|
| - if (ShouldGenerateLog(args->at(0))) {
|
| - Load(args->at(1));
|
| - Load(args->at(2));
|
| - frame_->CallRuntime(Runtime::kLog, 2);
|
| - }
|
| -#endif
|
| - // Finally, we're expected to leave a value on the top of the stack.
|
| - frame_->Push(Factory::undefined_value());
|
| +void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
|
| + ASSERT(args->length() == 1);
|
| +
|
| + // ArgumentsAccessStub expects the key in rdx and the formal
|
| + // parameter count in rax.
|
| + Load(args->at(0));
|
| + Result key = frame_->Pop();
|
| + // Explicitly create a constant result.
|
| + Result count(Handle<Smi>(Smi::FromInt(scope()->num_parameters())));
|
| + // Call the shared stub to get to arguments[key].
|
| + ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT);
|
| + Result result = frame_->CallStub(&stub, &key, &count);
|
| + frame_->Push(&result);
|
| }
|
|
|
|
|
| @@ -4692,6 +5970,43 @@
|
| }
|
|
|
|
|
| +void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
|
| + ASSERT_EQ(2, args->length());
|
| +
|
| + Load(args->at(0));
|
| + Load(args->at(1));
|
| +
|
| + StringAddStub stub(NO_STRING_ADD_FLAGS);
|
| + Result answer = frame_->CallStub(&stub, 2);
|
| + frame_->Push(&answer);
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
|
| + ASSERT_EQ(3, args->length());
|
| +
|
| + Load(args->at(0));
|
| + Load(args->at(1));
|
| + Load(args->at(2));
|
| +
|
| + SubStringStub stub;
|
| + Result answer = frame_->CallStub(&stub, 3);
|
| + frame_->Push(&answer);
|
| +}
|
| +
|
| +
|
| +void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
|
| + ASSERT_EQ(2, args->length());
|
| +
|
| + Load(args->at(0));
|
| + Load(args->at(1));
|
| +
|
| + StringCompareStub stub;
|
| + Result answer = frame_->CallStub(&stub, 2);
|
| + frame_->Push(&answer);
|
| +}
|
| +
|
| +
|
| void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
|
| ASSERT_EQ(args->length(), 4);
|
|
|
| @@ -5133,1725 +6448,1006 @@
|
| }
|
|
|
|
|
| -void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
|
| - ASSERT_EQ(args->length(), 1);
|
| +// Generates the Math.pow method. Only handles special cases and
|
| +// branches to the runtime system for everything else. Please note
|
| +// that this function assumes that the callsite has executed ToNumber
|
| +// on both arguments.
|
| +void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
|
| + ASSERT(args->length() == 2);
|
| Load(args->at(0));
|
| - TranscendentalCacheStub stub(TranscendentalCache::SIN);
|
| - Result result = frame_->CallStub(&stub, 1);
|
| - frame_->Push(&result);
|
| -}
|
| -
|
| -
|
| -void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
|
| - ASSERT_EQ(args->length(), 1);
|
| - Load(args->at(0));
|
| - TranscendentalCacheStub stub(TranscendentalCache::COS);
|
| - Result result = frame_->CallStub(&stub, 1);
|
| - frame_->Push(&result);
|
| -}
|
| -
|
| -
|
| -void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
|
| - ASSERT_EQ(2, args->length());
|
| -
|
| - Load(args->at(0));
|
| Load(args->at(1));
|
|
|
| - StringAddStub stub(NO_STRING_ADD_FLAGS);
|
| - Result answer = frame_->CallStub(&stub, 2);
|
| - frame_->Push(&answer);
|
| -}
|
| + Label allocate_return;
|
| + // Load the two operands while leaving the values on the frame.
|
| + frame()->Dup();
|
| + Result exponent = frame()->Pop();
|
| + exponent.ToRegister();
|
| + frame()->Spill(exponent.reg());
|
| + frame()->PushElementAt(1);
|
| + Result base = frame()->Pop();
|
| + base.ToRegister();
|
| + frame()->Spill(base.reg());
|
|
|
| + Result answer = allocator()->Allocate();
|
| + ASSERT(answer.is_valid());
|
| + ASSERT(!exponent.reg().is(base.reg()));
|
| + JumpTarget call_runtime;
|
|
|
| -void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
|
| - ASSERT_EQ(3, args->length());
|
| + // Save 1 in xmm3 - we need this several times later on.
|
| + __ movl(answer.reg(), Immediate(1));
|
| + __ cvtlsi2sd(xmm3, answer.reg());
|
|
|
| - Load(args->at(0));
|
| - Load(args->at(1));
|
| - Load(args->at(2));
|
| + Label exponent_nonsmi;
|
| + Label base_nonsmi;
|
| + // If the exponent is a heap number go to that specific case.
|
| + __ JumpIfNotSmi(exponent.reg(), &exponent_nonsmi);
|
| + __ JumpIfNotSmi(base.reg(), &base_nonsmi);
|
|
|
| - SubStringStub stub;
|
| - Result answer = frame_->CallStub(&stub, 3);
|
| - frame_->Push(&answer);
|
| -}
|
| + // Optimized version when y is an integer.
|
| + Label powi;
|
| + __ SmiToInteger32(base.reg(), base.reg());
|
| + __ cvtlsi2sd(xmm0, base.reg());
|
| + __ jmp(&powi);
|
| + // The exponent is a smi and the base is a heap number.
|
| + __ bind(&base_nonsmi);
|
| + __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
|
| + Heap::kHeapNumberMapRootIndex);
|
| + call_runtime.Branch(not_equal);
|
|
|
| + __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
|
|
|
| -void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
|
| - ASSERT_EQ(2, args->length());
|
| + // Optimized version of pow if y is an integer.
|
| + __ bind(&powi);
|
| + __ SmiToInteger32(exponent.reg(), exponent.reg());
|
|
|
| - Load(args->at(0));
|
| - Load(args->at(1));
|
| + // Save exponent in base as we need to check if exponent is negative later.
|
| + // We know that base and exponent are in different registers.
|
| + __ movl(base.reg(), exponent.reg());
|
|
|
| - StringCompareStub stub;
|
| - Result answer = frame_->CallStub(&stub, 2);
|
| - frame_->Push(&answer);
|
| -}
|
| + // Get absolute value of exponent.
|
| + Label no_neg;
|
| + __ cmpl(exponent.reg(), Immediate(0));
|
| + __ j(greater_equal, &no_neg);
|
| + __ negl(exponent.reg());
|
| + __ bind(&no_neg);
|
|
|
| + // Load xmm1 with 1.
|
| + __ movsd(xmm1, xmm3);
|
| + Label while_true;
|
| + Label no_multiply;
|
|
|
| -void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
|
| - ASSERT(args->length() == 1);
|
| - JumpTarget leave, null, function, non_function_constructor;
|
| - Load(args->at(0)); // Load the object.
|
| - Result obj = frame_->Pop();
|
| - obj.ToRegister();
|
| - frame_->Spill(obj.reg());
|
| + __ bind(&while_true);
|
| + __ shrl(exponent.reg(), Immediate(1));
|
| + __ j(not_carry, &no_multiply);
|
| + __ mulsd(xmm1, xmm0);
|
| + __ bind(&no_multiply);
|
| + __ testl(exponent.reg(), exponent.reg());
|
| + __ mulsd(xmm0, xmm0);
|
| + __ j(not_zero, &while_true);
|
|
|
| - // If the object is a smi, we return null.
|
| - Condition is_smi = masm_->CheckSmi(obj.reg());
|
| - null.Branch(is_smi);
|
| + // base now holds the original exponent - if it is negative, return 1/result.
|
| + __ testl(base.reg(), base.reg());
|
| + __ j(positive, &allocate_return);
|
| + // Special case if xmm1 has reached infinity.
|
| + __ movl(answer.reg(), Immediate(0x7FB00000));
|
| + __ movd(xmm0, answer.reg());
|
| + __ cvtss2sd(xmm0, xmm0);
|
| + __ ucomisd(xmm0, xmm1);
|
| + call_runtime.Branch(equal);
|
| + __ divsd(xmm3, xmm1);
|
| + __ movsd(xmm1, xmm3);
|
| + __ jmp(&allocate_return);
|
|
|
| - // Check that the object is a JS object but take special care of JS
|
| - // functions to make sure they have 'Function' as their class.
|
| + // The exponent (or both operands) is a heap number - either way we now
|
| + // work on doubles.
|
| + __ bind(&exponent_nonsmi);
|
| + __ CompareRoot(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
|
| + Heap::kHeapNumberMapRootIndex);
|
| + call_runtime.Branch(not_equal);
|
| + __ movsd(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
|
| + // Test if the exponent is NaN.
|
| + __ ucomisd(xmm1, xmm1);
|
| + call_runtime.Branch(parity_even);
|
|
|
| - __ CmpObjectType(obj.reg(), FIRST_JS_OBJECT_TYPE, obj.reg());
|
| - null.Branch(below);
|
| + Label base_not_smi;
|
| + Label handle_special_cases;
|
| + __ JumpIfNotSmi(base.reg(), &base_not_smi);
|
| + __ SmiToInteger32(base.reg(), base.reg());
|
| + __ cvtlsi2sd(xmm0, base.reg());
|
| + __ jmp(&handle_special_cases);
|
| + __ bind(&base_not_smi);
|
| + __ CompareRoot(FieldOperand(base.reg(), HeapObject::kMapOffset),
|
| + Heap::kHeapNumberMapRootIndex);
|
| + call_runtime.Branch(not_equal);
|
| + __ movl(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
|
| + __ andl(answer.reg(), Immediate(HeapNumber::kExponentMask));
|
| + __ cmpl(answer.reg(), Immediate(HeapNumber::kExponentMask));
|
| + // base is NaN or +/-Infinity
|
| + call_runtime.Branch(greater_equal);
|
| + __ movsd(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
|
|
|
| - // As long as JS_FUNCTION_TYPE is the last instance type and it is
|
| - // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
|
| - // LAST_JS_OBJECT_TYPE.
|
| - ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
|
| - ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
|
| - __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
|
| - function.Branch(equal);
|
| + // base is in xmm0 and exponent is in xmm1.
|
| + __ bind(&handle_special_cases);
|
| + Label not_minus_half;
|
| + // Test for -0.5.
|
| + // Load xmm2 with -0.5.
|
| + __ movl(answer.reg(), Immediate(0xBF000000));
|
| + __ movd(xmm2, answer.reg());
|
| + __ cvtss2sd(xmm2, xmm2);
|
| + // xmm2 now has -0.5.
|
| + __ ucomisd(xmm2, xmm1);
|
| + __ j(not_equal, ¬_minus_half);
|
|
|
| - // Check if the constructor in the map is a function.
|
| - __ movq(obj.reg(), FieldOperand(obj.reg(), Map::kConstructorOffset));
|
| - __ CmpObjectType(obj.reg(), JS_FUNCTION_TYPE, kScratchRegister);
|
| - non_function_constructor.Branch(not_equal);
|
| + // Calculates reciprocal of square root.
|
| + // Note that 1/sqrt(x) = sqrt(1/x).
|
| + __ divsd(xmm3, xmm0);
|
| + __ movsd(xmm1, xmm3);
|
| + __ sqrtsd(xmm1, xmm1);
|
| + __ jmp(&allocate_return);
|
|
|
| - // The obj register now contains the constructor function. Grab the
|
| - // instance class name from there.
|
| - __ movq(obj.reg(),
|
| - FieldOperand(obj.reg(), JSFunction::kSharedFunctionInfoOffset));
|
| - __ movq(obj.reg(),
|
| - FieldOperand(obj.reg(),
|
| - SharedFunctionInfo::kInstanceClassNameOffset));
|
| - frame_->Push(&obj);
|
| - leave.Jump();
|
| + // Test for 0.5.
|
| + __ bind(¬_minus_half);
|
| + // Load xmm2 with 0.5.
|
| + // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
|
| + __ addsd(xmm2, xmm3);
|
| + // xmm2 now has 0.5.
|
| + __ ucomisd(xmm2, xmm1);
|
| + call_runtime.Branch(not_equal);
|
|
|
| - // Functions have class 'Function'.
|
| - function.Bind();
|
| - frame_->Push(Factory::function_class_symbol());
|
| - leave.Jump();
|
| + // Calculates square root.
|
| + __ movsd(xmm1, xmm0);
|
| + __ sqrtsd(xmm1, xmm1);
|
|
|
| - // Objects with a non-function constructor have class 'Object'.
|
| - non_function_constructor.Bind();
|
| - frame_->Push(Factory::Object_symbol());
|
| - leave.Jump();
|
| + JumpTarget done;
|
| + Label failure, success;
|
| + __ bind(&allocate_return);
|
| + // Make a copy of the frame to enable us to handle allocation
|
| + // failure after the JumpTarget jump.
|
| + VirtualFrame* clone = new VirtualFrame(frame());
|
| + __ AllocateHeapNumber(answer.reg(), exponent.reg(), &failure);
|
| + __ movsd(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
|
| + // Remove the two original values from the frame - we only need those
|
| + // in the case where we branch to runtime.
|
| + frame()->Drop(2);
|
| + exponent.Unuse();
|
| + base.Unuse();
|
| + done.Jump(&answer);
|
| + // Use the copy of the original frame as our current frame.
|
| + RegisterFile empty_regs;
|
| + SetFrame(clone, &empty_regs);
|
| + // If we experience an allocation failure we branch to runtime.
|
| + __ bind(&failure);
|
| + call_runtime.Bind();
|
| + answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
|
|
|
| - // Non-JS objects have class null.
|
| - null.Bind();
|
| - frame_->Push(Factory::null_value());
|
| -
|
| - // All done.
|
| - leave.Bind();
|
| + done.Bind(&answer);
|
| + frame()->Push(&answer);
|
| }
|
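The powi loop is classic binary exponentiation: square the base every round and
multiply it into the accumulator whenever the low exponent bit (tested via the
carry out of shrl) is set. Equivalent C++:

    double PowInt(double base, unsigned exp) {
      double result = 1.0;              // xmm1 starts at 1
      while (exp != 0) {
        if (exp & 1u) result *= base;   // carry set -> mulsd(xmm1, xmm0)
        base *= base;                   // mulsd(xmm0, xmm0)
        exp >>= 1;                      // shrl by 1
      }
      return result;
    }

One observation on the negative-exponent path: if I read the encoding right,
0x7FB00000 decodes as a single-precision NaN (exponent all ones, nonzero mantissa)
rather than +Infinity (0x7F800000), so the ucomisd comes out unordered (ZF set) for
every input and the equal branch to the runtime is always taken there; worth
double-checking the intended constant.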
|
|
|
|
| -void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
|
| - ASSERT(args->length() == 2);
|
| - JumpTarget leave;
|
| - Load(args->at(0)); // Load the object.
|
| - Load(args->at(1)); // Load the value.
|
| - Result value = frame_->Pop();
|
| - Result object = frame_->Pop();
|
| - value.ToRegister();
|
| - object.ToRegister();
|
| +void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
|
| + ASSERT_EQ(args->length(), 1);
|
| + Load(args->at(0));
|
| + TranscendentalCacheStub stub(TranscendentalCache::SIN);
|
| + Result result = frame_->CallStub(&stub, 1);
|
| + frame_->Push(&result);
|
| +}
|
|
|
| - // if (object->IsSmi()) return value.
|
| - Condition is_smi = masm_->CheckSmi(object.reg());
|
| - leave.Branch(is_smi, &value);
|
|
|
| - // It is a heap object - get its map.
|
| - Result scratch = allocator_->Allocate();
|
| - ASSERT(scratch.is_valid());
|
| - // if (!object->IsJSValue()) return value.
|
| - __ CmpObjectType(object.reg(), JS_VALUE_TYPE, scratch.reg());
|
| - leave.Branch(not_equal, &value);
|
| -
|
| - // Store the value.
|
| - __ movq(FieldOperand(object.reg(), JSValue::kValueOffset), value.reg());
|
| - // Update the write barrier. Save the value as it will be
|
| - // overwritten by the write barrier code and is needed afterward.
|
| - Result duplicate_value = allocator_->Allocate();
|
| - ASSERT(duplicate_value.is_valid());
|
| - __ movq(duplicate_value.reg(), value.reg());
|
| - // The object register is also overwritten by the write barrier and
|
| - // possibly aliased in the frame.
|
| - frame_->Spill(object.reg());
|
| - __ RecordWrite(object.reg(), JSValue::kValueOffset, duplicate_value.reg(),
|
| - scratch.reg());
|
| - object.Unuse();
|
| - scratch.Unuse();
|
| - duplicate_value.Unuse();
|
| -
|
| - // Leave.
|
| - leave.Bind(&value);
|
| - frame_->Push(&value);
|
| +void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
|
| + ASSERT_EQ(args->length(), 1);
|
| + Load(args->at(0));
|
| + TranscendentalCacheStub stub(TranscendentalCache::COS);
|
| + Result result = frame_->CallStub(&stub, 1);
|
| + frame_->Push(&result);
|
| }
|
|
|
|
|
| -void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
|
| +// Generates the Math.sqrt method. Please note - this function assumes that
|
| +// the callsite has executed ToNumber on the argument.
|
| +void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
|
| ASSERT(args->length() == 1);
|
| - JumpTarget leave;
|
| - Load(args->at(0)); // Load the object.
|
| - frame_->Dup();
|
| - Result object = frame_->Pop();
|
| - object.ToRegister();
|
| - ASSERT(object.is_valid());
|
| - // if (object->IsSmi()) return object.
|
| - Condition is_smi = masm_->CheckSmi(object.reg());
|
| - leave.Branch(is_smi);
|
| - // It is a heap object - get map.
|
| - Result temp = allocator()->Allocate();
|
| - ASSERT(temp.is_valid());
|
| - // if (!object->IsJSValue()) return object.
|
| - __ CmpObjectType(object.reg(), JS_VALUE_TYPE, temp.reg());
|
| - leave.Branch(not_equal);
|
| - __ movq(temp.reg(), FieldOperand(object.reg(), JSValue::kValueOffset));
|
| - object.Unuse();
|
| - frame_->SetElementAt(0, &temp);
|
| - leave.Bind();
|
| -}
|
| + Load(args->at(0));
|
|
|
| + // Leave original value on the frame if we need to call runtime.
|
| + frame()->Dup();
|
| + Result result = frame()->Pop();
|
| + result.ToRegister();
|
| + frame()->Spill(result.reg());
|
| + Label runtime;
|
| + Label non_smi;
|
| + Label load_done;
|
| + JumpTarget end;
|
|
|
| -// -----------------------------------------------------------------------------
|
| -// CodeGenerator implementation of Expressions
|
| + __ JumpIfNotSmi(result.reg(), &non_smi);
|
| + __ SmiToInteger32(result.reg(), result.reg());
|
| + __ cvtlsi2sd(xmm0, result.reg());
|
| + __ jmp(&load_done);
|
| + __ bind(&non_smi);
|
| + __ CompareRoot(FieldOperand(result.reg(), HeapObject::kMapOffset),
|
| + Heap::kHeapNumberMapRootIndex);
|
| + __ j(not_equal, &runtime);
|
| + __ movsd(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
|
|
|
| -void CodeGenerator::LoadAndSpill(Expression* expression) {
|
| - // TODO(x64): No architecture specific code. Move to shared location.
|
| - ASSERT(in_spilled_code());
|
| - set_in_spilled_code(false);
|
| - Load(expression);
|
| - frame_->SpillAll();
|
| - set_in_spilled_code(true);
|
| -}
|
| + __ bind(&load_done);
|
| + __ sqrtsd(xmm0, xmm0);
|
| + // Make a copy of the virtual frame so that we can still go to the
|
| + // runtime after the JumpTarget jump below.
|
| + Result scratch = allocator()->Allocate();
|
| + VirtualFrame* clone = new VirtualFrame(frame());
|
| + __ AllocateHeapNumber(result.reg(), scratch.reg(), &runtime);
|
|
|
| + __ movsd(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
|
| + frame()->Drop(1);
|
| + scratch.Unuse();
|
| + end.Jump(&result);
|
| + // We only branch to runtime if we have an allocation error.
|
| + // Use the copy of the original frame as our current frame.
|
| + RegisterFile empty_regs;
|
| + SetFrame(clone, &empty_regs);
|
| + __ bind(&runtime);
|
| + result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
|
|
|
| -void CodeGenerator::Load(Expression* expr) {
|
| -#ifdef DEBUG
|
| - int original_height = frame_->height();
|
| -#endif
|
| - ASSERT(!in_spilled_code());
|
| - JumpTarget true_target;
|
| - JumpTarget false_target;
|
| - ControlDestination dest(&true_target, &false_target, true);
|
| - LoadCondition(expr, &dest, false);
|
| + end.Bind(&result);
|
| + frame()->Push(&result);
|
| +}
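|
| For readers less used to the macro assembler, the fast path above amounts
| to the following unboxing logic, restated as ordinary C++ (a sketch that
| assumes a one-bit smi tag with the payload in the upper bits; the real
| x64 tagging scheme differs in detail):
|
|     #include <cmath>
|     #include <cstdint>
|
|     struct HeapNumber { double value; };
|
|     // Mirrors the code between JumpIfNotSmi and sqrtsd. The map check
|     // against the heap-number map is assumed to have passed in the
|     // heap-object branch; failing it goes to the runtime instead.
|     double FastSqrt(std::uint64_t tagged) {
|       double input;
|       if ((tagged & 1) == 0) {
|         // Smi: untag and widen, like SmiToInteger32 + cvtlsi2sd.
|         input = static_cast<double>(static_cast<std::int64_t>(tagged) >> 1);
|       } else {
|         // Heap number: load the boxed double, like the movsd above.
|         const HeapNumber* number =
|             reinterpret_cast<const HeapNumber*>(tagged - 1);
|         input = number->value;
|       }
|       return std::sqrt(input);  // The sqrtsd step.
|     }
|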
|
|
|
| - if (dest.false_was_fall_through()) {
|
| - // The false target was just bound.
|
| - JumpTarget loaded;
|
| - frame_->Push(Factory::false_value());
|
| - // There may be dangling jumps to the true target.
|
| - if (true_target.is_linked()) {
|
| - loaded.Jump();
|
| - true_target.Bind();
|
| - frame_->Push(Factory::true_value());
|
| - loaded.Bind();
|
| - }
|
|
|
| - } else if (dest.is_used()) {
|
| - // There is true, and possibly false, control flow (with true as
|
| - // the fall through).
|
| - JumpTarget loaded;
|
| - frame_->Push(Factory::true_value());
|
| - if (false_target.is_linked()) {
|
| - loaded.Jump();
|
| - false_target.Bind();
|
| - frame_->Push(Factory::false_value());
|
| - loaded.Bind();
|
| - }
|
| -
|
| - } else {
|
| - // We have a valid value on top of the frame, but we still may
|
| - // have dangling jumps to the true and false targets from nested
|
| - // subexpressions (eg, the left subexpressions of the
|
| - // short-circuited boolean operators).
|
| - ASSERT(has_valid_frame());
|
| - if (true_target.is_linked() || false_target.is_linked()) {
|
| - JumpTarget loaded;
|
| - loaded.Jump(); // Don't lose the current TOS.
|
| - if (true_target.is_linked()) {
|
| - true_target.Bind();
|
| - frame_->Push(Factory::true_value());
|
| - if (false_target.is_linked()) {
|
| - loaded.Jump();
|
| - }
|
| - }
|
| - if (false_target.is_linked()) {
|
| - false_target.Bind();
|
| - frame_->Push(Factory::false_value());
|
| - }
|
| - loaded.Bind();
|
| - }
|
| +void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
|
| + if (CheckForInlineRuntimeCall(node)) {
|
| + return;
|
| }
|
|
|
| - ASSERT(has_valid_frame());
|
| - ASSERT(frame_->height() == original_height + 1);
|
| -}
|
| + ZoneList<Expression*>* args = node->arguments();
|
| + Comment cmnt(masm_, "[ CallRuntime");
|
| + Runtime::Function* function = node->function();
|
|
|
| -
|
| -// Emit code to load the value of an expression to the top of the
|
| -// frame. If the expression is boolean-valued it may be compiled (or
|
| -// partially compiled) into control flow to the control destination.
|
| -// If force_control is true, control flow is forced.
|
| -void CodeGenerator::LoadCondition(Expression* x,
|
| - ControlDestination* dest,
|
| - bool force_control) {
|
| - ASSERT(!in_spilled_code());
|
| - int original_height = frame_->height();
|
| -
|
| - { CodeGenState new_state(this, dest);
|
| - Visit(x);
|
| -
|
| - // If we hit a stack overflow, we may not have actually visited
|
| - // the expression. In that case, we ensure that we have a
|
| - // valid-looking frame state because we will continue to generate
|
| - // code as we unwind the C++ stack.
|
| - //
|
| - // It's possible to have both a stack overflow and a valid frame
|
| - // state (eg, a subexpression overflowed, visiting it returned
|
| - // with a dummied frame state, and visiting this expression
|
| - // returned with a normal-looking state).
|
| - if (HasStackOverflow() &&
|
| - !dest->is_used() &&
|
| - frame_->height() == original_height) {
|
| - dest->Goto(true);
|
| - }
|
| + if (function == NULL) {
|
| + // Push the builtins object found in the current global object.
|
| + Result temp = allocator()->Allocate();
|
| + ASSERT(temp.is_valid());
|
| + __ movq(temp.reg(), GlobalObject());
|
| + __ movq(temp.reg(),
|
| + FieldOperand(temp.reg(), GlobalObject::kBuiltinsOffset));
|
| + frame_->Push(&temp);
|
| }
|
|
|
| - if (force_control && !dest->is_used()) {
|
| - // Convert the TOS value into flow to the control destination.
|
| - // TODO(X64): Make control flow to control destinations work.
|
| - ToBoolean(dest);
|
| + // Push the arguments ("left-to-right").
|
| + int arg_count = args->length();
|
| + for (int i = 0; i < arg_count; i++) {
|
| + Load(args->at(i));
|
| }
|
|
|
| - ASSERT(!(force_control && !dest->is_used()));
|
| - ASSERT(dest->is_used() || frame_->height() == original_height + 1);
|
| -}
|
| -
|
| -
|
| -// ECMA-262, section 9.2, page 30: ToBoolean(). Pop the top of stack and
|
| -// convert it to a boolean in the condition code register or jump to
|
| -// 'false_target'/'true_target' as appropriate.
|
| -void CodeGenerator::ToBoolean(ControlDestination* dest) {
|
| - Comment cmnt(masm_, "[ ToBoolean");
|
| -
|
| - // The value to convert should be popped from the frame.
|
| - Result value = frame_->Pop();
|
| - value.ToRegister();
|
| -
|
| - if (value.is_number()) {
|
| - // Fast case if TypeInfo indicates only numbers.
|
| - if (FLAG_debug_code) {
|
| - __ AbortIfNotNumber(value.reg());
|
| - }
|
| - // Smi => false iff zero.
|
| - __ SmiCompare(value.reg(), Smi::FromInt(0));
|
| - if (value.is_smi()) {
|
| - value.Unuse();
|
| - dest->Split(not_zero);
|
| - } else {
|
| - dest->false_target()->Branch(equal);
|
| - Condition is_smi = masm_->CheckSmi(value.reg());
|
| - dest->true_target()->Branch(is_smi);
|
| - __ xorpd(xmm0, xmm0);
|
| - __ ucomisd(xmm0, FieldOperand(value.reg(), HeapNumber::kValueOffset));
|
| - value.Unuse();
|
| - dest->Split(not_zero);
|
| - }
|
| + if (function == NULL) {
|
| + // Call the JS runtime function.
|
| + frame_->Push(node->name());
|
| + Result answer = frame_->CallCallIC(RelocInfo::CODE_TARGET,
|
| + arg_count,
|
| + loop_nesting_);
|
| + frame_->RestoreContextRegister();
|
| + frame_->Push(&answer);
|
| } else {
|
| - // Fast case checks.
|
| - // 'false' => false.
|
| - __ CompareRoot(value.reg(), Heap::kFalseValueRootIndex);
|
| - dest->false_target()->Branch(equal);
|
| -
|
| - // 'true' => true.
|
| - __ CompareRoot(value.reg(), Heap::kTrueValueRootIndex);
|
| - dest->true_target()->Branch(equal);
|
| -
|
| - // 'undefined' => false.
|
| - __ CompareRoot(value.reg(), Heap::kUndefinedValueRootIndex);
|
| - dest->false_target()->Branch(equal);
|
| -
|
| - // Smi => false iff zero.
|
| - __ SmiCompare(value.reg(), Smi::FromInt(0));
|
| - dest->false_target()->Branch(equal);
|
| - Condition is_smi = masm_->CheckSmi(value.reg());
|
| - dest->true_target()->Branch(is_smi);
|
| -
|
| - // Call the stub for all other cases.
|
| - frame_->Push(&value); // Undo the Pop() from above.
|
| - ToBooleanStub stub;
|
| - Result temp = frame_->CallStub(&stub, 1);
|
| - // Convert the result to a condition code.
|
| - __ testq(temp.reg(), temp.reg());
|
| - temp.Unuse();
|
| - dest->Split(not_equal);
|
| + // Call the C runtime function.
|
| + Result answer = frame_->CallRuntime(function, arg_count);
|
| + frame_->Push(&answer);
|
| }
|
| }
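|
| The visitor above chooses between three call strategies. Restated as
| ordinary control flow (a sketch; the enum and parameter names are
| placeholders, not V8 API):
|
|     enum class RuntimeCallKind { kInlined, kJavaScriptBuiltin, kCRuntime };
|
|     RuntimeCallKind Classify(bool has_inline_version, bool has_c_function) {
|       // Hand-written inline code wins (CheckForInlineRuntimeCall).
|       if (has_inline_version) return RuntimeCallKind::kInlined;
|       // No C entry point (function == NULL): call the JS builtin by name
|       // through the builtins object pushed above.
|       if (!has_c_function) return RuntimeCallKind::kJavaScriptBuiltin;
|       // Otherwise call straight into the C++ runtime function.
|       return RuntimeCallKind::kCRuntime;
|     }
|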
|
|
|
|
|
| -void CodeGenerator::LoadUnsafeSmi(Register target, Handle<Object> value) {
|
| - UNIMPLEMENTED();
|
| - // TODO(X64): Implement security policy for loads of smis.
|
| -}
|
| +void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
|
| + Comment cmnt(masm_, "[ UnaryOperation");
|
|
|
| + Token::Value op = node->op();
|
|
|
| -bool CodeGenerator::IsUnsafeSmi(Handle<Object> value) {
|
| - return false;
|
| -}
|
| + if (op == Token::NOT) {
|
| + // Swap the true and false targets but keep the same actual label
|
| + // as the fall through.
|
| + destination()->Invert();
|
| + LoadCondition(node->expression(), destination(), true);
|
| + // Swap the labels back.
|
| + destination()->Invert();
|
|
|
| -//------------------------------------------------------------------------------
|
| -// CodeGenerator implementation of variables, lookups, and stores.
|
| + } else if (op == Token::DELETE) {
|
| + Property* property = node->expression()->AsProperty();
|
| + if (property != NULL) {
|
| + Load(property->obj());
|
| + Load(property->key());
|
| + Result answer = frame_->InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, 2);
|
| + frame_->Push(&answer);
|
| + return;
|
| + }
|
|
|
| -Reference::Reference(CodeGenerator* cgen,
|
| - Expression* expression,
|
| - bool persist_after_get)
|
| - : cgen_(cgen),
|
| - expression_(expression),
|
| - type_(ILLEGAL),
|
| - persist_after_get_(persist_after_get) {
|
| - cgen->LoadReference(this);
|
| -}
|
| + Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
|
| + if (variable != NULL) {
|
| + Slot* slot = variable->slot();
|
| + if (variable->is_global()) {
|
| + LoadGlobal();
|
| + frame_->Push(variable->name());
|
| + Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
|
| + CALL_FUNCTION, 2);
|
| + frame_->Push(&answer);
|
| + return;
|
|
|
| + } else if (slot != NULL && slot->type() == Slot::LOOKUP) {
|
| + // Call the runtime to look up the context holding the named
|
| + // variable. Sync the virtual frame eagerly so we can push the
|
| + // arguments directly into place.
|
| + frame_->SyncRange(0, frame_->element_count() - 1);
|
| + frame_->EmitPush(rsi);
|
| + frame_->EmitPush(variable->name());
|
| + Result context = frame_->CallRuntime(Runtime::kLookupContext, 2);
|
| + ASSERT(context.is_register());
|
| + frame_->EmitPush(context.reg());
|
| + context.Unuse();
|
| + frame_->EmitPush(variable->name());
|
| + Result answer = frame_->InvokeBuiltin(Builtins::DELETE,
|
| + CALL_FUNCTION, 2);
|
| + frame_->Push(&answer);
|
| + return;
|
| + }
|
|
|
| -Reference::~Reference() {
|
| - ASSERT(is_unloaded() || is_illegal());
|
| -}
|
| + // Default: the result of deleting a non-global, non-dynamically
|
| + // introduced variable is false.
|
| + frame_->Push(Factory::false_value());
|
|
|
| + } else {
|
| + // Default: the result of deleting any other expression is true.
|
| + Load(node->expression()); // May have side effects.
|
| + frame_->SetElementAt(0, Factory::true_value());
|
| + }
|
|
|
| -void CodeGenerator::LoadReference(Reference* ref) {
|
| - // References are loaded from both spilled and unspilled code. Set the
|
| - // state to unspilled to allow that (and explicitly spill after
|
| - // construction at the construction sites).
|
| - bool was_in_spilled_code = in_spilled_code_;
|
| - in_spilled_code_ = false;
|
| + } else if (op == Token::TYPEOF) {
|
| + // Special case for loading the typeof expression; see comment on
|
| + // LoadTypeofExpression().
|
| + LoadTypeofExpression(node->expression());
|
| + Result answer = frame_->CallRuntime(Runtime::kTypeof, 1);
|
| + frame_->Push(&answer);
|
|
|
| - Comment cmnt(masm_, "[ LoadReference");
|
| - Expression* e = ref->expression();
|
| - Property* property = e->AsProperty();
|
| - Variable* var = e->AsVariableProxy()->AsVariable();
|
| -
|
| - if (property != NULL) {
|
| - // The expression is either a property or a variable proxy that rewrites
|
| - // to a property.
|
| - Load(property->obj());
|
| - if (property->key()->IsPropertyName()) {
|
| - ref->set_type(Reference::NAMED);
|
| + } else if (op == Token::VOID) {
|
| + Expression* expression = node->expression();
|
| + if (expression && expression->AsLiteral() && (
|
| + expression->AsLiteral()->IsTrue() ||
|
| + expression->AsLiteral()->IsFalse() ||
|
| + expression->AsLiteral()->handle()->IsNumber() ||
|
| + expression->AsLiteral()->handle()->IsString() ||
|
| + expression->AsLiteral()->handle()->IsJSRegExp() ||
|
| + expression->AsLiteral()->IsNull())) {
|
| + // Omit evaluating the value of the primitive literal.
|
| + // It will be discarded anyway, and can have no side effect.
|
| + frame_->Push(Factory::undefined_value());
|
| } else {
|
| - Load(property->key());
|
| - ref->set_type(Reference::KEYED);
|
| + Load(node->expression());
|
| + frame_->SetElementAt(0, Factory::undefined_value());
|
| }
|
| - } else if (var != NULL) {
|
| - // The expression is a variable proxy that does not rewrite to a
|
| - // property. Global variables are treated as named property references.
|
| - if (var->is_global()) {
|
| - // If rax is free, the register allocator prefers it. Thus the code
|
| - // generator will load the global object into rax, which is where
|
| - // LoadIC wants it. Most uses of Reference call LoadIC directly
|
| - // after the reference is created.
|
| - frame_->Spill(rax);
|
| - LoadGlobal();
|
| - ref->set_type(Reference::NAMED);
|
| - } else {
|
| - ASSERT(var->slot() != NULL);
|
| - ref->set_type(Reference::SLOT);
|
| - }
|
| +
|
| } else {
|
| - // Anything else is a runtime error.
|
| - Load(e);
|
| - frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
|
| - }
|
| + bool can_overwrite =
|
| + (node->expression()->AsBinaryOperation() != NULL &&
|
| + node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
|
| + UnaryOverwriteMode overwrite =
|
| + can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE;
|
| + bool no_negative_zero = node->expression()->no_negative_zero();
|
| + Load(node->expression());
|
| + switch (op) {
|
| + case Token::NOT:
|
| + case Token::DELETE:
|
| + case Token::TYPEOF:
|
| + UNREACHABLE(); // handled above
|
| + break;
|
|
|
| - in_spilled_code_ = was_in_spilled_code;
|
| -}
|
| + case Token::SUB: {
|
| + GenericUnaryOpStub stub(
|
| + Token::SUB,
|
| + overwrite,
|
| + no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero);
|
| + Result operand = frame_->Pop();
|
| + Result answer = frame_->CallStub(&stub, &operand);
|
| + answer.set_type_info(TypeInfo::Number());
|
| + frame_->Push(&answer);
|
| + break;
|
| + }
|
|
|
| + case Token::BIT_NOT: {
|
| + // Smi check.
|
| + JumpTarget smi_label;
|
| + JumpTarget continue_label;
|
| + Result operand = frame_->Pop();
|
| + operand.ToRegister();
|
|
|
| -void CodeGenerator::UnloadReference(Reference* ref) {
|
| - // Pop a reference from the stack while preserving TOS.
|
| - Comment cmnt(masm_, "[ UnloadReference");
|
| - frame_->Nip(ref->size());
|
| - ref->set_unloaded();
|
| -}
|
| + Condition is_smi = masm_->CheckSmi(operand.reg());
|
| + smi_label.Branch(is_smi, &operand);
|
|
|
| + GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
|
| + Result answer = frame_->CallStub(&stub, &operand);
|
| + continue_label.Jump(&answer);
|
|
|
| -Operand CodeGenerator::SlotOperand(Slot* slot, Register tmp) {
|
| - // Currently, this assertion will fail if we try to assign to
|
| - // a constant variable that is constant because it is read-only
|
| - // (such as the variable referring to a named function expression).
|
| - // We need to implement assignments to read-only variables.
|
| - // Ideally, we should do this during AST generation (by converting
|
| - // such assignments into expression statements); however, in general
|
| - // we may not be able to make the decision until past AST generation,
|
| - // that is when the entire program is known.
|
| - ASSERT(slot != NULL);
|
| - int index = slot->index();
|
| - switch (slot->type()) {
|
| - case Slot::PARAMETER:
|
| - return frame_->ParameterAt(index);
|
| + smi_label.Bind(&answer);
|
| + answer.ToRegister();
|
| + frame_->Spill(answer.reg());
|
| + __ SmiNot(answer.reg(), answer.reg());
|
| + continue_label.Bind(&answer);
|
| + answer.set_type_info(TypeInfo::Smi());
|
| + frame_->Push(&answer);
|
| + break;
|
| + }
|
|
|
| - case Slot::LOCAL:
|
| - return frame_->LocalAt(index);
|
| + case Token::ADD: {
|
| + // Smi check.
|
| + JumpTarget continue_label;
|
| + Result operand = frame_->Pop();
|
| + TypeInfo operand_info = operand.type_info();
|
| + operand.ToRegister();
|
| + Condition is_smi = masm_->CheckSmi(operand.reg());
|
| + continue_label.Branch(is_smi, &operand);
|
| + frame_->Push(&operand);
|
| + Result answer = frame_->InvokeBuiltin(Builtins::TO_NUMBER,
|
| + CALL_FUNCTION, 1);
|
|
|
| - case Slot::CONTEXT: {
|
| - // Follow the context chain if necessary.
|
| - ASSERT(!tmp.is(rsi)); // do not overwrite context register
|
| - Register context = rsi;
|
| - int chain_length = scope()->ContextChainLength(slot->var()->scope());
|
| - for (int i = 0; i < chain_length; i++) {
|
| - // Load the closure.
|
| - // (All contexts, even 'with' contexts, have a closure,
|
| - // and it is the same for all contexts inside a function.
|
| - // There is no need to go to the function context first.)
|
| - __ movq(tmp, ContextOperand(context, Context::CLOSURE_INDEX));
|
| - // Load the function context (which is the incoming, outer context).
|
| - __ movq(tmp, FieldOperand(tmp, JSFunction::kContextOffset));
|
| - context = tmp;
|
| + continue_label.Bind(&answer);
|
| + if (operand_info.IsSmi()) {
|
| + answer.set_type_info(TypeInfo::Smi());
|
| + } else if (operand_info.IsInteger32()) {
|
| + answer.set_type_info(TypeInfo::Integer32());
|
| + } else {
|
| + answer.set_type_info(TypeInfo::Number());
|
| + }
|
| + frame_->Push(&answer);
|
| + break;
|
| }
|
| - // We may have a 'with' context now. Get the function context.
|
| - // (In fact this mov may never be the needed, since the scope analysis
|
| - // may not permit a direct context access in this case and thus we are
|
| - // always at a function context. However it is safe to dereference be-
|
| - // cause the function context of a function context is itself. Before
|
| - // deleting this mov we should try to create a counter-example first,
|
| - // though...)
|
| - __ movq(tmp, ContextOperand(context, Context::FCONTEXT_INDEX));
|
| - return ContextOperand(tmp, index);
|
| + default:
|
| + UNREACHABLE();
|
| }
|
| -
|
| - default:
|
| - UNREACHABLE();
|
| - return Operand(rsp, 0);
|
| }
|
| }
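|
| The Token::BIT_NOT fast path relies on SmiNot flipping the payload bits
| while leaving a valid smi behind. In plain C++, assuming a one-bit tag
| with the payload in the upper bits (a sketch, not the actual x64 smi
| layout):
|
|     #include <cstdint>
|
|     std::int64_t SmiNot(std::int64_t tagged_smi) {
|       std::int64_t value = tagged_smi >> 1;  // Untag.
|       std::int64_t result = ~value;          // JavaScript bitwise NOT.
|       // Retag; going through uint64_t keeps the left shift well defined.
|       return static_cast<std::int64_t>(
|           static_cast<std::uint64_t>(result) << 1);
|     }
|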
|
|
|
|
|
| -Operand CodeGenerator::ContextSlotOperandCheckExtensions(Slot* slot,
|
| - Result tmp,
|
| - JumpTarget* slow) {
|
| - ASSERT(slot->type() == Slot::CONTEXT);
|
| - ASSERT(tmp.is_register());
|
| - Register context = rsi;
|
| -
|
| - for (Scope* s = scope(); s != slot->var()->scope(); s = s->outer_scope()) {
|
| - if (s->num_heap_slots() > 0) {
|
| - if (s->calls_eval()) {
|
| - // Check that extension is NULL.
|
| - __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
|
| - Immediate(0));
|
| - slow->Branch(not_equal, not_taken);
|
| - }
|
| - __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
|
| - __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
|
| - context = tmp.reg();
|
| - }
|
| +// The value in dst was optimistically incremented or decremented.
|
| +// The result overflowed or was not smi tagged. Call into the runtime
|
| +// to convert the argument to a number, and call the specialized add
|
| +// or subtract stub. The result is left in dst.
|
| +class DeferredPrefixCountOperation: public DeferredCode {
|
| + public:
|
| + DeferredPrefixCountOperation(Register dst,
|
| + bool is_increment,
|
| + TypeInfo input_type)
|
| + : dst_(dst), is_increment_(is_increment), input_type_(input_type) {
|
| + set_comment("[ DeferredCountOperation");
|
| }
|
| - // Check that last extension is NULL.
|
| - __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX), Immediate(0));
|
| - slow->Branch(not_equal, not_taken);
|
| - __ movq(tmp.reg(), ContextOperand(context, Context::FCONTEXT_INDEX));
|
| - return ContextOperand(tmp.reg(), slot->index());
|
| -}
|
|
|
| + virtual void Generate();
|
|
|
| -void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
|
| - if (slot->type() == Slot::LOOKUP) {
|
| - ASSERT(slot->var()->is_dynamic());
|
| + private:
|
| + Register dst_;
|
| + bool is_increment_;
|
| + TypeInfo input_type_;
|
| +};
|
|
|
| - JumpTarget slow;
|
| - JumpTarget done;
|
| - Result value;
|
|
|
| - // Generate fast case for loading from slots that correspond to
|
| - // local/global variables or arguments unless they are shadowed by
|
| - // eval-introduced bindings.
|
| - EmitDynamicLoadFromSlotFastCase(slot,
|
| - typeof_state,
|
| - &value,
|
| - &slow,
|
| - &done);
|
| +void DeferredPrefixCountOperation::Generate() {
|
| + Register left;
|
| + if (input_type_.IsNumber()) {
|
| + left = dst_;
|
| + } else {
|
| + __ push(dst_);
|
| + __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
|
| + left = rax;
|
| + }
|
|
|
| - slow.Bind();
|
| - // A runtime call is inevitable. We eagerly sync frame elements
|
| - // to memory so that we can push the arguments directly into place
|
| - // on top of the frame.
|
| - frame_->SyncRange(0, frame_->element_count() - 1);
|
| - frame_->EmitPush(rsi);
|
| - __ movq(kScratchRegister, slot->var()->name(), RelocInfo::EMBEDDED_OBJECT);
|
| - frame_->EmitPush(kScratchRegister);
|
| - if (typeof_state == INSIDE_TYPEOF) {
|
| - value =
|
| - frame_->CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2);
|
| - } else {
|
| - value = frame_->CallRuntime(Runtime::kLoadContextSlot, 2);
|
| - }
|
| + GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
|
| + NO_OVERWRITE,
|
| + NO_GENERIC_BINARY_FLAGS,
|
| + TypeInfo::Number());
|
| + stub.GenerateCall(masm_, left, Smi::FromInt(1));
|
|
|
| - done.Bind(&value);
|
| - frame_->Push(&value);
|
| + if (!dst_.is(rax)) __ movq(dst_, rax);
|
| +}
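|
| The surrounding pattern -- increment optimistically on the fast path and
| enter this deferred code only on overflow or a non-smi input -- looks
| like this restated in C++ (a sketch; SlowIncrement stands in for the
| ToNumber-plus-generic-stub sequence above):
|
|     #include <cstdint>
|     #include <limits>
|
|     // Placeholder for the out-of-line path, which converts the input to
|     // a number and runs the generic add stub; modeled with a double so
|     // the add cannot overflow.
|     double SlowIncrement(std::int64_t value) {
|       return static_cast<double>(value) + 1.0;
|     }
|
|     double Increment(std::int64_t value) {
|       // The overflow check plays the role of the branch to
|       // deferred->entry_label() in the generated code.
|       if (value == std::numeric_limits<std::int64_t>::max()) {
|         return SlowIncrement(value);
|       }
|       return static_cast<double>(value + 1);  // Fast path: plain add.
|     }
|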
|
|
|
| - } else if (slot->var()->mode() == Variable::CONST) {
|
| - // Const slots may contain 'the hole' value (the constant hasn't been
|
| - // initialized yet) which needs to be converted into the 'undefined'
|
| - // value.
|
| - //
|
| - // We currently spill the virtual frame because constants use the
|
| - // potentially unsafe direct-frame access of SlotOperand.
|
| - VirtualFrame::SpilledScope spilled_scope;
|
| - Comment cmnt(masm_, "[ Load const");
|
| - JumpTarget exit;
|
| - __ movq(rcx, SlotOperand(slot, rcx));
|
| - __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
|
| - exit.Branch(not_equal);
|
| - __ LoadRoot(rcx, Heap::kUndefinedValueRootIndex);
|
| - exit.Bind();
|
| - frame_->EmitPush(rcx);
|
|
|
| - } else if (slot->type() == Slot::PARAMETER) {
|
| - frame_->PushParameterAt(slot->index());
|
| -
|
| - } else if (slot->type() == Slot::LOCAL) {
|
| - frame_->PushLocalAt(slot->index());
|
| -
|
| - } else {
|
| - // The other remaining slot types (LOOKUP and GLOBAL) cannot reach
|
| - // here.
|
| - //
|
| - // The use of SlotOperand below is safe for an unspilled frame
|
| - // because it will always be a context slot.
|
| - ASSERT(slot->type() == Slot::CONTEXT);
|
| - Result temp = allocator_->Allocate();
|
| - ASSERT(temp.is_valid());
|
| - __ movq(temp.reg(), SlotOperand(slot, temp.reg()));
|
| - frame_->Push(&temp);
|
| +// The value in dst was optimistically incremented or decremented.
|
| +// The result overflowed or was not smi tagged. Call into the runtime
|
| +// to convert the argument to a number. Update the original value in
|
| +// old. Call the specialized add or subtract stub. The result is
|
| +// left in dst.
|
| +class DeferredPostfixCountOperation: public DeferredCode {
|
| + public:
|
| + DeferredPostfixCountOperation(Register dst,
|
| + Register old,
|
| + bool is_increment,
|
| + TypeInfo input_type)
|
| + : dst_(dst),
|
| + old_(old),
|
| + is_increment_(is_increment),
|
| + input_type_(input_type) {
|
| + set_comment("[ DeferredCountOperation");
|
| }
|
| -}
|
|
|
| + virtual void Generate();
|
|
|
| -void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
|
| - TypeofState state) {
|
| - LoadFromSlot(slot, state);
|
| + private:
|
| + Register dst_;
|
| + Register old_;
|
| + bool is_increment_;
|
| + TypeInfo input_type_;
|
| +};
|
|
|
| - // Bail out quickly if we're not using lazy arguments allocation.
|
| - if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
|
|
|
| - // ... or if the slot isn't a non-parameter arguments slot.
|
| - if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
|
| -
|
| - // Pop the loaded value from the stack.
|
| - Result value = frame_->Pop();
|
| -
|
| - // If the loaded value is a constant, we know if the arguments
|
| - // object has been lazily loaded yet.
|
| - if (value.is_constant()) {
|
| - if (value.handle()->IsTheHole()) {
|
| - Result arguments = StoreArgumentsObject(false);
|
| - frame_->Push(&arguments);
|
| - } else {
|
| - frame_->Push(&value);
|
| - }
|
| - return;
|
| +void DeferredPostfixCountOperation::Generate() {
|
| + Register left;
|
| + if (input_type_.IsNumber()) {
|
| + __ push(dst_); // Save the input to use as the old value.
|
| + left = dst_;
|
| + } else {
|
| + __ push(dst_);
|
| + __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
|
| + __ push(rax); // Save the result of ToNumber to use as the old value.
|
| + left = rax;
|
| }
|
|
|
| - // The loaded value is in a register. If it is the sentinel that
|
| - // indicates that we haven't loaded the arguments object yet, we
|
| - // need to do it now.
|
| - JumpTarget exit;
|
| - __ CompareRoot(value.reg(), Heap::kTheHoleValueRootIndex);
|
| - frame_->Push(&value);
|
| - exit.Branch(not_equal);
|
| - Result arguments = StoreArgumentsObject(false);
|
| - frame_->SetElementAt(0, &arguments);
|
| - exit.Bind();
|
| + GenericBinaryOpStub stub(is_increment_ ? Token::ADD : Token::SUB,
|
| + NO_OVERWRITE,
|
| + NO_GENERIC_BINARY_FLAGS,
|
| + TypeInfo::Number());
|
| + stub.GenerateCall(masm_, left, Smi::FromInt(1));
|
| +
|
| + if (!dst_.is(rax)) __ movq(dst_, rax);
|
| + __ pop(old_);
|
| }
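|
| The postfix variant differs only in keeping the pre-increment value alive
| across the slow path, which is what the push/pop of old_ above does. The
| count operation compiled by the visitor below then behaves like this
| model (a sketch with plain integers standing in for tagged values;
| overflow handling as in the previous fragment is elided):
|
|     #include <cstdint>
|
|     std::int64_t CountOperation(std::int64_t* target,
|                                 bool is_postfix,
|                                 bool is_increment) {
|       std::int64_t old_value = *target;                  // TakeValue().
|       std::int64_t new_value = old_value + (is_increment ? 1 : -1);
|       *target = new_value;                               // SetValue().
|       return is_postfix ? old_value : new_value;         // Push vs. Drop.
|     }
|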
|
|
|
|
|
| -void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
|
| - if (slot->type() == Slot::LOOKUP) {
|
| - ASSERT(slot->var()->is_dynamic());
|
| +void CodeGenerator::VisitCountOperation(CountOperation* node) {
|
| + Comment cmnt(masm_, "[ CountOperation");
|
|
|
| - // For now, just do a runtime call. Since the call is inevitable,
|
| - // we eagerly sync the virtual frame so we can directly push the
|
| - // arguments into place.
|
| - frame_->SyncRange(0, frame_->element_count() - 1);
|
| + bool is_postfix = node->is_postfix();
|
| + bool is_increment = node->op() == Token::INC;
|
|
|
| - frame_->EmitPush(rsi);
|
| - frame_->EmitPush(slot->var()->name());
|
| + Variable* var = node->expression()->AsVariableProxy()->AsVariable();
|
| + bool is_const = (var != NULL && var->mode() == Variable::CONST);
|
|
|
| - Result value;
|
| - if (init_state == CONST_INIT) {
|
| - // Same as the case for a normal store, but ignores attribute
|
| - // (e.g. READ_ONLY) of context slot so that we can initialize const
|
| - // properties (introduced via eval("const foo = (some expr);")). Also,
|
| - // uses the current function context instead of the top context.
|
| - //
|
| - // Note that we must declare the foo upon entry of eval(), via a
|
| - // context slot declaration, but we cannot initialize it at the same
|
| - // time, because the const declaration may be at the end of the eval
|
| - // code (sigh...) and the const variable may have been used before
|
| - // (where its value is 'undefined'). Thus, we can only do the
|
| - // initialization when we actually encounter the expression and when
|
| - // the expression operands are defined and valid, and thus we need the
|
| - // split into 2 operations: declaration of the context slot followed
|
| - // by initialization.
|
| - value = frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
|
| - } else {
|
| - value = frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
|
| - }
|
| - // Storing a variable must keep the (new) value on the expression
|
| - // stack. This is necessary for compiling chained assignment
|
| - // expressions.
|
| - frame_->Push(&value);
|
| - } else {
|
| - ASSERT(!slot->var()->is_dynamic());
|
| + // Postfix operations need a stack slot under the reference to hold
|
| + // the old value while the new value is being stored. This ensures
|
| + // that if storing the new value requires a call, the old value is
|
| + // already in the frame and can be spilled.
|
| + if (is_postfix) frame_->Push(Smi::FromInt(0));
|
|
|
| - JumpTarget exit;
|
| - if (init_state == CONST_INIT) {
|
| - ASSERT(slot->var()->mode() == Variable::CONST);
|
| - // Only the first const initialization must be executed (the slot
|
| - // still contains 'the hole' value). When the assignment is executed,
|
| - // the code is identical to a normal store (see below).
|
| - //
|
| - // We spill the frame in the code below because the direct-frame
|
| - // access of SlotOperand is potentially unsafe with an unspilled
|
| - // frame.
|
| - VirtualFrame::SpilledScope spilled_scope;
|
| - Comment cmnt(masm_, "[ Init const");
|
| - __ movq(rcx, SlotOperand(slot, rcx));
|
| - __ CompareRoot(rcx, Heap::kTheHoleValueRootIndex);
|
| - exit.Branch(not_equal);
|
| + // A reference to a constant is never stored to, so it does not need
|
| + // to be treated as a compound assignment reference.
|
| + { Reference target(this, node->expression(), !is_const);
|
| + if (target.is_illegal()) {
|
| + // Spoof the virtual frame to have the expected height (one higher
|
| + // than on entry).
|
| + if (!is_postfix) frame_->Push(Smi::FromInt(0));
|
| + return;
|
| }
|
| + target.TakeValue();
|
|
|
| - // We must execute the store. Storing a variable must keep the (new)
|
| - // value on the stack. This is necessary for compiling assignment
|
| - // expressions.
|
| - //
|
| - // Note: We will reach here even with slot->var()->mode() ==
|
| - // Variable::CONST because of const declarations which will initialize
|
| - // consts to 'the hole' value and by doing so, end up calling this code.
|
| - if (slot->type() == Slot::PARAMETER) {
|
| - frame_->StoreToParameterAt(slot->index());
|
| - } else if (slot->type() == Slot::LOCAL) {
|
| - frame_->StoreToLocalAt(slot->index());
|
| - } else {
|
| - // The other slot types (LOOKUP and GLOBAL) cannot reach here.
|
| - //
|
| - // The use of SlotOperand below is safe for an unspilled frame
|
| - // because the slot is a context slot.
|
| - ASSERT(slot->type() == Slot::CONTEXT);
|
| - frame_->Dup();
|
| - Result value = frame_->Pop();
|
| - value.ToRegister();
|
| - Result start = allocator_->Allocate();
|
| - ASSERT(start.is_valid());
|
| - __ movq(SlotOperand(slot, start.reg()), value.reg());
|
| - // RecordWrite may destroy the value registers.
|
| - //
|
| - // TODO(204): Avoid actually spilling when the value is not
|
| - // needed (probably the common case).
|
| - frame_->Spill(value.reg());
|
| - int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
|
| - Result temp = allocator_->Allocate();
|
| - ASSERT(temp.is_valid());
|
| - __ RecordWrite(start.reg(), offset, value.reg(), temp.reg());
|
| - // The results start, value, and temp are unused by going out of
|
| - // scope.
|
| - }
|
| + Result new_value = frame_->Pop();
|
| + new_value.ToRegister();
|
|
|
| - exit.Bind();
|
| - }
|
| -}
|
| + Result old_value; // Only allocated in the postfix case.
|
| + if (is_postfix) {
|
| + // Allocate a temporary to preserve the old value.
|
| + old_value = allocator_->Allocate();
|
| + ASSERT(old_value.is_valid());
|
| + __ movq(old_value.reg(), new_value.reg());
|
|
|
| -
|
| -Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
|
| - Slot* slot,
|
| - TypeofState typeof_state,
|
| - JumpTarget* slow) {
|
| - // Check that no extension objects have been created by calls to
|
| - // eval from the current scope to the global scope.
|
| - Register context = rsi;
|
| - Result tmp = allocator_->Allocate();
|
| - ASSERT(tmp.is_valid()); // All non-reserved registers were available.
|
| -
|
| - Scope* s = scope();
|
| - while (s != NULL) {
|
| - if (s->num_heap_slots() > 0) {
|
| - if (s->calls_eval()) {
|
| - // Check that extension is NULL.
|
| - __ cmpq(ContextOperand(context, Context::EXTENSION_INDEX),
|
| - Immediate(0));
|
| - slow->Branch(not_equal, not_taken);
|
| + // The return value for postfix operations is ToNumber(input).
|
| + // Keep more precise type info if the input is some kind of
|
| + // number already. If the input is not a number we have to wait
|
| + // for the deferred code to convert it.
|
| + if (new_value.type_info().IsNumber()) {
|
| + old_value.set_type_info(new_value.type_info());
|
| }
|
| - // Load next context in chain.
|
| - __ movq(tmp.reg(), ContextOperand(context, Context::CLOSURE_INDEX));
|
| - __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
|
| - context = tmp.reg();
|
| }
|
| - // If no outer scope calls eval, we do not need to check more
|
| - // context extensions. If we have reached an eval scope, we check
|
| - // all extensions from this point.
|
| - if (!s->outer_scope_calls_eval() || s->is_eval_scope()) break;
|
| - s = s->outer_scope();
|
| - }
|
| + // Ensure the new value is writable.
|
| + frame_->Spill(new_value.reg());
|
|
|
| - if (s->is_eval_scope()) {
|
| - // Loop up the context chain. There is no frame effect so it is
|
| - // safe to use raw labels here.
|
| - Label next, fast;
|
| - if (!context.is(tmp.reg())) {
|
| - __ movq(tmp.reg(), context);
|
| + DeferredCode* deferred = NULL;
|
| + if (is_postfix) {
|
| + deferred = new DeferredPostfixCountOperation(new_value.reg(),
|
| + old_value.reg(),
|
| + is_increment,
|
| + new_value.type_info());
|
| + } else {
|
| + deferred = new DeferredPrefixCountOperation(new_value.reg(),
|
| + is_increment,
|
| + new_value.type_info());
|
| }
|
| - // Load map for comparison into register, outside loop.
|
| - __ LoadRoot(kScratchRegister, Heap::kGlobalContextMapRootIndex);
|
| - __ bind(&next);
|
| - // Terminate at global context.
|
| - __ cmpq(kScratchRegister, FieldOperand(tmp.reg(), HeapObject::kMapOffset));
|
| - __ j(equal, &fast);
|
| - // Check that extension is NULL.
|
| - __ cmpq(ContextOperand(tmp.reg(), Context::EXTENSION_INDEX), Immediate(0));
|
| - slow->Branch(not_equal);
|
| - // Load next context in chain.
|
| - __ movq(tmp.reg(), ContextOperand(tmp.reg(), Context::CLOSURE_INDEX));
|
| - __ movq(tmp.reg(), FieldOperand(tmp.reg(), JSFunction::kContextOffset));
|
| - __ jmp(&next);
|
| - __ bind(&fast);
|
| - }
|
| - tmp.Unuse();
|
|
|
| - // All extension objects were empty and it is safe to use a global
|
| - // load IC call.
|
| - LoadGlobal();
|
| - frame_->Push(slot->var()->name());
|
| - RelocInfo::Mode mode = (typeof_state == INSIDE_TYPEOF)
|
| - ? RelocInfo::CODE_TARGET
|
| - : RelocInfo::CODE_TARGET_CONTEXT;
|
| - Result answer = frame_->CallLoadIC(mode);
|
| - // A test rax instruction following the call signals that the inobject
|
| - // property case was inlined. Ensure that there is not a test rax
|
| - // instruction here.
|
| - masm_->nop();
|
| - return answer;
|
| -}
|
| -
|
| -
|
| -void CodeGenerator::EmitDynamicLoadFromSlotFastCase(Slot* slot,
|
| - TypeofState typeof_state,
|
| - Result* result,
|
| - JumpTarget* slow,
|
| - JumpTarget* done) {
|
| - // Generate fast-case code for variables that might be shadowed by
|
| - // eval-introduced variables. Eval is used a lot without
|
| - // introducing variables. In those cases, we do not want to
|
| - // perform a runtime call for all variables in the scope
|
| - // containing the eval.
|
| - if (slot->var()->mode() == Variable::DYNAMIC_GLOBAL) {
|
| - *result = LoadFromGlobalSlotCheckExtensions(slot, typeof_state, slow);
|
| - done->Jump(result);
|
| -
|
| - } else if (slot->var()->mode() == Variable::DYNAMIC_LOCAL) {
|
| - Slot* potential_slot = slot->var()->local_if_not_shadowed()->slot();
|
| - Expression* rewrite = slot->var()->local_if_not_shadowed()->rewrite();
|
| - if (potential_slot != NULL) {
|
| - // Generate fast case for locals that rewrite to slots.
|
| - // Allocate a fresh register to use as a temp in
|
| - // ContextSlotOperandCheckExtensions and to hold the result
|
| - // value.
|
| - *result = allocator_->Allocate();
|
| - ASSERT(result->is_valid());
|
| - __ movq(result->reg(),
|
| - ContextSlotOperandCheckExtensions(potential_slot,
|
| - *result,
|
| - slow));
|
| - if (potential_slot->var()->mode() == Variable::CONST) {
|
| - __ CompareRoot(result->reg(), Heap::kTheHoleValueRootIndex);
|
| - done->Branch(not_equal, result);
|
| - __ LoadRoot(result->reg(), Heap::kUndefinedValueRootIndex);
|
| - }
|
| - done->Jump(result);
|
| - } else if (rewrite != NULL) {
|
| - // Generate fast case for argument loads.
|
| - Property* property = rewrite->AsProperty();
|
| - if (property != NULL) {
|
| - VariableProxy* obj_proxy = property->obj()->AsVariableProxy();
|
| - Literal* key_literal = property->key()->AsLiteral();
|
| - if (obj_proxy != NULL &&
|
| - key_literal != NULL &&
|
| - obj_proxy->IsArguments() &&
|
| - key_literal->handle()->IsSmi()) {
|
| - // Load arguments object if there are no eval-introduced
|
| - // variables. Then load the argument from the arguments
|
| - // object using keyed load.
|
| - Result arguments = allocator()->Allocate();
|
| - ASSERT(arguments.is_valid());
|
| - __ movq(arguments.reg(),
|
| - ContextSlotOperandCheckExtensions(obj_proxy->var()->slot(),
|
| - arguments,
|
| - slow));
|
| - frame_->Push(&arguments);
|
| - frame_->Push(key_literal->handle());
|
| - *result = EmitKeyedLoad();
|
| - done->Jump(result);
|
| - }
|
| - }
|
| + if (new_value.is_smi()) {
|
| + if (FLAG_debug_code) { __ AbortIfNotSmi(new_value.reg()); }
|
| + } else {
|
| + __ JumpIfNotSmi(new_value.reg(), deferred->entry_label());
|
| }
|
| - }
|
| -}
|
| -
|
| -
|
| -void CodeGenerator::LoadGlobal() {
|
| - if (in_spilled_code()) {
|
| - frame_->EmitPush(GlobalObject());
|
| - } else {
|
| - Result temp = allocator_->Allocate();
|
| - __ movq(temp.reg(), GlobalObject());
|
| - frame_->Push(&temp);
|
| - }
|
| -}
|
| -
|
| -
|
| -void CodeGenerator::LoadGlobalReceiver() {
|
| - Result temp = allocator_->Allocate();
|
| - Register reg = temp.reg();
|
| - __ movq(reg, GlobalObject());
|
| - __ movq(reg, FieldOperand(reg, GlobalObject::kGlobalReceiverOffset));
|
| - frame_->Push(&temp);
|
| -}
|
| -
|
| -
|
| -ArgumentsAllocationMode CodeGenerator::ArgumentsMode() {
|
| - if (scope()->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
|
| - ASSERT(scope()->arguments_shadow() != NULL);
|
| - // We don't want to do lazy arguments allocation for functions that
|
| - // have heap-allocated contexts, because it interfers with the
|
| - // uninitialized const tracking in the context objects.
|
| - return (scope()->num_heap_slots() > 0)
|
| - ? EAGER_ARGUMENTS_ALLOCATION
|
| - : LAZY_ARGUMENTS_ALLOCATION;
|
| -}
|
| -
|
| -
|
| -Result CodeGenerator::StoreArgumentsObject(bool initial) {
|
| - ArgumentsAllocationMode mode = ArgumentsMode();
|
| - ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
|
| -
|
| - Comment cmnt(masm_, "[ store arguments object");
|
| - if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
|
| - // When using lazy arguments allocation, we store the hole value
|
| - // as a sentinel indicating that the arguments object hasn't been
|
| - // allocated yet.
|
| - frame_->Push(Factory::the_hole_value());
|
| - } else {
|
| - ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
|
| - frame_->PushFunction();
|
| - frame_->PushReceiverSlotAddress();
|
| - frame_->Push(Smi::FromInt(scope()->num_parameters()));
|
| - Result result = frame_->CallStub(&stub, 3);
|
| - frame_->Push(&result);
|
| - }
|
| -
|
| -
|
| - Variable* arguments = scope()->arguments()->var();
|
| - Variable* shadow = scope()->arguments_shadow()->var();
|
| - ASSERT(arguments != NULL && arguments->slot() != NULL);
|
| - ASSERT(shadow != NULL && shadow->slot() != NULL);
|
| - JumpTarget done;
|
| - bool skip_arguments = false;
|
| - if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
|
| - // We have to skip storing into the arguments slot if it has
|
| - // already been written to. This can happen if the a function
|
| - // has a local variable named 'arguments'.
|
| - LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
|
| - Result probe = frame_->Pop();
|
| - if (probe.is_constant()) {
|
| - // We have to skip updating the arguments object if it has been
|
| - // assigned a proper value.
|
| - skip_arguments = !probe.handle()->IsTheHole();
|
| + if (is_increment) {
|
| + __ SmiAddConstant(new_value.reg(),
|
| + new_value.reg(),
|
| + Smi::FromInt(1),
|
| + deferred->entry_label());
|
| } else {
|
| - __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
|
| - probe.Unuse();
|
| - done.Branch(not_equal);
|
| + __ SmiSubConstant(new_value.reg(),
|
| + new_value.reg(),
|
| + Smi::FromInt(1),
|
| + deferred->entry_label());
|
| }
|
| - }
|
| - if (!skip_arguments) {
|
| - StoreToSlot(arguments->slot(), NOT_CONST_INIT);
|
| - if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
|
| - }
|
| - StoreToSlot(shadow->slot(), NOT_CONST_INIT);
|
| - return frame_->Pop();
|
| -}
|
| + deferred->BindExit();
|
|
|
| + // Postfix count operations return their input converted to
|
| + // number. The case when the input is already a number is covered
|
| + // above in the allocation code for old_value.
|
| + if (is_postfix && !new_value.type_info().IsNumber()) {
|
| + old_value.set_type_info(TypeInfo::Number());
|
| + }
|
|
|
| -void CodeGenerator::LoadTypeofExpression(Expression* expr) {
|
| - // Special handling of identifiers as subexpressions of typeof.
|
| - Variable* variable = expr->AsVariableProxy()->AsVariable();
|
| - if (variable != NULL && !variable->is_this() && variable->is_global()) {
|
| - // For a global variable we build the property reference
|
| - // <global>.<variable> and perform a (regular non-contextual) property
|
| - // load to make sure we do not get reference errors.
|
| - Slot global(variable, Slot::CONTEXT, Context::GLOBAL_INDEX);
|
| - Literal key(variable->name());
|
| - Property property(&global, &key, RelocInfo::kNoPosition);
|
| - Reference ref(this, &property);
|
| - ref.GetValue();
|
| - } else if (variable != NULL && variable->slot() != NULL) {
|
| - // For a variable that rewrites to a slot, we signal it is the immediate
|
| - // subexpression of a typeof.
|
| - LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
|
| - } else {
|
| - // Anything else can be handled normally.
|
| - Load(expr);
|
| - }
|
| -}
|
| + new_value.set_type_info(TypeInfo::Number());
|
|
|
| + // Postfix: store the old value in the allocated slot under the
|
| + // reference.
|
| + if (is_postfix) frame_->SetElementAt(target.size(), &old_value);
|
|
|
| -static bool CouldBeNaN(const Result& result) {
|
| - if (result.type_info().IsSmi()) return false;
|
| - if (result.type_info().IsInteger32()) return false;
|
| - if (!result.is_constant()) return true;
|
| - if (!result.handle()->IsHeapNumber()) return false;
|
| - return isnan(HeapNumber::cast(*result.handle())->value());
|
| -}
|
| -
|
| -
|
| -// Convert from signed to unsigned comparison to match the way EFLAGS are set
|
| -// by FPU and XMM compare instructions.
|
| -static Condition DoubleCondition(Condition cc) {
|
| - switch (cc) {
|
| - case less: return below;
|
| - case equal: return equal;
|
| - case less_equal: return below_equal;
|
| - case greater: return above;
|
| - case greater_equal: return above_equal;
|
| - default: UNREACHABLE();
|
| + frame_->Push(&new_value);
|
| + // Non-constant: update the reference.
|
| + if (!is_const) target.SetValue(NOT_CONST_INIT);
|
| }
|
| - UNREACHABLE();
|
| - return equal;
|
| +
|
| + // Postfix: drop the new value and use the old.
|
| + if (is_postfix) frame_->Drop();
|
| }
|
|
|
|
|
| -void CodeGenerator::Comparison(AstNode* node,
|
| - Condition cc,
|
| - bool strict,
|
| - ControlDestination* dest) {
|
| - // Strict only makes sense for equality comparisons.
|
| - ASSERT(!strict || cc == equal);
|
| +void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
|
| + // According to ECMA-262 section 11.11, page 58, the binary logical
|
| + // operators must yield the result of one of the two expressions
|
| + // before any ToBoolean() conversions. This means that the value
|
| + // produced by a && or || operator is not necessarily a boolean.
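|
| (For instance, in JavaScript the expression 0 || 'x' yields 'x' and
| 'x' && 0 yields 0: the result is the chosen operand's own value, not a
| boolean.)
|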
|
|
|
| - Result left_side;
|
| - Result right_side;
|
| - // Implement '>' and '<=' by reversal to obtain ECMA-262 conversion order.
|
| - if (cc == greater || cc == less_equal) {
|
| - cc = ReverseCondition(cc);
|
| - left_side = frame_->Pop();
|
| - right_side = frame_->Pop();
|
| - } else {
|
| - right_side = frame_->Pop();
|
| - left_side = frame_->Pop();
|
| - }
|
| - ASSERT(cc == less || cc == equal || cc == greater_equal);
|
| + // NOTE: If the left hand side produces a materialized value (not
|
| + // control flow), we force the right hand side to do the same. This
|
| + // is necessary because we assume that if we get control flow on the
|
| + // last path out of an expression we got it on all paths.
|
| + if (node->op() == Token::AND) {
|
| + JumpTarget is_true;
|
| + ControlDestination dest(&is_true, destination()->false_target(), true);
|
| + LoadCondition(node->left(), &dest, false);
|
|
|
| - // If either side is a constant smi, optimize the comparison.
|
| - bool left_side_constant_smi = false;
|
| - bool left_side_constant_null = false;
|
| - bool left_side_constant_1_char_string = false;
|
| - if (left_side.is_constant()) {
|
| - left_side_constant_smi = left_side.handle()->IsSmi();
|
| - left_side_constant_null = left_side.handle()->IsNull();
|
| - left_side_constant_1_char_string =
|
| - (left_side.handle()->IsString() &&
|
| - String::cast(*left_side.handle())->length() == 1 &&
|
| - String::cast(*left_side.handle())->IsAsciiRepresentation());
|
| - }
|
| - bool right_side_constant_smi = false;
|
| - bool right_side_constant_null = false;
|
| - bool right_side_constant_1_char_string = false;
|
| - if (right_side.is_constant()) {
|
| - right_side_constant_smi = right_side.handle()->IsSmi();
|
| - right_side_constant_null = right_side.handle()->IsNull();
|
| - right_side_constant_1_char_string =
|
| - (right_side.handle()->IsString() &&
|
| - String::cast(*right_side.handle())->length() == 1 &&
|
| - String::cast(*right_side.handle())->IsAsciiRepresentation());
|
| - }
|
| -
|
| - if (left_side_constant_smi || right_side_constant_smi) {
|
| - if (left_side_constant_smi && right_side_constant_smi) {
|
| - // Trivial case, comparing two constants.
|
| - int left_value = Smi::cast(*left_side.handle())->value();
|
| - int right_value = Smi::cast(*right_side.handle())->value();
|
| - switch (cc) {
|
| - case less:
|
| - dest->Goto(left_value < right_value);
|
| - break;
|
| - case equal:
|
| - dest->Goto(left_value == right_value);
|
| - break;
|
| - case greater_equal:
|
| - dest->Goto(left_value >= right_value);
|
| - break;
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| - } else {
|
| - // Only one side is a constant Smi.
|
| - // If left side is a constant Smi, reverse the operands.
|
| - // Since one side is a constant Smi, conversion order does not matter.
|
| - if (left_side_constant_smi) {
|
| - Result temp = left_side;
|
| - left_side = right_side;
|
| - right_side = temp;
|
| - cc = ReverseCondition(cc);
|
| - // This may re-introduce greater or less_equal as the value of cc.
|
| - // CompareStub and the inline code both support all values of cc.
|
| - }
|
| - // Implement comparison against a constant Smi, inlining the case
|
| - // where both sides are Smis.
|
| - left_side.ToRegister();
|
| - Register left_reg = left_side.reg();
|
| - Handle<Object> right_val = right_side.handle();
|
| -
|
| - // Here we split control flow to the stub call and inlined cases
|
| - // before finally splitting it to the control destination. We use
|
| - // a jump target and branching to duplicate the virtual frame at
|
| - // the first split. We manually handle the off-frame references
|
| - // by reconstituting them on the non-fall-through path.
|
| - JumpTarget is_smi;
|
| -
|
| - if (left_side.is_smi()) {
|
| - if (FLAG_debug_code) {
|
| - __ AbortIfNotSmi(left_side.reg());
|
| + if (dest.false_was_fall_through()) {
|
| + // The current false target was used as the fall-through. If
|
| + // there are no dangling jumps to is_true then the left
|
| + // subexpression was unconditionally false. Otherwise we have
|
| + // paths where we do have to evaluate the right subexpression.
|
| + if (is_true.is_linked()) {
|
| + // We need to compile the right subexpression. If the jump to
|
| + // the current false target was a forward jump then we have a
|
| + // valid frame, we have just bound the false target, and we
|
| + // have to jump around the code for the right subexpression.
|
| + if (has_valid_frame()) {
|
| + destination()->false_target()->Unuse();
|
| + destination()->false_target()->Jump();
|
| }
|
| + is_true.Bind();
|
| + // The left subexpression compiled to control flow, so the
|
| + // right one is free to do so as well.
|
| + LoadCondition(node->right(), destination(), false);
|
| } else {
|
| - Condition left_is_smi = masm_->CheckSmi(left_side.reg());
|
| - is_smi.Branch(left_is_smi);
|
| -
|
| - bool is_loop_condition = (node->AsExpression() != NULL) &&
|
| - node->AsExpression()->is_loop_condition();
|
| - if (!is_loop_condition && right_val->IsSmi()) {
|
| - // Right side is a constant smi and left side has been checked
|
| - // not to be a smi.
|
| - JumpTarget not_number;
|
| - __ Cmp(FieldOperand(left_reg, HeapObject::kMapOffset),
|
| - Factory::heap_number_map());
|
| - not_number.Branch(not_equal, &left_side);
|
| - __ movsd(xmm1,
|
| - FieldOperand(left_reg, HeapNumber::kValueOffset));
|
| - int value = Smi::cast(*right_val)->value();
|
| - if (value == 0) {
|
| - __ xorpd(xmm0, xmm0);
|
| - } else {
|
| - Result temp = allocator()->Allocate();
|
| - __ movl(temp.reg(), Immediate(value));
|
| - __ cvtlsi2sd(xmm0, temp.reg());
|
| - temp.Unuse();
|
| - }
|
| - __ ucomisd(xmm1, xmm0);
|
| - // Jump to builtin for NaN.
|
| - not_number.Branch(parity_even, &left_side);
|
| - left_side.Unuse();
|
| - dest->true_target()->Branch(DoubleCondition(cc));
|
| - dest->false_target()->Jump();
|
| - not_number.Bind(&left_side);
|
| - }
|
| -
|
| - // Setup and call the compare stub.
|
| - CompareStub stub(cc, strict, kCantBothBeNaN);
|
| - Result result = frame_->CallStub(&stub, &left_side, &right_side);
|
| - result.ToRegister();
|
| - __ testq(result.reg(), result.reg());
|
| - result.Unuse();
|
| - dest->true_target()->Branch(cc);
|
| - dest->false_target()->Jump();
|
| -
|
| - is_smi.Bind();
|
| + // We have actually just jumped to or bound the current false
|
| + // target but the current control destination is not marked as
|
| + // used.
|
| + destination()->Use(false);
|
| }
|
|
|
| - left_side = Result(left_reg);
|
| - right_side = Result(right_val);
|
| - // Test smi equality and comparison by signed int comparison.
|
| - // Both sides are smis, so we can use an Immediate.
|
| - __ SmiCompare(left_side.reg(), Smi::cast(*right_side.handle()));
|
| - left_side.Unuse();
|
| - right_side.Unuse();
|
| - dest->Split(cc);
|
| - }
|
| - } else if (cc == equal &&
|
| - (left_side_constant_null || right_side_constant_null)) {
|
| - // To make null checks efficient, we check if either the left side or
|
| - // the right side is the constant 'null'.
|
| - // If so, we optimize the code by inlining a null check instead of
|
| - // calling the (very) general runtime routine for checking equality.
|
| - Result operand = left_side_constant_null ? right_side : left_side;
|
| - right_side.Unuse();
|
| - left_side.Unuse();
|
| - operand.ToRegister();
|
| - __ CompareRoot(operand.reg(), Heap::kNullValueRootIndex);
|
| - if (strict) {
|
| - operand.Unuse();
|
| - dest->Split(equal);
|
| - } else {
|
| - // The 'null' value is only equal to 'undefined' if using non-strict
|
| - // comparisons.
|
| - dest->true_target()->Branch(equal);
|
| - __ CompareRoot(operand.reg(), Heap::kUndefinedValueRootIndex);
|
| - dest->true_target()->Branch(equal);
|
| - Condition is_smi = masm_->CheckSmi(operand.reg());
|
| - dest->false_target()->Branch(is_smi);
|
| + } else if (dest.is_used()) {
|
| + // The left subexpression compiled to control flow (and is_true
|
| + // was just bound), so the right is free to do so as well.
|
| + LoadCondition(node->right(), destination(), false);
|
|
|
| - // It can be an undetectable object.
|
| - // Use a scratch register in preference to spilling operand.reg().
|
| - Result temp = allocator()->Allocate();
|
| - ASSERT(temp.is_valid());
|
| - __ movq(temp.reg(),
|
| - FieldOperand(operand.reg(), HeapObject::kMapOffset));
|
| - __ testb(FieldOperand(temp.reg(), Map::kBitFieldOffset),
|
| - Immediate(1 << Map::kIsUndetectable));
|
| - temp.Unuse();
|
| - operand.Unuse();
|
| - dest->Split(not_zero);
|
| - }
|
| - } else if (left_side_constant_1_char_string ||
|
| - right_side_constant_1_char_string) {
|
| - if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
|
| - // Trivial case, comparing two constants.
|
| - int left_value = String::cast(*left_side.handle())->Get(0);
|
| - int right_value = String::cast(*right_side.handle())->Get(0);
|
| - switch (cc) {
|
| - case less:
|
| - dest->Goto(left_value < right_value);
|
| - break;
|
| - case equal:
|
| - dest->Goto(left_value == right_value);
|
| - break;
|
| - case greater_equal:
|
| - dest->Goto(left_value >= right_value);
|
| - break;
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| } else {
|
| - // Only one side is a constant 1 character string.
|
| - // If left side is a constant 1-character string, reverse the operands.
|
| - // Since one side is a constant string, conversion order does not matter.
|
| - if (left_side_constant_1_char_string) {
|
| - Result temp = left_side;
|
| - left_side = right_side;
|
| - right_side = temp;
|
| - cc = ReverseCondition(cc);
|
| - // This may reintroduce greater or less_equal as the value of cc.
|
| - // CompareStub and the inline code both support all values of cc.
|
| - }
|
| - // Implement comparison against a constant string, inlining the case
|
| - // where both sides are strings.
|
| - left_side.ToRegister();
|
| + // We have a materialized value on the frame, so we exit with
|
| + // one on all paths. There are possibly also jumps to is_true
|
| + // from nested subexpressions.
|
| + JumpTarget pop_and_continue;
|
| + JumpTarget exit;
|
|
|
| - // Here we split control flow to the stub call and inlined cases
|
| - // before finally splitting it to the control destination. We use
|
| - // a jump target and branching to duplicate the virtual frame at
|
| - // the first split. We manually handle the off-frame references
|
| - // by reconstituting them on the non-fall-through path.
|
| - JumpTarget is_not_string, is_string;
|
| - Register left_reg = left_side.reg();
|
| - Handle<Object> right_val = right_side.handle();
|
| - ASSERT(StringShape(String::cast(*right_val)).IsSymbol());
|
| - Condition is_smi = masm()->CheckSmi(left_reg);
|
| - is_not_string.Branch(is_smi, &left_side);
|
| - Result temp = allocator_->Allocate();
|
| - ASSERT(temp.is_valid());
|
| - __ movq(temp.reg(),
|
| - FieldOperand(left_reg, HeapObject::kMapOffset));
|
| - __ movzxbl(temp.reg(),
|
| - FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
|
| - // If we are testing for equality then make use of the symbol shortcut.
|
| - // Check if the left hand side has the same type as the right hand
|
| - // side (which is always a symbol).
|
| - if (cc == equal) {
|
| - Label not_a_symbol;
|
| - ASSERT(kSymbolTag != 0);
|
| - // Ensure that no non-strings have the symbol bit set.
|
| - ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
|
| - __ testb(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
|
| - __ j(zero, &not_a_symbol);
|
| - // They are symbols, so do identity compare.
|
| - __ Cmp(left_reg, right_side.handle());
|
| - dest->true_target()->Branch(equal);
|
| - dest->false_target()->Branch(not_equal);
|
| - __ bind(&not_a_symbol);
|
| - }
|
| - // Call the compare stub if the left side is not a flat ASCII string.
|
| - __ andb(temp.reg(),
|
| - Immediate(kIsNotStringMask |
|
| - kStringRepresentationMask |
|
| - kStringEncodingMask));
|
| - __ cmpb(temp.reg(),
|
| - Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
|
| - temp.Unuse();
|
| - is_string.Branch(equal, &left_side);
|
| + // Avoid popping the result if it converts to 'false' using the
|
| + // standard ToBoolean() conversion as described in ECMA-262,
|
| + // section 9.2, page 30.
|
| + //
|
| + // Duplicate the TOS value. The duplicate will be popped by
|
| + // ToBoolean.
|
| + frame_->Dup();
|
| + ControlDestination dest(&pop_and_continue, &exit, true);
|
| + ToBoolean(&dest);
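|
| In source terms, a minimal sketch of what this frame choreography computes
|
| (operand values are hypothetical):
|
|   // 0 && f()  ==>  0     ToBoolean(0) is false: jump to exit, keep the 0.
|
|   // 1 && f()  ==>  f()   ToBoolean(1) is true: pop the 1, then load f().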
|
|
|
| - // Set up and call the compare stub.
|
| - is_not_string.Bind(&left_side);
|
| - CompareStub stub(cc, strict, kCantBothBeNaN);
|
| - Result result = frame_->CallStub(&stub, &left_side, &right_side);
|
| - result.ToRegister();
|
| - __ testq(result.reg(), result.reg());
|
| - result.Unuse();
|
| - dest->true_target()->Branch(cc);
|
| - dest->false_target()->Jump();
|
| + // Pop the result of evaluating the first part.
|
| + frame_->Drop();
|
|
|
| - is_string.Bind(&left_side);
|
| - // left_side is a sequential ASCII string.
|
| - ASSERT(left_side.reg().is(left_reg));
|
| - right_side = Result(right_val);
|
| - Result temp2 = allocator_->Allocate();
|
| - ASSERT(temp2.is_valid());
|
| - // Test string equality and comparison.
|
| - if (cc == equal) {
|
| - Label comparison_done;
|
| - __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
|
| - Smi::FromInt(1));
|
| - __ j(not_equal, &comparison_done);
|
| - uint8_t char_value =
|
| - static_cast<uint8_t>(String::cast(*right_val)->Get(0));
|
| - __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
|
| - Immediate(char_value));
|
| - __ bind(&comparison_done);
|
| - } else {
|
| - __ movq(temp2.reg(),
|
| - FieldOperand(left_side.reg(), String::kLengthOffset));
|
| - __ SmiSubConstant(temp2.reg(), temp2.reg(), Smi::FromInt(1));
|
| - Label comparison;
|
| - // If the length is 0 then the subtraction gave -1 which compares less
|
| - // than any character.
|
| - __ j(negative, &comparison);
|
| - // Otherwise load the first character.
|
| - __ movzxbl(temp2.reg(),
|
| - FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
|
| - __ bind(&comparison);
|
| - // Compare the first character of the string with the
|
| - // constant 1-character string.
|
| - uint8_t char_value =
|
| - static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
|
| - __ cmpb(temp2.reg(), Immediate(char_value));
|
| - Label characters_were_different;
|
| - __ j(not_equal, &characters_were_different);
|
| - // If the first character is the same then the long string sorts after
|
| - // the short one.
|
| - __ SmiCompare(FieldOperand(left_side.reg(), String::kLengthOffset),
|
| - Smi::FromInt(1));
|
| - __ bind(&characters_were_different);
|
| - }
|
| - temp2.Unuse();
|
| - left_side.Unuse();
|
| - right_side.Unuse();
|
| - dest->Split(cc);
|
| + // Compile right side expression.
|
| + is_true.Bind();
|
| + Load(node->right());
|
| +
|
| + // Exit (always with a materialized value).
|
| + exit.Bind();
|
| }
|
| +
|
| } else {
|
| - // Neither side is a constant Smi, constant 1-char string, or constant null.
|
| - // If either side is a non-smi constant, skip the smi check.
|
| - bool known_non_smi =
|
| - (left_side.is_constant() && !left_side.handle()->IsSmi()) ||
|
| - (right_side.is_constant() && !right_side.handle()->IsSmi()) ||
|
| - left_side.type_info().IsDouble() ||
|
| - right_side.type_info().IsDouble();
|
| + ASSERT(node->op() == Token::OR);
|
| + JumpTarget is_false;
|
| + ControlDestination dest(destination()->true_target(), &is_false, false);
|
| + LoadCondition(node->left(), &dest, false);
|
|
|
| - NaNInformation nan_info =
|
| - (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
|
| - kBothCouldBeNaN :
|
| - kCantBothBeNaN;
|
| -
|
| - // Inline number comparison, handling any combination of smis and heap
|
| - // numbers, if:
|
| - //  - the code is in a loop,
|
| - //  - the compare operation is different from equal, and
|
| - //  - the compare is not a for-loop comparison.
|
| - // The reason for excluding equal is that it will most likely be done
|
| - // with smis (not heap numbers) and the code for comparing smis is inlined
|
| - // separately. The same reasoning applies to for-loop comparisons, which
|
| - // will also most likely be smi comparisons.
|
| - bool is_loop_condition = (node->AsExpression() != NULL)
|
| - && node->AsExpression()->is_loop_condition();
|
| - bool inline_number_compare =
|
| - loop_nesting() > 0 && cc != equal && !is_loop_condition;
|
| -
|
| - left_side.ToRegister();
|
| - right_side.ToRegister();
|
| -
|
| - if (known_non_smi) {
|
| - // Inlined equality check:
|
| - // If at least one of the objects is not NaN, then if the objects
|
| - // are identical, they are equal.
|
| - if (nan_info == kCantBothBeNaN && cc == equal) {
|
| - __ cmpq(left_side.reg(), right_side.reg());
|
| - dest->true_target()->Branch(equal);
|
| + if (dest.true_was_fall_through()) {
|
| + // The current true target was used as the fall-through. If
|
| + // there are no dangling jumps to is_false then the left
|
| + // subexpression was unconditionally true. Otherwise we have
|
| + // paths where we do have to evaluate the right subexpression.
|
| + if (is_false.is_linked()) {
|
| + // We need to compile the right subexpression. If the jump to
|
| + // the current true target was a forward jump then we have a
|
| + // valid frame, we have just bound the true target, and we
|
| + // have to jump around the code for the right subexpression.
|
| + if (has_valid_frame()) {
|
| + destination()->true_target()->Unuse();
|
| + destination()->true_target()->Jump();
|
| + }
|
| + is_false.Bind();
|
| + // The left subexpression compiled to control flow, so the
|
| + // right one is free to do so as well.
|
| + LoadCondition(node->right(), destination(), false);
|
| + } else {
|
| + // We have just jumped to or bound the current true target but
|
| + // the current control destination is not marked as used.
|
| + destination()->Use(true);
|
| }
|
|
|
| - // Inlined number comparison:
|
| - if (inline_number_compare) {
|
| - GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
|
| - }
|
| + } else if (dest.is_used()) {
|
| + // The left subexpression compiled to control flow (and is_false
|
| + // was just bound), so the right is free to do so as well.
|
| + LoadCondition(node->right(), destination(), false);
|
|
|
| - CompareStub stub(cc, strict, nan_info, !inline_number_compare);
|
| - Result answer = frame_->CallStub(&stub, &left_side, &right_side);
|
| - __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
|
| - answer.Unuse();
|
| - dest->Split(cc);
|
| } else {
|
| - // Here we split control flow to the stub call and inlined cases
|
| - // before finally splitting it to the control destination. We use
|
| - // a jump target and branching to duplicate the virtual frame at
|
| - // the first split. We manually handle the off-frame references
|
| - // by reconstituting them on the non-fall-through path.
|
| - JumpTarget is_smi;
|
| - Register left_reg = left_side.reg();
|
| - Register right_reg = right_side.reg();
|
| + // We have a materialized value on the frame, so we exit with
|
| + // one on all paths. There are possibly also jumps to is_false
|
| + // from nested subexpressions.
|
| + JumpTarget pop_and_continue;
|
| + JumpTarget exit;
|
|
|
| - Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
|
| - is_smi.Branch(both_smi);
|
| + // Avoid popping the result if it converts to 'true' using the
|
| + // standard ToBoolean() conversion as described in ECMA-262,
|
| + // section 9.2, page 30.
|
| + //
|
| + // Duplicate the TOS value. The duplicate will be popped by
|
| + // ToBoolean.
|
| + frame_->Dup();
|
| + ControlDestination dest(&exit, &pop_and_continue, false);
|
| + ToBoolean(&dest);
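|
| Mirroring the AND case, with hypothetical values:
|
|   // 1 || f()  ==>  1     ToBoolean(1) is true: jump to exit, keep the 1.
|
|   // 0 || f()  ==>  f()   ToBoolean(0) is false: pop the 0, then load f().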
|
|
|
| - // Inline the equality check if both operands can't be a NaN. If both
|
| - // objects are the same they are equal.
|
| - if (nan_info == kCantBothBeNaN && cc == equal) {
|
| - __ cmpq(left_side.reg(), right_side.reg());
|
| - dest->true_target()->Branch(equal);
|
| - }
|
| + // Pop the result of evaluating the first part.
|
| + frame_->Drop();
|
|
|
| - // Inlined number comparison:
|
| - if (inline_number_compare) {
|
| - GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
|
| - }
|
| + // Compile right side expression.
|
| + is_false.Bind();
|
| + Load(node->right());
|
|
|
| - CompareStub stub(cc, strict, nan_info, !inline_number_compare);
|
| - Result answer = frame_->CallStub(&stub, &left_side, &right_side);
|
| - __ testq(answer.reg(), answer.reg()); // Sets both zero and sign flags.
|
| - answer.Unuse();
|
| - dest->true_target()->Branch(cc);
|
| - dest->false_target()->Jump();
|
| -
|
| - is_smi.Bind();
|
| - left_side = Result(left_reg);
|
| - right_side = Result(right_reg);
|
| - __ SmiCompare(left_side.reg(), right_side.reg());
|
| - right_side.Unuse();
|
| - left_side.Unuse();
|
| - dest->Split(cc);
|
| + // Exit (always with a materialized value).
|
| + exit.Bind();
|
| }
|
| }
|
| }
|
|
|
| +void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
|
| + Comment cmnt(masm_, "[ BinaryOperation");
|
|
|
| -// Load a comparison operand into an XMM register. Jump to the not_numbers
|
| -// jump target, passing the left and right results, if the operand is not
|
| -// a number.
|
| -static void LoadComparisonOperand(MacroAssembler* masm_,
|
| - Result* operand,
|
| - XMMRegister xmm_reg,
|
| - Result* left_side,
|
| - Result* right_side,
|
| - JumpTarget* not_numbers) {
|
| - Label done;
|
| - if (operand->type_info().IsDouble()) {
|
| - // Operand is known to be a heap number, just load it.
|
| - __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
|
| - } else if (operand->type_info().IsSmi()) {
|
| - // Operand is known to be a smi. Convert it to double and keep the original
|
| - // smi.
|
| - __ SmiToInteger32(kScratchRegister, operand->reg());
|
| - __ cvtlsi2sd(xmm_reg, kScratchRegister);
|
| + if (node->op() == Token::AND || node->op() == Token::OR) {
|
| + GenerateLogicalBooleanOperation(node);
|
| } else {
|
| - // Operand type not known, check for smi or heap number.
|
| - Label smi;
|
| - __ JumpIfSmi(operand->reg(), &smi);
|
| - if (!operand->type_info().IsNumber()) {
|
| - __ LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
|
| - __ cmpq(FieldOperand(operand->reg(), HeapObject::kMapOffset),
|
| - kScratchRegister);
|
| - not_numbers->Branch(not_equal, left_side, right_side, taken);
|
| + // NOTE: The code below assumes that the slow cases (calls to runtime)
|
| + // never return a constant/immutable object.
|
| + OverwriteMode overwrite_mode = NO_OVERWRITE;
|
| + if (node->left()->AsBinaryOperation() != NULL &&
|
| + node->left()->AsBinaryOperation()->ResultOverwriteAllowed()) {
|
| + overwrite_mode = OVERWRITE_LEFT;
|
| + } else if (node->right()->AsBinaryOperation() != NULL &&
|
| + node->right()->AsBinaryOperation()->ResultOverwriteAllowed()) {
|
| + overwrite_mode = OVERWRITE_RIGHT;
|
| }
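|
| As an illustration with a hypothetical expression: in (a * b) + c the value
|
| produced by a * b is a freshly allocated temporary, so ResultOverwriteAllowed
|
| holds for the left operand, the addition runs with OVERWRITE_LEFT, and its
|
| result can be stored into that heap number instead of a newly allocated one.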
|
| - __ movsd(xmm_reg, FieldOperand(operand->reg(), HeapNumber::kValueOffset));
|
| - __ jmp(&done);
|
|
|
| - __ bind(&smi);
|
| - // Convert smi to double and keep the original smi.
|
| - __ SmiToInteger32(kScratchRegister, operand->reg());
|
| - __ cvtlsi2sd(xmm_reg, kScratchRegister);
|
| - __ jmp(&done);
|
| + if (node->left()->IsTrivial()) {
|
| + Load(node->right());
|
| + Result right = frame_->Pop();
|
| + frame_->Push(node->left());
|
| + frame_->Push(&right);
|
| + } else {
|
| + Load(node->left());
|
| + Load(node->right());
|
| + }
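|
| For a hypothetical '1 + f()' the left operand is trivial, so f() is loaded
|
| first and the constant is then pushed beneath its result, yielding the same
|
| frame layout as the general Load(left); Load(right) path.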
|
| + GenericBinaryOperation(node, overwrite_mode);
|
| }
|
| - __ bind(&done);
|
| }
|
|
|
|
|
| -void CodeGenerator::GenerateInlineNumberComparison(Result* left_side,
|
| - Result* right_side,
|
| - Condition cc,
|
| - ControlDestination* dest) {
|
| - ASSERT(left_side->is_register());
|
| - ASSERT(right_side->is_register());
|
| -
|
| - JumpTarget not_numbers;
|
| - // Load left and right operand into registers xmm0 and xmm1 and compare.
|
| - LoadComparisonOperand(masm_, left_side, xmm0, left_side, right_side,
|
| - &not_numbers);
|
| - LoadComparisonOperand(masm_, right_side, xmm1, left_side, right_side,
|
| - &not_numbers);
|
| - __ ucomisd(xmm0, xmm1);
|
| - // Bail out if a NaN is involved.
|
| - not_numbers.Branch(parity_even, left_side, right_side);
|
| -
|
| - // Split to destination targets based on comparison.
|
| - left_side->Unuse();
|
| - right_side->Unuse();
|
| - dest->true_target()->Branch(DoubleCondition(cc));
|
| - dest->false_target()->Jump();
|
| -
|
| - not_numbers.Bind(left_side, right_side);
|
| +void CodeGenerator::VisitThisFunction(ThisFunction* node) {
|
| + frame_->PushFunction();
|
| }
|
|
|
|
|
| -class DeferredInlineBinaryOperation: public DeferredCode {
|
| - public:
|
| - DeferredInlineBinaryOperation(Token::Value op,
|
| - Register dst,
|
| - Register left,
|
| - Register right,
|
| - OverwriteMode mode)
|
| - : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
|
| - set_comment("[ DeferredInlineBinaryOperation");
|
| - }
|
| +void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
|
| + Comment cmnt(masm_, "[ CompareOperation");
|
|
|
| - virtual void Generate();
|
| + // Get the expressions from the node.
|
| + Expression* left = node->left();
|
| + Expression* right = node->right();
|
| + Token::Value op = node->op();
|
| + // To make typeof testing for natives implemented in JavaScript really
|
| + // efficient, we generate special code for expressions of the form:
|
| + // 'typeof <expression> == <string>'.
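|
| For instance, a hypothetical guard such as 'typeof x == "number"' compiles
|
| to the direct smi and heap-number map checks below instead of materializing
|
| the type string and calling the generic comparison machinery.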
|
| + UnaryOperation* operation = left->AsUnaryOperation();
|
| + if ((op == Token::EQ || op == Token::EQ_STRICT) &&
|
| + (operation != NULL && operation->op() == Token::TYPEOF) &&
|
| + (right->AsLiteral() != NULL &&
|
| + right->AsLiteral()->handle()->IsString())) {
|
| + Handle<String> check(Handle<String>::cast(right->AsLiteral()->handle()));
|
|
|
| - private:
|
| - Token::Value op_;
|
| - Register dst_;
|
| - Register left_;
|
| - Register right_;
|
| - OverwriteMode mode_;
|
| -};
|
| + // Load the operand and move it to a register.
|
| + LoadTypeofExpression(operation->expression());
|
| + Result answer = frame_->Pop();
|
| + answer.ToRegister();
|
|
|
| + if (check->Equals(Heap::number_symbol())) {
|
| + Condition is_smi = masm_->CheckSmi(answer.reg());
|
| + destination()->true_target()->Branch(is_smi);
|
| + frame_->Spill(answer.reg());
|
| + __ movq(answer.reg(), FieldOperand(answer.reg(), HeapObject::kMapOffset));
|
| + __ CompareRoot(answer.reg(), Heap::kHeapNumberMapRootIndex);
|
| + answer.Unuse();
|
| + destination()->Split(equal);
|
|
|
| -void DeferredInlineBinaryOperation::Generate() {
|
| - Label done;
|
| - if ((op_ == Token::ADD)
|
| - || (op_ == Token::SUB)
|
| - || (op_ == Token::MUL)
|
| - || (op_ == Token::DIV)) {
|
| - Label call_runtime;
|
| - Label left_smi, right_smi, load_right, do_op;
|
| - __ JumpIfSmi(left_, &left_smi);
|
| - __ CompareRoot(FieldOperand(left_, HeapObject::kMapOffset),
|
| - Heap::kHeapNumberMapRootIndex);
|
| - __ j(not_equal, &call_runtime);
|
| - __ movsd(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
|
| - if (mode_ == OVERWRITE_LEFT) {
|
| - __ movq(dst_, left_);
|
| - }
|
| - __ jmp(&load_right);
|
| + } else if (check->Equals(Heap::string_symbol())) {
|
| + Condition is_smi = masm_->CheckSmi(answer.reg());
|
| + destination()->false_target()->Branch(is_smi);
|
|
|
| - __ bind(&left_smi);
|
| - __ SmiToInteger32(left_, left_);
|
| - __ cvtlsi2sd(xmm0, left_);
|
| - __ Integer32ToSmi(left_, left_);
|
| - if (mode_ == OVERWRITE_LEFT) {
|
| - Label alloc_failure;
|
| - __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
|
| - }
|
| + // It can be an undetectable string object.
|
| + __ movq(kScratchRegister,
|
| + FieldOperand(answer.reg(), HeapObject::kMapOffset));
|
| + __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
|
| + Immediate(1 << Map::kIsUndetectable));
|
| + destination()->false_target()->Branch(not_zero);
|
| + __ CmpInstanceType(kScratchRegister, FIRST_NONSTRING_TYPE);
|
| + answer.Unuse();
|
| + destination()->Split(below); // Unsigned byte comparison needed.
|
|
|
| - __ bind(&load_right);
|
| - __ JumpIfSmi(right_, &right_smi);
|
| - __ CompareRoot(FieldOperand(right_, HeapObject::kMapOffset),
|
| - Heap::kHeapNumberMapRootIndex);
|
| - __ j(not_equal, &call_runtime);
|
| - __ movsd(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
|
| - if (mode_ == OVERWRITE_RIGHT) {
|
| - __ movq(dst_, right_);
|
| - } else if (mode_ == NO_OVERWRITE) {
|
| - Label alloc_failure;
|
| - __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
|
| - }
|
| - __ jmp(&do_op);
|
| + } else if (check->Equals(Heap::boolean_symbol())) {
|
| + __ CompareRoot(answer.reg(), Heap::kTrueValueRootIndex);
|
| + destination()->true_target()->Branch(equal);
|
| + __ CompareRoot(answer.reg(), Heap::kFalseValueRootIndex);
|
| + answer.Unuse();
|
| + destination()->Split(equal);
|
|
|
| - __ bind(&right_smi);
|
| - __ SmiToInteger32(right_, right_);
|
| - __ cvtlsi2sd(xmm1, right_);
|
| - __ Integer32ToSmi(right_, right_);
|
| - if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
|
| - Label alloc_failure;
|
| - __ AllocateHeapNumber(dst_, no_reg, &call_runtime);
|
| - }
|
| + } else if (check->Equals(Heap::undefined_symbol())) {
|
| + __ CompareRoot(answer.reg(), Heap::kUndefinedValueRootIndex);
|
| + destination()->true_target()->Branch(equal);
|
|
|
| - __ bind(&do_op);
|
| - switch (op_) {
|
| - case Token::ADD: __ addsd(xmm0, xmm1); break;
|
| - case Token::SUB: __ subsd(xmm0, xmm1); break;
|
| - case Token::MUL: __ mulsd(xmm0, xmm1); break;
|
| - case Token::DIV: __ divsd(xmm0, xmm1); break;
|
| - default: UNREACHABLE();
|
| - }
|
| - __ movsd(FieldOperand(dst_, HeapNumber::kValueOffset), xmm0);
|
| - __ jmp(&done);
|
| + Condition is_smi = masm_->CheckSmi(answer.reg());
|
| + destination()->false_target()->Branch(is_smi);
|
|
|
| - __ bind(&call_runtime);
|
| - }
|
| - GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
|
| - stub.GenerateCall(masm_, left_, right_);
|
| - if (!dst_.is(rax)) __ movq(dst_, rax);
|
| - __ bind(&done);
|
| -}
|
| + // It can be an undetectable object.
|
| + __ movq(kScratchRegister,
|
| + FieldOperand(answer.reg(), HeapObject::kMapOffset));
|
| + __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
|
| + Immediate(1 << Map::kIsUndetectable));
|
| + answer.Unuse();
|
| + destination()->Split(not_zero);
|
|
|
| + } else if (check->Equals(Heap::function_symbol())) {
|
| + Condition is_smi = masm_->CheckSmi(answer.reg());
|
| + destination()->false_target()->Branch(is_smi);
|
| + frame_->Spill(answer.reg());
|
| + __ CmpObjectType(answer.reg(), JS_FUNCTION_TYPE, answer.reg());
|
| + destination()->true_target()->Branch(equal);
|
| + // Regular expressions are callable so typeof == 'function'.
|
| + __ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
|
| + answer.Unuse();
|
| + destination()->Split(equal);
|
|
|
| -static TypeInfo CalculateTypeInfo(TypeInfo operands_type,
|
| - Token::Value op,
|
| - const Result& right,
|
| - const Result& left) {
|
| - // Set TypeInfo of result according to the operation performed.
|
| - // We rely on the fact that smis have a 32 bit payload on x64.
|
| - STATIC_ASSERT(kSmiValueSize == 32);
|
| - switch (op) {
|
| - case Token::COMMA:
|
| - return right.type_info();
|
| - case Token::OR:
|
| - case Token::AND:
|
| - // Result type can be either of the two input types.
|
| - return operands_type;
|
| - case Token::BIT_OR:
|
| - case Token::BIT_XOR:
|
| - case Token::BIT_AND:
|
| - // Result is always a smi.
|
| - return TypeInfo::Smi();
|
| - case Token::SAR:
|
| - case Token::SHL:
|
| - // Result is always a smi.
|
| - return TypeInfo::Smi();
|
| - case Token::SHR:
|
| - // Result of x >>> y is always a smi if masked y >= 1, otherwise a number.
|
| - return (right.is_constant() && right.handle()->IsSmi()
|
| - && (Smi::cast(*right.handle())->value() & 0x1F) >= 1)
|
| - ? TypeInfo::Smi()
|
| - : TypeInfo::Number();
|
| - case Token::ADD:
|
| - if (operands_type.IsNumber()) {
|
| - return TypeInfo::Number();
|
| - } else if (left.type_info().IsString() || right.type_info().IsString()) {
|
| - return TypeInfo::String();
|
| - } else {
|
| - return TypeInfo::Unknown();
|
| - }
|
| - case Token::SUB:
|
| - case Token::MUL:
|
| - case Token::DIV:
|
| - case Token::MOD:
|
| - // Result is always a number.
|
| - return TypeInfo::Number();
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| - UNREACHABLE();
|
| - return TypeInfo::Unknown();
|
| -}
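|
| The masked-shift rule for SHR above can be made concrete: 'x >>> 0' may
|
| produce any value up to 2^32 - 1, which need not fit the 32-bit smi payload,
|
| while 'x >>> 1' is at most 2^31 - 1 and therefore always fits.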
|
| + } else if (check->Equals(Heap::object_symbol())) {
|
| + Condition is_smi = masm_->CheckSmi(answer.reg());
|
| + destination()->false_target()->Branch(is_smi);
|
| + __ CompareRoot(answer.reg(), Heap::kNullValueRootIndex);
|
| + destination()->true_target()->Branch(equal);
|
|
|
| + // Regular expressions are typeof == 'function', not 'object'.
|
| + __ CmpObjectType(answer.reg(), JS_REGEXP_TYPE, kScratchRegister);
|
| + destination()->false_target()->Branch(equal);
|
|
|
| -void CodeGenerator::GenericBinaryOperation(BinaryOperation* expr,
|
| - OverwriteMode overwrite_mode) {
|
| - Comment cmnt(masm_, "[ BinaryOperation");
|
| - Token::Value op = expr->op();
|
| - Comment cmnt_token(masm_, Token::String(op));
|
| -
|
| - if (op == Token::COMMA) {
|
| - // Simply discard left value.
|
| - frame_->Nip(1);
|
| + // It can be an undetectable object.
|
| + __ testb(FieldOperand(kScratchRegister, Map::kBitFieldOffset),
|
| + Immediate(1 << Map::kIsUndetectable));
|
| + destination()->false_target()->Branch(not_zero);
|
| + __ CmpInstanceType(kScratchRegister, FIRST_JS_OBJECT_TYPE);
|
| + destination()->false_target()->Branch(below);
|
| + __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
|
| + answer.Unuse();
|
| + destination()->Split(below_equal);
|
| + } else {
|
| + // Uncommon case: typeof testing against a string literal that is
|
| + // never returned from the typeof operator.
|
| + answer.Unuse();
|
| + destination()->Goto(false);
|
| + }
|
| return;
|
| }
|
|
|
| - Result right = frame_->Pop();
|
| - Result left = frame_->Pop();
|
| -
|
| - if (op == Token::ADD) {
|
| - const bool left_is_string = left.type_info().IsString();
|
| - const bool right_is_string = right.type_info().IsString();
|
| - // Make sure constant strings have string type info.
|
| - ASSERT(!(left.is_constant() && left.handle()->IsString()) ||
|
| - left_is_string);
|
| - ASSERT(!(right.is_constant() && right.handle()->IsString()) ||
|
| - right_is_string);
|
| - if (left_is_string || right_is_string) {
|
| - frame_->Push(&left);
|
| - frame_->Push(&right);
|
| - Result answer;
|
| - if (left_is_string) {
|
| - if (right_is_string) {
|
| - StringAddStub stub(NO_STRING_CHECK_IN_STUB);
|
| - answer = frame_->CallStub(&stub, 2);
|
| - } else {
|
| - answer =
|
| - frame_->InvokeBuiltin(Builtins::STRING_ADD_LEFT, CALL_FUNCTION, 2);
|
| - }
|
| - } else if (right_is_string) {
|
| - answer =
|
| - frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
|
| - }
|
| - answer.set_type_info(TypeInfo::String());
|
| - frame_->Push(&answer);
|
| + Condition cc = no_condition;
|
| + bool strict = false;
|
| + switch (op) {
|
| + case Token::EQ_STRICT:
|
| + strict = true;
|
| + // Fall through
|
| + case Token::EQ:
|
| + cc = equal;
|
| + break;
|
| + case Token::LT:
|
| + cc = less;
|
| + break;
|
| + case Token::GT:
|
| + cc = greater;
|
| + break;
|
| + case Token::LTE:
|
| + cc = less_equal;
|
| + break;
|
| + case Token::GTE:
|
| + cc = greater_equal;
|
| + break;
|
| + case Token::IN: {
|
| + Load(left);
|
| + Load(right);
|
| + Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
|
| + frame_->Push(&answer); // push the result
|
| return;
|
| }
|
| - // Neither operand is known to be a string.
|
| + case Token::INSTANCEOF: {
|
| + Load(left);
|
| + Load(right);
|
| + InstanceofStub stub;
|
| + Result answer = frame_->CallStub(&stub, 2);
|
| + answer.ToRegister();
|
| + __ testq(answer.reg(), answer.reg());
|
| + answer.Unuse();
|
| + destination()->Split(zero);
|
| + return;
|
| + }
|
| + default:
|
| + UNREACHABLE();
|
| }
|
|
|
| - bool left_is_smi_constant = left.is_constant() && left.handle()->IsSmi();
|
| - bool left_is_non_smi_constant = left.is_constant() && !left.handle()->IsSmi();
|
| - bool right_is_smi_constant = right.is_constant() && right.handle()->IsSmi();
|
| - bool right_is_non_smi_constant =
|
| - right.is_constant() && !right.handle()->IsSmi();
|
| -
|
| - if (left_is_smi_constant && right_is_smi_constant) {
|
| - // Compute the constant result at compile time, and leave it on the frame.
|
| - int left_int = Smi::cast(*left.handle())->value();
|
| - int right_int = Smi::cast(*right.handle())->value();
|
| - if (FoldConstantSmis(op, left_int, right_int)) return;
|
| + if (left->IsTrivial()) {
|
| + Load(right);
|
| + Result right_result = frame_->Pop();
|
| + frame_->Push(left);
|
| + frame_->Push(&right_result);
|
| + } else {
|
| + Load(left);
|
| + Load(right);
|
| }
|
|
|
| - // Get number type of left and right sub-expressions.
|
| - TypeInfo operands_type =
|
| - TypeInfo::Combine(left.type_info(), right.type_info());
|
| + Comparison(node, cc, strict, destination());
|
| +}
|
|
|
| - TypeInfo result_type = CalculateTypeInfo(operands_type, op, right, left);
|
|
|
| - Result answer;
|
| - if (left_is_non_smi_constant || right_is_non_smi_constant) {
|
| - // Go straight to the slow case, with no smi code.
|
| - GenericBinaryOpStub stub(op,
|
| - overwrite_mode,
|
| - NO_SMI_CODE_IN_STUB,
|
| - operands_type);
|
| - answer = stub.GenerateCall(masm_, frame_, &left, &right);
|
| - } else if (right_is_smi_constant) {
|
| - answer = ConstantSmiBinaryOperation(expr, &left, right.handle(),
|
| - false, overwrite_mode);
|
| - } else if (left_is_smi_constant) {
|
| - answer = ConstantSmiBinaryOperation(expr, &right, left.handle(),
|
| - true, overwrite_mode);
|
| - } else {
|
| - // Set the flags based on the operation, type and loop nesting level.
|
| - // Bit operations always assume they likely operate on Smis. Still only
|
| - // generate the inline Smi check code if this operation is part of a loop.
|
| - // For all other operations only inline the Smi check code for likely smis
|
| - // if the operation is part of a loop.
|
| - if (loop_nesting() > 0 &&
|
| - (Token::IsBitOp(op) ||
|
| - operands_type.IsInteger32() ||
|
| - expr->type()->IsLikelySmi())) {
|
| - answer = LikelySmiBinaryOperation(expr, &left, &right, overwrite_mode);
|
| - } else {
|
| - GenericBinaryOpStub stub(op,
|
| - overwrite_mode,
|
| - NO_GENERIC_BINARY_FLAGS,
|
| - operands_type);
|
| - answer = stub.GenerateCall(masm_, frame_, &left, &right);
|
| - }
|
| - }
|
| -
|
| - answer.set_type_info(result_type);
|
| - frame_->Push(&answer);
|
| +#ifdef DEBUG
|
| +bool CodeGenerator::HasValidEntryRegisters() {
|
| + return (allocator()->count(rax) == (frame()->is_used(rax) ? 1 : 0))
|
| + && (allocator()->count(rbx) == (frame()->is_used(rbx) ? 1 : 0))
|
| + && (allocator()->count(rcx) == (frame()->is_used(rcx) ? 1 : 0))
|
| + && (allocator()->count(rdx) == (frame()->is_used(rdx) ? 1 : 0))
|
| + && (allocator()->count(rdi) == (frame()->is_used(rdi) ? 1 : 0))
|
| + && (allocator()->count(r8) == (frame()->is_used(r8) ? 1 : 0))
|
| + && (allocator()->count(r9) == (frame()->is_used(r9) ? 1 : 0))
|
| + && (allocator()->count(r11) == (frame()->is_used(r11) ? 1 : 0))
|
| + && (allocator()->count(r14) == (frame()->is_used(r14) ? 1 : 0))
|
| + && (allocator()->count(r12) == (frame()->is_used(r12) ? 1 : 0));
|
| }
|
| +#endif
|
|
|
|
|
| +
|
| // Emit a LoadIC call to get the value from receiver and leave it in
|
| // dst. The receiver register is restored after the call.
|
| class DeferredReferenceGetNamedValue: public DeferredCode {
|
| @@ -6898,623 +7494,155 @@
|
| }
|
|
|
|
|
| -void DeferredInlineSmiAdd::Generate() {
|
| - GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
|
| - igostub.GenerateCall(masm_, dst_, value_);
|
| - if (!dst_.is(rax)) __ movq(dst_, rax);
|
| -}
|
| +class DeferredReferenceGetKeyedValue: public DeferredCode {
|
| + public:
|
| + explicit DeferredReferenceGetKeyedValue(Register dst,
|
| + Register receiver,
|
| + Register key)
|
| + : dst_(dst), receiver_(receiver), key_(key) {
|
| + set_comment("[ DeferredReferenceGetKeyedValue");
|
| + }
|
|
|
| + virtual void Generate();
|
|
|
| -void DeferredInlineSmiAddReversed::Generate() {
|
| - GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
|
| - igostub.GenerateCall(masm_, value_, dst_);
|
| - if (!dst_.is(rax)) __ movq(dst_, rax);
|
| -}
|
| + Label* patch_site() { return &patch_site_; }
|
|
|
| + private:
|
| + Label patch_site_;
|
| + Register dst_;
|
| + Register receiver_;
|
| + Register key_;
|
| +};
|
|
|
| -void DeferredInlineSmiSub::Generate() {
|
| - GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
|
| - igostub.GenerateCall(masm_, dst_, value_);
|
| - if (!dst_.is(rax)) __ movq(dst_, rax);
|
| -}
|
|
|
| -
|
| -void DeferredInlineSmiOperation::Generate() {
|
| - // For mod we don't generate all the Smi code inline.
|
| - GenericBinaryOpStub stub(
|
| - op_,
|
| - overwrite_mode_,
|
| - (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
|
| - stub.GenerateCall(masm_, src_, value_);
|
| - if (!dst_.is(rax)) __ movq(dst_, rax);
|
| -}
|
| -
|
| -
|
| -void DeferredInlineSmiOperationReversed::Generate() {
|
| - GenericBinaryOpStub stub(
|
| - op_,
|
| - overwrite_mode_,
|
| - NO_SMI_CODE_IN_STUB);
|
| - stub.GenerateCall(masm_, value_, src_);
|
| - if (!dst_.is(rax)) __ movq(dst_, rax);
|
| -}
|
| -
|
| -
|
| -Result CodeGenerator::ConstantSmiBinaryOperation(BinaryOperation* expr,
|
| - Result* operand,
|
| - Handle<Object> value,
|
| - bool reversed,
|
| - OverwriteMode overwrite_mode) {
|
| - // Generate inline code for a binary operation when one of the
|
| - // operands is a constant smi. Consumes the argument "operand".
|
| - if (IsUnsafeSmi(value)) {
|
| - Result unsafe_operand(value);
|
| - if (reversed) {
|
| - return LikelySmiBinaryOperation(expr, &unsafe_operand, operand,
|
| - overwrite_mode);
|
| +void DeferredReferenceGetKeyedValue::Generate() {
|
| + if (receiver_.is(rdx)) {
|
| + if (!key_.is(rax)) {
|
| + __ movq(rax, key_);
|
| + } // else do nothing.
|
| + } else if (receiver_.is(rax)) {
|
| + if (key_.is(rdx)) {
|
| + __ xchg(rax, rdx);
|
| + } else if (key_.is(rax)) {
|
| + __ movq(rdx, receiver_);
|
| } else {
|
| - return LikelySmiBinaryOperation(expr, operand, &unsafe_operand,
|
| - overwrite_mode);
|
| + __ movq(rdx, receiver_);
|
| + __ movq(rax, key_);
|
| }
|
| + } else if (key_.is(rax)) {
|
| + __ movq(rdx, receiver_);
|
| + } else {
|
| + __ movq(rax, key_);
|
| + __ movq(rdx, receiver_);
|
| }
|
| + // Calculate the delta from the IC call instruction to the map check
|
| + // movq instruction in the inlined version. This delta is stored in
|
| + // a test(rax, delta) instruction after the call so that we can find
|
| + // it in the IC initialization code and patch the movq instruction.
|
| + // This means that we cannot allow test instructions after calls to
|
| + // KeyedLoadIC stubs in other places.
|
| + Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
|
| + __ Call(ic, RelocInfo::CODE_TARGET);
|
| + // The delta from the start of the map-compare instruction to the
|
| + // test instruction. We use masm_-> directly here instead of the __
|
| + // macro because the macro sometimes uses macro expansion to turn
|
| + // into something that can't return a value. This is encountered
|
| + // when doing generated code coverage tests.
|
| + int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
|
| + // Here we use masm_-> instead of the __ macro because this is the
|
| + // instruction that gets patched and coverage code gets in the way.
|
| + // TODO(X64): Consider whether it's worth switching the test to a
|
| + // 7-byte NOP with non-zero immediate (0f 1f 80 xxxxxxxx) which won't
|
| + // be generated normally.
|
| + masm_->testl(rax, Immediate(-delta_to_patch_site));
|
| + __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
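|
| A minimal sketch of the consumer side, assuming the short test-eax encoding
|
| (one opcode byte followed by the 32-bit immediate); the names below are
|
| illustrative rather than the actual ic.cc helpers:
|
|   Address test_instruction_address = call_return_address;
|
|   // The immediate was emitted as minus the distance, so adding it walks
|
|   // back to the movq of the inlined map check.
|
|   int delta = *reinterpret_cast<int32_t*>(test_instruction_address + 1);
|
|   Address map_check_address = test_instruction_address + delta;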
|
|
|
| - // Get the literal value.
|
| - Smi* smi_value = Smi::cast(*value);
|
| - int int_value = smi_value->value();
|
| -
|
| - Token::Value op = expr->op();
|
| - Result answer;
|
| - switch (op) {
|
| - case Token::ADD: {
|
| - operand->ToRegister();
|
| - frame_->Spill(operand->reg());
|
| - DeferredCode* deferred = NULL;
|
| - if (reversed) {
|
| - deferred = new DeferredInlineSmiAddReversed(operand->reg(),
|
| - smi_value,
|
| - overwrite_mode);
|
| - } else {
|
| - deferred = new DeferredInlineSmiAdd(operand->reg(),
|
| - smi_value,
|
| - overwrite_mode);
|
| - }
|
| - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| - deferred);
|
| - __ SmiAddConstant(operand->reg(),
|
| - operand->reg(),
|
| - smi_value,
|
| - deferred->entry_label());
|
| - deferred->BindExit();
|
| - answer = *operand;
|
| - break;
|
| - }
|
| -
|
| - case Token::SUB: {
|
| - if (reversed) {
|
| - Result constant_operand(value);
|
| - answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
|
| - overwrite_mode);
|
| - } else {
|
| - operand->ToRegister();
|
| - frame_->Spill(operand->reg());
|
| - DeferredCode* deferred = new DeferredInlineSmiSub(operand->reg(),
|
| - smi_value,
|
| - overwrite_mode);
|
| - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| - deferred);
|
| - // A smi currently fits in a 32-bit Immediate.
|
| - __ SmiSubConstant(operand->reg(),
|
| - operand->reg(),
|
| - smi_value,
|
| - deferred->entry_label());
|
| - deferred->BindExit();
|
| - answer = *operand;
|
| - }
|
| - break;
|
| - }
|
| -
|
| - case Token::SAR:
|
| - if (reversed) {
|
| - Result constant_operand(value);
|
| - answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
|
| - overwrite_mode);
|
| - } else {
|
| - // Only the least significant 5 bits of the shift value are used.
|
| - // In the slow case, this masking is done inside the runtime call.
|
| - int shift_value = int_value & 0x1f;
|
| - operand->ToRegister();
|
| - frame_->Spill(operand->reg());
|
| - DeferredInlineSmiOperation* deferred =
|
| - new DeferredInlineSmiOperation(op,
|
| - operand->reg(),
|
| - operand->reg(),
|
| - smi_value,
|
| - overwrite_mode);
|
| - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| - deferred);
|
| - __ SmiShiftArithmeticRightConstant(operand->reg(),
|
| - operand->reg(),
|
| - shift_value);
|
| - deferred->BindExit();
|
| - answer = *operand;
|
| - }
|
| - break;
|
| -
|
| - case Token::SHR:
|
| - if (reversed) {
|
| - Result constant_operand(value);
|
| - answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
|
| - overwrite_mode);
|
| - } else {
|
| - // Only the least significant 5 bits of the shift value are used.
|
| - // In the slow case, this masking is done inside the runtime call.
|
| - int shift_value = int_value & 0x1f;
|
| - operand->ToRegister();
|
| - answer = allocator()->Allocate();
|
| - ASSERT(answer.is_valid());
|
| - DeferredInlineSmiOperation* deferred =
|
| - new DeferredInlineSmiOperation(op,
|
| - answer.reg(),
|
| - operand->reg(),
|
| - smi_value,
|
| - overwrite_mode);
|
| - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| - deferred);
|
| - __ SmiShiftLogicalRightConstant(answer.reg(),
|
| - operand->reg(),
|
| - shift_value,
|
| - deferred->entry_label());
|
| - deferred->BindExit();
|
| - operand->Unuse();
|
| - }
|
| - break;
|
| -
|
| - case Token::SHL:
|
| - if (reversed) {
|
| - operand->ToRegister();
|
| -
|
| - // We need rcx to be available to hold operand, and to be spilled.
|
| - // SmiShiftLeft implicitly modifies rcx.
|
| - if (operand->reg().is(rcx)) {
|
| - frame_->Spill(operand->reg());
|
| - answer = allocator()->Allocate();
|
| - } else {
|
| - Result rcx_reg = allocator()->Allocate(rcx);
|
| - // answer must not be rcx.
|
| - answer = allocator()->Allocate();
|
| - // rcx_reg goes out of scope.
|
| - }
|
| -
|
| - DeferredInlineSmiOperationReversed* deferred =
|
| - new DeferredInlineSmiOperationReversed(op,
|
| - answer.reg(),
|
| - smi_value,
|
| - operand->reg(),
|
| - overwrite_mode);
|
| - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| - deferred);
|
| -
|
| - __ Move(answer.reg(), smi_value);
|
| - __ SmiShiftLeft(answer.reg(), answer.reg(), operand->reg());
|
| - operand->Unuse();
|
| -
|
| - deferred->BindExit();
|
| - } else {
|
| - // Only the least significant 5 bits of the shift value are used.
|
| - // In the slow case, this masking is done inside the runtime call.
|
| - int shift_value = int_value & 0x1f;
|
| - operand->ToRegister();
|
| - if (shift_value == 0) {
|
| - // Spill operand so it can be overwritten in the slow case.
|
| - frame_->Spill(operand->reg());
|
| - DeferredInlineSmiOperation* deferred =
|
| - new DeferredInlineSmiOperation(op,
|
| - operand->reg(),
|
| - operand->reg(),
|
| - smi_value,
|
| - overwrite_mode);
|
| - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| - deferred);
|
| - deferred->BindExit();
|
| - answer = *operand;
|
| - } else {
|
| - // Use a fresh temporary for nonzero shift values.
|
| - answer = allocator()->Allocate();
|
| - ASSERT(answer.is_valid());
|
| - DeferredInlineSmiOperation* deferred =
|
| - new DeferredInlineSmiOperation(op,
|
| - answer.reg(),
|
| - operand->reg(),
|
| - smi_value,
|
| - overwrite_mode);
|
| - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| - deferred);
|
| - __ SmiShiftLeftConstant(answer.reg(),
|
| - operand->reg(),
|
| - shift_value);
|
| - deferred->BindExit();
|
| - operand->Unuse();
|
| - }
|
| - }
|
| - break;
|
| -
|
| - case Token::BIT_OR:
|
| - case Token::BIT_XOR:
|
| - case Token::BIT_AND: {
|
| - operand->ToRegister();
|
| - frame_->Spill(operand->reg());
|
| - if (reversed) {
|
| - // Bit operations with a constant smi are commutative.
|
| - // We can swap left and right operands with no problem.
|
| - // Swap left and right overwrite modes. 0->0, 1->2, 2->1.
|
| - overwrite_mode = static_cast<OverwriteMode>((2 * overwrite_mode) % 3);
|
| - }
|
| - DeferredCode* deferred = new DeferredInlineSmiOperation(op,
|
| - operand->reg(),
|
| - operand->reg(),
|
| - smi_value,
|
| - overwrite_mode);
|
| - JumpIfNotSmiUsingTypeInfo(operand->reg(), operand->type_info(),
|
| - deferred);
|
| - if (op == Token::BIT_AND) {
|
| - __ SmiAndConstant(operand->reg(), operand->reg(), smi_value);
|
| - } else if (op == Token::BIT_XOR) {
|
| - if (int_value != 0) {
|
| - __ SmiXorConstant(operand->reg(), operand->reg(), smi_value);
|
| - }
|
| - } else {
|
| - ASSERT(op == Token::BIT_OR);
|
| - if (int_value != 0) {
|
| - __ SmiOrConstant(operand->reg(), operand->reg(), smi_value);
|
| - }
|
| - }
|
| - deferred->BindExit();
|
| - answer = *operand;
|
| - break;
|
| - }
|
| -
|
| - // Generate inline code for mod of powers of 2 and negative powers of 2.
|
| - case Token::MOD:
|
| - if (!reversed &&
|
| - int_value != 0 &&
|
| - (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
|
| - operand->ToRegister();
|
| - frame_->Spill(operand->reg());
|
| - DeferredCode* deferred =
|
| - new DeferredInlineSmiOperation(op,
|
| - operand->reg(),
|
| - operand->reg(),
|
| - smi_value,
|
| - overwrite_mode);
|
| - // Check for negative or non-Smi left hand side.
|
| - __ JumpIfNotPositiveSmi(operand->reg(), deferred->entry_label());
|
| - if (int_value < 0) int_value = -int_value;
|
| - if (int_value == 1) {
|
| - __ Move(operand->reg(), Smi::FromInt(0));
|
| - } else {
|
| - __ SmiAndConstant(operand->reg(),
|
| - operand->reg(),
|
| - Smi::FromInt(int_value - 1));
|
| - }
|
| - deferred->BindExit();
|
| - answer = *operand;
|
| - break; // This break only applies if we generated code for MOD.
|
| - }
|
| - // Fall through if we did not find a power of 2 on the right hand side!
|
| - // The next case must be the default.
|
| -
|
| - default: {
|
| - Result constant_operand(value);
|
| - if (reversed) {
|
| - answer = LikelySmiBinaryOperation(expr, &constant_operand, operand,
|
| - overwrite_mode);
|
| - } else {
|
| - answer = LikelySmiBinaryOperation(expr, operand, &constant_operand,
|
| - overwrite_mode);
|
| - }
|
| - break;
|
| - }
|
| - }
|
| - ASSERT(answer.is_valid());
|
| - return answer;
|
| + if (!dst_.is(rax)) __ movq(dst_, rax);
|
| }
|
|
|
|
|
| -void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
|
| - TypeInfo type,
|
| - DeferredCode* deferred) {
|
| - if (!type.IsSmi()) {
|
| - __ JumpIfNotSmi(reg, deferred->entry_label());
|
| +class DeferredReferenceSetKeyedValue: public DeferredCode {
|
| + public:
|
| + DeferredReferenceSetKeyedValue(Register value,
|
| + Register key,
|
| + Register receiver)
|
| + : value_(value), key_(key), receiver_(receiver) {
|
| + set_comment("[ DeferredReferenceSetKeyedValue");
|
| }
|
| - if (FLAG_debug_code) {
|
| - __ AbortIfNotSmi(reg);
|
| - }
|
| -}
|
|
|
| + virtual void Generate();
|
|
|
| -void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
|
| - Register right,
|
| - TypeInfo left_info,
|
| - TypeInfo right_info,
|
| - DeferredCode* deferred) {
|
| - if (!left_info.IsSmi() && !right_info.IsSmi()) {
|
| - __ JumpIfNotBothSmi(left, right, deferred->entry_label());
|
| - } else if (!left_info.IsSmi()) {
|
| - __ JumpIfNotSmi(left, deferred->entry_label());
|
| - } else if (!right_info.IsSmi()) {
|
| - __ JumpIfNotSmi(right, deferred->entry_label());
|
| - }
|
| - if (FLAG_debug_code) {
|
| - __ AbortIfNotSmi(left);
|
| - __ AbortIfNotSmi(right);
|
| - }
|
| -}
|
| + Label* patch_site() { return &patch_site_; }
|
|
|
| + private:
|
| + Register value_;
|
| + Register key_;
|
| + Register receiver_;
|
| + Label patch_site_;
|
| +};
|
|
|
| -// Implements a binary operation using a deferred code object and some
|
| -// inline code to operate on smis quickly.
|
| -Result CodeGenerator::LikelySmiBinaryOperation(BinaryOperation* expr,
|
| - Result* left,
|
| - Result* right,
|
| - OverwriteMode overwrite_mode) {
|
| - // Copy the type info because left and right may be overwritten.
|
| - TypeInfo left_type_info = left->type_info();
|
| - TypeInfo right_type_info = right->type_info();
|
| - Token::Value op = expr->op();
|
| - Result answer;
|
| - // Special handling of div and mod because they use fixed registers.
|
| - if (op == Token::DIV || op == Token::MOD) {
|
| - // We need rax as the quotient register, rdx as the remainder
|
| - // register, neither left nor right in rax or rdx, and left copied
|
| - // to rax.
|
| - Result quotient;
|
| - Result remainder;
|
| - bool left_is_in_rax = false;
|
| - // Step 1: get rax for quotient.
|
| - if ((left->is_register() && left->reg().is(rax)) ||
|
| - (right->is_register() && right->reg().is(rax))) {
|
| - // One or both is in rax. Use a fresh non-rdx register for
|
| - // them.
|
| - Result fresh = allocator_->Allocate();
|
| - ASSERT(fresh.is_valid());
|
| - if (fresh.reg().is(rdx)) {
|
| - remainder = fresh;
|
| - fresh = allocator_->Allocate();
|
| - ASSERT(fresh.is_valid());
|
| - }
|
| - if (left->is_register() && left->reg().is(rax)) {
|
| - quotient = *left;
|
| - *left = fresh;
|
| - left_is_in_rax = true;
|
| - }
|
| - if (right->is_register() && right->reg().is(rax)) {
|
| - quotient = *right;
|
| - *right = fresh;
|
| - }
|
| - __ movq(fresh.reg(), rax);
|
| - } else {
|
| - // Neither left nor right is in rax.
|
| - quotient = allocator_->Allocate(rax);
|
| - }
|
| - ASSERT(quotient.is_register() && quotient.reg().is(rax));
|
| - ASSERT(!(left->is_register() && left->reg().is(rax)));
|
| - ASSERT(!(right->is_register() && right->reg().is(rax)));
|
|
|
| - // Step 2: get rdx for remainder if necessary.
|
| - if (!remainder.is_valid()) {
|
| - if ((left->is_register() && left->reg().is(rdx)) ||
|
| - (right->is_register() && right->reg().is(rdx))) {
|
| - Result fresh = allocator_->Allocate();
|
| - ASSERT(fresh.is_valid());
|
| - if (left->is_register() && left->reg().is(rdx)) {
|
| - remainder = *left;
|
| - *left = fresh;
|
| - }
|
| - if (right->is_register() && right->reg().is(rdx)) {
|
| - remainder = *right;
|
| - *right = fresh;
|
| - }
|
| - __ movq(fresh.reg(), rdx);
|
| - } else {
|
| - // Neither left nor right is in rdx.
|
| - remainder = allocator_->Allocate(rdx);
|
| - }
|
| - }
|
| - ASSERT(remainder.is_register() && remainder.reg().is(rdx));
|
| - ASSERT(!(left->is_register() && left->reg().is(rdx)));
|
| - ASSERT(!(right->is_register() && right->reg().is(rdx)));
|
| -
|
| - left->ToRegister();
|
| - right->ToRegister();
|
| - frame_->Spill(rax);
|
| - frame_->Spill(rdx);
|
| -
|
| - // Check that left and right are smi tagged.
|
| - DeferredInlineBinaryOperation* deferred =
|
| - new DeferredInlineBinaryOperation(op,
|
| - (op == Token::DIV) ? rax : rdx,
|
| - left->reg(),
|
| - right->reg(),
|
| - overwrite_mode);
|
| - JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
|
| - left_type_info, right_type_info, deferred);
|
| -
|
| - if (op == Token::DIV) {
|
| - __ SmiDiv(rax, left->reg(), right->reg(), deferred->entry_label());
|
| - deferred->BindExit();
|
| - left->Unuse();
|
| - right->Unuse();
|
| - answer = quotient;
|
| +void DeferredReferenceSetKeyedValue::Generate() {
|
| + __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
|
| + // Move value, receiver, and key to registers rax, rdx, and rcx, as
|
| + // the IC stub expects.
|
| + // Move value to rax, using xchg if the receiver or key is in rax.
|
| + if (!value_.is(rax)) {
|
| + if (!receiver_.is(rax) && !key_.is(rax)) {
|
| + __ movq(rax, value_);
|
| } else {
|
| - ASSERT(op == Token::MOD);
|
| - __ SmiMod(rdx, left->reg(), right->reg(), deferred->entry_label());
|
| - deferred->BindExit();
|
| - left->Unuse();
|
| - right->Unuse();
|
| - answer = remainder;
|
| - }
|
| - ASSERT(answer.is_valid());
|
| - return answer;
|
| - }
|
| -
|
| - // Special handling of shift operations because they use fixed
|
| - // registers.
|
| - if (op == Token::SHL || op == Token::SHR || op == Token::SAR) {
|
| - // Move left out of rcx if necessary.
|
| - if (left->is_register() && left->reg().is(rcx)) {
|
| - *left = allocator_->Allocate();
|
| - ASSERT(left->is_valid());
|
| - __ movq(left->reg(), rcx);
|
| - }
|
| - right->ToRegister(rcx);
|
| - left->ToRegister();
|
| - ASSERT(left->is_register() && !left->reg().is(rcx));
|
| - ASSERT(right->is_register() && right->reg().is(rcx));
|
| -
|
| - // We will modify right, it must be spilled.
|
| - frame_->Spill(rcx);
|
| -
|
| - // Use a fresh answer register to avoid spilling the left operand.
|
| - answer = allocator_->Allocate();
|
| - ASSERT(answer.is_valid());
|
| - // Check that both operands are smis using the answer register as a
|
| - // temporary.
|
| - DeferredInlineBinaryOperation* deferred =
|
| - new DeferredInlineBinaryOperation(op,
|
| - answer.reg(),
|
| - left->reg(),
|
| - rcx,
|
| - overwrite_mode);
|
| -
|
| - Label do_op;
|
| - if (right_type_info.IsSmi()) {
|
| - if (FLAG_debug_code) {
|
| - __ AbortIfNotSmi(right->reg());
|
| + __ xchg(rax, value_);
|
| + // Update receiver_ and key_ if they are affected by the swap.
|
| + if (receiver_.is(rax)) {
|
| + receiver_ = value_;
|
| + } else if (receiver_.is(value_)) {
|
| + receiver_ = rax;
|
| }
|
| - __ movq(answer.reg(), left->reg());
|
| - // If left is not known to be a smi, check if it is.
|
| - // If left is not known to be a number, and it isn't a smi, check if
|
| - // it is a HeapNumber.
|
| - if (!left_type_info.IsSmi()) {
|
| - __ JumpIfSmi(answer.reg(), &do_op);
|
| - if (!left_type_info.IsNumber()) {
|
| - // Branch if not a heapnumber.
|
| - __ Cmp(FieldOperand(answer.reg(), HeapObject::kMapOffset),
|
| - Factory::heap_number_map());
|
| - deferred->Branch(not_equal);
|
| - }
|
| - // Load integer value into answer register using truncation.
|
| - __ cvttsd2si(answer.reg(),
|
| - FieldOperand(answer.reg(), HeapNumber::kValueOffset));
|
| - // Branch if we might have overflowed.
|
| - // (False negative for Smi::kMinValue)
|
| - __ cmpq(answer.reg(), Immediate(0x80000000));
|
| - deferred->Branch(equal);
|
| - // TODO(lrn): Inline shifts on int32 here instead of first smi-tagging.
|
| - __ Integer32ToSmi(answer.reg(), answer.reg());
|
| - } else {
|
| - // Fast case - both are actually smis.
|
| - if (FLAG_debug_code) {
|
| - __ AbortIfNotSmi(left->reg());
|
| - }
|
| + if (key_.is(rax)) {
|
| + key_ = value_;
|
| + } else if (key_.is(value_)) {
|
| + key_ = rax;
|
| }
|
| - } else {
|
| - JumpIfNotBothSmiUsingTypeInfo(left->reg(), rcx,
|
| - left_type_info, right_type_info, deferred);
|
| }
|
| - __ bind(&do_op);
|
| -
|
| - // Perform the operation.
|
| - switch (op) {
|
| - case Token::SAR:
|
| - __ SmiShiftArithmeticRight(answer.reg(), left->reg(), rcx);
|
| - break;
|
| - case Token::SHR: {
|
| - __ SmiShiftLogicalRight(answer.reg(),
|
| - left->reg(),
|
| - rcx,
|
| - deferred->entry_label());
|
| - break;
|
| - }
|
| - case Token::SHL: {
|
| - __ SmiShiftLeft(answer.reg(),
|
| - left->reg(),
|
| - rcx);
|
| - break;
|
| - }
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| - deferred->BindExit();
|
| - left->Unuse();
|
| - right->Unuse();
|
| - ASSERT(answer.is_valid());
|
| - return answer;
|
| }
|
| -
|
| - // Handle the other binary operations.
|
| - left->ToRegister();
|
| - right->ToRegister();
|
| - // A newly allocated register answer is used to hold the answer. The
|
| - // registers containing left and right are not modified so they don't
|
| - // need to be spilled in the fast case.
|
| - answer = allocator_->Allocate();
|
| - ASSERT(answer.is_valid());
|
| -
|
| - // Perform the smi tag check.
|
| - DeferredInlineBinaryOperation* deferred =
|
| - new DeferredInlineBinaryOperation(op,
|
| - answer.reg(),
|
| - left->reg(),
|
| - right->reg(),
|
| - overwrite_mode);
|
| - JumpIfNotBothSmiUsingTypeInfo(left->reg(), right->reg(),
|
| - left_type_info, right_type_info, deferred);
|
| -
|
| - switch (op) {
|
| - case Token::ADD:
|
| - __ SmiAdd(answer.reg(),
|
| - left->reg(),
|
| - right->reg(),
|
| - deferred->entry_label());
|
| - break;
|
| -
|
| - case Token::SUB:
|
| - __ SmiSub(answer.reg(),
|
| - left->reg(),
|
| - right->reg(),
|
| - deferred->entry_label());
|
| - break;
|
| -
|
| - case Token::MUL: {
|
| - __ SmiMul(answer.reg(),
|
| - left->reg(),
|
| - right->reg(),
|
| - deferred->entry_label());
|
| - break;
|
| + // Value is now in rax. Its original location is remembered in value_,
|
| + // and the value is restored to value_ before returning.
|
| + // The variables receiver_ and key_ are not preserved.
|
| + // Move receiver and key to rdx and rcx, swapping if necessary.
|
| + if (receiver_.is(rdx)) {
|
| + if (!key_.is(rcx)) {
|
| + __ movq(rcx, key_);
|
| + } // Else everything is already in the right place.
|
| + } else if (receiver_.is(rcx)) {
|
| + if (key_.is(rdx)) {
|
| + __ xchg(rcx, rdx);
|
| + } else if (key_.is(rcx)) {
|
| + __ movq(rdx, receiver_);
|
| + } else {
|
| + __ movq(rdx, receiver_);
|
| + __ movq(rcx, key_);
|
| }
|
| -
|
| - case Token::BIT_OR:
|
| - __ SmiOr(answer.reg(), left->reg(), right->reg());
|
| - break;
|
| -
|
| - case Token::BIT_AND:
|
| - __ SmiAnd(answer.reg(), left->reg(), right->reg());
|
| - break;
|
| -
|
| - case Token::BIT_XOR:
|
| - __ SmiXor(answer.reg(), left->reg(), right->reg());
|
| - break;
|
| -
|
| - default:
|
| - UNREACHABLE();
|
| - break;
|
| + } else if (key_.is(rcx)) {
|
| + __ movq(rdx, receiver_);
|
| + } else {
|
| + __ movq(rcx, key_);
|
| + __ movq(rdx, receiver_);
|
| }
|
| - deferred->BindExit();
|
| - left->Unuse();
|
| - right->Unuse();
|
| - ASSERT(answer.is_valid());
|
| - return answer;
|
| +
|
| + // Call the IC stub.
|
| + Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
|
| + __ Call(ic, RelocInfo::CODE_TARGET);
|
| + // The delta from the start of the map-compare instructions (initial movq)
|
| + // to the test instruction. We use masm_-> directly here instead of the
|
| + // __ macro because the macro sometimes uses macro expansion to turn
|
| + // into something that can't return a value. This is encountered
|
| + // when doing generated code coverage tests.
|
| + int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
|
| + // Here we use masm_-> instead of the __ macro because this is the
|
| + // instruction that gets patched and coverage code gets in the way.
|
| + masm_->testl(rax, Immediate(-delta_to_patch_site));
|
| + // Restore value (returned from store IC).
|
| + if (!value_.is(rax)) __ movq(value_, rax);
|
| }
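|
| Taken together, the moves above realize an arbitrary permutation of three
|
| registers into the value/key/receiver convention (rax, rcx, rdx) using plain
|
| moves plus at most one xchg, never clobbering a register before it is read.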
|
|
|
|
|
| @@ -8140,91 +8268,702 @@
|
| }
|
|
|
|
|
| -bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
|
| - Object* answer_object = Heap::undefined_value();
|
| - switch (op) {
|
| - case Token::ADD:
|
| - // Use intptr_t to detect overflow of 32-bit int.
|
| - if (Smi::IsValid(static_cast<intptr_t>(left) + right)) {
|
| - answer_object = Smi::FromInt(left + right);
|
| +void GenericBinaryOpStub::GenerateCall(
|
| + MacroAssembler* masm,
|
| + Register left,
|
| + Register right) {
|
| + if (!ArgsInRegistersSupported()) {
|
| + // Pass arguments on the stack.
|
| + __ push(left);
|
| + __ push(right);
|
| + } else {
|
| + // The calling convention with registers is left in rdx and right in rax.
|
| + Register left_arg = rdx;
|
| + Register right_arg = rax;
|
| + if (!(left.is(left_arg) && right.is(right_arg))) {
|
| + if (left.is(right_arg) && right.is(left_arg)) {
|
| + if (IsOperationCommutative()) {
|
| + SetArgsReversed();
|
| + } else {
|
| + __ xchg(left, right);
|
| + }
|
| + } else if (left.is(left_arg)) {
|
| + __ movq(right_arg, right);
|
| + } else if (right.is(right_arg)) {
|
| + __ movq(left_arg, left);
|
| + } else if (left.is(right_arg)) {
|
| + if (IsOperationCommutative()) {
|
| + __ movq(left_arg, right);
|
| + SetArgsReversed();
|
| + } else {
|
| + // Order of moves is important to avoid destroying the left argument.
|
| + __ movq(left_arg, left);
|
| + __ movq(right_arg, right);
|
| + }
|
| + } else if (right.is(left_arg)) {
|
| + if (IsOperationCommutative()) {
|
| + __ movq(right_arg, left);
|
| + SetArgsReversed();
|
| + } else {
|
| + // Order of moves is important to avoid destroying the right argument.
|
| + __ movq(right_arg, right);
|
| + __ movq(left_arg, left);
|
| + }
|
| + } else {
|
| + // Order of moves is not important.
|
| + __ movq(left_arg, left);
|
| + __ movq(right_arg, right);
|
| }
|
| + }
|
| +
|
| + // Update flags to indicate that arguments are in registers.
|
| + SetArgsInRegisters();
|
| + __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
|
| + }
|
| +
|
| + // Call the stub.
|
| + __ CallStub(this);
|
| +}
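|
| A usage sketch under the register convention above, assuming
|
| ArgsInRegistersSupported() holds (the registers are chosen arbitrarily for
|
| illustration):
|
|   GenericBinaryOpStub stub(Token::ADD, NO_OVERWRITE, NO_SMI_CODE_IN_STUB);
|
|   stub.GenerateCall(masm, rcx, rbx);  // Emits movq rdx,rcx; movq rax,rbx,
|
|                                       // then calls the stub.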
|
| +
|
| +
|
| +void GenericBinaryOpStub::GenerateCall(
|
| + MacroAssembler* masm,
|
| + Register left,
|
| + Smi* right) {
|
| + if (!ArgsInRegistersSupported()) {
|
| + // Pass arguments on the stack.
|
| + __ push(left);
|
| + __ Push(right);
|
| + } else {
|
| + // The calling convention with registers is left in rdx and right in rax.
|
| + Register left_arg = rdx;
|
| + Register right_arg = rax;
|
| + if (left.is(left_arg)) {
|
| + __ Move(right_arg, right);
|
| + } else if (left.is(right_arg) && IsOperationCommutative()) {
|
| + __ Move(left_arg, right);
|
| + SetArgsReversed();
|
| + } else {
|
| + // For non-commutative operations, left and right_arg might be
|
| + // the same register. Therefore, the order of the moves is
|
| + // important here in order to not overwrite left before moving
|
| + // it to left_arg.
|
| + __ movq(left_arg, left);
|
| + __ Move(right_arg, right);
|
| + }
|
| +
|
| + // Update flags to indicate that arguments are in registers.
|
| + SetArgsInRegisters();
|
| + __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
|
| + }
|
| +
|
| + // Call the stub.
|
| + __ CallStub(this);
|
| +}
|
| +
|
| +
|
| +void GenericBinaryOpStub::GenerateCall(
|
| + MacroAssembler* masm,
|
| + Smi* left,
|
| + Register right) {
|
| + if (!ArgsInRegistersSupported()) {
|
| + // Pass arguments on the stack.
|
| + __ Push(left);
|
| + __ push(right);
|
| + } else {
|
| + // The calling convention with registers is left in rdx and right in rax.
|
| + Register left_arg = rdx;
|
| + Register right_arg = rax;
|
| + if (right.is(right_arg)) {
|
| + __ Move(left_arg, left);
|
| + } else if (right.is(left_arg) && IsOperationCommutative()) {
|
| + __ Move(right_arg, left);
|
| + SetArgsReversed();
|
| + } else {
|
| + // For non-commutative operations, right and left_arg might be
|
| + // the same register. Therefore, the order of the moves is
|
| + // important here in order to not overwrite right before moving
|
| + // it to right_arg.
|
| + __ movq(right_arg, right);
|
| + __ Move(left_arg, left);
|
| + }
|
| + // Update flags to indicate that arguments are in registers.
|
| + SetArgsInRegisters();
|
| + __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
|
| + }
|
| +
|
| + // Call the stub.
|
| + __ CallStub(this);
|
| +}
|
| +
|
| +
|
| +Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
|
| + VirtualFrame* frame,
|
| + Result* left,
|
| + Result* right) {
|
| + if (ArgsInRegistersSupported()) {
|
| + SetArgsInRegisters();
|
| + return frame->CallStub(this, left, right);
|
| + } else {
|
| + frame->Push(left);
|
| + frame->Push(right);
|
| + return frame->CallStub(this, 2);
|
| + }
|
| +}
|
| +
|
| +
|
| +void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
|
| + // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
|
| + // dividend in rax and rdx free for the division. Use rax, rbx for those.
|
| + Comment load_comment(masm, "-- Load arguments");
|
| + Register left = rdx;
|
| + Register right = rax;
|
| + if (op_ == Token::DIV || op_ == Token::MOD) {
|
| + left = rax;
|
| + right = rbx;
|
| + if (HasArgsInRegisters()) {
|
| + __ movq(rbx, rax);
|
| + __ movq(rax, rdx);
|
| + }
|
| + }
|
| + if (!HasArgsInRegisters()) {
|
| + __ movq(right, Operand(rsp, 1 * kPointerSize));
|
| + __ movq(left, Operand(rsp, 2 * kPointerSize));
|
| + }
|
| +
|
| + Label not_smis;
|
| + // 2. Smi check both operands.
|
| + if (static_operands_type_.IsSmi()) {
|
| + // Skip smi check if we know that both arguments are smis.
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotSmi(left);
|
| + __ AbortIfNotSmi(right);
|
| + }
|
| + if (op_ == Token::BIT_OR) {
|
| + // Handle OR here, since we do extra smi-checking in the OR code below.
|
| + __ SmiOr(right, right, left);
|
| + GenerateReturn(masm);
|
| + return;
|
| + }
|
| + } else {
|
| + if (op_ != Token::BIT_OR) {
|
| + // Skip the check for OR as it is better combined with the
|
| + // actual operation.
|
| + Comment smi_check_comment(masm, "-- Smi check arguments");
|
| + __ JumpIfNotBothSmi(left, right, ¬_smis);
|
| + }
|
| + }
|
| +
|
| + // 3. Operands are both smis (except for OR), perform the operation leaving
|
| + // the result in rax and check the result if necessary.
|
| + Comment perform_smi(masm, "-- Perform smi operation");
|
| + Label use_fp_on_smis;
|
| + switch (op_) {
|
| + case Token::ADD: {
|
| + ASSERT(right.is(rax));
|
| + __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
|
| break;
|
| - case Token::SUB:
|
| - // Use intptr_t to detect overflow of 32-bit int.
|
| - if (Smi::IsValid(static_cast<intptr_t>(left) - right)) {
|
| - answer_object = Smi::FromInt(left - right);
|
| - }
|
| + }
|
| +
|
| + case Token::SUB: {
|
| + __ SmiSub(left, left, right, &use_fp_on_smis);
|
| + __ movq(rax, left);
|
| break;
|
| - case Token::MUL: {
|
| - double answer = static_cast<double>(left) * right;
|
| - if (answer >= Smi::kMinValue && answer <= Smi::kMaxValue) {
|
| - // If the product is zero and the non-zero factor is negative,
|
| - // the spec requires us to return floating point negative zero.
|
| - if (answer != 0 || (left + right) >= 0) {
|
| - answer_object = Smi::FromInt(static_cast<int>(answer));
|
| - }
|
| - }
|
| - }
|
| + }
|
| +
|
| + case Token::MUL:
|
| + ASSERT(right.is(rax));
|
| + __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
|
| break;
|
| +
|
| case Token::DIV:
|
| + ASSERT(left.is(rax));
|
| + __ SmiDiv(left, left, right, &use_fp_on_smis);
|
| + break;
|
| +
|
| case Token::MOD:
|
| + ASSERT(left.is(rax));
|
| + __ SmiMod(left, left, right, slow);
|
| break;
|
| +
|
| case Token::BIT_OR:
|
| - answer_object = Smi::FromInt(left | right);
|
| + ASSERT(right.is(rax));
|
| + __ movq(rcx, right); // Save the right operand.
|
| + __ SmiOr(right, right, left); // BIT_OR is commutative.
|
| + __ testb(right, Immediate(kSmiTagMask));
|
| + __ j(not_zero, ¬_smis);
|
| break;
|
| +
|
| case Token::BIT_AND:
|
| - answer_object = Smi::FromInt(left & right);
|
| + ASSERT(right.is(rax));
|
| + __ SmiAnd(right, right, left); // BIT_AND is commutative.
|
| break;
|
| +
|
| case Token::BIT_XOR:
|
| - answer_object = Smi::FromInt(left ^ right);
|
| + ASSERT(right.is(rax));
|
| + __ SmiXor(right, right, left); // BIT_XOR is commutative.
|
| break;
|
|
|
| - case Token::SHL: {
|
| - int shift_amount = right & 0x1F;
|
| - if (Smi::IsValid(left << shift_amount)) {
|
| - answer_object = Smi::FromInt(left << shift_amount);
|
| + case Token::SHL:
|
| + case Token::SHR:
|
| + case Token::SAR:
|
| + switch (op_) {
|
| + case Token::SAR:
|
| + __ SmiShiftArithmeticRight(left, left, right);
|
| + break;
|
| + case Token::SHR:
|
| + __ SmiShiftLogicalRight(left, left, right, slow);
|
| + break;
|
| + case Token::SHL:
|
| + __ SmiShiftLeft(left, left, right);
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| + __ movq(rax, left);
|
| + break;
|
| +
|
| + default:
|
| + UNREACHABLE();
|
| + break;
|
| + }
|
| +
|
| + // 4. Emit return of result in rax.
|
| + GenerateReturn(masm);
|
| +
|
| + // 5. For some operations emit inline code to perform floating point
|
| + // operations on known smis (e.g., if the result of the operation
|
| + // overflowed the smi range).
|
| + switch (op_) {
|
| + case Token::ADD:
|
| + case Token::SUB:
|
| + case Token::MUL:
|
| + case Token::DIV: {
|
| + ASSERT(use_fp_on_smis.is_linked());
|
| + __ bind(&use_fp_on_smis);
|
| + if (op_ == Token::DIV) {
|
| + __ movq(rdx, rax);
|
| + __ movq(rax, rbx);
|
| + }
|
| + // left is rdx, right is rax.
|
| + __ AllocateHeapNumber(rbx, rcx, slow);
|
| + FloatingPointHelper::LoadSSE2SmiOperands(masm);
|
| + switch (op_) {
|
| + case Token::ADD: __ addsd(xmm0, xmm1); break;
|
| + case Token::SUB: __ subsd(xmm0, xmm1); break;
|
| + case Token::MUL: __ mulsd(xmm0, xmm1); break;
|
| + case Token::DIV: __ divsd(xmm0, xmm1); break;
|
| + default: UNREACHABLE();
|
| + }
|
| + __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
|
| + __ movq(rax, rbx);
|
| + GenerateReturn(masm);
|
| + }
|
| + default:
|
| + break;
|
| + }
|
| +
|
| + // 6. Non-smi operands, fall out to the non-smi code with the operands in
|
| + // rdx and rax.
|
| + Comment done_comment(masm, "-- Enter non-smi code");
|
| + __ bind(¬_smis);
|
| +
|
| + switch (op_) {
|
| + case Token::DIV:
|
| + case Token::MOD:
|
| + // Operands are in rax, rbx at this point.
|
| + __ movq(rdx, rax);
|
| + __ movq(rax, rbx);
|
| + break;
|
| +
|
| + case Token::BIT_OR:
|
| + // Right operand is saved in rcx and rax was destroyed by the smi
|
| + // operation.
|
| + __ movq(rax, rcx);
|
| + break;
|
| +
|
| + default:
|
| + break;
|
| + }
|
| +}
|
| +
|
| +
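| // Illustrative sketch (editor's addition, not part of this patch): the
| // x64 smi encoding the code above relies on (see the STATIC_ASSERT of
| // kSmiValueSize == 32 below): a 32-bit value tagged in the upper half of
| // a 64-bit word, so 64-bit signed overflow coincides with smi overflow.
| // Helper names here are descriptive stand-ins only.
| #include <cassert>
| #include <cstdint>
|
| int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) << 32; }
| int32_t SmiUntag(int64_t smi) { return static_cast<int32_t>(smi >> 32); }
|
| // Mirrors SmiAdd: perform the addition, reporting overflow so the caller
| // can branch to the use_fp_on_smis fallback.
| bool SmiAddChecked(int64_t a, int64_t b, int64_t* result) {
|   return !__builtin_add_overflow(a, b, result);  // GCC/Clang builtin
| }
|
| int main() {
|   int64_t sum;
|   assert(SmiAddChecked(SmiTag(1), SmiTag(2), &sum) && SmiUntag(sum) == 3);
|   assert(!SmiAddChecked(SmiTag(INT32_MAX), SmiTag(1), &sum));  // overflows
| }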
|
| +void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
|
| + Label call_runtime;
|
| +
|
| + if (ShouldGenerateSmiCode()) {
|
| + GenerateSmiCode(masm, &call_runtime);
|
| + } else if (op_ != Token::MOD) {
|
| + if (!HasArgsInRegisters()) {
|
| + GenerateLoadArguments(masm);
|
| + }
|
| + }
|
| + // Floating point case.
|
| + if (ShouldGenerateFPCode()) {
|
| + switch (op_) {
|
| + case Token::ADD:
|
| + case Token::SUB:
|
| + case Token::MUL:
|
| + case Token::DIV: {
|
| + if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
|
| + HasSmiCodeInStub()) {
|
| + // Execution reaches this point when the first non-smi argument occurs
|
| + // (and only if smi code is generated). This is the right moment to
|
| + // patch to HEAP_NUMBERS state. The transition is attempted only for
|
| + // the four basic operations. The stub stays in the DEFAULT state
|
| + // forever for all other operations (also if smi code is skipped).
|
| + GenerateTypeTransition(masm);
|
| + break;
|
| }
|
| +
|
| + Label not_floats;
|
| + // rax: y
|
| + // rdx: x
|
| + if (static_operands_type_.IsNumber()) {
|
| + if (FLAG_debug_code) {
|
| + // Assert at runtime that inputs are only numbers.
|
| + __ AbortIfNotNumber(rdx);
|
| + __ AbortIfNotNumber(rax);
|
| + }
|
| + FloatingPointHelper::LoadSSE2NumberOperands(masm);
|
| + } else {
|
| + FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
|
| + }
|
| +
|
| + switch (op_) {
|
| + case Token::ADD: __ addsd(xmm0, xmm1); break;
|
| + case Token::SUB: __ subsd(xmm0, xmm1); break;
|
| + case Token::MUL: __ mulsd(xmm0, xmm1); break;
|
| + case Token::DIV: __ divsd(xmm0, xmm1); break;
|
| + default: UNREACHABLE();
|
| + }
|
| + // Allocate a heap number, if needed.
|
| + Label skip_allocation;
|
| + OverwriteMode mode = mode_;
|
| + if (HasArgsReversed()) {
|
| + if (mode == OVERWRITE_RIGHT) {
|
| + mode = OVERWRITE_LEFT;
|
| + } else if (mode == OVERWRITE_LEFT) {
|
| + mode = OVERWRITE_RIGHT;
|
| + }
|
| + }
|
| + switch (mode) {
|
| + case OVERWRITE_LEFT:
|
| + __ JumpIfNotSmi(rdx, &skip_allocation);
|
| + __ AllocateHeapNumber(rbx, rcx, &call_runtime);
|
| + __ movq(rdx, rbx);
|
| + __ bind(&skip_allocation);
|
| + __ movq(rax, rdx);
|
| + break;
|
| + case OVERWRITE_RIGHT:
|
| + // If the argument in rax is already an object, we skip the
|
| + // allocation of a heap number.
|
| + __ JumpIfNotSmi(rax, &skip_allocation);
|
| + // Fall through!
|
| + case NO_OVERWRITE:
|
| + // Allocate a heap number for the result. Keep rax and rdx intact
|
| + // for the possible runtime call.
|
| + __ AllocateHeapNumber(rbx, rcx, &call_runtime);
|
| + __ movq(rax, rbx);
|
| + __ bind(&skip_allocation);
|
| + break;
|
| + default: UNREACHABLE();
|
| + }
|
| + __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
|
| + GenerateReturn(masm);
|
| + __ bind(¬_floats);
|
| + if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
|
| + !HasSmiCodeInStub()) {
|
| + // Execution reaches this point when the first non-number argument
|
| + // occurs (and only if smi code is skipped from the stub, otherwise
|
| + // the patching has already been done earlier in this case branch).
|
| + // This is the right moment to try patching to STRINGS for ADD.
|
| + if (op_ == Token::ADD) {
|
| + GenerateTypeTransition(masm);
|
| + }
|
| + }
|
| break;
|
| }
|
| - case Token::SHR: {
|
| - int shift_amount = right & 0x1F;
|
| - unsigned int unsigned_left = left;
|
| - unsigned_left >>= shift_amount;
|
| - if (unsigned_left <= static_cast<unsigned int>(Smi::kMaxValue)) {
|
| - answer_object = Smi::FromInt(unsigned_left);
|
| - }
|
| + case Token::MOD: {
|
| + // For MOD we go directly to runtime in the non-smi case.
|
| break;
|
| }
|
| - case Token::SAR: {
|
| - int shift_amount = right & 0x1F;
|
| - unsigned int unsigned_left = left;
|
| - if (left < 0) {
|
| - // Perform arithmetic shift of a negative number by
|
| - // complementing number, logical shifting, complementing again.
|
| - unsigned_left = ~unsigned_left;
|
| - unsigned_left >>= shift_amount;
|
| - unsigned_left = ~unsigned_left;
|
| + case Token::BIT_OR:
|
| + case Token::BIT_AND:
|
| + case Token::BIT_XOR:
|
| + case Token::SAR:
|
| + case Token::SHL:
|
| + case Token::SHR: {
|
| + Label skip_allocation, non_smi_shr_result;
|
| + Register heap_number_map = r9;
|
| + __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
|
| + if (static_operands_type_.IsNumber()) {
|
| + if (FLAG_debug_code) {
|
| + // Assert at runtime that inputs are only numbers.
|
| + __ AbortIfNotNumber(rdx);
|
| + __ AbortIfNotNumber(rax);
|
| + }
|
| + FloatingPointHelper::LoadNumbersAsIntegers(masm);
|
| } else {
|
| - unsigned_left >>= shift_amount;
|
| + FloatingPointHelper::LoadAsIntegers(masm,
|
| + &call_runtime,
|
| + heap_number_map);
|
| }
|
| - ASSERT(Smi::IsValid(static_cast<int32_t>(unsigned_left)));
|
| - answer_object = Smi::FromInt(static_cast<int32_t>(unsigned_left));
|
| + switch (op_) {
|
| + case Token::BIT_OR: __ orl(rax, rcx); break;
|
| + case Token::BIT_AND: __ andl(rax, rcx); break;
|
| + case Token::BIT_XOR: __ xorl(rax, rcx); break;
|
| + case Token::SAR: __ sarl_cl(rax); break;
|
| + case Token::SHL: __ shll_cl(rax); break;
|
| + case Token::SHR: {
|
| + __ shrl_cl(rax);
|
| + // Check if result is negative. This can only happen when a negative
|
| + // number is shifted right by zero bits.
|
| + __ testl(rax, rax);
|
| + __ j(negative, &non_smi_shr_result);
|
| + break;
|
| + }
|
| + default: UNREACHABLE();
|
| + }
|
| +
|
| + STATIC_ASSERT(kSmiValueSize == 32);
|
| + // Tag smi result and return.
|
| + __ Integer32ToSmi(rax, rax);
|
| + GenerateReturn(masm);
|
| +
|
| + // All bit-ops except SHR return a signed int32 that can be
|
| + // returned immediately as a smi.
|
| + // We might need to allocate a HeapNumber if we shift a negative
|
| + // number right by zero (i.e., convert to UInt32).
|
| + if (op_ == Token::SHR) {
|
| + ASSERT(non_smi_shr_result.is_linked());
|
| + __ bind(&non_smi_shr_result);
|
| + // Allocate a heap number if needed.
|
| + __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
|
| + switch (mode_) {
|
| + case OVERWRITE_LEFT:
|
| + case OVERWRITE_RIGHT:
|
| + // If the operand was an object, we skip the
|
| + // allocation of a heap number.
|
| + __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
|
| + 1 * kPointerSize : 2 * kPointerSize));
|
| + __ JumpIfNotSmi(rax, &skip_allocation);
|
| + // Fall through!
|
| + case NO_OVERWRITE:
|
| + // Allocate heap number in new space.
|
| + // Not using the AllocateHeapNumber macro in order to reuse the
|
| + // already loaded heap_number_map.
|
| + __ AllocateInNewSpace(HeapNumber::kSize,
|
| + rax,
|
| + rcx,
|
| + no_reg,
|
| + &call_runtime,
|
| + TAG_OBJECT);
|
| + // Set the map.
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotRootValue(heap_number_map,
|
| + Heap::kHeapNumberMapRootIndex,
|
| + "HeapNumberMap register clobbered.");
|
| + }
|
| + __ movq(FieldOperand(rax, HeapObject::kMapOffset),
|
| + heap_number_map);
|
| + __ bind(&skip_allocation);
|
| + break;
|
| + default: UNREACHABLE();
|
| + }
|
| + // Store the result in the HeapNumber and return.
|
| + __ cvtqsi2sd(xmm0, rbx);
|
| + __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
|
| + GenerateReturn(masm);
|
| + }
|
| +
|
| break;
|
| }
|
| + default: UNREACHABLE(); break;
|
| + }
|
| + }
|
| +
|
| + // If all else fails, use the runtime system to get the correct
|
| + // result. If the arguments were passed in registers, place them on the
|
| + // stack in the correct order below the return address.
|
| + __ bind(&call_runtime);
|
| +
|
| + if (HasArgsInRegisters()) {
|
| + GenerateRegisterArgsPush(masm);
|
| + }
|
| +
|
| + switch (op_) {
|
| + case Token::ADD: {
|
| + // Registers containing left and right operands respectively.
|
| + Register lhs, rhs;
|
| +
|
| + if (HasArgsReversed()) {
|
| + lhs = rax;
|
| + rhs = rdx;
|
| + } else {
|
| + lhs = rdx;
|
| + rhs = rax;
|
| + }
|
| +
|
| + // Test for string arguments before calling runtime.
|
| + Label not_strings, both_strings, not_string1, string1, string1_smi2;
|
| +
|
| + // If this stub has already generated FP-specific code, then the arguments
|
| + // are already in rdx and rax.
|
| + if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
|
| + GenerateLoadArguments(masm);
|
| + }
|
| +
|
| + Condition is_smi;
|
| + is_smi = masm->CheckSmi(lhs);
|
| + __ j(is_smi, ¬_string1);
|
| + __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
|
| + __ j(above_equal, ¬_string1);
|
| +
|
| + // First argument is a string, test the second.
|
| + is_smi = masm->CheckSmi(rhs);
|
| + __ j(is_smi, &string1_smi2);
|
| + __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
|
| + __ j(above_equal, &string1);
|
| +
|
| + // First and second argument are strings.
|
| + StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
|
| + __ TailCallStub(&string_add_stub);
|
| +
|
| + __ bind(&string1_smi2);
|
| + // First argument is a string, second is a smi. Try to look up the number
|
| + // string for the smi in the number string cache.
|
| + NumberToStringStub::GenerateLookupNumberStringCache(
|
| + masm, rhs, rbx, rcx, r8, true, &string1);
|
| +
|
| + // Replace second argument on stack and tail-call the string add stub to make
|
| + // the result.
|
| + __ movq(Operand(rsp, 1 * kPointerSize), rbx);
|
| + __ TailCallStub(&string_add_stub);
|
| +
|
| + // Only first argument is a string.
|
| + __ bind(&string1);
|
| + __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
|
| +
|
| + // First argument was not a string, test second.
|
| + __ bind(¬_string1);
|
| + is_smi = masm->CheckSmi(rhs);
|
| + __ j(is_smi, ¬_strings);
|
| + __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
|
| + __ j(above_equal, ¬_strings);
|
| +
|
| + // Only second argument is a string.
|
| + __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
|
| +
|
| + __ bind(¬_strings);
|
| + // Neither argument is a string.
|
| + __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
|
| + break;
|
| + }
|
| + case Token::SUB:
|
| + __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
|
| + break;
|
| + case Token::MUL:
|
| + __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
|
| + break;
|
| + case Token::DIV:
|
| + __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
|
| + break;
|
| + case Token::MOD:
|
| + __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
|
| + break;
|
| + case Token::BIT_OR:
|
| + __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
|
| + break;
|
| + case Token::BIT_AND:
|
| + __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
|
| + break;
|
| + case Token::BIT_XOR:
|
| + __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
|
| + break;
|
| + case Token::SAR:
|
| + __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
|
| + break;
|
| + case Token::SHL:
|
| + __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
|
| + break;
|
| + case Token::SHR:
|
| + __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
|
| + break;
|
| default:
|
| UNREACHABLE();
|
| - break;
|
| }
|
| - if (answer_object == Heap::undefined_value()) {
|
| - return false;
|
| +}
|
| +
|
| +
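| // Illustrative sketch (editor's addition, not part of this patch): the
| // SHR corner case handled above. A logical right shift by 1..31 always
| // clears the sign bit, so the result fits a 32-bit smi value; only a
| // shift by zero can leave a uint32 above INT32_MAX, which must be boxed
| // as a heap number (stored as a double).
| #include <cassert>
| #include <cstdint>
|
| int main() {
|   uint32_t x = 0x80000000u;                 // JS value -2147483648 as uint32
|   assert((x >> 1) <= uint32_t{INT32_MAX});  // any nonzero shift fits an int32
|   assert((x >> 0) > uint32_t{INT32_MAX});   // shift by zero does not
|   double boxed = static_cast<double>(x);    // what the HeapNumber receives
|   assert(boxed == 2147483648.0);
| }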
|
| +void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
|
| + ASSERT(!HasArgsInRegisters());
|
| + __ movq(rax, Operand(rsp, 1 * kPointerSize));
|
| + __ movq(rdx, Operand(rsp, 2 * kPointerSize));
|
| +}
|
| +
|
| +
|
| +void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
|
| + // If the arguments are not passed in registers, remove them from the stack
|
| + // before returning.
|
| + if (!HasArgsInRegisters()) {
|
| + __ ret(2 * kPointerSize); // Remove both operands
|
| + } else {
|
| + __ ret(0);
|
| }
|
| - frame_->Push(Handle<Object>(answer_object));
|
| - return true;
|
| }
|
|
|
|
|
| -// End of CodeGenerator implementation.
|
| +void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
|
| + ASSERT(HasArgsInRegisters());
|
| + __ pop(rcx);
|
| + if (HasArgsReversed()) {
|
| + __ push(rax);
|
| + __ push(rdx);
|
| + } else {
|
| + __ push(rdx);
|
| + __ push(rax);
|
| + }
|
| + __ push(rcx);
|
| +}
|
|
|
| +
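| // Illustrative sketch (editor's addition, not part of this patch): the
| // stack shuffle GenerateRegisterArgsPush performs, modeled for the
| // non-reversed case with a vector whose back() is the stack top.
| #include <cassert>
| #include <string>
| #include <vector>
|
| int main() {
|   std::vector<std::string> stack = {"ret"};          // return address on top
|   std::string rcx = stack.back(); stack.pop_back();  // pop(rcx)
|   stack.push_back("left (rdx)");                     // push(rdx)
|   stack.push_back("right (rax)");                    // push(rax)
|   stack.push_back(rcx);                              // push(rcx)
|   assert(stack.back() == "ret");                     // return address restored
|   assert(stack[0] == "left (rdx)" && stack[1] == "right (rax)");
| }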
|
| +void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
|
| + Label get_result;
|
| +
|
| + // Ensure the operands are on the stack.
|
| + if (HasArgsInRegisters()) {
|
| + GenerateRegisterArgsPush(masm);
|
| + }
|
| +
|
| + // Left and right arguments are already on stack.
|
| + __ pop(rcx); // Save the return address.
|
| +
|
| + // Push this stub's key.
|
| + __ Push(Smi::FromInt(MinorKey()));
|
| +
|
| + // Although the operation and the type info are encoded into the key,
|
| + // the encoding is opaque, so push them too.
|
| + __ Push(Smi::FromInt(op_));
|
| +
|
| + __ Push(Smi::FromInt(runtime_operands_type_));
|
| +
|
| + __ push(rcx); // The return address.
|
| +
|
| + // Perform patching to an appropriate fast case and return the result.
|
| + __ TailCallExternalReference(
|
| + ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
|
| + 5,
|
| + 1);
|
| +}
|
| +
|
| +
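| // Illustrative sketch (editor's addition, not part of this patch): the
| // stack layout GenerateTypeTransition hands to IC::kBinaryOp_Patch, top
| // slot first, assuming 8-byte stack slots.
| #include <cstdio>
|
| int main() {
|   const char* slots[] = {
|     "return address",                // rsp[0]
|     "runtime_operands_type_ (smi)",  // rsp[8]
|     "op_ (smi)",                     // rsp[16]
|     "MinorKey() (smi)",              // rsp[24]
|     "right operand",                 // rsp[32]
|     "left operand",                  // rsp[40]
|   };
|   for (int i = 0; i < 6; ++i) std::printf("rsp[%2d]: %s\n", 8 * i, slots[i]);
|   return 0;
| }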
|
| +Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
|
| + GenericBinaryOpStub stub(key, type_info);
|
| + return stub.GetCode();
|
| +}
|
| +
|
| +
|
| void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
|
| // Input on stack:
|
| // rsp[8]: argument (should be number).
|
| @@ -8501,6 +9240,148 @@
|
| }
|
|
|
|
|
| +// Input: rdx, rax are the left and right objects of a bit op.
|
| +// Output: rax, rcx are left and right integers for a bit op.
|
| +void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
|
| + // Check float operands.
|
| + Label done;
|
| + Label rax_is_smi;
|
| + Label rax_is_object;
|
| + Label rdx_is_object;
|
| +
|
| + __ JumpIfNotSmi(rdx, &rdx_is_object);
|
| + __ SmiToInteger32(rdx, rdx);
|
| + __ JumpIfSmi(rax, &rax_is_smi);
|
| +
|
| + __ bind(&rax_is_object);
|
| + IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
|
| + __ jmp(&done);
|
| +
|
| + __ bind(&rdx_is_object);
|
| + IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
|
| + __ JumpIfNotSmi(rax, &rax_is_object);
|
| + __ bind(&rax_is_smi);
|
| + __ SmiToInteger32(rcx, rax);
|
| +
|
| + __ bind(&done);
|
| + __ movl(rax, rdx);
|
| +}
|
| +
|
| +
|
| +// Input: rdx, rax are the left and right objects of a bit op.
|
| +// Output: rax, rcx are left and right integers for a bit op.
|
| +void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
|
| + Label* conversion_failure,
|
| + Register heap_number_map) {
|
| + // Check float operands.
|
| + Label arg1_is_object, check_undefined_arg1;
|
| + Label arg2_is_object, check_undefined_arg2;
|
| + Label load_arg2, done;
|
| +
|
| + __ JumpIfNotSmi(rdx, &arg1_is_object);
|
| + __ SmiToInteger32(rdx, rdx);
|
| + __ jmp(&load_arg2);
|
| +
|
| + // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
|
| + __ bind(&check_undefined_arg1);
|
| + __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
|
| + __ j(not_equal, conversion_failure);
|
| + __ movl(rdx, Immediate(0));
|
| + __ jmp(&load_arg2);
|
| +
|
| + __ bind(&arg1_is_object);
|
| + __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
|
| + __ j(not_equal, &check_undefined_arg1);
|
| + // Get the untagged integer version of the rdx heap number in rdx.
|
| + IntegerConvert(masm, rdx, rdx);
|
| +
|
| + // Here rdx has the untagged integer, rax has a Smi or a heap number.
|
| + __ bind(&load_arg2);
|
| + // Test if arg2 is a Smi.
|
| + __ JumpIfNotSmi(rax, &arg2_is_object);
|
| + __ SmiToInteger32(rax, rax);
|
| + __ movl(rcx, rax);
|
| + __ jmp(&done);
|
| +
|
| + // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
|
| + __ bind(&check_undefined_arg2);
|
| + __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
|
| + __ j(not_equal, conversion_failure);
|
| + __ movl(rcx, Immediate(0));
|
| + __ jmp(&done);
|
| +
|
| + __ bind(&arg2_is_object);
|
| + __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
|
| + __ j(not_equal, &check_undefined_arg2);
|
| + // Get the untagged integer version of the rax heap number in rcx.
|
| + IntegerConvert(masm, rcx, rax);
|
| + __ bind(&done);
|
| + __ movl(rax, rdx);
|
| +}
|
| +
|
| +
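| // Illustrative sketch (editor's addition, not part of this patch): the
| // ECMA-262 section 9.5 (ToInt32) conversion that IntegerConvert performs
| // on heap numbers, modeled on plain doubles. NaN and infinities map to
| // zero; finite values truncate toward zero and wrap modulo 2^32.
| #include <cassert>
| #include <cmath>
| #include <cstdint>
|
| int32_t ToInt32(double value) {
|   if (!std::isfinite(value)) return 0;
|   double t = std::trunc(value);           // round toward zero
|   double m = std::fmod(t, 4294967296.0);  // reduce modulo 2^32
|   if (m < 0) m += 4294967296.0;
|   return static_cast<int32_t>(static_cast<uint32_t>(m));
| }
|
| int main() {
|   assert(ToInt32(1.9) == 1);
|   assert(ToInt32(-1.9) == -1);
|   assert(ToInt32(4294967296.0 + 5.0) == 5);    // wraps modulo 2^32
|   assert(ToInt32(2147483648.0) == INT32_MIN);  // top bit set -> negative
| }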
|
| +void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
|
| + __ SmiToInteger32(kScratchRegister, rdx);
|
| + __ cvtlsi2sd(xmm0, kScratchRegister);
|
| + __ SmiToInteger32(kScratchRegister, rax);
|
| + __ cvtlsi2sd(xmm1, kScratchRegister);
|
| +}
|
| +
|
| +
|
| +void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
|
| + Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
|
| + // Load operand in rdx into xmm0.
|
| + __ JumpIfSmi(rdx, &load_smi_rdx);
|
| + __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
|
| + // Load operand in rax into xmm1.
|
| + __ JumpIfSmi(rax, &load_smi_rax);
|
| + __ bind(&load_nonsmi_rax);
|
| + __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
|
| + __ jmp(&done);
|
| +
|
| + __ bind(&load_smi_rdx);
|
| + __ SmiToInteger32(kScratchRegister, rdx);
|
| + __ cvtlsi2sd(xmm0, kScratchRegister);
|
| + __ JumpIfNotSmi(rax, &load_nonsmi_rax);
|
| +
|
| + __ bind(&load_smi_rax);
|
| + __ SmiToInteger32(kScratchRegister, rax);
|
| + __ cvtlsi2sd(xmm1, kScratchRegister);
|
| +
|
| + __ bind(&done);
|
| +}
|
| +
|
| +
|
| +void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
|
| + Label* not_numbers) {
|
| + Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
|
| + // Load operand in rdx into xmm0, or branch to not_numbers.
|
| + __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
|
| + __ JumpIfSmi(rdx, &load_smi_rdx);
|
| + __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
|
| + __ j(not_equal, not_numbers); // Argument in rdx is not a number.
|
| + __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
|
| + // Load operand in rax into xmm1, or branch to not_numbers.
|
| + __ JumpIfSmi(rax, &load_smi_rax);
|
| +
|
| + __ bind(&load_nonsmi_rax);
|
| + __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
|
| + __ j(not_equal, not_numbers);
|
| + __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
|
| + __ jmp(&done);
|
| +
|
| + __ bind(&load_smi_rdx);
|
| + __ SmiToInteger32(kScratchRegister, rdx);
|
| + __ cvtlsi2sd(xmm0, kScratchRegister);
|
| + __ JumpIfNotSmi(rax, &load_nonsmi_rax);
|
| +
|
| + __ bind(&load_smi_rax);
|
| + __ SmiToInteger32(kScratchRegister, rax);
|
| + __ cvtlsi2sd(xmm1, kScratchRegister);
|
| + __ bind(&done);
|
| +}
|
| +
|
| +
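| // Illustrative sketch (editor's addition, not part of this patch): what
| // the LoadSSE2* helpers above do in data terms. Each operand ends up as a
| // double in an XMM register, either by untagging a smi and converting
| // (cvtlsi2sd) or by reading the HeapNumber payload directly (movsd).
| #include <cassert>
| #include <cstdint>
|
| struct TaggedValue {
|   bool is_smi;
|   int32_t smi_value;   // valid when is_smi
|   double heap_number;  // valid otherwise
| };
|
| double LoadAsDouble(const TaggedValue& v) {
|   return v.is_smi ? static_cast<double>(v.smi_value) : v.heap_number;
| }
|
| int main() {
|   assert(LoadAsDouble({true, 7, 0.0}) == 7.0);   // smi path
|   assert(LoadAsDouble({false, 0, 2.5}) == 2.5);  // heap-number path
| }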
|
| void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
|
| Label slow, done;
|
|
|
| @@ -8585,6 +9466,172 @@
|
| }
|
|
|
|
|
| +void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
|
| + // The key is in rdx and the parameter count is in rax.
|
| +
|
| + // The displacement is used for skipping the frame pointer on the
|
| + // stack. It is the offset of the last parameter (if any) relative
|
| + // to the frame pointer.
|
| + static const int kDisplacement = 1 * kPointerSize;
|
| +
|
| + // Check that the key is a smi.
|
| + Label slow;
|
| + __ JumpIfNotSmi(rdx, &slow);
|
| +
|
| + // Check if the calling frame is an arguments adaptor frame.
|
| + Label adaptor;
|
| + __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
|
| + __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
|
| + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
|
| + __ j(equal, &adaptor);
|
| +
|
| + // Check index against formal parameters count limit passed in
|
| + // through register rax. Use unsigned comparison to get negative
|
| + // check for free.
|
| + __ cmpq(rdx, rax);
|
| + __ j(above_equal, &slow);
|
| +
|
| + // Read the argument from the stack and return it.
|
| + SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
|
| + __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
|
| + index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
|
| + __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
|
| + __ Ret();
|
| +
|
| + // Arguments adaptor case: Check index against actual arguments
|
| + // limit found in the arguments adaptor frame. Use unsigned
|
| + // comparison to get negative check for free.
|
| + __ bind(&adaptor);
|
| + __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| + __ cmpq(rdx, rcx);
|
| + __ j(above_equal, &slow);
|
| +
|
| + // Read the argument from the stack and return it.
|
| + index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
|
| + __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
|
| + index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
|
| + __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
|
| + __ Ret();
|
| +
|
| + // Slow-case: Handle non-smi or out-of-bounds access to arguments
|
| + // by calling the runtime system.
|
| + __ bind(&slow);
|
| + __ pop(rbx); // Return address.
|
| + __ push(rdx);
|
| + __ push(rbx);
|
| + __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
|
| +}
|
| +
|
| +
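| // Illustrative sketch (editor's addition, not part of this patch): the
| // address arithmetic in GenerateReadElement. With arguments pushed left
| // to right, parameter `key` of `count` lives at
| // rbp + kDisplacement + (count - key) * kPointerSize.
| #include <cassert>
| #include <cstdint>
|
| const int kPointerSize = 8;
| const int kDisplacement = 1 * kPointerSize;  // skips the frame pointer slot
|
| intptr_t ArgumentAddress(intptr_t rbp, int count, int key) {
|   intptr_t base = rbp + count * kPointerSize;        // lea(rbx, ...)
|   return base - key * kPointerSize + kDisplacement;  // negative smi index
| }
|
| int main() {
|   intptr_t rbp = 0x1000;
|   // The last parameter sits closest to the frame pointer...
|   assert(ArgumentAddress(rbp, 3, 2) == rbp + 2 * kPointerSize);
|   // ...and the first parameter farthest away.
|   assert(ArgumentAddress(rbp, 3, 0) == rbp + 4 * kPointerSize);
| }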
|
| +void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
|
| + // rsp[0] : return address
|
| + // rsp[8] : number of parameters
|
| + // rsp[16] : receiver displacement
|
| + // rsp[24] : function
|
| +
|
| + // The displacement is used for skipping the return address and the
|
| + // frame pointer on the stack. It is the offset of the last
|
| + // parameter (if any) relative to the frame pointer.
|
| + static const int kDisplacement = 2 * kPointerSize;
|
| +
|
| + // Check if the calling frame is an arguments adaptor frame.
|
| + Label adaptor_frame, try_allocate, runtime;
|
| + __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
|
| + __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
|
| + Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
|
| + __ j(equal, &adaptor_frame);
|
| +
|
| + // Get the length from the frame.
|
| + __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
|
| + __ jmp(&try_allocate);
|
| +
|
| + // Patch the arguments.length and the parameters pointer.
|
| + __ bind(&adaptor_frame);
|
| + __ SmiToInteger32(rcx,
|
| + Operand(rdx,
|
| + ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| + // Space on stack must already hold a smi.
|
| + __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
|
| + // Do not clobber the length index for the indexing operation since
|
| + // it is used to compute the size for allocation later.
|
| + __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
|
| + __ movq(Operand(rsp, 2 * kPointerSize), rdx);
|
| +
|
| + // Try the new space allocation. Start out with computing the size of
|
| + // the arguments object and the elements array.
|
| + Label add_arguments_object;
|
| + __ bind(&try_allocate);
|
| + __ testl(rcx, rcx);
|
| + __ j(zero, &add_arguments_object);
|
| + __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
|
| + __ bind(&add_arguments_object);
|
| + __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
|
| +
|
| + // Do the allocation of both objects in one go.
|
| + __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
|
| +
|
| + // Get the arguments boilerplate from the current (global) context.
|
| + int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
|
| + __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
| + __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
|
| + __ movq(rdi, Operand(rdi, offset));
|
| +
|
| + // Copy the JS object part.
|
| + STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
|
| + __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
|
| + __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
|
| + __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
|
| + __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
|
| + __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
|
| + __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
|
| +
|
| + // Setup the callee in-object property.
|
| + ASSERT(Heap::arguments_callee_index == 0);
|
| + __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
|
| + __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
|
| +
|
| + // Get the length (smi tagged) and set that as an in-object property too.
|
| + ASSERT(Heap::arguments_length_index == 1);
|
| + __ movq(rcx, Operand(rsp, 1 * kPointerSize));
|
| + __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
|
| +
|
| + // If there are no actual arguments, we're done.
|
| + Label done;
|
| + __ SmiTest(rcx);
|
| + __ j(zero, &done);
|
| +
|
| + // Get the parameters pointer from the stack and untag the length.
|
| + __ movq(rdx, Operand(rsp, 2 * kPointerSize));
|
| +
|
| + // Setup the elements pointer in the allocated arguments object and
|
| + // initialize the header in the elements fixed array.
|
| + __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
|
| + __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
|
| + __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
|
| + __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
|
| + __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
|
| + __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
|
| +
|
| + // Copy the fixed array slots.
|
| + Label loop;
|
| + __ bind(&loop);
|
| + __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
|
| + __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
|
| + __ addq(rdi, Immediate(kPointerSize));
|
| + __ subq(rdx, Immediate(kPointerSize));
|
| + __ decl(rcx);
|
| + __ j(not_zero, &loop);
|
| +
|
| + // Return and remove the on-stack parameters.
|
| + __ bind(&done);
|
| + __ ret(3 * kPointerSize);
|
| +
|
| + // Do the runtime call to allocate the arguments object.
|
| + __ bind(&runtime);
|
| + __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
|
| +}
|
| +
|
| +
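| // Illustrative sketch (editor's addition, not part of this patch): the
| // allocation size computed at try_allocate above. The two constants are
| // stand-ins for the real Heap::kArgumentsObjectSize and
| // FixedArray::kHeaderSize values, which this patch does not define.
| #include <cassert>
|
| const int kPointerSize = 8;
| const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map + length (assumed)
| const int kArgumentsObjectSize = 4 * kPointerSize;   // assumed object size
|
| int AllocationSize(int argc) {
|   int size = 0;
|   if (argc > 0) {  // no elements array is needed for zero arguments
|     size = kFixedArrayHeaderSize + argc * kPointerSize;
|   }
|   return size + kArgumentsObjectSize;  // both objects allocated in one go
| }
|
| int main() {
|   assert(AllocationSize(0) == kArgumentsObjectSize);
|   assert(AllocationSize(2) == kArgumentsObjectSize + kFixedArrayHeaderSize +
|                               2 * kPointerSize);
| }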
|
| void RegExpExecStub::Generate(MacroAssembler* masm) {
|
| // Just jump directly to runtime if native RegExp is not selected at compile
|
| // time or if regexp entry in generated code is turned off runtime switch or
|
| @@ -8932,18 +9979,6 @@
|
| }
|
|
|
|
|
| -void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
|
| - Register hash,
|
| - Register mask) {
|
| - __ and_(hash, mask);
|
| - // Each entry in string cache consists of two pointer sized fields,
|
| - // but times_twice_pointer_size (multiplication by 16) scale factor
|
| - // is not supported by addrmode on x64 platform.
|
| - // So we have to premultiply entry index before lookup.
|
| - __ shl(hash, Immediate(kPointerSizeLog2 + 1));
|
| -}
|
| -
|
| -
|
| void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
|
| Register object,
|
| Register result,
|
| @@ -9023,6 +10058,18 @@
|
| }
|
|
|
|
|
| +void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
|
| + Register hash,
|
| + Register mask) {
|
| + __ and_(hash, mask);
|
| + // Each entry in the string cache consists of two pointer-sized fields,
|
| + // but the times_twice_pointer_size (multiply by 16) scale factor is not
|
| + // supported by x64 addressing modes, so we have to premultiply the entry
|
| + // index before the lookup.
|
| + __ shl(hash, Immediate(kPointerSizeLog2 + 1));
|
| +}
|
| +
|
| +
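| // Illustrative sketch (editor's addition, not part of this patch): the
| // premultiplication above. x64 addressing modes scale an index register
| // by at most 8, but each cache entry is 16 bytes (two pointers), so the
| // masked hash is converted to a byte offset up front.
| #include <cassert>
|
| int EntryByteOffset(int hash, int mask) {
|   int index = hash & mask;  // and_(hash, mask)
|   return index << (3 + 1);  // shl(hash, kPointerSizeLog2 + 1), i.e. * 16
| }
|
| int main() {
|   // Entry 5 starts 80 bytes into the cache: 5 entries of 16 bytes each.
|   assert(EntryByteOffset(5, 0xF) == 5 * 16);
| }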
|
| void NumberToStringStub::Generate(MacroAssembler* masm) {
|
| Label runtime;
|
|
|
| @@ -9038,12 +10085,6 @@
|
| }
|
|
|
|
|
| -void RecordWriteStub::Generate(MacroAssembler* masm) {
|
| - masm->RecordWriteHelper(object_, addr_, scratch_);
|
| - masm->ret(0);
|
| -}
|
| -
|
| -
|
| static int NegativeComparisonResult(Condition cc) {
|
| ASSERT(cc != equal);
|
| ASSERT((cc == less) || (cc == less_equal)
|
| @@ -9319,283 +10360,76 @@
|
| }
|
|
|
|
|
| -// Call the function just below TOS on the stack with the given
|
| -// arguments. The receiver is the TOS.
|
| -void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
|
| - CallFunctionFlags flags,
|
| - int position) {
|
| - // Push the arguments ("left-to-right") on the stack.
|
| - int arg_count = args->length();
|
| - for (int i = 0; i < arg_count; i++) {
|
| - Load(args->at(i));
|
| - frame_->SpillTop();
|
| - }
|
| +void StackCheckStub::Generate(MacroAssembler* masm) {
|
| + // Because builtins always remove the receiver from the stack, we
|
| + // have to fake one to avoid underflowing the stack. The receiver
|
| + // must be inserted below the return address on the stack so we
|
| + // temporarily store that in a register.
|
| + __ pop(rax);
|
| + __ Push(Smi::FromInt(0));
|
| + __ push(rax);
|
|
|
| - // Record the position for debugging purposes.
|
| - CodeForSourcePosition(position);
|
| -
|
| - // Use the shared code stub to call the function.
|
| - InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
|
| - CallFunctionStub call_function(arg_count, in_loop, flags);
|
| - Result answer = frame_->CallStub(&call_function, arg_count + 1);
|
| - // Restore context and replace function on the stack with the
|
| - // result of the stub invocation.
|
| - frame_->RestoreContextRegister();
|
| - frame_->SetElementAt(0, &answer);
|
| + // Do tail-call to runtime routine.
|
| + __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
|
| }
|
|
|
|
|
| -void InstanceofStub::Generate(MacroAssembler* masm) {
|
| - // Implements "value instanceof function" operator.
|
| - // Expected input state:
|
| - // rsp[0] : return address
|
| - // rsp[1] : function pointer
|
| - // rsp[2] : value
|
| - // Returns a bitwise zero to indicate that the value
|
| - // is and instance of the function and anything else to
|
| - // indicate that the value is not an instance.
|
| -
|
| - // Get the object - go slow case if it's a smi.
|
| +void CallFunctionStub::Generate(MacroAssembler* masm) {
|
| Label slow;
|
| - __ movq(rax, Operand(rsp, 2 * kPointerSize));
|
| - __ JumpIfSmi(rax, &slow);
|
|
|
| - // Check that the left hand is a JS object. Leave its map in rax.
|
| - __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
|
| - __ j(below, &slow);
|
| - __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
|
| - __ j(above, &slow);
|
| + // If the receiver might be a value (string, number or boolean), check for
|
| + // this and box it if it is.
|
| + if (ReceiverMightBeValue()) {
|
| + // Get the receiver from the stack.
|
| + // +1 ~ return address
|
| + Label receiver_is_value, receiver_is_js_object;
|
| + __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
|
|
|
| - // Get the prototype of the function.
|
| - __ movq(rdx, Operand(rsp, 1 * kPointerSize));
|
| - // rdx is function, rax is map.
|
| + // Check if receiver is a smi (which is a number value).
|
| + __ JumpIfSmi(rax, &receiver_is_value);
|
|
|
| - // Look up the function and the map in the instanceof cache.
|
| - Label miss;
|
| - __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
|
| - __ j(not_equal, &miss);
|
| - __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
|
| - __ j(not_equal, &miss);
|
| - __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
|
| - __ ret(2 * kPointerSize);
|
| + // Check if the receiver is a valid JS object.
|
| + __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
|
| + __ j(above_equal, &receiver_is_js_object);
|
|
|
| - __ bind(&miss);
|
| - __ TryGetFunctionPrototype(rdx, rbx, &slow);
|
| + // Call the runtime to box the value.
|
| + __ bind(&receiver_is_value);
|
| + __ EnterInternalFrame();
|
| + __ push(rax);
|
| + __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
|
| + __ LeaveInternalFrame();
|
| + __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
|
|
|
| - // Check that the function prototype is a JS object.
|
| - __ JumpIfSmi(rbx, &slow);
|
| - __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
|
| - __ j(below, &slow);
|
| - __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
|
| - __ j(above, &slow);
|
| + __ bind(&receiver_is_js_object);
|
| + }
|
|
|
| - // Register mapping:
|
| - // rax is object map.
|
| - // rdx is function.
|
| - // rbx is function prototype.
|
| - __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
|
| - __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
|
| + // Get the function to call from the stack.
|
| + // +2 ~ receiver, return address
|
| + __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
|
|
|
| - __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
|
| + // Check that the function really is a JavaScript function.
|
| + __ JumpIfSmi(rdi, &slow);
|
| + // Go to the slow case if we do not have a function.
|
| + __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
|
| + __ j(not_equal, &slow);
|
|
|
| - // Loop through the prototype chain looking for the function prototype.
|
| - Label loop, is_instance, is_not_instance;
|
| - __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
|
| - __ bind(&loop);
|
| - __ cmpq(rcx, rbx);
|
| - __ j(equal, &is_instance);
|
| - __ cmpq(rcx, kScratchRegister);
|
| - // The code at is_not_instance assumes that kScratchRegister contains a
|
| - // non-zero GCable value (the null object in this case).
|
| - __ j(equal, &is_not_instance);
|
| - __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
|
| - __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
|
| - __ jmp(&loop);
|
| + // Fast-case: Just invoke the function.
|
| + ParameterCount actual(argc_);
|
| + __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
|
|
|
| - __ bind(&is_instance);
|
| - __ xorl(rax, rax);
|
| - // Store bitwise zero in the cache. This is a Smi in GC terms.
|
| - ASSERT_EQ(0, kSmiTag);
|
| - __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
|
| - __ ret(2 * kPointerSize);
|
| -
|
| - __ bind(&is_not_instance);
|
| - // We have to store a non-zero value in the cache.
|
| - __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
|
| - __ ret(2 * kPointerSize);
|
| -
|
| - // Slow-case: Go through the JavaScript implementation.
|
| + // Slow-case: Non-function called.
|
| __ bind(&slow);
|
| - __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
|
| + // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
|
| + // of the original receiver from the call site).
|
| + __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
|
| + __ Set(rax, argc_);
|
| + __ Set(rbx, 0);
|
| + __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
|
| + Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
|
| + __ Jump(adaptor, RelocInfo::CODE_TARGET);
|
| }
|
|
|
|
|
| -void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
|
| - // rsp[0] : return address
|
| - // rsp[8] : number of parameters
|
| - // rsp[16] : receiver displacement
|
| - // rsp[24] : function
|
| -
|
| - // The displacement is used for skipping the return address and the
|
| - // frame pointer on the stack. It is the offset of the last
|
| - // parameter (if any) relative to the frame pointer.
|
| - static const int kDisplacement = 2 * kPointerSize;
|
| -
|
| - // Check if the calling frame is an arguments adaptor frame.
|
| - Label adaptor_frame, try_allocate, runtime;
|
| - __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
|
| - __ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
|
| - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
|
| - __ j(equal, &adaptor_frame);
|
| -
|
| - // Get the length from the frame.
|
| - __ SmiToInteger32(rcx, Operand(rsp, 1 * kPointerSize));
|
| - __ jmp(&try_allocate);
|
| -
|
| - // Patch the arguments.length and the parameters pointer.
|
| - __ bind(&adaptor_frame);
|
| - __ SmiToInteger32(rcx,
|
| - Operand(rdx,
|
| - ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| - // Space on stack must already hold a smi.
|
| - __ Integer32ToSmiField(Operand(rsp, 1 * kPointerSize), rcx);
|
| - // Do not clobber the length index for the indexing operation since
|
| - // it is used compute the size for allocation later.
|
| - __ lea(rdx, Operand(rdx, rcx, times_pointer_size, kDisplacement));
|
| - __ movq(Operand(rsp, 2 * kPointerSize), rdx);
|
| -
|
| - // Try the new space allocation. Start out with computing the size of
|
| - // the arguments object and the elements array.
|
| - Label add_arguments_object;
|
| - __ bind(&try_allocate);
|
| - __ testl(rcx, rcx);
|
| - __ j(zero, &add_arguments_object);
|
| - __ leal(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
|
| - __ bind(&add_arguments_object);
|
| - __ addl(rcx, Immediate(Heap::kArgumentsObjectSize));
|
| -
|
| - // Do the allocation of both objects in one go.
|
| - __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
|
| -
|
| - // Get the arguments boilerplate from the current (global) context.
|
| - int offset = Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX);
|
| - __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
| - __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
|
| - __ movq(rdi, Operand(rdi, offset));
|
| -
|
| - // Copy the JS object part.
|
| - STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize);
|
| - __ movq(kScratchRegister, FieldOperand(rdi, 0 * kPointerSize));
|
| - __ movq(rdx, FieldOperand(rdi, 1 * kPointerSize));
|
| - __ movq(rbx, FieldOperand(rdi, 2 * kPointerSize));
|
| - __ movq(FieldOperand(rax, 0 * kPointerSize), kScratchRegister);
|
| - __ movq(FieldOperand(rax, 1 * kPointerSize), rdx);
|
| - __ movq(FieldOperand(rax, 2 * kPointerSize), rbx);
|
| -
|
| - // Setup the callee in-object property.
|
| - ASSERT(Heap::arguments_callee_index == 0);
|
| - __ movq(kScratchRegister, Operand(rsp, 3 * kPointerSize));
|
| - __ movq(FieldOperand(rax, JSObject::kHeaderSize), kScratchRegister);
|
| -
|
| - // Get the length (smi tagged) and set that as an in-object property too.
|
| - ASSERT(Heap::arguments_length_index == 1);
|
| - __ movq(rcx, Operand(rsp, 1 * kPointerSize));
|
| - __ movq(FieldOperand(rax, JSObject::kHeaderSize + kPointerSize), rcx);
|
| -
|
| - // If there are no actual arguments, we're done.
|
| - Label done;
|
| - __ SmiTest(rcx);
|
| - __ j(zero, &done);
|
| -
|
| - // Get the parameters pointer from the stack and untag the length.
|
| - __ movq(rdx, Operand(rsp, 2 * kPointerSize));
|
| -
|
| - // Setup the elements pointer in the allocated arguments object and
|
| - // initialize the header in the elements fixed array.
|
| - __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
|
| - __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
|
| - __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
|
| - __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
|
| - __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
|
| - __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
|
| -
|
| - // Copy the fixed array slots.
|
| - Label loop;
|
| - __ bind(&loop);
|
| - __ movq(kScratchRegister, Operand(rdx, -1 * kPointerSize)); // Skip receiver.
|
| - __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), kScratchRegister);
|
| - __ addq(rdi, Immediate(kPointerSize));
|
| - __ subq(rdx, Immediate(kPointerSize));
|
| - __ decl(rcx);
|
| - __ j(not_zero, &loop);
|
| -
|
| - // Return and remove the on-stack parameters.
|
| - __ bind(&done);
|
| - __ ret(3 * kPointerSize);
|
| -
|
| - // Do the runtime call to allocate the arguments object.
|
| - __ bind(&runtime);
|
| - __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
|
| -}
|
| -
|
| -
|
| -void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
|
| - // The key is in rdx and the parameter count is in rax.
|
| -
|
| - // The displacement is used for skipping the frame pointer on the
|
| - // stack. It is the offset of the last parameter (if any) relative
|
| - // to the frame pointer.
|
| - static const int kDisplacement = 1 * kPointerSize;
|
| -
|
| - // Check that the key is a smi.
|
| - Label slow;
|
| - __ JumpIfNotSmi(rdx, &slow);
|
| -
|
| - // Check if the calling frame is an arguments adaptor frame.
|
| - Label adaptor;
|
| - __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
|
| - __ SmiCompare(Operand(rbx, StandardFrameConstants::kContextOffset),
|
| - Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
|
| - __ j(equal, &adaptor);
|
| -
|
| - // Check index against formal parameters count limit passed in
|
| - // through register rax. Use unsigned comparison to get negative
|
| - // check for free.
|
| - __ cmpq(rdx, rax);
|
| - __ j(above_equal, &slow);
|
| -
|
| - // Read the argument from the stack and return it.
|
| - SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
|
| - __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
|
| - index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
|
| - __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
|
| - __ Ret();
|
| -
|
| - // Arguments adaptor case: Check index against actual arguments
|
| - // limit found in the arguments adaptor frame. Use unsigned
|
| - // comparison to get negative check for free.
|
| - __ bind(&adaptor);
|
| - __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| - __ cmpq(rdx, rcx);
|
| - __ j(above_equal, &slow);
|
| -
|
| - // Read the argument from the stack and return it.
|
| - index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
|
| - __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
|
| - index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
|
| - __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
|
| - __ Ret();
|
| -
|
| - // Slow-case: Handle non-smi or out-of-bounds access to arguments
|
| - // by calling the runtime system.
|
| - __ bind(&slow);
|
| - __ pop(rbx); // Return address.
|
| - __ push(rdx);
|
| - __ push(rbx);
|
| - __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
|
| -}
|
| -
|
| -
|
| void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
|
| // Check that stack should contain next handler, frame pointer, state and
|
| // return address in that order.
|
| @@ -9625,6 +10459,11 @@
|
| }
|
|
|
|
|
| +void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
|
| + UNREACHABLE();
|
| +}
|
| +
|
| +
|
| void CEntryStub::GenerateCore(MacroAssembler* masm,
|
| Label* throw_normal_exception,
|
| Label* throw_termination_exception,
|
| @@ -9815,62 +10654,6 @@
|
| }
|
|
|
|
|
| -void CallFunctionStub::Generate(MacroAssembler* masm) {
|
| - Label slow;
|
| -
|
| - // If the receiver might be a value (string, number or boolean) check for this
|
| - // and box it if it is.
|
| - if (ReceiverMightBeValue()) {
|
| - // Get the receiver from the stack.
|
| - // +1 ~ return address
|
| - Label receiver_is_value, receiver_is_js_object;
|
| - __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
|
| -
|
| - // Check if receiver is a smi (which is a number value).
|
| - __ JumpIfSmi(rax, &receiver_is_value);
|
| -
|
| - // Check if the receiver is a valid JS object.
|
| - __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rdi);
|
| - __ j(above_equal, &receiver_is_js_object);
|
| -
|
| - // Call the runtime to box the value.
|
| - __ bind(&receiver_is_value);
|
| - __ EnterInternalFrame();
|
| - __ push(rax);
|
| - __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
|
| - __ LeaveInternalFrame();
|
| - __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rax);
|
| -
|
| - __ bind(&receiver_is_js_object);
|
| - }
|
| -
|
| - // Get the function to call from the stack.
|
| - // +2 ~ receiver, return address
|
| - __ movq(rdi, Operand(rsp, (argc_ + 2) * kPointerSize));
|
| -
|
| - // Check that the function really is a JavaScript function.
|
| - __ JumpIfSmi(rdi, &slow);
|
| - // Goto slow case if we do not have a function.
|
| - __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
|
| - __ j(not_equal, &slow);
|
| -
|
| - // Fast-case: Just invoke the function.
|
| - ParameterCount actual(argc_);
|
| - __ InvokeFunction(rdi, actual, JUMP_FUNCTION);
|
| -
|
| - // Slow-case: Non-function called.
|
| - __ bind(&slow);
|
| - // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
|
| - // of the original receiver from the call site).
|
| - __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
|
| - __ Set(rax, argc_);
|
| - __ Set(rbx, 0);
|
| - __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
|
| - Handle<Code> adaptor(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
|
| - __ Jump(adaptor, RelocInfo::CODE_TARGET);
|
| -}
|
| -
|
| -
|
| void CEntryStub::Generate(MacroAssembler* masm) {
|
| // rax: number of arguments including receiver
|
| // rbx: pointer to C function (C callee-saved)
|
| @@ -9939,11 +10722,6 @@
|
| }
|
|
|
|
|
| -void ApiGetterEntryStub::Generate(MacroAssembler* masm) {
|
| - UNREACHABLE();
|
| -}
|
| -
|
| -
|
| void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
|
| Label invoke, exit;
|
| #ifdef ENABLE_LOGGING_AND_PROFILING
|
| @@ -10075,890 +10853,91 @@
|
| }
|
|
|
|
|
| -// -----------------------------------------------------------------------------
|
| -// Implementation of stubs.
|
| +void InstanceofStub::Generate(MacroAssembler* masm) {
|
| + // Implements "value instanceof function" operator.
|
| + // Expected input state:
|
| + // rsp[0] : return address
|
| + // rsp[1] : function pointer
|
| + // rsp[2] : value
|
| + // Returns a bitwise zero to indicate that the value
|
| + // is an instance of the function, and anything else to
|
| + // indicate that the value is not an instance.
|
|
|
| -// Stub classes have public member named masm, not masm_.
|
| + // Get the object - go slow case if it's a smi.
|
| + Label slow;
|
| + __ movq(rax, Operand(rsp, 2 * kPointerSize));
|
| + __ JumpIfSmi(rax, &slow);
|
|
|
| -void StackCheckStub::Generate(MacroAssembler* masm) {
|
| - // Because builtins always remove the receiver from the stack, we
|
| - // have to fake one to avoid underflowing the stack. The receiver
|
| - // must be inserted below the return address on the stack so we
|
| - // temporarily store that in a register.
|
| - __ pop(rax);
|
| - __ Push(Smi::FromInt(0));
|
| - __ push(rax);
|
| + // Check that the left-hand side is a JS object. Leave its map in rax.
|
| + __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rax);
|
| + __ j(below, &slow);
|
| + __ CmpInstanceType(rax, LAST_JS_OBJECT_TYPE);
|
| + __ j(above, &slow);
|
|
|
| - // Do tail-call to runtime routine.
|
| - __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
|
| -}
|
| + // Get the prototype of the function.
|
| + __ movq(rdx, Operand(rsp, 1 * kPointerSize));
|
| + // rdx is function, rax is map.
|
|
|
| + // Look up the function and the map in the instanceof cache.
|
| + Label miss;
|
| + __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
|
| + __ j(not_equal, &miss);
|
| + __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
|
| + __ j(not_equal, &miss);
|
| + __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
|
| + __ ret(2 * kPointerSize);
|
|
|
| -void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
|
| - __ SmiToInteger32(kScratchRegister, rdx);
|
| - __ cvtlsi2sd(xmm0, kScratchRegister);
|
| - __ SmiToInteger32(kScratchRegister, rax);
|
| - __ cvtlsi2sd(xmm1, kScratchRegister);
|
| -}
|
| + __ bind(&miss);
|
| + __ TryGetFunctionPrototype(rdx, rbx, &slow);
|
|
|
| + // Check that the function prototype is a JS object.
|
| + __ JumpIfSmi(rbx, &slow);
|
| + __ CmpObjectType(rbx, FIRST_JS_OBJECT_TYPE, kScratchRegister);
|
| + __ j(below, &slow);
|
| + __ CmpInstanceType(kScratchRegister, LAST_JS_OBJECT_TYPE);
|
| + __ j(above, &slow);
|
|
|
| -void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
|
| - Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
|
| - // Load operand in rdx into xmm0.
|
| - __ JumpIfSmi(rdx, &load_smi_rdx);
|
| - __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
|
| - // Load operand in rax into xmm1.
|
| - __ JumpIfSmi(rax, &load_smi_rax);
|
| - __ bind(&load_nonsmi_rax);
|
| - __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
|
| - __ jmp(&done);
|
| + // Register mapping:
|
| + // rax is object map.
|
| + // rdx is function.
|
| + // rbx is function prototype.
|
| + __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
|
| + __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
|
|
|
| - __ bind(&load_smi_rdx);
|
| - __ SmiToInteger32(kScratchRegister, rdx);
|
| - __ cvtlsi2sd(xmm0, kScratchRegister);
|
| - __ JumpIfNotSmi(rax, &load_nonsmi_rax);
|
| + __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
|
|
|
| - __ bind(&load_smi_rax);
|
| - __ SmiToInteger32(kScratchRegister, rax);
|
| - __ cvtlsi2sd(xmm1, kScratchRegister);
|
| + // Loop through the prototype chain looking for the function prototype.
|
| + Label loop, is_instance, is_not_instance;
|
| + __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
|
| + __ bind(&loop);
|
| + __ cmpq(rcx, rbx);
|
| + __ j(equal, &is_instance);
|
| + __ cmpq(rcx, kScratchRegister);
|
| + // The code at is_not_instance assumes that kScratchRegister contains a
|
| + // non-zero GCable value (the null object in this case).
|
| + __ j(equal, &is_not_instance);
|
| + __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
|
| + __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
|
| + __ jmp(&loop);
|
|
|
| - __ bind(&done);
|
| -}
|
| + __ bind(&is_instance);
|
| + __ xorl(rax, rax);
|
| + // Store bitwise zero in the cache. This is a Smi in GC terms.
|
| + ASSERT_EQ(0, kSmiTag);
|
| + __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
|
| + __ ret(2 * kPointerSize);
|
|
|
| + __ bind(&is_not_instance);
|
| + // We have to store a non-zero value in the cache.
|
| + __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
|
| + __ ret(2 * kPointerSize);
|
|
|
| -void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
|
| - Label* not_numbers) {
|
| - Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
|
| - // Load operand in rdx into xmm0, or branch to not_numbers.
|
| - __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
|
| - __ JumpIfSmi(rdx, &load_smi_rdx);
|
| - __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
|
| - __ j(not_equal, not_numbers); // Argument in rdx is not a number.
|
| - __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
|
| - // Load operand in rax into xmm1, or branch to not_numbers.
|
| - __ JumpIfSmi(rax, &load_smi_rax);
|
| -
|
| - __ bind(&load_nonsmi_rax);
|
| - __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
|
| - __ j(not_equal, not_numbers);
|
| - __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
|
| - __ jmp(&done);
|
| -
|
| - __ bind(&load_smi_rdx);
|
| - __ SmiToInteger32(kScratchRegister, rdx);
|
| - __ cvtlsi2sd(xmm0, kScratchRegister);
|
| - __ JumpIfNotSmi(rax, &load_nonsmi_rax);
|
| -
|
| - __ bind(&load_smi_rax);
|
| - __ SmiToInteger32(kScratchRegister, rax);
|
| - __ cvtlsi2sd(xmm1, kScratchRegister);
|
| - __ bind(&done);
|
| + // Slow-case: Go through the JavaScript implementation.
|
| + __ bind(&slow);
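|
| + // The two arguments are still on the stack, where the builtin expects them.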
|
| + __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
|
| }
|
|
|
|
|
| -// Input: rdx, rax are the left and right objects of a bit op.
|
| -// Output: rax, rcx are left and right integers for a bit op.
|
| -void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
|
| - Label* conversion_failure,
|
| - Register heap_number_map) {
|
| - // Check float operands.
|
| - Label arg1_is_object, check_undefined_arg1;
|
| - Label arg2_is_object, check_undefined_arg2;
|
| - Label load_arg2, done;
|
| -
|
| - __ JumpIfNotSmi(rdx, &arg1_is_object);
|
| - __ SmiToInteger32(rdx, rdx);
|
| - __ jmp(&load_arg2);
|
| -
|
| - // If the argument is undefined, it converts to zero (ECMA-262, section 9.5).
|
| - __ bind(&check_undefined_arg1);
|
| - __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
|
| - __ j(not_equal, conversion_failure);
|
| - __ movl(rdx, Immediate(0));
|
| - __ jmp(&load_arg2);
|
| -
|
| - __ bind(&arg1_is_object);
|
| - __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
|
| - __ j(not_equal, &check_undefined_arg1);
|
| - // Get the untagged integer version of the rdx heap number in rdx.
|
| - IntegerConvert(masm, rdx, rdx);
|
| -
|
| - // Here rdx has the untagged integer, rax has a Smi or a heap number.
|
| - __ bind(&load_arg2);
|
| - // Test if arg2 is a Smi.
|
| - __ JumpIfNotSmi(rax, &arg2_is_object);
|
| - __ SmiToInteger32(rax, rax);
|
| - __ movl(rcx, rax);
|
| - __ jmp(&done);
|
| -
|
| - // If the argument is undefined, it converts to zero (ECMA-262, section 9.5).
|
| - __ bind(&check_undefined_arg2);
|
| - __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
|
| - __ j(not_equal, conversion_failure);
|
| - __ movl(rcx, Immediate(0));
|
| - __ jmp(&done);
|
| -
|
| - __ bind(&arg2_is_object);
|
| - __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
|
| - __ j(not_equal, &check_undefined_arg2);
|
| - // Get the untagged integer version of the rax heap number in rcx.
|
| - IntegerConvert(masm, rcx, rax);
|
| - __ bind(&done);
|
| - __ movl(rax, rdx);
|
| -}
|
| -
|
| -
|
| -// Input: rdx, rax are the left and right objects of a bit op.
|
| -// Output: rax, rcx are left and right integers for a bit op.
|
| -void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
|
| - // Check float operands.
|
| - Label done;
|
| - Label rax_is_smi;
|
| - Label rax_is_object;
|
| - Label rdx_is_object;
|
| -
|
| - __ JumpIfNotSmi(rdx, &rdx_is_object);
|
| - __ SmiToInteger32(rdx, rdx);
|
| - __ JumpIfSmi(rax, &rax_is_smi);
|
| -
|
| - __ bind(&rax_is_object);
|
| - IntegerConvert(masm, rcx, rax); // Uses rdi, rcx and rbx.
|
| - __ jmp(&done);
|
| -
|
| - __ bind(&rdx_is_object);
|
| - IntegerConvert(masm, rdx, rdx); // Uses rdi, rcx and rbx.
|
| - __ JumpIfNotSmi(rax, &rax_is_object);
|
| - __ bind(&rax_is_smi);
|
| - __ SmiToInteger32(rcx, rax);
|
| -
|
| - __ bind(&done);
|
| - __ movl(rax, rdx);
|
| -}
|
| -
|
| -
|
| -const char* GenericBinaryOpStub::GetName() {
|
| - if (name_ != NULL) return name_;
|
| - const int len = 100;
|
| - name_ = Bootstrapper::AllocateAutoDeletedArray(len);
|
| - if (name_ == NULL) return "OOM";
|
| - const char* op_name = Token::Name(op_);
|
| - const char* overwrite_name;
|
| - switch (mode_) {
|
| - case NO_OVERWRITE: overwrite_name = "Alloc"; break;
|
| - case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
|
| - case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
|
| - default: overwrite_name = "UnknownOverwrite"; break;
|
| - }
|
| -
|
| - OS::SNPrintF(Vector<char>(name_, len),
|
| - "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
|
| - op_name,
|
| - overwrite_name,
|
| - (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
|
| - args_in_registers_ ? "RegArgs" : "StackArgs",
|
| - args_reversed_ ? "_R" : "",
|
| - static_operands_type_.ToString(),
|
| - BinaryOpIC::GetName(runtime_operands_type_));
|
| - return name_;
|
| -}
|
| -
|
| -
|
| -void GenericBinaryOpStub::GenerateCall(
|
| - MacroAssembler* masm,
|
| - Register left,
|
| - Register right) {
|
| - if (!ArgsInRegistersSupported()) {
|
| - // Pass arguments on the stack.
|
| - __ push(left);
|
| - __ push(right);
|
| - } else {
|
| - // The calling convention with registers is left in rdx and right in rax.
|
| - Register left_arg = rdx;
|
| - Register right_arg = rax;
|
| - if (!(left.is(left_arg) && right.is(right_arg))) {
|
| - if (left.is(right_arg) && right.is(left_arg)) {
|
| - if (IsOperationCommutative()) {
|
| - SetArgsReversed();
|
| - } else {
|
| - __ xchg(left, right);
|
| - }
|
| - } else if (left.is(left_arg)) {
|
| - __ movq(right_arg, right);
|
| - } else if (right.is(right_arg)) {
|
| - __ movq(left_arg, left);
|
| - } else if (left.is(right_arg)) {
|
| - if (IsOperationCommutative()) {
|
| - __ movq(left_arg, right);
|
| - SetArgsReversed();
|
| - } else {
|
| - // Order of moves important to avoid destroying left argument.
|
| - __ movq(left_arg, left);
|
| - __ movq(right_arg, right);
|
| - }
|
| - } else if (right.is(left_arg)) {
|
| - if (IsOperationCommutative()) {
|
| - __ movq(right_arg, left);
|
| - SetArgsReversed();
|
| - } else {
|
| - // Order of moves important to avoid destroying right argument.
|
| - __ movq(right_arg, right);
|
| - __ movq(left_arg, left);
|
| - }
|
| - } else {
|
| - // Order of moves is not important.
|
| - __ movq(left_arg, left);
|
| - __ movq(right_arg, right);
|
| - }
|
| - }
|
| -
|
| - // Update flags to indicate that arguments are in registers.
|
| - SetArgsInRegisters();
|
| - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
|
| - }
|
| -
|
| - // Call the stub.
|
| - __ CallStub(this);
|
| -}
|
| -
|
| -
|
| -void GenericBinaryOpStub::GenerateCall(
|
| - MacroAssembler* masm,
|
| - Register left,
|
| - Smi* right) {
|
| - if (!ArgsInRegistersSupported()) {
|
| - // Pass arguments on the stack.
|
| - __ push(left);
|
| - __ Push(right);
|
| - } else {
|
| - // The calling convention with registers is left in rdx and right in rax.
|
| - Register left_arg = rdx;
|
| - Register right_arg = rax;
|
| - if (left.is(left_arg)) {
|
| - __ Move(right_arg, right);
|
| - } else if (left.is(right_arg) && IsOperationCommutative()) {
|
| - __ Move(left_arg, right);
|
| - SetArgsReversed();
|
| - } else {
|
| - // For non-commutative operations, left and right_arg might be
|
| - // the same register. Therefore, the order of the moves is
|
| - // important here in order to not overwrite left before moving
|
| - // it to left_arg.
|
| - __ movq(left_arg, left);
|
| - __ Move(right_arg, right);
|
| - }
|
| -
|
| - // Update flags to indicate that arguments are in registers.
|
| - SetArgsInRegisters();
|
| - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
|
| - }
|
| -
|
| - // Call the stub.
|
| - __ CallStub(this);
|
| -}
|
| -
|
| -
|
| -void GenericBinaryOpStub::GenerateCall(
|
| - MacroAssembler* masm,
|
| - Smi* left,
|
| - Register right) {
|
| - if (!ArgsInRegistersSupported()) {
|
| - // Pass arguments on the stack.
|
| - __ Push(left);
|
| - __ push(right);
|
| - } else {
|
| - // The calling convention with registers is left in rdx and right in rax.
|
| - Register left_arg = rdx;
|
| - Register right_arg = rax;
|
| - if (right.is(right_arg)) {
|
| - __ Move(left_arg, left);
|
| - } else if (right.is(left_arg) && IsOperationCommutative()) {
|
| - __ Move(right_arg, left);
|
| - SetArgsReversed();
|
| - } else {
|
| - // For non-commutative operations, right and left_arg might be
|
| - // the same register. Therefore, the order of the moves is
|
| - // important here in order to not overwrite right before moving
|
| - // it to right_arg.
|
| - __ movq(right_arg, right);
|
| - __ Move(left_arg, left);
|
| - }
|
| - // Update flags to indicate that arguments are in registers.
|
| - SetArgsInRegisters();
|
| - __ IncrementCounter(&Counters::generic_binary_stub_calls_regs, 1);
|
| - }
|
| -
|
| - // Call the stub.
|
| - __ CallStub(this);
|
| -}
|
| -
|
| -
|
| -Result GenericBinaryOpStub::GenerateCall(MacroAssembler* masm,
|
| - VirtualFrame* frame,
|
| - Result* left,
|
| - Result* right) {
|
| - if (ArgsInRegistersSupported()) {
|
| - SetArgsInRegisters();
|
| - return frame->CallStub(this, left, right);
|
| - } else {
|
| - frame->Push(left);
|
| - frame->Push(right);
|
| - return frame->CallStub(this, 2);
|
| - }
|
| -}
|
| -
|
| -
|
| -void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
|
| - // 1. Move arguments into rdx, rax except for DIV and MOD, which need the
|
| - // dividend in rax and rdx free for the division. Use rax, rbx for those.
|
| - Comment load_comment(masm, "-- Load arguments");
|
| - Register left = rdx;
|
| - Register right = rax;
|
| - if (op_ == Token::DIV || op_ == Token::MOD) {
|
| - left = rax;
|
| - right = rbx;
|
| - if (HasArgsInRegisters()) {
|
| - __ movq(rbx, rax);
|
| - __ movq(rax, rdx);
|
| - }
|
| - }
|
| - if (!HasArgsInRegisters()) {
|
| - __ movq(right, Operand(rsp, 1 * kPointerSize));
|
| - __ movq(left, Operand(rsp, 2 * kPointerSize));
|
| - }
|
| -
|
| - Label not_smis;
|
| - // 2. Smi check both operands.
|
| - if (static_operands_type_.IsSmi()) {
|
| - // Skip smi check if we know that both arguments are smis.
|
| - if (FLAG_debug_code) {
|
| - __ AbortIfNotSmi(left);
|
| - __ AbortIfNotSmi(right);
|
| - }
|
| - if (op_ == Token::BIT_OR) {
|
| - // Handle OR here, since we do extra smi-checking in the or code below.
|
| - __ SmiOr(right, right, left);
|
| - GenerateReturn(masm);
|
| - return;
|
| - }
|
| - } else {
|
| - if (op_ != Token::BIT_OR) {
|
| - // Skip the check for OR as it is better combined with the
|
| - // actual operation.
|
| - Comment smi_check_comment(masm, "-- Smi check arguments");
|
| - __ JumpIfNotBothSmi(left, right, ¬_smis);
|
| - }
|
| - }
|
| -
|
| - // 3. Operands are both smis (except for OR), perform the operation leaving
|
| - // the result in rax and check the result if necessary.
|
| - Comment perform_smi(masm, "-- Perform smi operation");
|
| - Label use_fp_on_smis;
|
| - switch (op_) {
|
| - case Token::ADD: {
|
| - ASSERT(right.is(rax));
|
| - __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
|
| - break;
|
| - }
|
| -
|
| - case Token::SUB: {
|
| - __ SmiSub(left, left, right, &use_fp_on_smis);
|
| - __ movq(rax, left);
|
| - break;
|
| - }
|
| -
|
| - case Token::MUL:
|
| - ASSERT(right.is(rax));
|
| - __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative.
|
| - break;
|
| -
|
| - case Token::DIV:
|
| - ASSERT(left.is(rax));
|
| - __ SmiDiv(left, left, right, &use_fp_on_smis);
|
| - break;
|
| -
|
| - case Token::MOD:
|
| - ASSERT(left.is(rax));
|
| - __ SmiMod(left, left, right, slow);
|
| - break;
|
| -
|
| - case Token::BIT_OR:
|
| - ASSERT(right.is(rax));
|
| - __ movq(rcx, right); // Save the right operand.
|
| - __ SmiOr(right, right, left); // BIT_OR is commutative.
|
| - __ testb(right, Immediate(kSmiTagMask));
|
| - __ j(not_zero, ¬_smis);
|
| - break;
|
| -
|
| - case Token::BIT_AND:
|
| - ASSERT(right.is(rax));
|
| - __ SmiAnd(right, right, left); // BIT_AND is commutative.
|
| - break;
|
| -
|
| - case Token::BIT_XOR:
|
| - ASSERT(right.is(rax));
|
| - __ SmiXor(right, right, left); // BIT_XOR is commutative.
|
| - break;
|
| -
|
| - case Token::SHL:
|
| - case Token::SHR:
|
| - case Token::SAR:
|
| - switch (op_) {
|
| - case Token::SAR:
|
| - __ SmiShiftArithmeticRight(left, left, right);
|
| - break;
|
| - case Token::SHR:
|
| - __ SmiShiftLogicalRight(left, left, right, slow);
|
| - break;
|
| - case Token::SHL:
|
| - __ SmiShiftLeft(left, left, right);
|
| - break;
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| - __ movq(rax, left);
|
| - break;
|
| -
|
| - default:
|
| - UNREACHABLE();
|
| - break;
|
| - }
|
| -
|
| - // 4. Emit return of result in rax.
|
| - GenerateReturn(masm);
|
| -
|
| - // 5. For some operations emit inline code to perform floating point
|
| - // operations on known smis (e.g., if the result of the operation
|
| - // overflowed the smi range).
|
| - switch (op_) {
|
| - case Token::ADD:
|
| - case Token::SUB:
|
| - case Token::MUL:
|
| - case Token::DIV: {
|
| - ASSERT(use_fp_on_smis.is_linked());
|
| - __ bind(&use_fp_on_smis);
|
| - if (op_ == Token::DIV) {
|
| - __ movq(rdx, rax);
|
| - __ movq(rax, rbx);
|
| - }
|
| - // left is rdx, right is rax.
|
| - __ AllocateHeapNumber(rbx, rcx, slow);
|
| - FloatingPointHelper::LoadSSE2SmiOperands(masm);
|
| - switch (op_) {
|
| - case Token::ADD: __ addsd(xmm0, xmm1); break;
|
| - case Token::SUB: __ subsd(xmm0, xmm1); break;
|
| - case Token::MUL: __ mulsd(xmm0, xmm1); break;
|
| - case Token::DIV: __ divsd(xmm0, xmm1); break;
|
| - default: UNREACHABLE();
|
| - }
|
| - __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0);
|
| - __ movq(rax, rbx);
|
| - GenerateReturn(masm);
|
| - }
|
| - default:
|
| - break;
|
| - }
|
| -
|
| - // 6. Non-smi operands, fall out to the non-smi code with the operands in
|
| - // rdx and rax.
|
| - Comment done_comment(masm, "-- Enter non-smi code");
|
| - __ bind(¬_smis);
|
| -
|
| - switch (op_) {
|
| - case Token::DIV:
|
| - case Token::MOD:
|
| - // Operands are in rax, rbx at this point.
|
| - __ movq(rdx, rax);
|
| - __ movq(rax, rbx);
|
| - break;
|
| -
|
| - case Token::BIT_OR:
|
| - // Right operand is saved in rcx and rax was destroyed by the smi
|
| - // operation.
|
| - __ movq(rax, rcx);
|
| - break;
|
| -
|
| - default:
|
| - break;
|
| - }
|
| -}
|
| -
|
| -
|
| -void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
|
| - Label call_runtime;
|
| -
|
| - if (ShouldGenerateSmiCode()) {
|
| - GenerateSmiCode(masm, &call_runtime);
|
| - } else if (op_ != Token::MOD) {
|
| - if (!HasArgsInRegisters()) {
|
| - GenerateLoadArguments(masm);
|
| - }
|
| - }
|
| - // Floating point case.
|
| - if (ShouldGenerateFPCode()) {
|
| - switch (op_) {
|
| - case Token::ADD:
|
| - case Token::SUB:
|
| - case Token::MUL:
|
| - case Token::DIV: {
|
| - if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
|
| - HasSmiCodeInStub()) {
|
| - // Execution reaches this point when the first non-smi argument occurs
|
| - // (and only if smi code is generated). This is the right moment to
|
| - // patch to HEAP_NUMBERS state. The transition is attempted only for
|
| - // the four basic operations. The stub stays in the DEFAULT state
|
| - // forever for all other operations (also if smi code is skipped).
|
| - GenerateTypeTransition(masm);
|
| - break;
|
| - }
|
| -
|
| - Label not_floats;
|
| - // rax: y
|
| - // rdx: x
|
| - if (static_operands_type_.IsNumber()) {
|
| - if (FLAG_debug_code) {
|
| - // Assert at runtime that inputs are only numbers.
|
| - __ AbortIfNotNumber(rdx);
|
| - __ AbortIfNotNumber(rax);
|
| - }
|
| - FloatingPointHelper::LoadSSE2NumberOperands(masm);
|
| - } else {
|
| - FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime);
|
| - }
|
| -
|
| - switch (op_) {
|
| - case Token::ADD: __ addsd(xmm0, xmm1); break;
|
| - case Token::SUB: __ subsd(xmm0, xmm1); break;
|
| - case Token::MUL: __ mulsd(xmm0, xmm1); break;
|
| - case Token::DIV: __ divsd(xmm0, xmm1); break;
|
| - default: UNREACHABLE();
|
| - }
|
| - // Allocate a heap number, if needed.
|
| - Label skip_allocation;
|
| - OverwriteMode mode = mode_;
|
| - if (HasArgsReversed()) {
|
| - if (mode == OVERWRITE_RIGHT) {
|
| - mode = OVERWRITE_LEFT;
|
| - } else if (mode == OVERWRITE_LEFT) {
|
| - mode = OVERWRITE_RIGHT;
|
| - }
|
| - }
|
| - switch (mode) {
|
| - case OVERWRITE_LEFT:
|
| - __ JumpIfNotSmi(rdx, &skip_allocation);
|
| - __ AllocateHeapNumber(rbx, rcx, &call_runtime);
|
| - __ movq(rdx, rbx);
|
| - __ bind(&skip_allocation);
|
| - __ movq(rax, rdx);
|
| - break;
|
| - case OVERWRITE_RIGHT:
|
| - // If the argument in rax is already an object, we skip the
|
| - // allocation of a heap number.
|
| - __ JumpIfNotSmi(rax, &skip_allocation);
|
| - // Fall through!
|
| - case NO_OVERWRITE:
|
| - // Allocate a heap number for the result. Keep rax and rdx intact
|
| - // for the possible runtime call.
|
| - __ AllocateHeapNumber(rbx, rcx, &call_runtime);
|
| - __ movq(rax, rbx);
|
| - __ bind(&skip_allocation);
|
| - break;
|
| - default: UNREACHABLE();
|
| - }
|
| - __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
|
| - GenerateReturn(masm);
|
| - __ bind(¬_floats);
|
| - if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
|
| - !HasSmiCodeInStub()) {
|
| - // Execution reaches this point when the first non-number argument
|
| - // occurs (and only if smi code is skipped from the stub, otherwise
|
| - // the patching has already been done earlier in this case branch).
|
| - // A perfect moment to try patching to STRINGS for the ADD operation.
|
| - if (op_ == Token::ADD) {
|
| - GenerateTypeTransition(masm);
|
| - }
|
| - }
|
| - break;
|
| - }
|
| - case Token::MOD: {
|
| - // For MOD we go directly to runtime in the non-smi case.
|
| - break;
|
| - }
|
| - case Token::BIT_OR:
|
| - case Token::BIT_AND:
|
| - case Token::BIT_XOR:
|
| - case Token::SAR:
|
| - case Token::SHL:
|
| - case Token::SHR: {
|
| - Label skip_allocation, non_smi_shr_result;
|
| - Register heap_number_map = r9;
|
| - __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
|
| - if (static_operands_type_.IsNumber()) {
|
| - if (FLAG_debug_code) {
|
| - // Assert at runtime that inputs are only numbers.
|
| - __ AbortIfNotNumber(rdx);
|
| - __ AbortIfNotNumber(rax);
|
| - }
|
| - FloatingPointHelper::LoadNumbersAsIntegers(masm);
|
| - } else {
|
| - FloatingPointHelper::LoadAsIntegers(masm,
|
| - &call_runtime,
|
| - heap_number_map);
|
| - }
|
| - switch (op_) {
|
| - case Token::BIT_OR: __ orl(rax, rcx); break;
|
| - case Token::BIT_AND: __ andl(rax, rcx); break;
|
| - case Token::BIT_XOR: __ xorl(rax, rcx); break;
|
| - case Token::SAR: __ sarl_cl(rax); break;
|
| - case Token::SHL: __ shll_cl(rax); break;
|
| - case Token::SHR: {
|
| - __ shrl_cl(rax);
|
| - // Check if result is negative. This can only happen for a shift
|
| - // by zero.
|
| - __ testl(rax, rax);
|
| - __ j(negative, &non_smi_shr_result);
|
| - break;
|
| - }
|
| - default: UNREACHABLE();
|
| - }
|
| -
|
| - STATIC_ASSERT(kSmiValueSize == 32);
|
| - // Tag smi result and return.
|
| - __ Integer32ToSmi(rax, rax);
|
| - GenerateReturn(masm);
|
| -
|
| - // All bit-ops except SHR return a signed int32 that can be
|
| - // returned immediately as a smi.
|
| - // We might need to allocate a HeapNumber if we shift a negative
|
| - // number right by zero (i.e., convert to UInt32).
|
| - if (op_ == Token::SHR) {
|
| - ASSERT(non_smi_shr_result.is_linked());
|
| - __ bind(&non_smi_shr_result);
|
| - // Allocate a heap number if needed.
|
| - __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
|
| - switch (mode_) {
|
| - case OVERWRITE_LEFT:
|
| - case OVERWRITE_RIGHT:
|
| - // If the operand was an object, we skip the
|
| - // allocation of a heap number.
|
| - __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ?
|
| - 1 * kPointerSize : 2 * kPointerSize));
|
| - __ JumpIfNotSmi(rax, &skip_allocation);
|
| - // Fall through!
|
| - case NO_OVERWRITE:
|
| - // Allocate heap number in new space.
|
| - // Not using the AllocateHeapNumber macro in order to reuse the
|
| - // already loaded heap_number_map.
|
| - __ AllocateInNewSpace(HeapNumber::kSize,
|
| - rax,
|
| - rcx,
|
| - no_reg,
|
| - &call_runtime,
|
| - TAG_OBJECT);
|
| - // Set the map.
|
| - if (FLAG_debug_code) {
|
| - __ AbortIfNotRootValue(heap_number_map,
|
| - Heap::kHeapNumberMapRootIndex,
|
| - "HeapNumberMap register clobbered.");
|
| - }
|
| - __ movq(FieldOperand(rax, HeapObject::kMapOffset),
|
| - heap_number_map);
|
| - __ bind(&skip_allocation);
|
| - break;
|
| - default: UNREACHABLE();
|
| - }
|
| - // Store the result in the HeapNumber and return.
|
| - __ cvtqsi2sd(xmm0, rbx);
|
| - __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
|
| - GenerateReturn(masm);
|
| - }
|
| -
|
| - break;
|
| - }
|
| - default: UNREACHABLE(); break;
|
| - }
|
| - }
|
| -
|
| - // If all else fails, use the runtime system to get the correct
|
| - // result. If the arguments were passed in registers, place them on the
|
| - // stack in the correct order below the return address.
|
| - __ bind(&call_runtime);
|
| -
|
| - if (HasArgsInRegisters()) {
|
| - GenerateRegisterArgsPush(masm);
|
| - }
|
| -
|
| - switch (op_) {
|
| - case Token::ADD: {
|
| - // Registers containing left and right operands respectively.
|
| - Register lhs, rhs;
|
| -
|
| - if (HasArgsReversed()) {
|
| - lhs = rax;
|
| - rhs = rdx;
|
| - } else {
|
| - lhs = rdx;
|
| - rhs = rax;
|
| - }
|
| -
|
| - // Test for string arguments before calling runtime.
|
| - Label not_strings, both_strings, not_string1, string1, string1_smi2;
|
| -
|
| - // If this stub has already generated FP-specific code then the arguments
|
| - // are already in rdx and rax.
|
| - if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
|
| - GenerateLoadArguments(masm);
|
| - }
|
| -
|
| - Condition is_smi;
|
| - is_smi = masm->CheckSmi(lhs);
|
| - __ j(is_smi, ¬_string1);
|
| - __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8);
|
| - __ j(above_equal, ¬_string1);
|
| -
|
| - // First argument is a string, test second.
|
| - is_smi = masm->CheckSmi(rhs);
|
| - __ j(is_smi, &string1_smi2);
|
| - __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9);
|
| - __ j(above_equal, &string1);
|
| -
|
| - // First and second argument are strings.
|
| - StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
|
| - __ TailCallStub(&string_add_stub);
|
| -
|
| - __ bind(&string1_smi2);
|
| - // First argument is a string, second is a smi. Try to look up the number
|
| - // string for the smi in the number string cache.
|
| - NumberToStringStub::GenerateLookupNumberStringCache(
|
| - masm, rhs, rbx, rcx, r8, true, &string1);
|
| -
|
| - // Replace the second argument on the stack and tail-call the string add
|
| - // stub to produce the result.
|
| - __ movq(Operand(rsp, 1 * kPointerSize), rbx);
|
| - __ TailCallStub(&string_add_stub);
|
| -
|
| - // Only first argument is a string.
|
| - __ bind(&string1);
|
| - __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
|
| -
|
| - // First argument was not a string, test second.
|
| - __ bind(¬_string1);
|
| - is_smi = masm->CheckSmi(rhs);
|
| - __ j(is_smi, ¬_strings);
|
| - __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs);
|
| - __ j(above_equal, ¬_strings);
|
| -
|
| - // Only second argument is a string.
|
| - __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
|
| -
|
| - __ bind(¬_strings);
|
| - // Neither argument is a string.
|
| - __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
|
| - break;
|
| - }
|
| - case Token::SUB:
|
| - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
|
| - break;
|
| - case Token::MUL:
|
| - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
|
| - break;
|
| - case Token::DIV:
|
| - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
|
| - break;
|
| - case Token::MOD:
|
| - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
|
| - break;
|
| - case Token::BIT_OR:
|
| - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
|
| - break;
|
| - case Token::BIT_AND:
|
| - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
|
| - break;
|
| - case Token::BIT_XOR:
|
| - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
|
| - break;
|
| - case Token::SAR:
|
| - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
|
| - break;
|
| - case Token::SHL:
|
| - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
|
| - break;
|
| - case Token::SHR:
|
| - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
|
| - break;
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| -}
|
| -
|
| -
|
| -void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
|
| - ASSERT(!HasArgsInRegisters());
|
| - __ movq(rax, Operand(rsp, 1 * kPointerSize));
|
| - __ movq(rdx, Operand(rsp, 2 * kPointerSize));
|
| -}
|
| -
|
| -
|
| -void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
|
| - // If arguments are not passed in registers, remove them from the stack before
|
| - // returning.
|
| - if (!HasArgsInRegisters()) {
|
| - __ ret(2 * kPointerSize); // Remove both operands.
|
| - } else {
|
| - __ ret(0);
|
| - }
|
| -}
|
| -
|
| -
|
| -void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
|
| - ASSERT(HasArgsInRegisters());
|
| - __ pop(rcx);
|
| - if (HasArgsReversed()) {
|
| - __ push(rax);
|
| - __ push(rdx);
|
| - } else {
|
| - __ push(rdx);
|
| - __ push(rax);
|
| - }
|
| - __ push(rcx);
|
| -}
|
| -
|
| -
|
| -void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
|
| - Label get_result;
|
| -
|
| - // Ensure the operands are on the stack.
|
| - if (HasArgsInRegisters()) {
|
| - GenerateRegisterArgsPush(masm);
|
| - }
|
| -
|
| - // Left and right arguments are already on stack.
|
| - __ pop(rcx); // Save the return address.
|
| -
|
| - // Push this stub's key.
|
| - __ Push(Smi::FromInt(MinorKey()));
|
| -
|
| - // Although the operation and the type info are encoded into the key,
|
| - // the encoding is opaque, so push them too.
|
| - __ Push(Smi::FromInt(op_));
|
| -
|
| - __ Push(Smi::FromInt(runtime_operands_type_));
|
| -
|
| - __ push(rcx); // The return address.
|
| -
|
| - // Perform patching to an appropriate fast case and return the result.
|
| - __ TailCallExternalReference(
|
| - ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
|
| - 5,
|
| - 1);
|
| -}
|
| -
|
| -
|
| -Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
|
| - GenericBinaryOpStub stub(key, type_info);
|
| - return stub.GetCode();
|
| -}
|
| -
|
| -
|
| int CompareStub::MinorKey() {
|
| // Encode the three parameters in a unique 16 bit value. To avoid duplicate
|
| // stubs the never NaN NaN condition is only taken into account if the
|
| @@ -12064,6 +12043,11 @@
|
|
|
| #undef __
|
|
|
| +void RecordWriteStub::Generate(MacroAssembler* masm) {
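|
| + // Delegate the record-write bookkeeping for the store at addr_ inside
|
| + // object_ to the macro-assembler helper, then return.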
|
| + masm->RecordWriteHelper(object_, addr_, scratch_);
|
| + masm->ret(0);
|
| +}
|
| +
|
| } } // namespace v8::internal
|
|
|
| #endif // V8_TARGET_ARCH_X64
|
|
|