| Index: src/ia32/codegen-ia32.cc
|
| ===================================================================
|
| --- src/ia32/codegen-ia32.cc (revision 3964)
|
| +++ src/ia32/codegen-ia32.cc (working copy)
|
| @@ -39,6 +39,7 @@
|
| #include "register-allocator-inl.h"
|
| #include "runtime.h"
|
| #include "scopes.h"
|
| +#include "virtual-frame-inl.h"
|
|
|
| namespace v8 {
|
| namespace internal {
|
| @@ -116,9 +117,6 @@
|
| }
|
|
|
|
|
| -Scope* CodeGenerator::scope() { return info_->function()->scope(); }
|
| -
|
| -
|
| // Calling conventions:
|
| // ebp: caller's frame pointer
|
| // esp: stack pointer
|
| @@ -128,6 +126,7 @@
|
| void CodeGenerator::Generate(CompilationInfo* info) {
|
| // Record the position for debugging purposes.
|
| CodeForFunctionPosition(info->function());
|
| + Comment cmnt(masm_, "[ function compiled by virtual frame code generator");
|
|
|
| // Initialize state.
|
| info_ = info;
|
| @@ -734,7 +733,27 @@
|
| Result value = frame_->Pop();
|
| value.ToRegister();
|
|
|
| - if (value.is_number()) {
|
| + if (value.is_integer32()) { // Also takes Smi case.
|
| + Comment cmnt(masm_, "ONLY_INTEGER_32");
|
| + if (FLAG_debug_code) {
|
| + Label ok;
|
| + __ AbortIfNotNumber(value.reg(), "ToBoolean operand is not a number.");
|
| + __ test(value.reg(), Immediate(kSmiTagMask));
|
| + __ j(zero, &ok);
|
| + __ fldz();
|
| + __ fld_d(FieldOperand(value.reg(), HeapNumber::kValueOffset));
|
| + __ FCmp();
|
| + __ j(not_zero, &ok);
|
| + __ Abort("Smi was wrapped in HeapNumber in output from bitop");
|
| + __ bind(&ok);
|
| + }
|
| + // In the integer32 case there are no Smis hidden in heap numbers, so we
|
| + // need only test for Smi zero.
|
| + __ test(value.reg(), Operand(value.reg()));
|
| + dest->false_target()->Branch(zero);
|
| + value.Unuse();
|
| + dest->Split(not_zero);
|
| + } else if (value.is_number()) {
|
| Comment cmnt(masm_, "ONLY_NUMBER");
|
| // Fast case if NumberInfo indicates only numbers.
|
| if (FLAG_debug_code) {
|
| @@ -818,8 +837,17 @@
|
| // Takes the operands in edx and eax and loads them as integers in eax
|
| // and ecx.
|
| static void LoadAsIntegers(MacroAssembler* masm,
|
| + NumberInfo number_info,
|
| bool use_sse3,
|
| Label* operand_conversion_failure);
|
| + static void LoadNumbersAsIntegers(MacroAssembler* masm,
|
| + NumberInfo number_info,
|
| + bool use_sse3,
|
| + Label* operand_conversion_failure);
|
| + static void LoadUnknownsAsIntegers(MacroAssembler* masm,
|
| + bool use_sse3,
|
| + Label* operand_conversion_failure);
|
| +
|
| // Test if operands are smis or heap numbers and load them
|
| // into xmm0 and xmm1 if they are. Operands are in edx and eax.
|
| // Leaves operands unchanged.
|
| @@ -851,13 +879,14 @@
|
| }
|
|
|
| OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
|
| - "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
|
| + "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
|
| op_name,
|
| overwrite_name,
|
| (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
|
| args_in_registers_ ? "RegArgs" : "StackArgs",
|
| args_reversed_ ? "_R" : "",
|
| - NumberInfo::ToString(operands_type_));
|
| + static_operands_type_.ToString(),
|
| + BinaryOpIC::GetName(runtime_operands_type_));
|
| return name_;
|
| }
|
|
|
| @@ -869,8 +898,11 @@
|
| Register dst,
|
| Register left,
|
| Register right,
|
| + NumberInfo left_info,
|
| + NumberInfo right_info,
|
| OverwriteMode mode)
|
| - : op_(op), dst_(dst), left_(left), right_(right), mode_(mode) {
|
| + : op_(op), dst_(dst), left_(left), right_(right),
|
| + left_info_(left_info), right_info_(right_info), mode_(mode) {
|
| set_comment("[ DeferredInlineBinaryOperation");
|
| }
|
|
|
| @@ -881,6 +913,8 @@
|
| Register dst_;
|
| Register left_;
|
| Register right_;
|
| + NumberInfo left_info_;
|
| + NumberInfo right_info_;
|
| OverwriteMode mode_;
|
| };
|
|
|
| @@ -894,18 +928,22 @@
|
| CpuFeatures::Scope use_sse2(SSE2);
|
| Label call_runtime, after_alloc_failure;
|
| Label left_smi, right_smi, load_right, do_op;
|
| - __ test(left_, Immediate(kSmiTagMask));
|
| - __ j(zero, &left_smi);
|
| - __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
|
| - Factory::heap_number_map());
|
| - __ j(not_equal, &call_runtime);
|
| - __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
|
| - if (mode_ == OVERWRITE_LEFT) {
|
| - __ mov(dst_, left_);
|
| + if (!left_info_.IsSmi()) {
|
| + __ test(left_, Immediate(kSmiTagMask));
|
| + __ j(zero, &left_smi);
|
| + if (!left_info_.IsNumber()) {
|
| + __ cmp(FieldOperand(left_, HeapObject::kMapOffset),
|
| + Factory::heap_number_map());
|
| + __ j(not_equal, &call_runtime);
|
| + }
|
| + __ movdbl(xmm0, FieldOperand(left_, HeapNumber::kValueOffset));
|
| + if (mode_ == OVERWRITE_LEFT) {
|
| + __ mov(dst_, left_);
|
| + }
|
| + __ jmp(&load_right);
|
| +
|
| + __ bind(&left_smi);
|
| }
|
| - __ jmp(&load_right);
|
| -
|
| - __ bind(&left_smi);
|
| __ SmiUntag(left_);
|
| __ cvtsi2sd(xmm0, Operand(left_));
|
| __ SmiTag(left_);
|
| @@ -917,23 +955,27 @@
|
| }
|
|
|
| __ bind(&load_right);
|
| - __ test(right_, Immediate(kSmiTagMask));
|
| - __ j(zero, &right_smi);
|
| - __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
|
| - Factory::heap_number_map());
|
| - __ j(not_equal, &call_runtime);
|
| - __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
|
| - if (mode_ == OVERWRITE_RIGHT) {
|
| - __ mov(dst_, right_);
|
| - } else if (mode_ == NO_OVERWRITE) {
|
| - Label alloc_failure;
|
| - __ push(left_);
|
| - __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
|
| - __ pop(left_);
|
| + if (!right_info_.IsSmi()) {
|
| + __ test(right_, Immediate(kSmiTagMask));
|
| + __ j(zero, &right_smi);
|
| + if (!right_info_.IsNumber()) {
|
| + __ cmp(FieldOperand(right_, HeapObject::kMapOffset),
|
| + Factory::heap_number_map());
|
| + __ j(not_equal, &call_runtime);
|
| + }
|
| + __ movdbl(xmm1, FieldOperand(right_, HeapNumber::kValueOffset));
|
| + if (mode_ == OVERWRITE_RIGHT) {
|
| + __ mov(dst_, right_);
|
| + } else if (mode_ == NO_OVERWRITE) {
|
| + Label alloc_failure;
|
| + __ push(left_);
|
| + __ AllocateHeapNumber(dst_, left_, no_reg, &after_alloc_failure);
|
| + __ pop(left_);
|
| + }
|
| + __ jmp(&do_op);
|
| +
|
| + __ bind(&right_smi);
|
| }
|
| - __ jmp(&do_op);
|
| -
|
| - __ bind(&right_smi);
|
| __ SmiUntag(right_);
|
| __ cvtsi2sd(xmm1, Operand(right_));
|
| __ SmiTag(right_);
|
| @@ -959,13 +1001,105 @@
|
| __ pop(left_);
|
| __ bind(&call_runtime);
|
| }
|
| - GenericBinaryOpStub stub(op_, mode_, NO_SMI_CODE_IN_STUB);
|
| + GenericBinaryOpStub stub(op_,
|
| + mode_,
|
| + NO_SMI_CODE_IN_STUB,
|
| + NumberInfo::Combine(left_info_, right_info_));
|
| stub.GenerateCall(masm_, left_, right_);
|
| if (!dst_.is(eax)) __ mov(dst_, eax);
|
| __ bind(&done);
|
| }
|
|
|
|
|
| +static NumberInfo CalculateNumberInfo(NumberInfo operands_type,
|
| + Token::Value op,
|
| + const Result& right,
|
| + const Result& left) {
|
| + // Set NumberInfo of result according to the operation performed.
|
| + // Rely on the fact that smis have a 31 bit payload on ia32.
|
| + ASSERT(kSmiValueSize == 31);
|
| + switch (op) {
|
| + case Token::COMMA:
|
| + return right.number_info();
|
| + case Token::OR:
|
| + case Token::AND:
|
| + // Result type can be either of the two input types.
|
| + return operands_type;
|
| + case Token::BIT_AND: {
|
| + // Anding with positive Smis will give you a Smi.
|
| + if (right.is_constant() && right.handle()->IsSmi() &&
|
| + Smi::cast(*right.handle())->value() >= 0) {
|
| + return NumberInfo::Smi();
|
| + } else if (left.is_constant() && left.handle()->IsSmi() &&
|
| + Smi::cast(*left.handle())->value() >= 0) {
|
| + return NumberInfo::Smi();
|
| + }
|
| + return (operands_type.IsSmi())
|
| + ? NumberInfo::Smi()
|
| + : NumberInfo::Integer32();
|
| + }
|
| + case Token::BIT_OR: {
|
| + // Oring with negative Smis will give you a Smi.
|
| + if (right.is_constant() && right.handle()->IsSmi() &&
|
| + Smi::cast(*right.handle())->value() < 0) {
|
| + return NumberInfo::Smi();
|
| + } else if (left.is_constant() && left.handle()->IsSmi() &&
|
| + Smi::cast(*left.handle())->value() < 0) {
|
| + return NumberInfo::Smi();
|
| + }
|
| + return (operands_type.IsSmi())
|
| + ? NumberInfo::Smi()
|
| + : NumberInfo::Integer32();
|
| + }
|
| + case Token::BIT_XOR:
|
| + // Result is always a 32 bit integer. Smi property of inputs is preserved.
|
| + return (operands_type.IsSmi())
|
| + ? NumberInfo::Smi()
|
| + : NumberInfo::Integer32();
|
| + case Token::SAR:
|
| + if (left.is_smi()) return NumberInfo::Smi();
|
| + // Result is a smi if we shift by a constant >= 1, otherwise an integer32.
|
| + return (right.is_constant() && right.handle()->IsSmi()
|
| + && Smi::cast(*right.handle())->value() >= 1)
|
| + ? NumberInfo::Smi()
|
| + : NumberInfo::Integer32();
|
| + case Token::SHR:
|
| + // Result is a smi if we shift by a constant >= 2, otherwise an integer32.
|
| + return (right.is_constant() && right.handle()->IsSmi()
|
| + && Smi::cast(*right.handle())->value() >= 2)
|
| + ? NumberInfo::Smi()
|
| + : NumberInfo::Integer32();
|
| + case Token::ADD:
|
| + if (operands_type.IsSmi()) {
|
| + // The Integer32 range is big enough to take the sum of any two Smis.
|
| + return NumberInfo::Integer32();
|
| + } else {
|
| + // Result could be a string or a number. Check types of inputs.
|
| + return operands_type.IsNumber()
|
| + ? NumberInfo::Number()
|
| + : NumberInfo::Unknown();
|
| + }
|
| + case Token::SHL:
|
| + return NumberInfo::Integer32();
|
| + case Token::SUB:
|
| + // The Integer32 range is big enough to take the difference of any two
|
| + // Smis.
|
| + return (operands_type.IsSmi()) ?
|
| + NumberInfo::Integer32() :
|
| + NumberInfo::Number();
|
| + case Token::MUL:
|
| + case Token::DIV:
|
| + case Token::MOD:
|
| + // Result is always a number.
|
| + return NumberInfo::Number();
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| + UNREACHABLE();
|
| + return NumberInfo::Unknown();
|
| +}
|
| +
|
| +
|
| void CodeGenerator::GenericBinaryOperation(Token::Value op,
|
| StaticType* type,
|
| OverwriteMode overwrite_mode) {
|
| @@ -1021,9 +1155,11 @@
|
| }
|
|
|
| // Get number type of left and right sub-expressions.
|
| - NumberInfo::Type operands_type =
|
| + NumberInfo operands_type =
|
| NumberInfo::Combine(left.number_info(), right.number_info());
|
|
|
| + NumberInfo result_type = CalculateNumberInfo(operands_type, op, right, left);
|
| +
|
| Result answer;
|
| if (left_is_non_smi_constant || right_is_non_smi_constant) {
|
| // Go straight to the slow case, with no smi code.
|
| @@ -1044,7 +1180,10 @@
|
| // generate the inline Smi check code if this operation is part of a loop.
|
| // For all other operations only inline the Smi check code for likely smis
|
| // if the operation is part of a loop.
|
| - if (loop_nesting() > 0 && (Token::IsBitOp(op) || type->IsLikelySmi())) {
|
| + if (loop_nesting() > 0 &&
|
| + (Token::IsBitOp(op) ||
|
| + operands_type.IsInteger32() ||
|
| + type->IsLikelySmi())) {
|
| answer = LikelySmiBinaryOperation(op, &left, &right, overwrite_mode);
|
| } else {
|
| GenericBinaryOpStub stub(op,
|
| @@ -1055,58 +1194,6 @@
|
| }
|
| }
|
|
|
| - // Set NumberInfo of result according to the operation performed.
|
| - // Rely on the fact that smis have a 31 bit payload on ia32.
|
| - ASSERT(kSmiValueSize == 31);
|
| - NumberInfo::Type result_type = NumberInfo::kUnknown;
|
| - switch (op) {
|
| - case Token::COMMA:
|
| - result_type = right.number_info();
|
| - break;
|
| - case Token::OR:
|
| - case Token::AND:
|
| - // Result type can be either of the two input types.
|
| - result_type = operands_type;
|
| - break;
|
| - case Token::BIT_OR:
|
| - case Token::BIT_XOR:
|
| - case Token::BIT_AND:
|
| - // Result is always a number. Smi property of inputs is preserved.
|
| - result_type = (operands_type == NumberInfo::kSmi)
|
| - ? NumberInfo::kSmi
|
| - : NumberInfo::kNumber;
|
| - break;
|
| - case Token::SAR:
|
| - // Result is a smi if we shift by a constant >= 1, otherwise a number.
|
| - result_type = (right.is_constant() && right.handle()->IsSmi()
|
| - && Smi::cast(*right.handle())->value() >= 1)
|
| - ? NumberInfo::kSmi
|
| - : NumberInfo::kNumber;
|
| - break;
|
| - case Token::SHR:
|
| - // Result is a smi if we shift by a constant >= 2, otherwise a number.
|
| - result_type = (right.is_constant() && right.handle()->IsSmi()
|
| - && Smi::cast(*right.handle())->value() >= 2)
|
| - ? NumberInfo::kSmi
|
| - : NumberInfo::kNumber;
|
| - break;
|
| - case Token::ADD:
|
| - // Result could be a string or a number. Check types of inputs.
|
| - result_type = NumberInfo::IsNumber(operands_type)
|
| - ? NumberInfo::kNumber
|
| - : NumberInfo::kUnknown;
|
| - break;
|
| - case Token::SHL:
|
| - case Token::SUB:
|
| - case Token::MUL:
|
| - case Token::DIV:
|
| - case Token::MOD:
|
| - // Result is always a number.
|
| - result_type = NumberInfo::kNumber;
|
| - break;
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| answer.set_number_info(result_type);
|
| frame_->Push(&answer);
|
| }
|
| @@ -1193,6 +1280,12 @@
|
| }
|
|
|
|
|
| +static void CheckTwoForSminess(MacroAssembler* masm,
|
| + Register left, Register right, Register scratch,
|
| + NumberInfo left_info, NumberInfo right_info,
|
| + DeferredInlineBinaryOperation* deferred);
|
| +
|
| +
|
| // Implements a binary operation using a deferred code object and some
|
| // inline code to operate on smis quickly.
|
| Result CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
|
| @@ -1273,6 +1366,8 @@
|
| (op == Token::DIV) ? eax : edx,
|
| left->reg(),
|
| right->reg(),
|
| + left->number_info(),
|
| + right->number_info(),
|
| overwrite_mode);
|
| if (left->reg().is(right->reg())) {
|
| __ test(left->reg(), Immediate(kSmiTagMask));
|
| @@ -1370,11 +1465,11 @@
|
| answer.reg(),
|
| left->reg(),
|
| ecx,
|
| + left->number_info(),
|
| + right->number_info(),
|
| overwrite_mode);
|
| - __ mov(answer.reg(), left->reg());
|
| - __ or_(answer.reg(), Operand(ecx));
|
| - __ test(answer.reg(), Immediate(kSmiTagMask));
|
| - deferred->Branch(not_zero);
|
| + CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
|
| + left->number_info(), right->number_info(), deferred);
|
|
|
| // Untag both operands.
|
| __ mov(answer.reg(), left->reg());
|
| @@ -1444,16 +1539,12 @@
|
| answer.reg(),
|
| left->reg(),
|
| right->reg(),
|
| + left->number_info(),
|
| + right->number_info(),
|
| overwrite_mode);
|
| - if (left->reg().is(right->reg())) {
|
| - __ test(left->reg(), Immediate(kSmiTagMask));
|
| - } else {
|
| - __ mov(answer.reg(), left->reg());
|
| - __ or_(answer.reg(), Operand(right->reg()));
|
| - ASSERT(kSmiTag == 0); // Adjust test if not the case.
|
| - __ test(answer.reg(), Immediate(kSmiTagMask));
|
| - }
|
| - deferred->Branch(not_zero);
|
| + CheckTwoForSminess(masm_, left->reg(), right->reg(), answer.reg(),
|
| + left->number_info(), right->number_info(), deferred);
|
| +
|
| __ mov(answer.reg(), left->reg());
|
| switch (op) {
|
| case Token::ADD:
|
| @@ -1522,13 +1613,16 @@
|
| DeferredInlineSmiOperation(Token::Value op,
|
| Register dst,
|
| Register src,
|
| + NumberInfo number_info,
|
| Smi* value,
|
| OverwriteMode overwrite_mode)
|
| : op_(op),
|
| dst_(dst),
|
| src_(src),
|
| + number_info_(number_info),
|
| value_(value),
|
| overwrite_mode_(overwrite_mode) {
|
| + if (number_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
|
| set_comment("[ DeferredInlineSmiOperation");
|
| }
|
|
|
| @@ -1538,6 +1632,7 @@
|
| Token::Value op_;
|
| Register dst_;
|
| Register src_;
|
| + NumberInfo number_info_;
|
| Smi* value_;
|
| OverwriteMode overwrite_mode_;
|
| };
|
| @@ -1548,7 +1643,8 @@
|
| GenericBinaryOpStub stub(
|
| op_,
|
| overwrite_mode_,
|
| - (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB);
|
| + (op_ == Token::MOD) ? NO_GENERIC_BINARY_FLAGS : NO_SMI_CODE_IN_STUB,
|
| + NumberInfo::Combine(NumberInfo::Smi(), number_info_));
|
| stub.GenerateCall(masm_, src_, value_);
|
| if (!dst_.is(eax)) __ mov(dst_, eax);
|
| }
|
| @@ -1562,9 +1658,11 @@
|
| Register dst,
|
| Smi* value,
|
| Register src,
|
| + NumberInfo number_info,
|
| OverwriteMode overwrite_mode)
|
| : op_(op),
|
| dst_(dst),
|
| + number_info_(number_info),
|
| value_(value),
|
| src_(src),
|
| overwrite_mode_(overwrite_mode) {
|
| @@ -1576,6 +1674,7 @@
|
| private:
|
| Token::Value op_;
|
| Register dst_;
|
| + NumberInfo number_info_;
|
| Smi* value_;
|
| Register src_;
|
| OverwriteMode overwrite_mode_;
|
| @@ -1583,7 +1682,11 @@
|
|
|
|
|
| void DeferredInlineSmiOperationReversed::Generate() {
|
| - GenericBinaryOpStub igostub(op_, overwrite_mode_, NO_SMI_CODE_IN_STUB);
|
| + GenericBinaryOpStub igostub(
|
| + op_,
|
| + overwrite_mode_,
|
| + NO_SMI_CODE_IN_STUB,
|
| + NumberInfo::Combine(NumberInfo::Smi(), number_info_));
|
| igostub.GenerateCall(masm_, value_, src_);
|
| if (!dst_.is(eax)) __ mov(dst_, eax);
|
| }
|
| @@ -1595,9 +1698,14 @@
|
| class DeferredInlineSmiAdd: public DeferredCode {
|
| public:
|
| DeferredInlineSmiAdd(Register dst,
|
| + NumberInfo number_info,
|
| Smi* value,
|
| OverwriteMode overwrite_mode)
|
| - : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
|
| + : dst_(dst),
|
| + number_info_(number_info),
|
| + value_(value),
|
| + overwrite_mode_(overwrite_mode) {
|
| + if (number_info_.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
|
| set_comment("[ DeferredInlineSmiAdd");
|
| }
|
|
|
| @@ -1605,6 +1713,7 @@
|
|
|
| private:
|
| Register dst_;
|
| + NumberInfo number_info_;
|
| Smi* value_;
|
| OverwriteMode overwrite_mode_;
|
| };
|
| @@ -1613,7 +1722,11 @@
|
| void DeferredInlineSmiAdd::Generate() {
|
| // Undo the optimistic add operation and call the shared stub.
|
| __ sub(Operand(dst_), Immediate(value_));
|
| - GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
|
| + GenericBinaryOpStub igostub(
|
| + Token::ADD,
|
| + overwrite_mode_,
|
| + NO_SMI_CODE_IN_STUB,
|
| + NumberInfo::Combine(NumberInfo::Smi(), number_info_));
|
| igostub.GenerateCall(masm_, dst_, value_);
|
| if (!dst_.is(eax)) __ mov(dst_, eax);
|
| }
|
| @@ -1625,9 +1738,13 @@
|
| class DeferredInlineSmiAddReversed: public DeferredCode {
|
| public:
|
| DeferredInlineSmiAddReversed(Register dst,
|
| + NumberInfo number_info,
|
| Smi* value,
|
| OverwriteMode overwrite_mode)
|
| - : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
|
| + : dst_(dst),
|
| + number_info_(number_info),
|
| + value_(value),
|
| + overwrite_mode_(overwrite_mode) {
|
| set_comment("[ DeferredInlineSmiAddReversed");
|
| }
|
|
|
| @@ -1635,6 +1752,7 @@
|
|
|
| private:
|
| Register dst_;
|
| + NumberInfo number_info_;
|
| Smi* value_;
|
| OverwriteMode overwrite_mode_;
|
| };
|
| @@ -1643,7 +1761,11 @@
|
| void DeferredInlineSmiAddReversed::Generate() {
|
| // Undo the optimistic add operation and call the shared stub.
|
| __ sub(Operand(dst_), Immediate(value_));
|
| - GenericBinaryOpStub igostub(Token::ADD, overwrite_mode_, NO_SMI_CODE_IN_STUB);
|
| + GenericBinaryOpStub igostub(
|
| + Token::ADD,
|
| + overwrite_mode_,
|
| + NO_SMI_CODE_IN_STUB,
|
| + NumberInfo::Combine(NumberInfo::Smi(), number_info_));
|
| igostub.GenerateCall(masm_, value_, dst_);
|
| if (!dst_.is(eax)) __ mov(dst_, eax);
|
| }
|
| @@ -1656,9 +1778,14 @@
|
| class DeferredInlineSmiSub: public DeferredCode {
|
| public:
|
| DeferredInlineSmiSub(Register dst,
|
| + NumberInfo number_info,
|
| Smi* value,
|
| OverwriteMode overwrite_mode)
|
| - : dst_(dst), value_(value), overwrite_mode_(overwrite_mode) {
|
| + : dst_(dst),
|
| + number_info_(number_info),
|
| + value_(value),
|
| + overwrite_mode_(overwrite_mode) {
|
| + if (number_info.IsSmi()) overwrite_mode_ = NO_OVERWRITE;
|
| set_comment("[ DeferredInlineSmiSub");
|
| }
|
|
|
| @@ -1666,6 +1793,7 @@
|
|
|
| private:
|
| Register dst_;
|
| + NumberInfo number_info_;
|
| Smi* value_;
|
| OverwriteMode overwrite_mode_;
|
| };
|
| @@ -1674,7 +1802,11 @@
|
| void DeferredInlineSmiSub::Generate() {
|
| // Undo the optimistic sub operation and call the shared stub.
|
| __ add(Operand(dst_), Immediate(value_));
|
| - GenericBinaryOpStub igostub(Token::SUB, overwrite_mode_, NO_SMI_CODE_IN_STUB);
|
| + GenericBinaryOpStub igostub(
|
| + Token::SUB,
|
| + overwrite_mode_,
|
| + NO_SMI_CODE_IN_STUB,
|
| + NumberInfo::Combine(NumberInfo::Smi(), number_info_));
|
| igostub.GenerateCall(masm_, dst_, value_);
|
| if (!dst_.is(eax)) __ mov(dst_, eax);
|
| }
|
| @@ -1718,17 +1850,21 @@
|
| DeferredCode* deferred = NULL;
|
| if (reversed) {
|
| deferred = new DeferredInlineSmiAddReversed(operand->reg(),
|
| + operand->number_info(),
|
| smi_value,
|
| overwrite_mode);
|
| } else {
|
| deferred = new DeferredInlineSmiAdd(operand->reg(),
|
| + operand->number_info(),
|
| smi_value,
|
| overwrite_mode);
|
| }
|
| __ add(Operand(operand->reg()), Immediate(value));
|
| deferred->Branch(overflow);
|
| - __ test(operand->reg(), Immediate(kSmiTagMask));
|
| - deferred->Branch(not_zero);
|
| + if (!operand->number_info().IsSmi()) {
|
| + __ test(operand->reg(), Immediate(kSmiTagMask));
|
| + deferred->Branch(not_zero);
|
| + }
|
| deferred->BindExit();
|
| answer = *operand;
|
| break;
|
| @@ -1743,24 +1879,29 @@
|
| answer = allocator()->Allocate();
|
| ASSERT(answer.is_valid());
|
| __ Set(answer.reg(), Immediate(value));
|
| - deferred = new DeferredInlineSmiOperationReversed(op,
|
| - answer.reg(),
|
| - smi_value,
|
| - operand->reg(),
|
| - overwrite_mode);
|
| + deferred =
|
| + new DeferredInlineSmiOperationReversed(op,
|
| + answer.reg(),
|
| + smi_value,
|
| + operand->reg(),
|
| + operand->number_info(),
|
| + overwrite_mode);
|
| __ sub(answer.reg(), Operand(operand->reg()));
|
| } else {
|
| operand->ToRegister();
|
| frame_->Spill(operand->reg());
|
| answer = *operand;
|
| deferred = new DeferredInlineSmiSub(operand->reg(),
|
| + operand->number_info(),
|
| smi_value,
|
| overwrite_mode);
|
| __ sub(Operand(operand->reg()), Immediate(value));
|
| }
|
| deferred->Branch(overflow);
|
| - __ test(answer.reg(), Immediate(kSmiTagMask));
|
| - deferred->Branch(not_zero);
|
| + if (!operand->number_info().IsSmi()) {
|
| + __ test(answer.reg(), Immediate(kSmiTagMask));
|
| + deferred->Branch(not_zero);
|
| + }
|
| deferred->BindExit();
|
| operand->Unuse();
|
| break;
|
| @@ -1777,19 +1918,27 @@
|
| int shift_value = int_value & 0x1f;
|
| operand->ToRegister();
|
| frame_->Spill(operand->reg());
|
| - DeferredInlineSmiOperation* deferred =
|
| - new DeferredInlineSmiOperation(op,
|
| - operand->reg(),
|
| - operand->reg(),
|
| - smi_value,
|
| - overwrite_mode);
|
| - __ test(operand->reg(), Immediate(kSmiTagMask));
|
| - deferred->Branch(not_zero);
|
| - if (shift_value > 0) {
|
| - __ sar(operand->reg(), shift_value);
|
| - __ and_(operand->reg(), ~kSmiTagMask);
|
| + if (!operand->number_info().IsSmi()) {
|
| + DeferredInlineSmiOperation* deferred =
|
| + new DeferredInlineSmiOperation(op,
|
| + operand->reg(),
|
| + operand->reg(),
|
| + operand->number_info(),
|
| + smi_value,
|
| + overwrite_mode);
|
| + __ test(operand->reg(), Immediate(kSmiTagMask));
|
| + deferred->Branch(not_zero);
|
| + if (shift_value > 0) {
|
| + __ sar(operand->reg(), shift_value);
|
| + __ and_(operand->reg(), ~kSmiTagMask);
|
| + }
|
| + deferred->BindExit();
|
| + } else {
|
| + if (shift_value > 0) {
|
| + __ sar(operand->reg(), shift_value);
|
| + __ and_(operand->reg(), ~kSmiTagMask);
|
| + }
|
| }
|
| - deferred->BindExit();
|
| answer = *operand;
|
| }
|
| break;
|
| @@ -1810,10 +1959,13 @@
|
| new DeferredInlineSmiOperation(op,
|
| answer.reg(),
|
| operand->reg(),
|
| + operand->number_info(),
|
| smi_value,
|
| overwrite_mode);
|
| - __ test(operand->reg(), Immediate(kSmiTagMask));
|
| - deferred->Branch(not_zero);
|
| + if (!operand->number_info().IsSmi()) {
|
| + __ test(operand->reg(), Immediate(kSmiTagMask));
|
| + deferred->Branch(not_zero);
|
| + }
|
| __ mov(answer.reg(), operand->reg());
|
| __ SmiUntag(answer.reg());
|
| __ shr(answer.reg(), shift_value);
|
| @@ -1855,10 +2007,13 @@
|
| answer.reg(),
|
| smi_value,
|
| right.reg(),
|
| + right.number_info(),
|
| overwrite_mode);
|
| __ mov(answer.reg(), Immediate(int_value));
|
| __ sar(ecx, kSmiTagSize);
|
| - deferred->Branch(carry);
|
| + if (!right.number_info().IsSmi()) {
|
| + deferred->Branch(carry);
|
| + }
|
| __ shl_cl(answer.reg());
|
| __ cmp(answer.reg(), 0xc0000000);
|
| deferred->Branch(sign);
|
| @@ -1877,6 +2032,7 @@
|
| new DeferredInlineSmiOperation(op,
|
| operand->reg(),
|
| operand->reg(),
|
| + operand->number_info(),
|
| smi_value,
|
| overwrite_mode);
|
| __ test(operand->reg(), Immediate(kSmiTagMask));
|
| @@ -1891,10 +2047,13 @@
|
| new DeferredInlineSmiOperation(op,
|
| answer.reg(),
|
| operand->reg(),
|
| + operand->number_info(),
|
| smi_value,
|
| overwrite_mode);
|
| - __ test(operand->reg(), Immediate(kSmiTagMask));
|
| - deferred->Branch(not_zero);
|
| + if (!operand->number_info().IsSmi()) {
|
| + __ test(operand->reg(), Immediate(kSmiTagMask));
|
| + deferred->Branch(not_zero);
|
| + }
|
| __ mov(answer.reg(), operand->reg());
|
| ASSERT(kSmiTag == 0); // adjust code if not the case
|
| // We do no shifts, only the Smi conversion, if shift_value is 1.
|
| @@ -1918,20 +2077,25 @@
|
| frame_->Spill(operand->reg());
|
| DeferredCode* deferred = NULL;
|
| if (reversed) {
|
| - deferred = new DeferredInlineSmiOperationReversed(op,
|
| - operand->reg(),
|
| - smi_value,
|
| - operand->reg(),
|
| - overwrite_mode);
|
| + deferred =
|
| + new DeferredInlineSmiOperationReversed(op,
|
| + operand->reg(),
|
| + smi_value,
|
| + operand->reg(),
|
| + operand->number_info(),
|
| + overwrite_mode);
|
| } else {
|
| deferred = new DeferredInlineSmiOperation(op,
|
| operand->reg(),
|
| operand->reg(),
|
| + operand->number_info(),
|
| smi_value,
|
| overwrite_mode);
|
| }
|
| - __ test(operand->reg(), Immediate(kSmiTagMask));
|
| - deferred->Branch(not_zero);
|
| + if (!operand->number_info().IsSmi()) {
|
| + __ test(operand->reg(), Immediate(kSmiTagMask));
|
| + deferred->Branch(not_zero);
|
| + }
|
| if (op == Token::BIT_AND) {
|
| __ and_(Operand(operand->reg()), Immediate(value));
|
| } else if (op == Token::BIT_XOR) {
|
| @@ -1958,6 +2122,7 @@
|
| new DeferredInlineSmiOperation(op,
|
| operand->reg(),
|
| operand->reg(),
|
| + operand->number_info(),
|
| smi_value,
|
| overwrite_mode);
|
| // Check that lowest log2(value) bits of operand are zero, and test
|
| @@ -1989,11 +2154,13 @@
|
| (IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
|
| operand->ToRegister();
|
| frame_->Spill(operand->reg());
|
| - DeferredCode* deferred = new DeferredInlineSmiOperation(op,
|
| - operand->reg(),
|
| - operand->reg(),
|
| - smi_value,
|
| - overwrite_mode);
|
| + DeferredCode* deferred =
|
| + new DeferredInlineSmiOperation(op,
|
| + operand->reg(),
|
| + operand->reg(),
|
| + operand->number_info(),
|
| + smi_value,
|
| + overwrite_mode);
|
| // Check for negative or non-Smi left hand side.
|
| __ test(operand->reg(), Immediate(kSmiTagMask | 0x80000000));
|
| deferred->Branch(not_zero);
|
| @@ -2027,6 +2194,8 @@
|
|
|
|
|
| static bool CouldBeNaN(const Result& result) {
|
| + if (result.number_info().IsSmi()) return false;
|
| + if (result.number_info().IsInteger32()) return false;
|
| if (!result.is_constant()) return true;
|
| if (!result.handle()->IsHeapNumber()) return false;
|
| return isnan(HeapNumber::cast(*result.handle())->value());
|
| @@ -5422,6 +5591,54 @@
|
| }
|
|
|
|
|
| +void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
|
| + Comment(masm_, "[ GenerateCharFromCode");
|
| + ASSERT(args->length() == 1);
|
| +
|
| + Load(args->at(0));
|
| + Result code = frame_->Pop();
|
| + code.ToRegister();
|
| + ASSERT(code.is_valid());
|
| +
|
| + Result temp = allocator()->Allocate();
|
| + ASSERT(temp.is_valid());
|
| +
|
| + JumpTarget slow_case;
|
| + JumpTarget exit;
|
| +
|
| + // Fast case of Heap::LookupSingleCharacterStringFromCode.
|
| + ASSERT(kSmiTag == 0);
|
| + ASSERT(kSmiShiftSize == 0);
|
| + ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
|
| + __ test(code.reg(),
|
| + Immediate(kSmiTagMask |
|
| + ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
|
| + slow_case.Branch(not_zero, &code, not_taken);
|
| +
|
| + __ Set(temp.reg(), Immediate(Factory::single_character_string_cache()));
|
| + ASSERT(kSmiTag == 0);
|
| + ASSERT(kSmiTagSize == 1);
|
| + ASSERT(kSmiShiftSize == 0);
|
| + // At this point code register contains smi tagged ascii char code.
|
| + __ mov(temp.reg(), FieldOperand(temp.reg(),
|
| + code.reg(), times_half_pointer_size,
|
| + FixedArray::kHeaderSize));
|
| + __ cmp(temp.reg(), Factory::undefined_value());
|
| + slow_case.Branch(equal, &code, not_taken);
|
| + code.Unuse();
|
| +
|
| + frame_->Push(&temp);
|
| + exit.Jump();
|
| +
|
| + slow_case.Bind(&code);
|
| + frame_->Push(&code);
|
| + Result result = frame_->CallRuntime(Runtime::kCharFromCode, 1);
|
| + frame_->Push(&result);
|
| +
|
| + exit.Bind();
|
| +}
|
| +
|
| +
|
| void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
|
| ASSERT(args->length() == 1);
|
| Load(args->at(0));
|
| @@ -5742,22 +5959,13 @@
|
| ASSERT(args->length() == 0);
|
| frame_->SpillAll();
|
|
|
| - // Make sure the frame is aligned like the OS expects.
|
| - static const int kFrameAlignment = OS::ActivationFrameAlignment();
|
| - if (kFrameAlignment > 0) {
|
| - ASSERT(IsPowerOf2(kFrameAlignment));
|
| - __ mov(edi, Operand(esp)); // Save in callee-saved register.
|
| - __ and_(esp, -kFrameAlignment);
|
| - }
|
| + static const int num_arguments = 0;
|
| + __ PrepareCallCFunction(num_arguments, eax);
|
|
|
| // Call V8::RandomPositiveSmi().
|
| - __ call(FUNCTION_ADDR(V8::RandomPositiveSmi), RelocInfo::RUNTIME_ENTRY);
|
| + __ CallCFunction(ExternalReference::random_positive_smi_function(),
|
| + num_arguments);
|
|
|
| - // Restore stack pointer from callee-saved register edi.
|
| - if (kFrameAlignment > 0) {
|
| - __ mov(esp, Operand(edi));
|
| - }
|
| -
|
| Result result = allocator_->Allocate(eax);
|
| frame_->Push(&result);
|
| }
|
| @@ -5825,6 +6033,194 @@
|
| }
|
|
|
|
|
| +// Generates the Math.pow method - only handles special cases and branches to
|
| + // the runtime system if not. Please note - this function assumes that
|
| +// the callsite has executed ToNumber on both arguments and that the
|
| +// arguments are not the same identifier.
|
| +void CodeGenerator::GenerateMathPow(ZoneList<Expression*>* args) {
|
| + ASSERT(args->length() == 2);
|
| + Load(args->at(0));
|
| + Load(args->at(1));
|
| + if (!CpuFeatures::IsSupported(SSE2)) {
|
| + Result res = frame_->CallRuntime(Runtime::kMath_pow, 2);
|
| + frame_->Push(&res);
|
| + } else {
|
| + CpuFeatures::Scope use_sse2(SSE2);
|
| + Label allocate_return;
|
| + // Load the two operands while leaving the values on the frame.
|
| + frame()->Dup();
|
| + Result exponent = frame()->Pop();
|
| + exponent.ToRegister();
|
| + frame()->Spill(exponent.reg());
|
| + frame()->PushElementAt(1);
|
| + Result base = frame()->Pop();
|
| + base.ToRegister();
|
| + frame()->Spill(base.reg());
|
| +
|
| + Result answer = allocator()->Allocate();
|
| + ASSERT(answer.is_valid());
|
| + // We can safely assume that the base and exponent are not in the same
|
| + // register since we only call this from one callsite (math.js).
|
| + ASSERT(!exponent.reg().is(base.reg()));
|
| + JumpTarget call_runtime;
|
| +
|
| + // Save 1 in xmm3 - we need this several times later on.
|
| + __ mov(answer.reg(), Immediate(1));
|
| + __ cvtsi2sd(xmm3, Operand(answer.reg()));
|
| +
|
| + Label exponent_nonsmi;
|
| + Label base_nonsmi;
|
| + // If the exponent is a heap number go to that specific case.
|
| + __ test(exponent.reg(), Immediate(kSmiTagMask));
|
| + __ j(not_zero, &exponent_nonsmi);
|
| + __ test(base.reg(), Immediate(kSmiTagMask));
|
| + __ j(not_zero, &base_nonsmi);
|
| +
|
| + // Optimized version when y is an integer.
|
| + Label powi;
|
| + __ SmiUntag(base.reg());
|
| + __ cvtsi2sd(xmm0, Operand(base.reg()));
|
| + __ jmp(&powi);
|
| + // exponent is smi and base is a heapnumber.
|
| + __ bind(&base_nonsmi);
|
| + __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
|
| + Factory::heap_number_map());
|
| + call_runtime.Branch(not_equal);
|
| +
|
| + __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
|
| +
|
| + // Optimized version of pow if y is an integer.
|
| + __ bind(&powi);
|
| + __ SmiUntag(exponent.reg());
|
| +
|
| + // Save exponent in base as we need to check if exponent is negative later.
|
| + // We know that base and exponent are in different registers.
|
| + __ mov(base.reg(), exponent.reg());
|
| +
|
| + // Get absolute value of exponent.
|
| + Label no_neg;
|
| + __ cmp(exponent.reg(), 0);
|
| + __ j(greater_equal, &no_neg);
|
| + __ neg(exponent.reg());
|
| + __ bind(&no_neg);
|
| +
|
| + // Load xmm1 with 1.
|
| + __ movsd(xmm1, xmm3);
|
| + Label while_true;
|
| + Label no_multiply;
|
| +
|
| + // Label allocate_and_return;
|
| + __ bind(&while_true);
|
| + __ shr(exponent.reg(), 1);
|
| + __ j(not_carry, &no_multiply);
|
| + __ mulsd(xmm1, xmm0);
|
| + __ bind(&no_multiply);
|
| + __ test(exponent.reg(), Operand(exponent.reg()));
|
| + __ mulsd(xmm0, xmm0);
|
| + __ j(not_zero, &while_true);
|
| +
|
| + // base.reg() holds the original exponent y - if y is negative return 1/result.
|
| + __ test(base.reg(), Operand(base.reg()));
|
| + __ j(positive, &allocate_return);
|
| + // Special case if xmm1 has reached infinity.
|
| + __ mov(answer.reg(), Immediate(0x7FB00000));
|
| + __ movd(xmm0, Operand(answer.reg()));
|
| + __ cvtss2sd(xmm0, xmm0);
|
| + __ ucomisd(xmm0, xmm1);
|
| + call_runtime.Branch(equal);
|
| + __ divsd(xmm3, xmm1);
|
| + __ movsd(xmm1, xmm3);
|
| + __ jmp(&allocate_return);
|
| +
|
| + // exponent (or both) is a heapnumber - no matter what we should now work
|
| + // on doubles.
|
| + __ bind(&exponent_nonsmi);
|
| + __ cmp(FieldOperand(exponent.reg(), HeapObject::kMapOffset),
|
| + Factory::heap_number_map());
|
| + call_runtime.Branch(not_equal);
|
| + __ movdbl(xmm1, FieldOperand(exponent.reg(), HeapNumber::kValueOffset));
|
| + // Test if exponent is nan.
|
| + __ ucomisd(xmm1, xmm1);
|
| + call_runtime.Branch(parity_even);
|
| +
|
| + Label base_not_smi;
|
| + Label handle_special_cases;
|
| + __ test(base.reg(), Immediate(kSmiTagMask));
|
| + __ j(not_zero, &base_not_smi);
|
| + __ SmiUntag(base.reg());
|
| + __ cvtsi2sd(xmm0, Operand(base.reg()));
|
| + __ jmp(&handle_special_cases);
|
| + __ bind(&base_not_smi);
|
| + __ cmp(FieldOperand(base.reg(), HeapObject::kMapOffset),
|
| + Factory::heap_number_map());
|
| + call_runtime.Branch(not_equal);
|
| + __ mov(answer.reg(), FieldOperand(base.reg(), HeapNumber::kExponentOffset));
|
| + __ and_(answer.reg(), HeapNumber::kExponentMask);
|
| + __ cmp(Operand(answer.reg()), Immediate(HeapNumber::kExponentMask));
|
| + // base is NaN or +/-Infinity
|
| + call_runtime.Branch(greater_equal);
|
| + __ movdbl(xmm0, FieldOperand(base.reg(), HeapNumber::kValueOffset));
|
| +
|
| + // base is in xmm0 and exponent is in xmm1.
|
| + __ bind(&handle_special_cases);
|
| + Label not_minus_half;
|
| + // Test for -0.5.
|
| + // Load xmm2 with -0.5.
|
| + __ mov(answer.reg(), Immediate(0xBF000000));
|
| + __ movd(xmm2, Operand(answer.reg()));
|
| + __ cvtss2sd(xmm2, xmm2);
|
| + // xmm2 now has -0.5.
|
| + __ ucomisd(xmm2, xmm1);
|
| + __ j(not_equal, ¬_minus_half);
|
| +
|
| + // Calculates reciprocal of square root.
|
| + // Note that 1/sqrt(x) = sqrt(1/x).
|
| + __ divsd(xmm3, xmm0);
|
| + __ movsd(xmm1, xmm3);
|
| + __ sqrtsd(xmm1, xmm1);
|
| + __ jmp(&allocate_return);
|
| +
|
| + // Test for 0.5.
|
| + __ bind(¬_minus_half);
|
| + // Load xmm2 with 0.5.
|
| + // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
|
| + __ addsd(xmm2, xmm3);
|
| + // xmm2 now has 0.5.
|
| + __ comisd(xmm2, xmm1);
|
| + call_runtime.Branch(not_equal);
|
| + // Calculates square root.
|
| + __ movsd(xmm1, xmm0);
|
| + __ sqrtsd(xmm1, xmm1);
|
| +
|
| + JumpTarget done;
|
| + Label failure, success;
|
| + __ bind(&allocate_return);
|
| + // Make a copy of the frame to enable us to handle allocation
|
| + // failure after the JumpTarget jump.
|
| + VirtualFrame* clone = new VirtualFrame(frame());
|
| + __ AllocateHeapNumber(answer.reg(), exponent.reg(),
|
| + base.reg(), &failure);
|
| + __ movdbl(FieldOperand(answer.reg(), HeapNumber::kValueOffset), xmm1);
|
| + // Remove the two original values from the frame - we only need those
|
| + // in the case where we branch to runtime.
|
| + frame()->Drop(2);
|
| + exponent.Unuse();
|
| + base.Unuse();
|
| + done.Jump(&answer);
|
| + // Use the copy of the original frame as our current frame.
|
| + RegisterFile empty_regs;
|
| + SetFrame(clone, &empty_regs);
|
| + // If we experience an allocation failure we branch to runtime.
|
| + __ bind(&failure);
|
| + call_runtime.Bind();
|
| + answer = frame()->CallRuntime(Runtime::kMath_pow_cfunction, 2);
|
| +
|
| + done.Bind(&answer);
|
| + frame()->Push(&answer);
|
| + }
|
| +}
|
| +
|
| +
|
| void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
|
| ASSERT_EQ(args->length(), 1);
|
| Load(args->at(0));
|
| @@ -5843,6 +6239,63 @@
|
| }
|
|
|
|
|
| +// Generates the Math.sqrt method. Please note - this function assumes that
|
| +// the callsite has executed ToNumber on the argument.
|
| +void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
|
| + ASSERT_EQ(args->length(), 1);
|
| + Load(args->at(0));
|
| +
|
| + if (!CpuFeatures::IsSupported(SSE2)) {
|
| + Result result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
|
| + frame()->Push(&result);
|
| + } else {
|
| + CpuFeatures::Scope use_sse2(SSE2);
|
| + // Leave original value on the frame if we need to call runtime.
|
| + frame()->Dup();
|
| + Result result = frame()->Pop();
|
| + result.ToRegister();
|
| + frame()->Spill(result.reg());
|
| + Label runtime;
|
| + Label non_smi;
|
| + Label load_done;
|
| + JumpTarget end;
|
| +
|
| + __ test(result.reg(), Immediate(kSmiTagMask));
|
| + __ j(not_zero, &non_smi);
|
| + __ SmiUntag(result.reg());
|
| + __ cvtsi2sd(xmm0, Operand(result.reg()));
|
| + __ jmp(&load_done);
|
| + __ bind(&non_smi);
|
| + __ cmp(FieldOperand(result.reg(), HeapObject::kMapOffset),
|
| + Factory::heap_number_map());
|
| + __ j(not_equal, &runtime);
|
| + __ movdbl(xmm0, FieldOperand(result.reg(), HeapNumber::kValueOffset));
|
| +
|
| + __ bind(&load_done);
|
| + __ sqrtsd(xmm0, xmm0);
|
| + // A copy of the virtual frame to allow us to go to runtime after the
|
| + // JumpTarget jump.
|
| + Result scratch = allocator()->Allocate();
|
| + VirtualFrame* clone = new VirtualFrame(frame());
|
| + __ AllocateHeapNumber(result.reg(), scratch.reg(), no_reg, &runtime);
|
| +
|
| + __ movdbl(FieldOperand(result.reg(), HeapNumber::kValueOffset), xmm0);
|
| + frame()->Drop(1);
|
| + scratch.Unuse();
|
| + end.Jump(&result);
|
| + // We only branch to runtime if we have an allocation error.
|
| + // Use the copy of the original frame as our current frame.
|
| + RegisterFile empty_regs;
|
| + SetFrame(clone, &empty_regs);
|
| + __ bind(&runtime);
|
| + result = frame()->CallRuntime(Runtime::kMath_sqrt, 1);
|
| +
|
| + end.Bind(&result);
|
| + frame()->Push(&result);
|
| + }
|
| +}
|
| +
|
| +
|
| void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
|
| if (CheckForInlineRuntimeCall(node)) {
|
| return;
|
| @@ -6702,8 +7155,12 @@
|
| public:
|
| DeferredReferenceSetKeyedValue(Register value,
|
| Register key,
|
| - Register receiver)
|
| - : value_(value), key_(key), receiver_(receiver) {
|
| + Register receiver,
|
| + Register scratch)
|
| + : value_(value),
|
| + key_(key),
|
| + receiver_(receiver),
|
| + scratch_(scratch) {
|
| set_comment("[ DeferredReferenceSetKeyedValue");
|
| }
|
|
|
| @@ -6715,17 +7172,65 @@
|
| Register value_;
|
| Register key_;
|
| Register receiver_;
|
| + Register scratch_;
|
| Label patch_site_;
|
| };
|
|
|
|
|
| void DeferredReferenceSetKeyedValue::Generate() {
|
| __ IncrementCounter(&Counters::keyed_store_inline_miss, 1);
|
| - // Push receiver and key arguments on the stack.
|
| - __ push(receiver_);
|
| - __ push(key_);
|
| - // Move value argument to eax as expected by the IC stub.
|
| - if (!value_.is(eax)) __ mov(eax, value_);
|
| + // Move value_ to eax, key_ to ecx, and receiver_ to edx.
|
| + Register old_value = value_;
|
| +
|
| + // First, move value to eax.
|
| + if (!value_.is(eax)) {
|
| + if (key_.is(eax)) {
|
| + // Move key_ out of eax, preferably to ecx.
|
| + if (!value_.is(ecx) && !receiver_.is(ecx)) {
|
| + __ mov(ecx, key_);
|
| + key_ = ecx;
|
| + } else {
|
| + __ mov(scratch_, key_);
|
| + key_ = scratch_;
|
| + }
|
| + }
|
| + if (receiver_.is(eax)) {
|
| + // Move receiver_ out of eax, preferably to edx.
|
| + if (!value_.is(edx) && !key_.is(edx)) {
|
| + __ mov(edx, receiver_);
|
| + receiver_ = edx;
|
| + } else {
|
| + // Both moves to scratch are from eax; also, no valid path hits both.
|
| + __ mov(scratch_, receiver_);
|
| + receiver_ = scratch_;
|
| + }
|
| + }
|
| + __ mov(eax, value_);
|
| + value_ = eax;
|
| + }
|
| +
|
| + // Now value_ is in eax. Move the other two to the right positions.
|
| + // We do not update the variables key_ and receiver_ to ecx and edx.
|
| + if (key_.is(ecx)) {
|
| + if (!receiver_.is(edx)) {
|
| + __ mov(edx, receiver_);
|
| + }
|
| + } else if (key_.is(edx)) {
|
| + if (receiver_.is(ecx)) {
|
| + __ xchg(edx, ecx);
|
| + } else {
|
| + __ mov(ecx, key_);
|
| + if (!receiver_.is(edx)) {
|
| + __ mov(edx, receiver_);
|
| + }
|
| + }
|
| + } else { // Key is not in edx or ecx.
|
| + if (!receiver_.is(edx)) {
|
| + __ mov(edx, receiver_);
|
| + }
|
| + __ mov(ecx, key_);
|
| + }
|
| +
|
| // Call the IC stub.
|
| Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
|
| __ call(ic, RelocInfo::CODE_TARGET);
|
| @@ -6738,11 +7243,8 @@
|
| // Here we use masm_-> instead of the __ macro because this is the
|
| // instruction that gets patched and coverage code gets in the way.
|
| masm_->test(eax, Immediate(-delta_to_patch_site));
|
| - // Restore value (returned from store IC), key and receiver
|
| - // registers.
|
| - if (!value_.is(eax)) __ mov(value_, eax);
|
| - __ pop(key_);
|
| - __ pop(receiver_);
|
| + // Restore value (returned from store IC) register.
|
| + if (!old_value.is(eax)) __ mov(old_value, eax);
|
| }
|
|
|
|
|
| @@ -6866,8 +7368,10 @@
|
| deferred->Branch(not_equal);
|
|
|
| // Check that the key is a smi.
|
| - __ test(key.reg(), Immediate(kSmiTagMask));
|
| - deferred->Branch(not_zero);
|
| + if (!key.is_smi()) {
|
| + __ test(key.reg(), Immediate(kSmiTagMask));
|
| + deferred->Branch(not_zero);
|
| + }
|
|
|
| // Get the elements array from the receiver and check that it
|
| // is not a dictionary.
|
| @@ -6941,7 +7445,8 @@
|
| DeferredReferenceSetKeyedValue* deferred =
|
| new DeferredReferenceSetKeyedValue(result.reg(),
|
| key.reg(),
|
| - receiver.reg());
|
| + receiver.reg(),
|
| + tmp.reg());
|
|
|
| // Check that the value is a smi if it is not a constant. We can skip
|
| // the write barrier for smis and constants.
|
| @@ -7001,7 +7506,6 @@
|
| // indicate that we have generated an inline version of the
|
| // keyed store.
|
| __ nop();
|
| - frame()->Drop(2);
|
| }
|
| ASSERT(frame()->height() == original_height - 3);
|
| return result;
|
| @@ -7012,6 +7516,34 @@
|
| #define __ ACCESS_MASM(masm)
|
|
|
|
|
| +static void CheckTwoForSminess(MacroAssembler* masm,
|
| + Register left, Register right, Register scratch,
|
| + NumberInfo left_info, NumberInfo right_info,
|
| + DeferredInlineBinaryOperation* deferred) {
|
| + if (left.is(right)) {
|
| + if (!left_info.IsSmi()) {
|
| + __ test(left, Immediate(kSmiTagMask));
|
| + deferred->Branch(not_zero);
|
| + }
|
| + } else if (!left_info.IsSmi()) {
|
| + if (!right_info.IsSmi()) {
|
| + __ mov(scratch, left);
|
| + __ or_(scratch, Operand(right));
|
| + __ test(scratch, Immediate(kSmiTagMask));
|
| + deferred->Branch(not_zero);
|
| + } else {
|
| + __ test(left, Immediate(kSmiTagMask));
|
| + deferred->Branch(not_zero);
|
| + }
|
| + } else {
|
| + if (!right_info.IsSmi()) {
|
| + __ test(right, Immediate(kSmiTagMask));
|
| + deferred->Branch(not_zero);
|
| + }
|
| + }
|
| +}
|
| +
|
| +
|
| Handle<String> Reference::GetName() {
|
| ASSERT(type_ == NAMED);
|
| Property* property = expression_->AsProperty();
|
| @@ -7143,6 +7675,7 @@
|
| Comment cmnt(masm, "[ Store to keyed Property");
|
| Property* property = expression()->AsProperty();
|
| ASSERT(property != NULL);
|
| +
|
| Result answer = cgen_->EmitKeyedStore(property->key()->type());
|
| cgen_->frame()->Push(&answer);
|
| set_unloaded();
|
| @@ -7195,7 +7728,7 @@
|
| __ push(esi);
|
| __ push(edx);
|
| __ push(ecx); // Restore return address.
|
| - __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
|
| + __ TailCallRuntime(Runtime::kNewClosure, 2, 1);
|
| }
|
|
|
|
|
| @@ -7239,7 +7772,7 @@
|
|
|
| // Need to collect. Call into runtime system.
|
| __ bind(&gc);
|
| - __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
|
| + __ TailCallRuntime(Runtime::kNewContext, 1, 1);
|
| }
|
|
|
|
|
| @@ -7294,8 +7827,7 @@
|
| __ ret(3 * kPointerSize);
|
|
|
| __ bind(&slow_case);
|
| - ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
|
| - __ TailCallRuntime(runtime, 3, 1);
|
| + __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
|
| }
|
|
|
|
|
| @@ -7512,6 +8044,22 @@
|
| __ mov(left, Operand(esp, 2 * kPointerSize));
|
| }
|
|
|
| + if (static_operands_type_.IsSmi()) {
|
| + if (op_ == Token::BIT_OR) {
|
| + __ or_(right, Operand(left));
|
| + GenerateReturn(masm);
|
| + return;
|
| + } else if (op_ == Token::BIT_AND) {
|
| + __ and_(right, Operand(left));
|
| + GenerateReturn(masm);
|
| + return;
|
| + } else if (op_ == Token::BIT_XOR) {
|
| + __ xor_(right, Operand(left));
|
| + GenerateReturn(masm);
|
| + return;
|
| + }
|
| + }
|
| +
|
| // 2. Prepare the smi check of both operands by oring them together.
|
| Comment smi_check_comment(masm, "-- Smi check arguments");
|
| Label not_smis;
|
| @@ -7820,146 +8368,181 @@
|
| // Generate fast case smi code if requested. This flag is set when the fast
|
| // case smi code is not generated by the caller. Generating it here will speed
|
| // up common operations.
|
| - if (HasSmiCodeInStub()) {
|
| + if (ShouldGenerateSmiCode()) {
|
| GenerateSmiCode(masm, &call_runtime);
|
| } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
|
| - GenerateLoadArguments(masm);
|
| + if (!HasArgsInRegisters()) {
|
| + GenerateLoadArguments(masm);
|
| + }
|
| }
|
|
|
| // Floating point case.
|
| - switch (op_) {
|
| - case Token::ADD:
|
| - case Token::SUB:
|
| - case Token::MUL:
|
| - case Token::DIV: {
|
| - if (CpuFeatures::IsSupported(SSE2)) {
|
| - CpuFeatures::Scope use_sse2(SSE2);
|
| - if (NumberInfo::IsNumber(operands_type_)) {
|
| - if (FLAG_debug_code) {
|
| - // Assert at runtime that inputs are only numbers.
|
| - __ AbortIfNotNumber(edx,
|
| - "GenericBinaryOpStub operand not a number.");
|
| - __ AbortIfNotNumber(eax,
|
| - "GenericBinaryOpStub operand not a number.");
|
| - }
|
| - FloatingPointHelper::LoadSSE2Operands(masm);
|
| - } else {
|
| - FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
|
| + if (ShouldGenerateFPCode()) {
|
| + switch (op_) {
|
| + case Token::ADD:
|
| + case Token::SUB:
|
| + case Token::MUL:
|
| + case Token::DIV: {
|
| + if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
|
| + HasSmiCodeInStub()) {
|
| + // Execution reaches this point when the first non-smi argument occurs
|
| + // (and only if smi code is generated). This is the right moment to
|
| + // patch to HEAP_NUMBERS state. The transition is attempted only for
|
| + // the four basic operations. The stub stays in the DEFAULT state
|
| + // forever for all other operations (also if smi code is skipped).
|
| + GenerateTypeTransition(masm);
|
| }
|
|
|
| - switch (op_) {
|
| - case Token::ADD: __ addsd(xmm0, xmm1); break;
|
| - case Token::SUB: __ subsd(xmm0, xmm1); break;
|
| - case Token::MUL: __ mulsd(xmm0, xmm1); break;
|
| - case Token::DIV: __ divsd(xmm0, xmm1); break;
|
| - default: UNREACHABLE();
|
| + Label not_floats;
|
| + if (CpuFeatures::IsSupported(SSE2)) {
|
| + CpuFeatures::Scope use_sse2(SSE2);
|
| + if (static_operands_type_.IsNumber()) {
|
| + if (FLAG_debug_code) {
|
| + // Assert at runtime that inputs are only numbers.
|
| + __ AbortIfNotNumber(edx,
|
| + "GenericBinaryOpStub operand not a number.");
|
| + __ AbortIfNotNumber(eax,
|
| + "GenericBinaryOpStub operand not a number.");
|
| + }
|
| + if (static_operands_type_.IsSmi()) {
|
| + FloatingPointHelper::LoadSSE2Smis(masm, ecx);
|
| + } else {
|
| + FloatingPointHelper::LoadSSE2Operands(masm);
|
| + }
|
| + } else {
|
| + FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
|
| + }
|
| +
|
| + switch (op_) {
|
| + case Token::ADD: __ addsd(xmm0, xmm1); break;
|
| + case Token::SUB: __ subsd(xmm0, xmm1); break;
|
| + case Token::MUL: __ mulsd(xmm0, xmm1); break;
|
| + case Token::DIV: __ divsd(xmm0, xmm1); break;
|
| + default: UNREACHABLE();
|
| + }
|
| + GenerateHeapResultAllocation(masm, &call_runtime);
|
| + __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
|
| + GenerateReturn(masm);
|
| + } else { // SSE2 not available, use FPU.
|
| + if (static_operands_type_.IsNumber()) {
|
| + if (FLAG_debug_code) {
|
| + // Assert at runtime that inputs are only numbers.
|
| + __ AbortIfNotNumber(edx,
|
| + "GenericBinaryOpStub operand not a number.");
|
| + __ AbortIfNotNumber(eax,
|
| + "GenericBinaryOpStub operand not a number.");
|
| + }
|
| + } else {
|
| + FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
|
| + }
|
| + FloatingPointHelper::LoadFloatOperands(
|
| + masm,
|
| + ecx,
|
| + FloatingPointHelper::ARGS_IN_REGISTERS);
|
| + switch (op_) {
|
| + case Token::ADD: __ faddp(1); break;
|
| + case Token::SUB: __ fsubp(1); break;
|
| + case Token::MUL: __ fmulp(1); break;
|
| + case Token::DIV: __ fdivp(1); break;
|
| + default: UNREACHABLE();
|
| + }
|
| + Label after_alloc_failure;
|
| + GenerateHeapResultAllocation(masm, &after_alloc_failure);
|
| + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
|
| + GenerateReturn(masm);
|
| + __ bind(&after_alloc_failure);
|
| + __ ffree();
|
| + __ jmp(&call_runtime);
|
| }
|
| - GenerateHeapResultAllocation(masm, &call_runtime);
|
| - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
|
| - GenerateReturn(masm);
|
| - } else { // SSE2 not available, use FPU.
|
| - if (NumberInfo::IsNumber(operands_type_)) {
|
| - if (FLAG_debug_code) {
|
| - // Assert at runtime that inputs are only numbers.
|
| - __ AbortIfNotNumber(edx,
|
| - "GenericBinaryOpStub operand not a number.");
|
| - __ AbortIfNotNumber(eax,
|
| - "GenericBinaryOpStub operand not a number.");
|
| + __ bind(¬_floats);
|
| + if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
|
| + !HasSmiCodeInStub()) {
|
| + // Execution reaches this point when the first non-number argument
|
| + // occurs (and only if smi code is skipped from the stub, otherwise
|
| + // the patching has already been done earlier in this case branch).
|
| + // Try patching to STRINGS for ADD operation.
|
| + if (op_ == Token::ADD) {
|
| + GenerateTypeTransition(masm);
|
| }
|
| - } else {
|
| - FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
|
| }
|
| - FloatingPointHelper::LoadFloatOperands(
|
| - masm,
|
| - ecx,
|
| - FloatingPointHelper::ARGS_IN_REGISTERS);
|
| - switch (op_) {
|
| - case Token::ADD: __ faddp(1); break;
|
| - case Token::SUB: __ fsubp(1); break;
|
| - case Token::MUL: __ fmulp(1); break;
|
| - case Token::DIV: __ fdivp(1); break;
|
| - default: UNREACHABLE();
|
| - }
|
| - Label after_alloc_failure;
|
| - GenerateHeapResultAllocation(masm, &after_alloc_failure);
|
| - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
|
| - GenerateReturn(masm);
|
| - __ bind(&after_alloc_failure);
|
| - __ ffree();
|
| - __ jmp(&call_runtime);
|
| + break;
|
| }
|
| - }
|
| - case Token::MOD: {
|
| - // For MOD we go directly to runtime in the non-smi case.
|
| - break;
|
| - }
|
| - case Token::BIT_OR:
|
| - case Token::BIT_AND:
|
| - case Token::BIT_XOR:
|
| - case Token::SAR:
|
| - case Token::SHL:
|
| - case Token::SHR: {
|
| - Label non_smi_result;
|
| - FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
|
| - switch (op_) {
|
| - case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
|
| - case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
|
| - case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
|
| - case Token::SAR: __ sar_cl(eax); break;
|
| - case Token::SHL: __ shl_cl(eax); break;
|
| - case Token::SHR: __ shr_cl(eax); break;
|
| - default: UNREACHABLE();
|
| + case Token::MOD: {
|
| + // For MOD we go directly to runtime in the non-smi case.
|
| + break;
|
| }
|
| - if (op_ == Token::SHR) {
|
| - // Check if result is non-negative and fits in a smi.
|
| - __ test(eax, Immediate(0xc0000000));
|
| - __ j(not_zero, &call_runtime);
|
| - } else {
|
| - // Check if result fits in a smi.
|
| - __ cmp(eax, 0xc0000000);
|
| - __ j(negative, &non_smi_result);
|
| - }
|
| - // Tag smi result and return.
|
| - __ SmiTag(eax);
|
| - GenerateReturn(masm);
|
| -
|
| - // All ops except SHR return a signed int32 that we load in a HeapNumber.
|
| - if (op_ != Token::SHR) {
|
| - __ bind(&non_smi_result);
|
| - // Allocate a heap number if needed.
|
| - __ mov(ebx, Operand(eax)); // ebx: result
|
| - Label skip_allocation;
|
| - switch (mode_) {
|
| - case OVERWRITE_LEFT:
|
| - case OVERWRITE_RIGHT:
|
| - // If the operand was an object, we skip the
|
| - // allocation of a heap number.
|
| - __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
|
| - 1 * kPointerSize : 2 * kPointerSize));
|
| - __ test(eax, Immediate(kSmiTagMask));
|
| - __ j(not_zero, &skip_allocation, not_taken);
|
| - // Fall through!
|
| - case NO_OVERWRITE:
|
| - __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
|
| - __ bind(&skip_allocation);
|
| - break;
|
| + case Token::BIT_OR:
|
| + case Token::BIT_AND:
|
| + case Token::BIT_XOR:
|
| + case Token::SAR:
|
| + case Token::SHL:
|
| + case Token::SHR: {
|
| + Label non_smi_result;
|
| + FloatingPointHelper::LoadAsIntegers(masm,
|
| + static_operands_type_,
|
| + use_sse3_,
|
| + &call_runtime);
|
| + switch (op_) {
|
| + case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
|
| + case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
|
| + case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
|
| + case Token::SAR: __ sar_cl(eax); break;
|
| + case Token::SHL: __ shl_cl(eax); break;
|
| + case Token::SHR: __ shr_cl(eax); break;
|
| default: UNREACHABLE();
|
| }
|
| - // Store the result in the HeapNumber and return.
|
| - if (CpuFeatures::IsSupported(SSE2)) {
|
| - CpuFeatures::Scope use_sse2(SSE2);
|
| - __ cvtsi2sd(xmm0, Operand(ebx));
|
| - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
|
| + if (op_ == Token::SHR) {
|
| + // Check if result is non-negative and fits in a smi.
|
| + __ test(eax, Immediate(0xc0000000));
|
| + __ j(not_zero, &call_runtime);
|
| } else {
|
| - __ mov(Operand(esp, 1 * kPointerSize), ebx);
|
| - __ fild_s(Operand(esp, 1 * kPointerSize));
|
| - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
|
| + // Check if result fits in a smi.
|
| + __ cmp(eax, 0xc0000000);
|
| + __ j(negative, &non_smi_result);
|
| }
|
| + // Tag smi result and return.
|
| + __ SmiTag(eax);
|
| GenerateReturn(masm);
|
| +
|
| + // All ops except SHR return a signed int32 that we load in
|
| + // a HeapNumber.
|
| + if (op_ != Token::SHR) {
|
| + __ bind(&non_smi_result);
|
| + // Allocate a heap number if needed.
|
| + __ mov(ebx, Operand(eax)); // ebx: result
|
| + Label skip_allocation;
|
| + switch (mode_) {
|
| + case OVERWRITE_LEFT:
|
| + case OVERWRITE_RIGHT:
|
| + // If the operand was an object, we skip the
|
| + // allocation of a heap number.
|
| + __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
|
| + 1 * kPointerSize : 2 * kPointerSize));
|
| + __ test(eax, Immediate(kSmiTagMask));
|
| + __ j(not_zero, &skip_allocation, not_taken);
|
| + // Fall through!
|
| + case NO_OVERWRITE:
|
| + __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
|
| + __ bind(&skip_allocation);
|
| + break;
|
| + default: UNREACHABLE();
|
| + }
|
| + // Store the result in the HeapNumber and return.
|
| + if (CpuFeatures::IsSupported(SSE2)) {
|
| + CpuFeatures::Scope use_sse2(SSE2);
|
| + __ cvtsi2sd(xmm0, Operand(ebx));
|
| + __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
|
| + } else {
|
| + __ mov(Operand(esp, 1 * kPointerSize), ebx);
|
| + __ fild_s(Operand(esp, 1 * kPointerSize));
|
| + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
|
| + }
|
| + GenerateReturn(masm);
|
| + }
|
| + break;
|
| }
|
| - break;
|
| + default: UNREACHABLE(); break;
|
| }
|
| - default: UNREACHABLE(); break;
|
| }
|
|
|
| // If all else fails, use the runtime system to get the correct
|
| @@ -7967,30 +8550,40 @@
|
| // stack in the correct order below the return address.
|
| __ bind(&call_runtime);
|
| if (HasArgsInRegisters()) {
|
| - __ pop(ecx);
|
| - if (HasArgsReversed()) {
|
| - __ push(eax);
|
| - __ push(edx);
|
| - } else {
|
| - __ push(edx);
|
| - __ push(eax);
|
| - }
|
| - __ push(ecx);
|
| + GenerateRegisterArgsPush(masm);
|
| }
|
| +
|
| switch (op_) {
|
| case Token::ADD: {
|
| // Test for string arguments before calling runtime.
|
| Label not_strings, not_string1, string1, string1_smi2;
|
| - Result answer;
|
| - __ test(edx, Immediate(kSmiTagMask));
|
| +
|
| + // If this stub has already generated FP-specific code then the arguments
|
| + // are already in edx, eax
|
| + if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
|
| + GenerateLoadArguments(masm);
|
| + }
|
| +
|
| + // Registers containing left and right operands respectively.
|
| + Register lhs, rhs;
|
| + if (HasArgsReversed()) {
|
| + lhs = eax;
|
| + rhs = edx;
|
| + } else {
|
| + lhs = edx;
|
| + rhs = eax;
|
| + }
|
| +
|
| + // Test if first argument is a string.
|
| + __ test(lhs, Immediate(kSmiTagMask));
|
| __ j(zero, ¬_string1);
|
| - __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx);
|
| + __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
|
| __ j(above_equal, ¬_string1);
|
|
|
| // First argument is a string, test second.
|
| - __ test(eax, Immediate(kSmiTagMask));
|
| + __ test(rhs, Immediate(kSmiTagMask));
|
| __ j(zero, &string1_smi2);
|
| - __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
|
| + __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
|
| __ j(above_equal, &string1);
|
|
|
| // First and second argument are strings. Jump to the string add stub.
|
| @@ -8001,36 +8594,26 @@
|
| // First argument is a string, second is a smi. Try to lookup the number
|
| // string for the smi in the number string cache.
|
| NumberToStringStub::GenerateLookupNumberStringCache(
|
| - masm, eax, edi, ebx, ecx, true, &string1);
|
| + masm, rhs, edi, ebx, ecx, true, &string1);
|
|
|
| - // Call the string add stub to make the result.
|
| - __ EnterInternalFrame();
|
| - __ push(edx); // Original first argument.
|
| - __ push(edi); // Number to string result for second argument.
|
| - __ CallStub(&string_add_stub);
|
| - __ LeaveInternalFrame();
|
| - __ ret(2 * kPointerSize);
|
| + // Replace second argument on stack and tailcall string add stub to make
|
| + // the result.
|
| + __ mov(Operand(esp, 1 * kPointerSize), edi);
|
| + __ TailCallStub(&string_add_stub);
|
|
|
| + // Only first argument is a string.
|
| __ bind(&string1);
|
| - __ InvokeBuiltin(
|
| - HasArgsReversed() ?
|
| - Builtins::STRING_ADD_RIGHT :
|
| - Builtins::STRING_ADD_LEFT,
|
| - JUMP_FUNCTION);
|
| + __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
|
|
|
| // First argument was not a string, test second.
|
| __ bind(¬_string1);
|
| - __ test(eax, Immediate(kSmiTagMask));
|
| + __ test(rhs, Immediate(kSmiTagMask));
|
| __ j(zero, ¬_strings);
|
| - __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx);
|
| + __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
|
| __ j(above_equal, ¬_strings);
|
|
|
| // Only second argument is a string.
|
| - __ InvokeBuiltin(
|
| - HasArgsReversed() ?
|
| - Builtins::STRING_ADD_LEFT :
|
| - Builtins::STRING_ADD_RIGHT,
|
| - JUMP_FUNCTION);
|
| + __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
|
|
|
| __ bind(¬_strings);
|
| // Neither argument is a string.
|
| @@ -8070,6 +8653,13 @@
|
| default:
|
| UNREACHABLE();
|
| }
|
| +
|
| + // Generate an unreachable reference to the DEFAULT stub so that it can be
|
| + // found at the end of this stub when clearing ICs at GC.
|
| + if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
|
| + GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
|
| + __ TailCallStub(&uninit);
|
| + }
|
| }
|
|
|
|
|
| @@ -8123,10 +8713,9 @@
|
|
|
| void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
|
| // If arguments are not passed in registers read them from the stack.
|
| - if (!HasArgsInRegisters()) {
|
| - __ mov(eax, Operand(esp, 1 * kPointerSize));
|
| - __ mov(edx, Operand(esp, 2 * kPointerSize));
|
| - }
|
| + ASSERT(!HasArgsInRegisters());
|
| + __ mov(eax, Operand(esp, 1 * kPointerSize));
|
| + __ mov(edx, Operand(esp, 2 * kPointerSize));
|
| }
|
|
|
|
|
| @@ -8141,6 +8730,75 @@
|
| }
|
|
|
|
|
| +void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
|
| + ASSERT(HasArgsInRegisters());
|
| + __ pop(ecx);
|
| + if (HasArgsReversed()) {
|
| + __ push(eax);
|
| + __ push(edx);
|
| + } else {
|
| + __ push(edx);
|
| + __ push(eax);
|
| + }
|
| + __ push(ecx);
|
| +}
|
| +
|
| +
|
| +void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
|
| + Label get_result;
|
| +
|
| + // Keep a copy of operands on the stack and make sure they are also in
|
| + // edx, eax.
|
| + if (HasArgsInRegisters()) {
|
| + GenerateRegisterArgsPush(masm);
|
| + } else {
|
| + GenerateLoadArguments(masm);
|
| + }
|
| +
|
| + // Internal frame is necessary to handle exceptions properly.
|
| + __ EnterInternalFrame();
|
| +
|
| + // Push arguments on stack if the stub expects them there.
|
| + if (!HasArgsInRegisters()) {
|
| + __ push(edx);
|
| + __ push(eax);
|
| + }
|
| + // Call the stub proper to get the result in eax.
|
| + __ call(&get_result);
|
| + __ LeaveInternalFrame();
|
| +
|
| + __ pop(ecx); // Return address.
|
| + // Left and right arguments are now on top.
|
| + // Push the operation result. The tail call to BinaryOp_Patch will
|
| + // return it to the original caller.
|
| + __ push(eax);
|
| + // Push this stub's key. Although the operation and the type info are
|
| + // encoded into the key, the encoding is opaque, so push them too.
|
| + __ push(Immediate(Smi::FromInt(MinorKey())));
|
| + __ push(Immediate(Smi::FromInt(op_)));
|
| + __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
|
| +
|
| + __ push(ecx); // Return address.
|
| +
|
| + // Patch the caller to an appropriate specialized stub
|
| + // and return the operation result.
|
| + __ TailCallExternalReference(
|
| + ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
|
| + 6,
|
| + 1);
|
| +
|
| + // The entry point for the result calculation is assumed to be immediately
|
| + // after this sequence.
|
| + __ bind(&get_result);
|
| +}
|
| +
|
| +Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
|
| + GenericBinaryOpStub stub(key, type_info);
|
| + HandleScope scope;
|
| + return stub.GetCode();
|
| +}
|
| +
|
| +
|
| void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
|
| // Input on stack:
|
| // esp[4]: argument (should be number).
|
| @@ -8247,7 +8905,7 @@
|
| __ bind(&runtime_call_clear_stack);
|
| __ fstp(0);
|
| __ bind(&runtime_call);
|
| - __ TailCallRuntime(ExternalReference(RuntimeFunction()), 1, 1);
|
| + __ TailCallExternalReference(ExternalReference(RuntimeFunction()), 1, 1);
|
| }
|
|
|
|
|
| @@ -8353,24 +9011,29 @@
|
| // trashed registers.
|
| void IntegerConvert(MacroAssembler* masm,
|
| Register source,
|
| + NumberInfo number_info,
|
| bool use_sse3,
|
| Label* conversion_failure) {
|
| ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
|
| Label done, right_exponent, normal_exponent;
|
| Register scratch = ebx;
|
| Register scratch2 = edi;
|
| - // Get exponent word.
|
| - __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
|
| - // Get exponent alone in scratch2.
|
| - __ mov(scratch2, scratch);
|
| - __ and_(scratch2, HeapNumber::kExponentMask);
|
| + if (!number_info.IsInteger32() || !use_sse3) {
|
| + // Get exponent word.
|
| + __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
|
| + // Get exponent alone in scratch2.
|
| + __ mov(scratch2, scratch);
|
| + __ and_(scratch2, HeapNumber::kExponentMask);
|
| + }
|
| if (use_sse3) {
|
| CpuFeatures::Scope scope(SSE3);
|
| - // Check whether the exponent is too big for a 64 bit signed integer.
|
| - static const uint32_t kTooBigExponent =
|
| - (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
|
| - __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
|
| - __ j(greater_equal, conversion_failure);
|
| + if (!number_info.IsInteger32()) {
|
| + // Check whether the exponent is too big for a 64 bit signed integer.
|
| + static const uint32_t kTooBigExponent =
|
| + (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
|
| + __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
|
| + __ j(greater_equal, conversion_failure);
|
| + }
|
| // Load x87 register with heap number.
|
| __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
|
| // Reserve space for 64 bit answer.
|
| @@ -8484,16 +9147,66 @@
|
|
|
| // Input: edx, eax are the left and right objects of a bit op.
|
| // Output: eax, ecx are left and right integers for a bit op.
|
| -void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
|
| - bool use_sse3,
|
| - Label* conversion_failure) {
|
| +void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm,
|
| + NumberInfo number_info,
|
| + bool use_sse3,
|
| + Label* conversion_failure) {
|
| // Check float operands.
|
| Label arg1_is_object, check_undefined_arg1;
|
| Label arg2_is_object, check_undefined_arg2;
|
| Label load_arg2, done;
|
|
|
| + if (!number_info.IsHeapNumber()) {
|
| + if (!number_info.IsSmi()) {
|
| + __ test(edx, Immediate(kSmiTagMask));
|
| + __ j(not_zero, &arg1_is_object);
|
| + }
|
| + __ SmiUntag(edx);
|
| + __ jmp(&load_arg2);
|
| + }
|
| +
|
| + __ bind(&arg1_is_object);
|
| +
|
| + // Get the untagged integer version of the edx heap number in ecx.
|
| + IntegerConvert(masm, edx, number_info, use_sse3, conversion_failure);
|
| + __ mov(edx, ecx);
|
| +
|
| + // Here edx has the untagged integer, eax has a Smi or a heap number.
|
| + __ bind(&load_arg2);
|
| + if (!number_info.IsHeapNumber()) {
|
| + // Test if arg2 is a Smi.
|
| + if (!number_info.IsSmi()) {
|
| + __ test(eax, Immediate(kSmiTagMask));
|
| + __ j(not_zero, &arg2_is_object);
|
| + }
|
| + __ SmiUntag(eax);
|
| + __ mov(ecx, eax);
|
| + __ jmp(&done);
|
| + }
|
| +
|
| + __ bind(&arg2_is_object);
|
| +
|
| + // Get the untagged integer version of the eax heap number in ecx.
|
| + IntegerConvert(masm, eax, number_info, use_sse3, conversion_failure);
|
| + __ bind(&done);
|
| + __ mov(eax, edx);
|
| +}
|
| +
|
| +
|
| +// Input: edx, eax are the left and right objects of a bit op.
|
| +// Output: eax, ecx are left and right integers for a bit op.
|
| +void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
|
| + bool use_sse3,
|
| + Label* conversion_failure) {
|
| + // Check float operands.
|
| + Label arg1_is_object, check_undefined_arg1;
|
| + Label arg2_is_object, check_undefined_arg2;
|
| + Label load_arg2, done;
|
| +
|
| + // Test if arg1 is a Smi.
|
| __ test(edx, Immediate(kSmiTagMask));
|
| __ j(not_zero, &arg1_is_object);
|
| +
|
| __ SmiUntag(edx);
|
| __ jmp(&load_arg2);
|
|
|
| @@ -8508,15 +9221,22 @@
|
| __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
|
| __ cmp(ebx, Factory::heap_number_map());
|
| __ j(not_equal, &check_undefined_arg1);
|
| +
|
| // Get the untagged integer version of the edx heap number in ecx.
|
| - IntegerConvert(masm, edx, use_sse3, conversion_failure);
|
| + IntegerConvert(masm,
|
| + edx,
|
| + NumberInfo::Unknown(),
|
| + use_sse3,
|
| + conversion_failure);
|
| __ mov(edx, ecx);
|
|
|
| // Here edx has the untagged integer, eax has a Smi or a heap number.
|
| __ bind(&load_arg2);
|
| +
|
| // Test if arg2 is a Smi.
|
| __ test(eax, Immediate(kSmiTagMask));
|
| __ j(not_zero, &arg2_is_object);
|
| +
|
| __ SmiUntag(eax);
|
| __ mov(ecx, eax);
|
| __ jmp(&done);
|
| @@ -8532,13 +9252,30 @@
|
| __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
|
| __ cmp(ebx, Factory::heap_number_map());
|
| __ j(not_equal, &check_undefined_arg2);
|
| +
|
| // Get the untagged integer version of the eax heap number in ecx.
|
| - IntegerConvert(masm, eax, use_sse3, conversion_failure);
|
| + IntegerConvert(masm,
|
| + eax,
|
| + NumberInfo::Unknown(),
|
| + use_sse3,
|
| + conversion_failure);
|
| __ bind(&done);
|
| __ mov(eax, edx);
|
| }
|
|
|
|
|
| +void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
|
| + NumberInfo number_info,
|
| + bool use_sse3,
|
| + Label* conversion_failure) {
|
| + if (number_info.IsNumber()) {
|
| + LoadNumbersAsIntegers(masm, number_info, use_sse3, conversion_failure);
|
| + } else {
|
| + LoadUnknownsAsIntegers(masm, use_sse3, conversion_failure);
|
| + }
|
| +}
|
| +
|
| +
|
| void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
|
| Register number) {
|
| Label load_smi, done;
|
| @@ -8775,7 +9512,11 @@
|
| __ j(not_equal, &slow, not_taken);
|
|
|
| // Convert the heap number in eax to an untagged integer in ecx.
|
| - IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), &slow);
|
| + IntegerConvert(masm,
|
| + eax,
|
| + NumberInfo::Unknown(),
|
| + CpuFeatures::IsSupported(SSE3),
|
| + &slow);
|
|
|
| // Do the bitwise operation and check if the result fits in a smi.
|
| Label try_float;
|
| @@ -8911,7 +9652,7 @@
|
| __ pop(ebx); // Return address.
|
| __ push(edx);
|
| __ push(ebx);
|
| - __ TailCallRuntime(ExternalReference(Runtime::kGetArgumentsProperty), 1, 1);
|
| + __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
|
| }
|
|
|
|
|
| @@ -9012,7 +9753,7 @@
|
|
|
| // Do the runtime call to allocate the arguments object.
|
| __ bind(&runtime);
|
| - __ TailCallRuntime(ExternalReference(Runtime::kNewArgumentsFast), 3, 1);
|
| + __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
|
| }
|
|
|
|
|
| @@ -9021,10 +9762,10 @@
|
| // time or if regexp entry in generated code is turned off runtime switch or
|
| // at compilation.
|
| #ifndef V8_NATIVE_REGEXP
|
| - __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
|
| + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
|
| #else // V8_NATIVE_REGEXP
|
| if (!FLAG_regexp_entry_native) {
|
| - __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
|
| + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
|
| return;
|
| }
|
|
|
| @@ -9207,48 +9948,50 @@
|
| // All checks done. Now push arguments for native regexp code.
|
| __ IncrementCounter(&Counters::regexp_entry_native, 1);
|
|
|
| + static const int kRegExpExecuteArguments = 7;
|
| + __ PrepareCallCFunction(kRegExpExecuteArguments, ecx);
|
| +
|
| // Argument 7: Indicate that this is a direct call from JavaScript.
|
| - __ push(Immediate(1));
|
| + __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
|
|
|
| // Argument 6: Start (high end) of backtracking stack memory area.
|
| __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
|
| __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
|
| - __ push(ecx);
|
| + __ mov(Operand(esp, 5 * kPointerSize), ecx);
|
|
|
| // Argument 5: static offsets vector buffer.
|
| - __ push(Immediate(ExternalReference::address_of_static_offsets_vector()));
|
| + __ mov(Operand(esp, 4 * kPointerSize),
|
| + Immediate(ExternalReference::address_of_static_offsets_vector()));
|
|
|
| // Argument 4: End of string data
|
| // Argument 3: Start of string data
|
| - Label push_two_byte, push_rest;
|
| + Label setup_two_byte, setup_rest;
|
| __ test(edi, Operand(edi));
|
| __ mov(edi, FieldOperand(eax, String::kLengthOffset));
|
| - __ j(zero, &push_two_byte);
|
| + __ j(zero, &setup_two_byte);
|
| __ lea(ecx, FieldOperand(eax, edi, times_1, SeqAsciiString::kHeaderSize));
|
| - __ push(ecx); // Argument 4.
|
| + __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
|
| __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
|
| - __ push(ecx); // Argument 3.
|
| - __ jmp(&push_rest);
|
| + __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
|
| + __ jmp(&setup_rest);
|
|
|
| - __ bind(&push_two_byte);
|
| + __ bind(&setup_two_byte);
|
| __ lea(ecx, FieldOperand(eax, edi, times_2, SeqTwoByteString::kHeaderSize));
|
| - __ push(ecx); // Argument 4.
|
| + __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
|
| __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
|
| - __ push(ecx); // Argument 3.
|
| + __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
|
|
|
| - __ bind(&push_rest);
|
| + __ bind(&setup_rest);
|
|
|
| // Argument 2: Previous index.
|
| - __ push(ebx);
|
| + __ mov(Operand(esp, 1 * kPointerSize), ebx);
|
|
|
| // Argument 1: Subject string.
|
| - __ push(eax);
|
| + __ mov(Operand(esp, 0 * kPointerSize), eax);
|
|
|
| // Locate the code entry and call it.
|
| __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
|
| - __ call(Operand(edx));
|
| - // Remove arguments.
|
| - __ add(Operand(esp), Immediate(7 * kPointerSize));
|
| + __ CallCFunction(edx, kRegExpExecuteArguments);
|
|
|
| // Check the result.
|
| Label success;
|
| @@ -9346,7 +10089,7 @@
|
|
|
| // Do the runtime call to execute the regexp.
|
| __ bind(&runtime);
|
| - __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
|
| + __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
|
| #endif // V8_NATIVE_REGEXP
|
| }
|
|
|
| @@ -9415,7 +10158,7 @@
|
|
|
| __ bind(&runtime);
|
| // Handle number to string in the runtime system if not found in the cache.
|
| - __ TailCallRuntime(ExternalReference(Runtime::kNumberToString), 1, 1);
|
| + __ TailCallRuntime(Runtime::kNumberToString, 1, 1);
|
| }
|
|
|
|
|
| @@ -9700,7 +10443,7 @@
|
| __ push(eax);
|
|
|
| // Do tail-call to runtime routine.
|
| - __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1, 1);
|
| + __ TailCallRuntime(Runtime::kStackGuard, 1, 1);
|
| }
|
|
|
|
|
| @@ -9861,9 +10604,7 @@
|
| __ LeaveExitFrame(ExitFrame::MODE_NORMAL);
|
| __ ret(0);
|
| __ bind(&promote_scheduled_exception);
|
| - __ TailCallRuntime(ExternalReference(Runtime::kPromoteScheduledException),
|
| - 0,
|
| - 1);
|
| + __ TailCallRuntime(Runtime::kPromoteScheduledException, 0, 1);
|
| }
|
|
|
|
|
| @@ -10348,6 +11089,7 @@
|
| Label make_two_character_string, make_flat_ascii_string;
|
| GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
|
| &make_two_character_string);
|
| + __ IncrementCounter(&Counters::string_add_native, 1);
|
| __ ret(2 * kPointerSize);
|
|
|
| __ bind(&make_two_character_string);
|
| @@ -10492,7 +11234,7 @@
|
|
|
| // Just jump to runtime to add the two strings.
|
| __ bind(&string_add_runtime);
|
| - __ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2, 1);
|
| + __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
|
| }
|
|
|
|
|
| @@ -10556,6 +11298,7 @@
|
| // Copy from edi to esi using rep movs instruction.
|
| __ mov(scratch, count);
|
| __ sar(count, 2); // Number of doublewords to copy.
|
| + __ cld();
|
| __ rep_movs();
|
|
|
| // Find number of bytes left.
|
| @@ -10627,10 +11370,7 @@
|
|
|
| // Calculate capacity mask from the symbol table capacity.
|
| Register mask = scratch2;
|
| - static const int kCapacityOffset =
|
| - FixedArray::kHeaderSize +
|
| - SymbolTable::kCapacityIndex * kPointerSize;
|
| - __ mov(mask, FieldOperand(symbol_table, kCapacityOffset));
|
| + __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
|
| __ SmiUntag(mask);
|
| __ sub(Operand(mask), Immediate(1));
|
|
|
| @@ -10655,16 +11395,12 @@
|
|
|
| // Load the entry from the symble table.
|
| Register candidate = scratch; // Scratch register contains candidate.
|
| - ASSERT_EQ(1, SymbolTableShape::kEntrySize);
|
| - static const int kFirstElementOffset =
|
| - FixedArray::kHeaderSize +
|
| - SymbolTable::kPrefixStartIndex * kPointerSize +
|
| - SymbolTableShape::kPrefixSize * kPointerSize;
|
| + ASSERT_EQ(1, SymbolTable::kEntrySize);
|
| __ mov(candidate,
|
| FieldOperand(symbol_table,
|
| scratch,
|
| times_pointer_size,
|
| - kFirstElementOffset));
|
| + SymbolTable::kElementsStartOffset));
|
|
|
| // If entry is undefined no string with this hash can be found.
|
| __ cmp(candidate, Factory::undefined_value());
|
| @@ -10818,7 +11554,7 @@
|
| Label make_two_character_string;
|
| GenerateTwoCharacterSymbolTableProbe(masm, ebx, ecx, eax, edx, edi,
|
| &make_two_character_string);
|
| - __ ret(2 * kPointerSize);
|
| + __ ret(3 * kPointerSize);
|
|
|
| __ bind(&make_two_character_string);
|
| // Setup registers for allocating the two character string.
|
| @@ -10902,7 +11638,7 @@
|
|
|
| // Just jump to runtime to create the sub string.
|
| __ bind(&runtime);
|
| - __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
|
| + __ TailCallRuntime(Runtime::kSubString, 3, 1);
|
| }
|
|
|
|
|
| @@ -11018,7 +11754,7 @@
|
| // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
|
| // tagged as a small integer.
|
| __ bind(&runtime);
|
| - __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
|
| + __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
|
| }
|
|
|
| #undef __
|
|
|