| Index: src/ia32/lithium-codegen-ia32.cc
|
| ===================================================================
|
| --- src/ia32/lithium-codegen-ia32.cc (revision 6800)
|
| +++ src/ia32/lithium-codegen-ia32.cc (working copy)
|
| @@ -1,4 +1,4 @@
|
| -// Copyright 2010 the V8 project authors. All rights reserved.
|
| +// Copyright 2011 the V8 project authors. All rights reserved.
|
| // Redistribution and use in source and binary forms, with or without
|
| // modification, are permitted provided that the following conditions are
|
| // met:
|
| @@ -25,6 +25,10 @@
|
| // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
|
| +#include "v8.h"
|
| +
|
| +#if defined(V8_TARGET_ARCH_IA32)
|
| +
|
| #include "ia32/lithium-codegen-ia32.h"
|
| #include "code-stubs.h"
|
| #include "stub-cache.h"
|
| @@ -33,17 +37,26 @@
|
| namespace internal {
|
|
|
|
|
| +// When invoking builtins, we need to record the safepoint in the middle of
|
| +// the invoke instruction sequence generated by the macro assembler.
|
| class SafepointGenerator : public PostCallGenerator {
|
| public:
|
| SafepointGenerator(LCodeGen* codegen,
|
| LPointerMap* pointers,
|
| - int deoptimization_index)
|
| + int deoptimization_index,
|
| + bool ensure_reloc_space = false)
|
| : codegen_(codegen),
|
| pointers_(pointers),
|
| - deoptimization_index_(deoptimization_index) { }
|
| + deoptimization_index_(deoptimization_index),
|
| + ensure_reloc_space_(ensure_reloc_space) { }
|
| virtual ~SafepointGenerator() { }
|
|
|
| virtual void Generate() {
|
| + // Ensure that we have enough space in the reloc info to patch
|
| + // this with calls when doing deoptimization.
|
| + if (ensure_reloc_space_) {
|
| + codegen_->masm()->RecordComment(RelocInfo::kFillerCommentString, true);
|
| + }
|
| codegen_->RecordSafepoint(pointers_, deoptimization_index_);
|
| }
|
|
|
| @@ -51,6 +64,7 @@
|
| LCodeGen* codegen_;
|
| LPointerMap* pointers_;
|
| int deoptimization_index_;
|
| + bool ensure_reloc_space_;
|
| };
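A rough sketch of how a PostCallGenerator hook like the class above is meant to be driven by the macro assembler: the hook runs immediately after the call instruction, so the recorded safepoint (and the new reloc-space comment) maps to the return address inside the invoke sequence. This is a paraphrase for orientation, not code from the patch; the helper name is made up.

    // Illustrative only (hypothetical helper): the macro assembler invokes the
    // generator right after emitting the call, which is when the
    // ensure_reloc_space_ comment and the safepoint get recorded.
    void CallAndNotify(MacroAssembler* masm, Handle<Code> code,
                       PostCallGenerator* post_call_generator) {
      masm->call(code, RelocInfo::CODE_TARGET);
      if (post_call_generator != NULL) post_call_generator->Generate();
    }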
|
|
|
|
|
| @@ -71,7 +85,7 @@
|
| void LCodeGen::FinishCode(Handle<Code> code) {
|
| ASSERT(is_done());
|
| code->set_stack_slots(StackSlotCount());
|
| - code->set_safepoint_table_start(safepoints_.GetCodeOffset());
|
| + code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
|
| PopulateDeoptimizationData(code);
|
| }
|
|
|
| @@ -135,11 +149,24 @@
|
| __ j(not_zero, &loop);
|
| } else {
|
| __ sub(Operand(esp), Immediate(slots * kPointerSize));
|
| +#ifdef _MSC_VER
|
| + // On Windows, you may not access the stack more than one page below
|
| + // the most recently mapped page. To make the allocated area randomly
|
| + // accessible, we write to each page in turn (the value is irrelevant).
|
| + const int kPageSize = 4 * KB;
|
| + for (int offset = slots * kPointerSize - kPageSize;
|
| + offset > 0;
|
| + offset -= kPageSize) {
|
| + __ mov(Operand(esp, offset), eax);
|
| + }
|
| +#endif
|
| }
|
| }
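As a standalone illustration of the page-touching idea in the _MSC_VER block above (not part of the patch; the function and buffer are made up), the same loop can be written against an ordinary buffer: touch one word in every 4 KB page of the newly reserved area so Windows commits each page before arbitrary offsets are accessed.

    #include <cstring>

    void TouchReservedArea(char* base, int size_in_bytes) {
      const int kPageSize = 4 * 1024;
      for (int offset = size_in_bytes - kPageSize; offset > 0; offset -= kPageSize) {
        std::memset(base + offset, 0, 1);  // the written value is irrelevant
      }
    }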
|
|
|
| // Trace the call.
|
| if (FLAG_trace) {
|
| + // We have not executed any compiled code yet, so esi still holds the
|
| + // incoming context.
|
| __ CallRuntime(Runtime::kTraceEnter, 0);
|
| }
|
| return !is_aborted();
|
| @@ -261,6 +288,53 @@
|
| }
|
|
|
|
|
| +Operand LCodeGen::HighOperand(LOperand* op) {
|
| + ASSERT(op->IsDoubleStackSlot());
|
| + int index = op->index();
|
| + int offset = (index >= 0) ? index + 3 : index - 1;
|
| + return Operand(ebp, -offset * kPointerSize);
|
| +}
|
| +
|
| +
|
| +void LCodeGen::WriteTranslation(LEnvironment* environment,
|
| + Translation* translation) {
|
| + if (environment == NULL) return;
|
| +
|
| + // The translation includes one command per value in the environment.
|
| + int translation_size = environment->values()->length();
|
| + // The output frame height does not include the parameters.
|
| + int height = translation_size - environment->parameter_count();
|
| +
|
| + WriteTranslation(environment->outer(), translation);
|
| + int closure_id = DefineDeoptimizationLiteral(environment->closure());
|
| + translation->BeginFrame(environment->ast_id(), closure_id, height);
|
| + for (int i = 0; i < translation_size; ++i) {
|
| + LOperand* value = environment->values()->at(i);
|
| + // spilled_registers_ and spilled_double_registers_ are either
|
| + // both NULL or both set.
|
| + if (environment->spilled_registers() != NULL && value != NULL) {
|
| + if (value->IsRegister() &&
|
| + environment->spilled_registers()[value->index()] != NULL) {
|
| + translation->MarkDuplicate();
|
| + AddToTranslation(translation,
|
| + environment->spilled_registers()[value->index()],
|
| + environment->HasTaggedValueAt(i));
|
| + } else if (
|
| + value->IsDoubleRegister() &&
|
| + environment->spilled_double_registers()[value->index()] != NULL) {
|
| + translation->MarkDuplicate();
|
| + AddToTranslation(
|
| + translation,
|
| + environment->spilled_double_registers()[value->index()],
|
| + false);
|
| + }
|
| + }
|
| +
|
| + AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
|
| + }
|
| +}
|
| +
|
| +
|
| void LCodeGen::AddToTranslation(Translation* translation,
|
| LOperand* op,
|
| bool is_tagged) {
|
| @@ -303,18 +377,16 @@
|
|
|
| void LCodeGen::CallCode(Handle<Code> code,
|
| RelocInfo::Mode mode,
|
| - LInstruction* instr) {
|
| - if (instr != NULL) {
|
| - LPointerMap* pointers = instr->pointer_map();
|
| - RecordPosition(pointers->position());
|
| - __ call(code, mode);
|
| - RegisterLazyDeoptimization(instr);
|
| - } else {
|
| - LPointerMap no_pointers(0);
|
| - RecordPosition(no_pointers.position());
|
| - __ call(code, mode);
|
| - RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
|
| + LInstruction* instr,
|
| + bool adjusted) {
|
| + ASSERT(instr != NULL);
|
| + LPointerMap* pointers = instr->pointer_map();
|
| + RecordPosition(pointers->position());
|
| + if (!adjusted) {
|
| + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| }
|
| + __ call(code, mode);
|
| + RegisterLazyDeoptimization(instr);
|
|
|
| // Signal that we don't inline smi code before these stubs in the
|
| // optimizing code generator.
|
| @@ -325,26 +397,20 @@
|
| }
|
|
|
|
|
| -void LCodeGen::CallRuntime(Runtime::Function* function,
|
| - int num_arguments,
|
| - LInstruction* instr) {
|
| +void LCodeGen::CallRuntime(Runtime::Function* fun,
|
| + int argc,
|
| + LInstruction* instr,
|
| + bool adjusted) {
|
| ASSERT(instr != NULL);
|
| + ASSERT(instr->HasPointerMap());
|
| LPointerMap* pointers = instr->pointer_map();
|
| - ASSERT(pointers != NULL);
|
| RecordPosition(pointers->position());
|
|
|
| - __ CallRuntime(function, num_arguments);
|
| - // Runtime calls to Throw are not supposed to ever return at the
|
| - // call site, so don't register lazy deoptimization for these. We do
|
| - // however have to record a safepoint since throwing exceptions can
|
| - // cause garbage collections.
|
| - // BUG(3243555): register a lazy deoptimization point at throw. We need
|
| - // it to be able to inline functions containing a throw statement.
|
| - if (!instr->IsThrow()) {
|
| - RegisterLazyDeoptimization(instr);
|
| - } else {
|
| - RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
|
| + if (!adjusted) {
|
| + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| }
|
| + __ CallRuntime(fun, argc);
|
| + RegisterLazyDeoptimization(instr);
|
| }
|
|
|
|
|
| @@ -385,7 +451,7 @@
|
| ++frame_count;
|
| }
|
| Translation translation(&translations_, frame_count);
|
| - environment->WriteTranslation(this, &translation);
|
| + WriteTranslation(environment, &translation);
|
| int deoptimization_index = deoptimizations_.length();
|
| environment->Register(deoptimization_index, translation.index());
|
| deoptimizations_.Add(environment);
|
| @@ -504,37 +570,36 @@
|
| }
|
|
|
|
|
| -void LCodeGen::RecordSafepoint(LPointerMap* pointers,
|
| - int deoptimization_index) {
|
| +void LCodeGen::RecordSafepoint(
|
| + LPointerMap* pointers,
|
| + Safepoint::Kind kind,
|
| + int arguments,
|
| + int deoptimization_index) {
|
| const ZoneList<LOperand*>* operands = pointers->operands();
|
| Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
|
| - deoptimization_index);
|
| + kind, arguments, deoptimization_index);
|
| for (int i = 0; i < operands->length(); i++) {
|
| LOperand* pointer = operands->at(i);
|
| if (pointer->IsStackSlot()) {
|
| safepoint.DefinePointerSlot(pointer->index());
|
| + } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
|
| + safepoint.DefinePointerRegister(ToRegister(pointer));
|
| }
|
| }
|
| }
|
|
|
|
|
| +void LCodeGen::RecordSafepoint(LPointerMap* pointers,
|
| + int deoptimization_index) {
|
| + RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
|
| +}
|
| +
|
| +
|
| void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
|
| int arguments,
|
| int deoptimization_index) {
|
| - const ZoneList<LOperand*>* operands = pointers->operands();
|
| - Safepoint safepoint =
|
| - safepoints_.DefineSafepointWithRegisters(
|
| - masm(), arguments, deoptimization_index);
|
| - for (int i = 0; i < operands->length(); i++) {
|
| - LOperand* pointer = operands->at(i);
|
| - if (pointer->IsStackSlot()) {
|
| - safepoint.DefinePointerSlot(pointer->index());
|
| - } else if (pointer->IsRegister()) {
|
| - safepoint.DefinePointerRegister(ToRegister(pointer));
|
| - }
|
| - }
|
| - // Register esi always contains a pointer to the context.
|
| - safepoint.DefinePointerRegister(esi);
|
| + RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
|
| + deoptimization_index);
|
| }
|
|
|
|
|
| @@ -557,66 +622,7 @@
|
|
|
|
|
| void LCodeGen::DoParallelMove(LParallelMove* move) {
|
| - // xmm0 must always be a scratch register.
|
| - XMMRegister xmm_scratch = xmm0;
|
| - LUnallocated marker_operand(LUnallocated::NONE);
|
| -
|
| - Register cpu_scratch = esi;
|
| - bool destroys_cpu_scratch = false;
|
| -
|
| - LGapResolver resolver(move->move_operands(), &marker_operand);
|
| - const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
|
| - for (int i = moves->length() - 1; i >= 0; --i) {
|
| - LMoveOperands move = moves->at(i);
|
| - LOperand* from = move.from();
|
| - LOperand* to = move.to();
|
| - ASSERT(!from->IsDoubleRegister() ||
|
| - !ToDoubleRegister(from).is(xmm_scratch));
|
| - ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(xmm_scratch));
|
| - ASSERT(!from->IsRegister() || !ToRegister(from).is(cpu_scratch));
|
| - ASSERT(!to->IsRegister() || !ToRegister(to).is(cpu_scratch));
|
| - if (from->IsConstantOperand()) {
|
| - __ mov(ToOperand(to), ToImmediate(from));
|
| - } else if (from == &marker_operand) {
|
| - if (to->IsRegister() || to->IsStackSlot()) {
|
| - __ mov(ToOperand(to), cpu_scratch);
|
| - ASSERT(destroys_cpu_scratch);
|
| - } else {
|
| - ASSERT(to->IsDoubleRegister() || to->IsDoubleStackSlot());
|
| - __ movdbl(ToOperand(to), xmm_scratch);
|
| - }
|
| - } else if (to == &marker_operand) {
|
| - if (from->IsRegister() || from->IsStackSlot()) {
|
| - __ mov(cpu_scratch, ToOperand(from));
|
| - destroys_cpu_scratch = true;
|
| - } else {
|
| - ASSERT(from->IsDoubleRegister() || from->IsDoubleStackSlot());
|
| - __ movdbl(xmm_scratch, ToOperand(from));
|
| - }
|
| - } else if (from->IsRegister()) {
|
| - __ mov(ToOperand(to), ToRegister(from));
|
| - } else if (to->IsRegister()) {
|
| - __ mov(ToRegister(to), ToOperand(from));
|
| - } else if (from->IsStackSlot()) {
|
| - ASSERT(to->IsStackSlot());
|
| - __ push(eax);
|
| - __ mov(eax, ToOperand(from));
|
| - __ mov(ToOperand(to), eax);
|
| - __ pop(eax);
|
| - } else if (from->IsDoubleRegister()) {
|
| - __ movdbl(ToOperand(to), ToDoubleRegister(from));
|
| - } else if (to->IsDoubleRegister()) {
|
| - __ movdbl(ToDoubleRegister(to), ToOperand(from));
|
| - } else {
|
| - ASSERT(to->IsDoubleStackSlot() && from->IsDoubleStackSlot());
|
| - __ movdbl(xmm_scratch, ToOperand(from));
|
| - __ movdbl(ToOperand(to), xmm_scratch);
|
| - }
|
| - }
|
| -
|
| - if (destroys_cpu_scratch) {
|
| - __ mov(cpu_scratch, Operand(ebp, -kPointerSize));
|
| - }
|
| + resolver_.Resolve(move);
|
| }
|
|
|
|
|
| @@ -643,6 +649,7 @@
|
|
|
|
|
| void LCodeGen::DoCallStub(LCallStub* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(esi));
|
| ASSERT(ToRegister(instr->result()).is(eax));
|
| switch (instr->hydrogen()->major_key()) {
|
| case CodeStub::RegExpConstructResult: {
|
| @@ -703,11 +710,11 @@
|
|
|
|
|
| void LCodeGen::DoModI(LModI* instr) {
|
| - LOperand* right = instr->right();
|
| + LOperand* right = instr->InputAt(1);
|
| ASSERT(ToRegister(instr->result()).is(edx));
|
| - ASSERT(ToRegister(instr->left()).is(eax));
|
| - ASSERT(!ToRegister(instr->right()).is(eax));
|
| - ASSERT(!ToRegister(instr->right()).is(edx));
|
| + ASSERT(ToRegister(instr->InputAt(0)).is(eax));
|
| + ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
|
| + ASSERT(!ToRegister(instr->InputAt(1)).is(edx));
|
|
|
| Register right_reg = ToRegister(right);
|
|
|
| @@ -743,11 +750,11 @@
|
|
|
|
|
| void LCodeGen::DoDivI(LDivI* instr) {
|
| - LOperand* right = instr->right();
|
| + LOperand* right = instr->InputAt(1);
|
| ASSERT(ToRegister(instr->result()).is(eax));
|
| - ASSERT(ToRegister(instr->left()).is(eax));
|
| - ASSERT(!ToRegister(instr->right()).is(eax));
|
| - ASSERT(!ToRegister(instr->right()).is(edx));
|
| + ASSERT(ToRegister(instr->InputAt(0)).is(eax));
|
| + ASSERT(!ToRegister(instr->InputAt(1)).is(eax));
|
| + ASSERT(!ToRegister(instr->InputAt(1)).is(edx));
|
|
|
| Register left_reg = eax;
|
|
|
| @@ -789,11 +796,11 @@
|
|
|
|
|
| void LCodeGen::DoMulI(LMulI* instr) {
|
| - Register left = ToRegister(instr->left());
|
| - LOperand* right = instr->right();
|
| + Register left = ToRegister(instr->InputAt(0));
|
| + LOperand* right = instr->InputAt(1);
|
|
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - __ mov(ToRegister(instr->temp()), left);
|
| + __ mov(ToRegister(instr->TempAt(0)), left);
|
| }
|
|
|
| if (right->IsConstantOperand()) {
|
| @@ -812,12 +819,12 @@
|
| __ test(left, Operand(left));
|
| __ j(not_zero, &done);
|
| if (right->IsConstantOperand()) {
|
| - if (ToInteger32(LConstantOperand::cast(right)) < 0) {
|
| + if (ToInteger32(LConstantOperand::cast(right)) <= 0) {
|
| DeoptimizeIf(no_condition, instr->environment());
|
| }
|
| } else {
|
| // Test the non-zero operand for negative sign.
|
| - __ or_(ToRegister(instr->temp()), ToOperand(right));
|
| + __ or_(ToRegister(instr->TempAt(0)), ToOperand(right));
|
| DeoptimizeIf(sign, instr->environment());
|
| }
|
| __ bind(&done);
|
| @@ -826,8 +833,8 @@
|
|
|
|
|
| void LCodeGen::DoBitI(LBitI* instr) {
|
| - LOperand* left = instr->left();
|
| - LOperand* right = instr->right();
|
| + LOperand* left = instr->InputAt(0);
|
| + LOperand* right = instr->InputAt(1);
|
| ASSERT(left->Equals(instr->result()));
|
| ASSERT(left->IsRegister());
|
|
|
| @@ -867,8 +874,8 @@
|
|
|
|
|
| void LCodeGen::DoShiftI(LShiftI* instr) {
|
| - LOperand* left = instr->left();
|
| - LOperand* right = instr->right();
|
| + LOperand* left = instr->InputAt(0);
|
| + LOperand* right = instr->InputAt(1);
|
| ASSERT(left->Equals(instr->result()));
|
| ASSERT(left->IsRegister());
|
| if (right->IsRegister()) {
|
| @@ -923,8 +930,8 @@
|
|
|
|
|
| void LCodeGen::DoSubI(LSubI* instr) {
|
| - LOperand* left = instr->left();
|
| - LOperand* right = instr->right();
|
| + LOperand* left = instr->InputAt(0);
|
| + LOperand* right = instr->InputAt(1);
|
| ASSERT(left->Equals(instr->result()));
|
|
|
| if (right->IsConstantOperand()) {
|
| @@ -953,19 +960,31 @@
|
| if (BitCast<uint64_t, double>(v) == 0) {
|
| __ xorpd(res, res);
|
| } else {
|
| - int32_t v_int32 = static_cast<int32_t>(v);
|
| - if (static_cast<double>(v_int32) == v) {
|
| - __ push_imm32(v_int32);
|
| - __ cvtsi2sd(res, Operand(esp, 0));
|
| - __ add(Operand(esp), Immediate(kPointerSize));
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| + uint64_t int_val = BitCast<uint64_t, double>(v);
|
| + int32_t lower = static_cast<int32_t>(int_val);
|
| + int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
|
| + if (CpuFeatures::IsSupported(SSE4_1)) {
|
| + CpuFeatures::Scope scope(SSE4_1);
|
| + if (lower != 0) {
|
| + __ Set(temp, Immediate(lower));
|
| + __ movd(res, Operand(temp));
|
| + __ Set(temp, Immediate(upper));
|
| + __ pinsrd(res, Operand(temp), 1);
|
| + } else {
|
| + __ xorpd(res, res);
|
| + __ Set(temp, Immediate(upper));
|
| + __ pinsrd(res, Operand(temp), 1);
|
| + }
|
| } else {
|
| - uint64_t int_val = BitCast<uint64_t, double>(v);
|
| - int32_t lower = static_cast<int32_t>(int_val);
|
| - int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
|
| - __ push_imm32(upper);
|
| - __ push_imm32(lower);
|
| - __ movdbl(res, Operand(esp, 0));
|
| - __ add(Operand(esp), Immediate(2 * kPointerSize));
|
| + __ Set(temp, Immediate(upper));
|
| + __ movd(res, Operand(temp));
|
| + __ psllq(res, 32);
|
| + if (lower != 0) {
|
| + __ Set(temp, Immediate(lower));
|
| + __ movd(xmm0, Operand(temp));
|
| + __ por(res, xmm0);
|
| + }
|
| }
|
| }
|
| }
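The rewritten constant-loading path builds the double from two 32-bit immediates instead of going through the stack. A minimal sketch of the bit split it depends on, assuming little-endian IEEE 754 doubles (names are made up, not from the patch):

    #include <cstdint>
    #include <cstring>

    // Produces the "lower"/"upper" halves that the generated code feeds to
    // movd/pinsrd (SSE4.1 path) or movd + psllq + por (fallback path).
    void SplitDouble(double v, int32_t* lower, int32_t* upper) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof(bits));        // same role as BitCast<uint64_t, double>(v)
      *lower = static_cast<int32_t>(bits);         // low 32 bits of the XMM value
      *upper = static_cast<int32_t>(bits >> 32);   // high 32 bits, inserted into lane 1
    }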
|
| @@ -979,22 +998,29 @@
|
|
|
| void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
|
| Register result = ToRegister(instr->result());
|
| - Register array = ToRegister(instr->input());
|
| + Register array = ToRegister(instr->InputAt(0));
|
| __ mov(result, FieldOperand(array, JSArray::kLengthOffset));
|
| }
|
|
|
|
|
| void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
|
| Register result = ToRegister(instr->result());
|
| - Register array = ToRegister(instr->input());
|
| + Register array = ToRegister(instr->InputAt(0));
|
| __ mov(result, FieldOperand(array, FixedArray::kLengthOffset));
|
| }
|
|
|
|
|
| +void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
|
| + Register result = ToRegister(instr->result());
|
| + Register array = ToRegister(instr->InputAt(0));
|
| + __ mov(result, FieldOperand(array, PixelArray::kLengthOffset));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoValueOf(LValueOf* instr) {
|
| - Register input = ToRegister(instr->input());
|
| + Register input = ToRegister(instr->InputAt(0));
|
| Register result = ToRegister(instr->result());
|
| - Register map = ToRegister(instr->temporary());
|
| + Register map = ToRegister(instr->TempAt(0));
|
| ASSERT(input.is(result));
|
| NearLabel done;
|
| // If the object is a smi return the object.
|
| @@ -1011,15 +1037,15 @@
|
|
|
|
|
| void LCodeGen::DoBitNotI(LBitNotI* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->Equals(instr->result()));
|
| __ not_(ToRegister(input));
|
| }
|
|
|
|
|
| void LCodeGen::DoThrow(LThrow* instr) {
|
| - __ push(ToOperand(instr->input()));
|
| - CallRuntime(Runtime::kThrow, 1, instr);
|
| + __ push(ToOperand(instr->InputAt(0)));
|
| + CallRuntime(Runtime::kThrow, 1, instr, false);
|
|
|
| if (FLAG_debug_code) {
|
| Comment("Unreachable code.");
|
| @@ -1029,8 +1055,8 @@
|
|
|
|
|
| void LCodeGen::DoAddI(LAddI* instr) {
|
| - LOperand* left = instr->left();
|
| - LOperand* right = instr->right();
|
| + LOperand* left = instr->InputAt(0);
|
| + LOperand* right = instr->InputAt(1);
|
| ASSERT(left->Equals(instr->result()));
|
|
|
| if (right->IsConstantOperand()) {
|
| @@ -1046,8 +1072,8 @@
|
|
|
|
|
| void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
|
| - LOperand* left = instr->left();
|
| - LOperand* right = instr->right();
|
| + LOperand* left = instr->InputAt(0);
|
| + LOperand* right = instr->InputAt(1);
|
| // Modulo uses a fixed result register.
|
| ASSERT(instr->op() == Token::MOD || left->Equals(instr->result()));
|
| switch (instr->op()) {
|
| @@ -1086,12 +1112,12 @@
|
|
|
|
|
| void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
|
| - ASSERT(ToRegister(instr->left()).is(edx));
|
| - ASSERT(ToRegister(instr->right()).is(eax));
|
| + ASSERT(ToRegister(instr->InputAt(0)).is(edx));
|
| + ASSERT(ToRegister(instr->InputAt(1)).is(eax));
|
| ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
| TypeRecordingBinaryOpStub stub(instr->op(), NO_OVERWRITE);
|
| - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
|
| }
|
|
|
|
|
| @@ -1128,17 +1154,17 @@
|
|
|
| Representation r = instr->hydrogen()->representation();
|
| if (r.IsInteger32()) {
|
| - Register reg = ToRegister(instr->input());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| __ test(reg, Operand(reg));
|
| EmitBranch(true_block, false_block, not_zero);
|
| } else if (r.IsDouble()) {
|
| - XMMRegister reg = ToDoubleRegister(instr->input());
|
| + XMMRegister reg = ToDoubleRegister(instr->InputAt(0));
|
| __ xorpd(xmm0, xmm0);
|
| __ ucomisd(reg, xmm0);
|
| EmitBranch(true_block, false_block, not_equal);
|
| } else {
|
| ASSERT(r.IsTagged());
|
| - Register reg = ToRegister(instr->input());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| if (instr->hydrogen()->type().IsBoolean()) {
|
| __ cmp(reg, Factory::true_value());
|
| EmitBranch(true_block, false_block, equal);
|
| @@ -1204,6 +1230,7 @@
|
|
|
| void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
|
| __ pushad();
|
| + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
|
| RecordSafepointWithRegisters(
|
| instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
|
| @@ -1266,8 +1293,8 @@
|
|
|
|
|
| void LCodeGen::DoCmpID(LCmpID* instr) {
|
| - LOperand* left = instr->left();
|
| - LOperand* right = instr->right();
|
| + LOperand* left = instr->InputAt(0);
|
| + LOperand* right = instr->InputAt(1);
|
| LOperand* result = instr->result();
|
|
|
| NearLabel unordered;
|
| @@ -1282,18 +1309,18 @@
|
|
|
| NearLabel done;
|
| Condition cc = TokenToCondition(instr->op(), instr->is_double());
|
| - __ mov(ToRegister(result), Handle<Object>(Heap::true_value()));
|
| + __ mov(ToRegister(result), Factory::true_value());
|
| __ j(cc, &done);
|
|
|
| __ bind(&unordered);
|
| - __ mov(ToRegister(result), Handle<Object>(Heap::false_value()));
|
| + __ mov(ToRegister(result), Factory::false_value());
|
| __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
|
| - LOperand* left = instr->left();
|
| - LOperand* right = instr->right();
|
| + LOperand* left = instr->InputAt(0);
|
| + LOperand* right = instr->InputAt(1);
|
| int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| int true_block = chunk_->LookupDestination(instr->true_block_id());
|
|
|
| @@ -1312,22 +1339,22 @@
|
|
|
|
|
| void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
|
| - Register left = ToRegister(instr->left());
|
| - Register right = ToRegister(instr->right());
|
| + Register left = ToRegister(instr->InputAt(0));
|
| + Register right = ToRegister(instr->InputAt(1));
|
| Register result = ToRegister(instr->result());
|
|
|
| __ cmp(left, Operand(right));
|
| - __ mov(result, Handle<Object>(Heap::true_value()));
|
| + __ mov(result, Factory::true_value());
|
| NearLabel done;
|
| __ j(equal, &done);
|
| - __ mov(result, Handle<Object>(Heap::false_value()));
|
| + __ mov(result, Factory::false_value());
|
| __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
|
| - Register left = ToRegister(instr->left());
|
| - Register right = ToRegister(instr->right());
|
| + Register left = ToRegister(instr->InputAt(0));
|
| + Register right = ToRegister(instr->InputAt(1));
|
| int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| int true_block = chunk_->LookupDestination(instr->true_block_id());
|
|
|
| @@ -1337,7 +1364,7 @@
|
|
|
|
|
| void LCodeGen::DoIsNull(LIsNull* instr) {
|
| - Register reg = ToRegister(instr->input());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| Register result = ToRegister(instr->result());
|
|
|
| // TODO(fsc): If the expression is known to be a smi, then it's
|
| @@ -1345,10 +1372,10 @@
|
|
|
| __ cmp(reg, Factory::null_value());
|
| if (instr->is_strict()) {
|
| - __ mov(result, Handle<Object>(Heap::true_value()));
|
| + __ mov(result, Factory::true_value());
|
| NearLabel done;
|
| __ j(equal, &done);
|
| - __ mov(result, Handle<Object>(Heap::false_value()));
|
| + __ mov(result, Factory::false_value());
|
| __ bind(&done);
|
| } else {
|
| NearLabel true_value, false_value, done;
|
| @@ -1365,17 +1392,17 @@
|
| __ test(scratch, Immediate(1 << Map::kIsUndetectable));
|
| __ j(not_zero, &true_value);
|
| __ bind(&false_value);
|
| - __ mov(result, Handle<Object>(Heap::false_value()));
|
| + __ mov(result, Factory::false_value());
|
| __ jmp(&done);
|
| __ bind(&true_value);
|
| - __ mov(result, Handle<Object>(Heap::true_value()));
|
| + __ mov(result, Factory::true_value());
|
| __ bind(&done);
|
| }
|
| }
|
|
|
|
|
| void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
|
| - Register reg = ToRegister(instr->input());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
|
|
| // TODO(fsc): If the expression is known to be a smi, then it's
|
| // definitely not null. Jump to the false block.
|
| @@ -1396,7 +1423,7 @@
|
| __ j(zero, false_label);
|
| // Check for undetectable objects by looking in the bit field in
|
| // the map. The object has already been smi checked.
|
| - Register scratch = ToRegister(instr->temp());
|
| + Register scratch = ToRegister(instr->TempAt(0));
|
| __ mov(scratch, FieldOperand(reg, HeapObject::kMapOffset));
|
| __ movzx_b(scratch, FieldOperand(scratch, Map::kBitFieldOffset));
|
| __ test(scratch, Immediate(1 << Map::kIsUndetectable));
|
| @@ -1435,29 +1462,29 @@
|
|
|
|
|
| void LCodeGen::DoIsObject(LIsObject* instr) {
|
| - Register reg = ToRegister(instr->input());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| Register result = ToRegister(instr->result());
|
| - Register temp = ToRegister(instr->temp());
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| Label is_false, is_true, done;
|
|
|
| Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
|
| __ j(true_cond, &is_true);
|
|
|
| __ bind(&is_false);
|
| - __ mov(result, Handle<Object>(Heap::false_value()));
|
| + __ mov(result, Factory::false_value());
|
| __ jmp(&done);
|
|
|
| __ bind(&is_true);
|
| - __ mov(result, Handle<Object>(Heap::true_value()));
|
| + __ mov(result, Factory::true_value());
|
|
|
| __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
|
| - Register reg = ToRegister(instr->input());
|
| - Register temp = ToRegister(instr->temp());
|
| - Register temp2 = ToRegister(instr->temp2());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| + Register temp2 = ToRegister(instr->TempAt(1));
|
|
|
| int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| @@ -1471,21 +1498,21 @@
|
|
|
|
|
| void LCodeGen::DoIsSmi(LIsSmi* instr) {
|
| - Operand input = ToOperand(instr->input());
|
| + Operand input = ToOperand(instr->InputAt(0));
|
| Register result = ToRegister(instr->result());
|
|
|
| ASSERT(instr->hydrogen()->value()->representation().IsTagged());
|
| __ test(input, Immediate(kSmiTagMask));
|
| - __ mov(result, Handle<Object>(Heap::true_value()));
|
| + __ mov(result, Factory::true_value());
|
| NearLabel done;
|
| __ j(zero, &done);
|
| - __ mov(result, Handle<Object>(Heap::false_value()));
|
| + __ mov(result, Factory::false_value());
|
| __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
|
| - Operand input = ToOperand(instr->input());
|
| + Operand input = ToOperand(instr->InputAt(0));
|
|
|
| int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| @@ -1495,19 +1522,18 @@
|
| }
|
|
|
|
|
| -InstanceType LHasInstanceType::TestType() {
|
| - InstanceType from = hydrogen()->from();
|
| - InstanceType to = hydrogen()->to();
|
| +static InstanceType TestType(HHasInstanceType* instr) {
|
| + InstanceType from = instr->from();
|
| + InstanceType to = instr->to();
|
| if (from == FIRST_TYPE) return to;
|
| ASSERT(from == to || to == LAST_TYPE);
|
| return from;
|
| }
|
|
|
|
|
| -
|
| -Condition LHasInstanceType::BranchCondition() {
|
| - InstanceType from = hydrogen()->from();
|
| - InstanceType to = hydrogen()->to();
|
| +static Condition BranchCondition(HHasInstanceType* instr) {
|
| + InstanceType from = instr->from();
|
| + InstanceType to = instr->to();
|
| if (from == to) return equal;
|
| if (to == LAST_TYPE) return above_equal;
|
| if (from == FIRST_TYPE) return below_equal;
|
| @@ -1517,26 +1543,26 @@
|
|
|
|
|
| void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
|
| - Register input = ToRegister(instr->input());
|
| + Register input = ToRegister(instr->InputAt(0));
|
| Register result = ToRegister(instr->result());
|
|
|
| ASSERT(instr->hydrogen()->value()->representation().IsTagged());
|
| __ test(input, Immediate(kSmiTagMask));
|
| NearLabel done, is_false;
|
| __ j(zero, &is_false);
|
| - __ CmpObjectType(input, instr->TestType(), result);
|
| - __ j(NegateCondition(instr->BranchCondition()), &is_false);
|
| - __ mov(result, Handle<Object>(Heap::true_value()));
|
| + __ CmpObjectType(input, TestType(instr->hydrogen()), result);
|
| + __ j(NegateCondition(BranchCondition(instr->hydrogen())), &is_false);
|
| + __ mov(result, Factory::true_value());
|
| __ jmp(&done);
|
| __ bind(&is_false);
|
| - __ mov(result, Handle<Object>(Heap::false_value()));
|
| + __ mov(result, Factory::false_value());
|
| __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
|
| - Register input = ToRegister(instr->input());
|
| - Register temp = ToRegister(instr->temp());
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + Register temp = ToRegister(instr->TempAt(0));
|
|
|
| int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| @@ -1546,29 +1572,29 @@
|
| __ test(input, Immediate(kSmiTagMask));
|
| __ j(zero, false_label);
|
|
|
| - __ CmpObjectType(input, instr->TestType(), temp);
|
| - EmitBranch(true_block, false_block, instr->BranchCondition());
|
| + __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
|
| + EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
|
| }
|
|
|
|
|
| void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
|
| - Register input = ToRegister(instr->input());
|
| + Register input = ToRegister(instr->InputAt(0));
|
| Register result = ToRegister(instr->result());
|
|
|
| ASSERT(instr->hydrogen()->value()->representation().IsTagged());
|
| - __ mov(result, Handle<Object>(Heap::true_value()));
|
| + __ mov(result, Factory::true_value());
|
| __ test(FieldOperand(input, String::kHashFieldOffset),
|
| Immediate(String::kContainsCachedArrayIndexMask));
|
| NearLabel done;
|
| __ j(not_zero, &done);
|
| - __ mov(result, Handle<Object>(Heap::false_value()));
|
| + __ mov(result, Factory::false_value());
|
| __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoHasCachedArrayIndexAndBranch(
|
| LHasCachedArrayIndexAndBranch* instr) {
|
| - Register input = ToRegister(instr->input());
|
| + Register input = ToRegister(instr->InputAt(0));
|
|
|
| int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| @@ -1637,10 +1663,10 @@
|
|
|
|
|
| void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
|
| - Register input = ToRegister(instr->input());
|
| + Register input = ToRegister(instr->InputAt(0));
|
| Register result = ToRegister(instr->result());
|
| ASSERT(input.is(result));
|
| - Register temp = ToRegister(instr->temporary());
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| Handle<String> class_name = instr->hydrogen()->class_name();
|
| NearLabel done;
|
| Label is_true, is_false;
|
| @@ -1650,19 +1676,19 @@
|
| __ j(not_equal, &is_false);
|
|
|
| __ bind(&is_true);
|
| - __ mov(result, Handle<Object>(Heap::true_value()));
|
| + __ mov(result, Factory::true_value());
|
| __ jmp(&done);
|
|
|
| __ bind(&is_false);
|
| - __ mov(result, Handle<Object>(Heap::false_value()));
|
| + __ mov(result, Factory::false_value());
|
| __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
|
| - Register input = ToRegister(instr->input());
|
| - Register temp = ToRegister(instr->temporary());
|
| - Register temp2 = ToRegister(instr->temporary2());
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| + Register temp2 = ToRegister(instr->TempAt(1));
|
| if (input.is(temp)) {
|
| // Swap.
|
| Register swapper = temp;
|
| @@ -1684,7 +1710,7 @@
|
|
|
|
|
| void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
|
| - Register reg = ToRegister(instr->input());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| int true_block = instr->true_block_id();
|
| int false_block = instr->false_block_id();
|
|
|
| @@ -1694,7 +1720,8 @@
|
|
|
|
|
| void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
|
| - // Object and function are in fixed registers eax and edx.
|
| + // Object and function are in fixed registers defined by the stub.
|
| + ASSERT(ToRegister(instr->context()).is(esi));
|
| InstanceofStub stub(InstanceofStub::kArgsInRegisters);
|
| CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
|
|
| @@ -1710,6 +1737,7 @@
|
|
|
|
|
| void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(esi));
|
| int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
|
| @@ -1720,6 +1748,108 @@
|
| }
|
|
|
|
|
| +void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
|
| + class DeferredInstanceOfKnownGlobal: public LDeferredCode {
|
| + public:
|
| + DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
|
| + LInstanceOfKnownGlobal* instr)
|
| + : LDeferredCode(codegen), instr_(instr) { }
|
| + virtual void Generate() {
|
| + codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
|
| + }
|
| +
|
| + Label* map_check() { return &map_check_; }
|
| +
|
| + private:
|
| + LInstanceOfKnownGlobal* instr_;
|
| + Label map_check_;
|
| + };
|
| +
|
| + DeferredInstanceOfKnownGlobal* deferred;
|
| + deferred = new DeferredInstanceOfKnownGlobal(this, instr);
|
| +
|
| + Label done, false_result;
|
| + Register object = ToRegister(instr->InputAt(0));
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| +
|
| + // A Smi is not an instance of anything.
|
| + __ test(object, Immediate(kSmiTagMask));
|
| + __ j(zero, &false_result, not_taken);
|
| +
|
| + // This is the inlined call site instanceof cache. The two occurrences of the
|
| + // hole value will be patched to the last map/result pair generated by the
|
| + // instanceof stub.
|
| + NearLabel cache_miss;
|
| + Register map = ToRegister(instr->TempAt(0));
|
| + __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
|
| + __ bind(deferred->map_check()); // Label for calculating code patching.
|
| + __ cmp(map, Factory::the_hole_value()); // Patched to cached map.
|
| + __ j(not_equal, &cache_miss, not_taken);
|
| + __ mov(eax, Factory::the_hole_value()); // Patched to either true or false.
|
| + __ jmp(&done);
|
| +
|
| + // The inlined call site cache did not match. Check for null and string
|
| + // before calling the deferred code.
|
| + __ bind(&cache_miss);
|
| + // Null is not an instance of anything.
|
| + __ cmp(object, Factory::null_value());
|
| + __ j(equal, &false_result);
|
| +
|
| + // String values are not instances of anything.
|
| + Condition is_string = masm_->IsObjectStringType(object, temp, temp);
|
| + __ j(is_string, &false_result);
|
| +
|
| + // Go to the deferred code.
|
| + __ jmp(deferred->entry());
|
| +
|
| + __ bind(&false_result);
|
| + __ mov(ToRegister(instr->result()), Factory::false_value());
|
| +
|
| + // At this point, result holds either true or false. The deferred code also
|
| + // produces a true or false object.
|
| + __ bind(deferred->exit());
|
| + __ bind(&done);
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
|
| + Label* map_check) {
|
| + __ PushSafepointRegisters();
|
| +
|
| + InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
|
| + flags = static_cast<InstanceofStub::Flags>(
|
| + flags | InstanceofStub::kArgsInRegisters);
|
| + flags = static_cast<InstanceofStub::Flags>(
|
| + flags | InstanceofStub::kCallSiteInlineCheck);
|
| + flags = static_cast<InstanceofStub::Flags>(
|
| + flags | InstanceofStub::kReturnTrueFalseObject);
|
| + InstanceofStub stub(flags);
|
| +
|
| + // Get the temp register reserved by the instruction. This needs to be edi
|
| + // because its slot among the pushed safepoint registers is used to
|
| + // communicate the offset to the location of the map check.
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| + ASSERT(temp.is(edi));
|
| + __ mov(InstanceofStub::right(), Immediate(instr->function()));
|
| + static const int kAdditionalDelta = 16;
|
| + int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
|
| + Label before_push_delta;
|
| + __ bind(&before_push_delta);
|
| + __ mov(temp, Immediate(delta));
|
| + __ mov(Operand(esp, EspIndexForPushAll(temp) * kPointerSize), temp);
|
| + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| + __ call(stub.GetCode(), RelocInfo::CODE_TARGET);
|
| + ASSERT_EQ(kAdditionalDelta,
|
| + masm_->SizeOfCodeGeneratedSince(&before_push_delta));
|
| + RecordSafepointWithRegisters(
|
| + instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
|
| + // Put the result value into the eax slot and restore all registers.
|
| + __ mov(Operand(esp, EspIndexForPushAll(eax) * kPointerSize), eax);
|
| +
|
| + __ PopSafepointRegisters();
|
| +}
|
| +
|
| +
|
| static Condition ComputeCompareCondition(Token::Value op) {
|
| switch (op) {
|
| case Token::EQ_STRICT:
|
| @@ -1744,7 +1874,7 @@
|
| Token::Value op = instr->op();
|
|
|
| Handle<Code> ic = CompareIC::GetUninitialized(op);
|
| - CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| + CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
|
|
|
| Condition condition = ComputeCompareCondition(op);
|
| if (op == Token::GT || op == Token::LTE) {
|
| @@ -1767,7 +1897,7 @@
|
| int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
|
| Handle<Code> ic = CompareIC::GetUninitialized(op);
|
| - CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| + CallCode(ic, RelocInfo::CODE_TARGET, instr, false);
|
|
|
| // The compare stub expects compare condition and the input operands
|
| // reversed for GT and LTE.
|
| @@ -1782,14 +1912,17 @@
|
|
|
| void LCodeGen::DoReturn(LReturn* instr) {
|
| if (FLAG_trace) {
|
| - // Preserve the return value on the stack and rely on the runtime
|
| - // call to return the value in the same register.
|
| + // Preserve the return value on the stack and rely on the runtime call
|
| + // to return the value in the same register. We're leaving the code
|
| + // managed by the register allocator and tearing down the frame, so it's
|
| + // safe to write to the context register.
|
| __ push(eax);
|
| + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| __ CallRuntime(Runtime::kTraceExit, 1);
|
| }
|
| __ mov(esp, ebp);
|
| __ pop(ebp);
|
| - __ ret((ParameterCount() + 1) * kPointerSize);
|
| + __ Ret((ParameterCount() + 1) * kPointerSize, ecx);
|
| }
|
|
|
|
|
| @@ -1804,13 +1937,44 @@
|
|
|
|
|
| void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
|
| - Register value = ToRegister(instr->input());
|
| - __ mov(Operand::Cell(instr->hydrogen()->cell()), value);
|
| + Register value = ToRegister(instr->InputAt(0));
|
| + Operand cell_operand = Operand::Cell(instr->hydrogen()->cell());
|
| +
|
| + // If the cell we are storing to contains the hole, it could have
|
| + // been deleted from the property dictionary. In that case, we need
|
| + // to update the property details in the property dictionary to mark
|
| + // it as no longer deleted. We deoptimize in that case.
|
| + if (instr->hydrogen()->check_hole_value()) {
|
| + __ cmp(cell_operand, Factory::the_hole_value());
|
| + DeoptimizeIf(equal, instr->environment());
|
| + }
|
| +
|
| + // Store the value.
|
| + __ mov(cell_operand, value);
|
| }
|
|
|
|
|
| +void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
|
| + Register context = ToRegister(instr->context());
|
| + Register result = ToRegister(instr->result());
|
| + __ mov(result, ContextOperand(context, instr->slot_index()));
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
|
| + Register context = ToRegister(instr->context());
|
| + Register value = ToRegister(instr->value());
|
| + __ mov(ContextOperand(context, instr->slot_index()), value);
|
| + if (instr->needs_write_barrier()) {
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| + int offset = Context::SlotOffset(instr->slot_index());
|
| + __ RecordWrite(context, offset, value, temp, kSaveFPRegs);
|
| + }
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
|
| - Register object = ToRegister(instr->input());
|
| + Register object = ToRegister(instr->InputAt(0));
|
| Register result = ToRegister(instr->result());
|
| if (instr->hydrogen()->is_in_object()) {
|
| __ mov(result, FieldOperand(object, instr->hydrogen()->offset()));
|
| @@ -1822,6 +1986,7 @@
|
|
|
|
|
| void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(esi));
|
| ASSERT(ToRegister(instr->object()).is(eax));
|
| ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
| @@ -1833,7 +1998,7 @@
|
|
|
| void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
|
| Register function = ToRegister(instr->function());
|
| - Register temp = ToRegister(instr->temporary());
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| Register result = ToRegister(instr->result());
|
|
|
| // Check that the function really is a function.
|
| @@ -1874,22 +2039,33 @@
|
|
|
|
|
| void LCodeGen::DoLoadElements(LLoadElements* instr) {
|
| - ASSERT(instr->result()->Equals(instr->input()));
|
| - Register reg = ToRegister(instr->input());
|
| - __ mov(reg, FieldOperand(reg, JSObject::kElementsOffset));
|
| + Register result = ToRegister(instr->result());
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + __ mov(result, FieldOperand(input, JSObject::kElementsOffset));
|
| if (FLAG_debug_code) {
|
| NearLabel done;
|
| - __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
|
| + __ cmp(FieldOperand(result, HeapObject::kMapOffset),
|
| Immediate(Factory::fixed_array_map()));
|
| __ j(equal, &done);
|
| - __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
|
| + __ cmp(FieldOperand(result, HeapObject::kMapOffset),
|
| + Immediate(Factory::pixel_array_map()));
|
| + __ j(equal, &done);
|
| + __ cmp(FieldOperand(result, HeapObject::kMapOffset),
|
| Immediate(Factory::fixed_cow_array_map()));
|
| - __ Check(equal, "Check for fast elements failed.");
|
| + __ Check(equal, "Check for fast elements or pixel array failed.");
|
| __ bind(&done);
|
| }
|
| }
|
|
|
|
|
| +void LCodeGen::DoLoadPixelArrayExternalPointer(
|
| + LLoadPixelArrayExternalPointer* instr) {
|
| + Register result = ToRegister(instr->result());
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + __ mov(result, FieldOperand(input, PixelArray::kExternalPointerOffset));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
|
| Register arguments = ToRegister(instr->arguments());
|
| Register length = ToRegister(instr->length());
|
| @@ -1899,6 +2075,8 @@
|
| __ sub(length, index);
|
| DeoptimizeIf(below_equal, instr->environment());
|
|
|
| + // There are two words between the frame pointer and the last argument.
|
| + // Subtracting the index from the length accounts for one of them; add one more.
|
| __ mov(result, Operand(arguments, length, times_4, kPointerSize));
|
| }
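A worked reading of the address computation above, under the usual ia32 frame layout (this is my interpretation, not something the patch states): `arguments` holds the frame pointer of the frame that owns the parameters, and argument `index` out of `length` lives at `fp + (length - index + 1) * kPointerSize`.

    // Hypothetical helper for illustration only: the byte offset produced by
    // `sub(length, index)` followed by Operand(arguments, length, times_4, kPointerSize).
    int ArgumentOffsetFromFramePointer(int length, int index) {
      const int kPointerSize = 4;                  // ia32
      int remaining = length - index;              // mirrors __ sub(length, index)
      return remaining * kPointerSize + kPointerSize;
      // e.g. length == 3, index == 0 -> 16: the first argument sits four words
      // above the frame pointer, past the return address and the two later arguments.
    }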
|
|
|
| @@ -1906,36 +2084,34 @@
|
| void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
|
| Register elements = ToRegister(instr->elements());
|
| Register key = ToRegister(instr->key());
|
| - Register result;
|
| - if (instr->load_result() != NULL) {
|
| - result = ToRegister(instr->load_result());
|
| - } else {
|
| - result = ToRegister(instr->result());
|
| - ASSERT(result.is(elements));
|
| - }
|
| + Register result = ToRegister(instr->result());
|
| + ASSERT(result.is(elements));
|
|
|
| // Load the result.
|
| - __ mov(result, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
|
| + __ mov(result, FieldOperand(elements,
|
| + key,
|
| + times_pointer_size,
|
| + FixedArray::kHeaderSize));
|
|
|
| - Representation r = instr->hydrogen()->representation();
|
| - if (r.IsInteger32()) {
|
| - // Untag and check for smi.
|
| - __ SmiUntag(result);
|
| - DeoptimizeIf(carry, instr->environment());
|
| - } else if (r.IsDouble()) {
|
| - EmitNumberUntagD(result,
|
| - ToDoubleRegister(instr->result()),
|
| - instr->environment());
|
| - } else {
|
| - // Check for the hole value.
|
| - ASSERT(r.IsTagged());
|
| - __ cmp(result, Factory::the_hole_value());
|
| - DeoptimizeIf(equal, instr->environment());
|
| - }
|
| + // Check for the hole value.
|
| + __ cmp(result, Factory::the_hole_value());
|
| + DeoptimizeIf(equal, instr->environment());
|
| }
|
|
|
|
|
| +void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
|
| + Register external_elements = ToRegister(instr->external_pointer());
|
| + Register key = ToRegister(instr->key());
|
| + Register result = ToRegister(instr->result());
|
| + ASSERT(result.is(external_elements));
|
| +
|
| + // Load the result.
|
| + __ movzx_b(result, Operand(external_elements, key, times_1, 0));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(esi));
|
| ASSERT(ToRegister(instr->object()).is(edx));
|
| ASSERT(ToRegister(instr->key()).is(eax));
|
|
|
| @@ -1948,7 +2124,7 @@
|
| Register result = ToRegister(instr->result());
|
|
|
| // Check for arguments adapter frame.
|
| - Label done, adapted;
|
| + NearLabel done, adapted;
|
| __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
|
| __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
|
| __ cmp(Operand(result),
|
| @@ -1963,18 +2139,19 @@
|
| __ bind(&adapted);
|
| __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
|
|
|
| - // Done. Pointer to topmost argument is in result.
|
| + // Result is the frame pointer for the frame if not adapted and for the real
|
| + // frame below the adaptor frame if adapted.
|
| __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
|
| - Operand elem = ToOperand(instr->input());
|
| + Operand elem = ToOperand(instr->InputAt(0));
|
| Register result = ToRegister(instr->result());
|
|
|
| - Label done;
|
| + NearLabel done;
|
|
|
| - // No arguments adaptor frame. Number of arguments is fixed.
|
| + // If no arguments adaptor frame the number of arguments is fixed.
|
| __ cmp(ebp, elem);
|
| __ mov(result, Immediate(scope()->num_parameters()));
|
| __ j(equal, &done);
|
| @@ -1985,31 +2162,43 @@
|
| ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| __ SmiUntag(result);
|
|
|
| - // Done. Argument length is in result register.
|
| + // Argument length is in result register.
|
| __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
|
| Register receiver = ToRegister(instr->receiver());
|
| - ASSERT(ToRegister(instr->function()).is(edi));
|
| + Register function = ToRegister(instr->function());
|
| + Register length = ToRegister(instr->length());
|
| + Register elements = ToRegister(instr->elements());
|
| + Register scratch = ToRegister(instr->TempAt(0));
|
| + ASSERT(receiver.is(eax)); // Used for parameter count.
|
| + ASSERT(function.is(edi)); // Required by InvokeFunction.
|
| ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
| - // If the receiver is null or undefined, we have to pass the
|
| - // global object as a receiver.
|
| - NearLabel global_receiver, receiver_ok;
|
| + // If the receiver is null or undefined, we have to pass the global object
|
| + // as a receiver.
|
| + NearLabel global_object, receiver_ok;
|
| __ cmp(receiver, Factory::null_value());
|
| - __ j(equal, &global_receiver);
|
| + __ j(equal, &global_object);
|
| __ cmp(receiver, Factory::undefined_value());
|
| - __ j(not_equal, &receiver_ok);
|
| - __ bind(&global_receiver);
|
| - __ mov(receiver, GlobalObjectOperand());
|
| - __ bind(&receiver_ok);
|
| + __ j(equal, &global_object);
|
|
|
| - Register length = ToRegister(instr->length());
|
| - Register elements = ToRegister(instr->elements());
|
| + // The receiver should be a JS object.
|
| + __ test(receiver, Immediate(kSmiTagMask));
|
| + DeoptimizeIf(equal, instr->environment());
|
| + __ CmpObjectType(receiver, FIRST_JS_OBJECT_TYPE, scratch);
|
| + DeoptimizeIf(below, instr->environment());
|
| + __ jmp(&receiver_ok);
|
|
|
| - Label invoke;
|
| + __ bind(&global_object);
|
| + // TODO(kmillikin): We have a hydrogen value for the global object. See
|
| + // if it's better to use it than to explicitly fetch it from the context
|
| + // here.
|
| + __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| + __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_INDEX));
|
| + __ bind(&receiver_ok);
|
|
|
| // Copy the arguments to this function possibly from the
|
| // adaptor frame below it.
|
| @@ -2022,7 +2211,7 @@
|
|
|
| // Loop through the arguments pushing them onto the execution
|
| // stack.
|
| - Label loop;
|
| + NearLabel invoke, loop;
|
| // length is a small non-negative integer, due to the test above.
|
| __ test(length, Operand(length));
|
| __ j(zero, &invoke);
|
| @@ -2033,17 +2222,22 @@
|
|
|
| // Invoke the function.
|
| __ bind(&invoke);
|
| - ASSERT(receiver.is(eax));
|
| + ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
|
| + LPointerMap* pointers = instr->pointer_map();
|
| + LEnvironment* env = instr->deoptimization_environment();
|
| + RecordPosition(pointers->position());
|
| + RegisterEnvironmentForDeoptimization(env);
|
| + SafepointGenerator safepoint_generator(this,
|
| + pointers,
|
| + env->deoptimization_index(),
|
| + true);
|
| v8::internal::ParameterCount actual(eax);
|
| - SafepointGenerator safepoint_generator(this,
|
| - instr->pointer_map(),
|
| - Safepoint::kNoDeoptimizationIndex);
|
| - __ InvokeFunction(edi, actual, CALL_FUNCTION, &safepoint_generator);
|
| + __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
|
| }
|
|
|
|
|
| void LCodeGen::DoPushArgument(LPushArgument* instr) {
|
| - LOperand* argument = instr->input();
|
| + LOperand* argument = instr->InputAt(0);
|
| if (argument->IsConstantOperand()) {
|
| __ push(ToImmediate(argument));
|
| } else {
|
| @@ -2052,16 +2246,31 @@
|
| }
|
|
|
|
|
| +void LCodeGen::DoContext(LContext* instr) {
|
| + Register result = ToRegister(instr->result());
|
| + __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoOuterContext(LOuterContext* instr) {
|
| + Register context = ToRegister(instr->context());
|
| + Register result = ToRegister(instr->result());
|
| + __ mov(result, Operand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
|
| + __ mov(result, FieldOperand(result, JSFunction::kContextOffset));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
|
| + Register context = ToRegister(instr->context());
|
| Register result = ToRegister(instr->result());
|
| - __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
| + __ mov(result, Operand(context, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
| }
|
|
|
|
|
| void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
|
| + Register global = ToRegister(instr->global());
|
| Register result = ToRegister(instr->result());
|
| - __ mov(result, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
|
| - __ mov(result, FieldOperand(result, GlobalObject::kGlobalReceiverOffset));
|
| + __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
|
| }
|
|
|
|
|
| @@ -2075,6 +2284,8 @@
|
| (scope()->num_heap_slots() > 0);
|
| if (change_context) {
|
| __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
|
| + } else {
|
| + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
| // Set eax to arguments count if adaption is not needed. Assumes that eax
|
| @@ -2090,14 +2301,15 @@
|
| if (*function == *graph()->info()->closure()) {
|
| __ CallSelf();
|
| } else {
|
| + // This is an indirect call and will not be recorded in the reloc info.
|
| + // Add a comment to the reloc info in case we need to patch this during
|
| + // deoptimization.
|
| + __ RecordComment(RelocInfo::kFillerCommentString, true);
|
| __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
|
| }
|
|
|
| // Setup deoptimization.
|
| RegisterLazyDeoptimization(instr);
|
| -
|
| - // Restore context.
|
| - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| @@ -2109,7 +2321,7 @@
|
|
|
|
|
| void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
|
| - Register input_reg = ToRegister(instr->input());
|
| + Register input_reg = ToRegister(instr->InputAt(0));
|
| __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
|
| Factory::heap_number_map());
|
| DeoptimizeIf(not_equal, instr->environment());
|
| @@ -2123,11 +2335,12 @@
|
|
|
| Label negative;
|
| __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
|
| - // Check the sign of the argument. If the argument is positive,
|
| - // just return it.
|
| + // Check the sign of the argument. If the argument is positive, just
|
| + // return it. We do not need to patch the stack since |input| and
|
| + // |result| are the same register and |input| will be restored
|
| + // unchanged by popping safepoint registers.
|
| __ test(tmp, Immediate(HeapNumber::kSignMask));
|
| __ j(not_zero, &negative);
|
| - __ mov(tmp, input_reg);
|
| __ jmp(&done);
|
|
|
| __ bind(&negative);
|
| @@ -2139,6 +2352,7 @@
|
| // Slow case: Call the runtime system to do the number allocation.
|
| __ bind(&slow);
|
|
|
| + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
|
| RecordSafepointWithRegisters(
|
| instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
|
| @@ -2154,14 +2368,25 @@
|
| __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
|
| __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
|
| __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
|
| + __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
|
|
|
| __ bind(&done);
|
| - __ mov(Operand(esp, EspIndexForPushAll(input_reg) * kPointerSize), tmp);
|
| -
|
| __ PopSafepointRegisters();
|
| }
|
|
|
|
|
| +void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
|
| + Register input_reg = ToRegister(instr->InputAt(0));
|
| + __ test(input_reg, Operand(input_reg));
|
| + Label is_positive;
|
| + __ j(not_sign, &is_positive);
|
| + __ neg(input_reg);
|
| + __ test(input_reg, Operand(input_reg));
|
| + DeoptimizeIf(negative, instr->environment());
|
| + __ bind(&is_positive);
|
| +}
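The sign re-test after `neg` exists because negating the most negative 32-bit integer wraps and stays negative, so its absolute value cannot be represented as an int32. A small plain-C++ sketch of the same check (not part of the patch):

    #include <cstdint>

    // Returns false exactly in the case that makes the generated code deoptimize.
    bool Int32Abs(int32_t input, int32_t* result) {
      if (input >= 0) {
        *result = input;
        return true;
      }
      // Negate via unsigned arithmetic to avoid signed-overflow UB for INT32_MIN.
      int32_t negated = static_cast<int32_t>(0u - static_cast<uint32_t>(input));
      if (negated < 0) return false;               // mirrors DeoptimizeIf(negative, ...)
      *result = negated;
      return true;
    }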
|
| +
|
| +
|
| void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
|
| // Class for deferred case.
|
| class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
|
| @@ -2176,41 +2401,25 @@
|
| LUnaryMathOperation* instr_;
|
| };
|
|
|
| - ASSERT(instr->input()->Equals(instr->result()));
|
| + ASSERT(instr->InputAt(0)->Equals(instr->result()));
|
| Representation r = instr->hydrogen()->value()->representation();
|
|
|
| if (r.IsDouble()) {
|
| XMMRegister scratch = xmm0;
|
| - XMMRegister input_reg = ToDoubleRegister(instr->input());
|
| + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
|
| __ pxor(scratch, scratch);
|
| __ subsd(scratch, input_reg);
|
| __ pand(input_reg, scratch);
|
| } else if (r.IsInteger32()) {
|
| - Register input_reg = ToRegister(instr->input());
|
| - __ test(input_reg, Operand(input_reg));
|
| - Label is_positive;
|
| - __ j(not_sign, &is_positive);
|
| - __ neg(input_reg);
|
| - __ test(input_reg, Operand(input_reg));
|
| - DeoptimizeIf(negative, instr->environment());
|
| - __ bind(&is_positive);
|
| + EmitIntegerMathAbs(instr);
|
| } else { // Tagged case.
|
| DeferredMathAbsTaggedHeapNumber* deferred =
|
| new DeferredMathAbsTaggedHeapNumber(this, instr);
|
| - Label not_smi;
|
| - Register input_reg = ToRegister(instr->input());
|
| + Register input_reg = ToRegister(instr->InputAt(0));
|
| // Smi check.
|
| __ test(input_reg, Immediate(kSmiTagMask));
|
| __ j(not_zero, deferred->entry());
|
| - __ test(input_reg, Operand(input_reg));
|
| - Label is_positive;
|
| - __ j(not_sign, &is_positive);
|
| - __ neg(input_reg);
|
| -
|
| - __ test(input_reg, Operand(input_reg));
|
| - DeoptimizeIf(negative, instr->environment());
|
| -
|
| - __ bind(&is_positive);
|
| + EmitIntegerMathAbs(instr);
|
| __ bind(deferred->exit());
|
| }
|
| }
|
| @@ -2219,7 +2428,7 @@
|
| void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
|
| XMMRegister xmm_scratch = xmm0;
|
| Register output_reg = ToRegister(instr->result());
|
| - XMMRegister input_reg = ToDoubleRegister(instr->input());
|
| + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
|
| __ xorpd(xmm_scratch, xmm_scratch); // Zero the register.
|
| __ ucomisd(input_reg, xmm_scratch);
|
|
|
| @@ -2241,7 +2450,7 @@
|
| void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
|
| XMMRegister xmm_scratch = xmm0;
|
| Register output_reg = ToRegister(instr->result());
|
| - XMMRegister input_reg = ToDoubleRegister(instr->input());
|
| + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
|
|
|
| // xmm_scratch = 0.5
|
| ExternalReference one_half = ExternalReference::address_of_one_half();
|
| @@ -2274,7 +2483,7 @@
|
|
|
|
|
| void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
|
| - XMMRegister input_reg = ToDoubleRegister(instr->input());
|
| + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
|
| ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
|
| __ sqrtsd(input_reg, input_reg);
|
| }
|
| @@ -2282,20 +2491,22 @@
|
|
|
| void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
|
| XMMRegister xmm_scratch = xmm0;
|
| - XMMRegister input_reg = ToDoubleRegister(instr->input());
|
| + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
|
| ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
|
| ExternalReference negative_infinity =
|
| ExternalReference::address_of_negative_infinity();
|
| __ movdbl(xmm_scratch, Operand::StaticVariable(negative_infinity));
|
| __ ucomisd(xmm_scratch, input_reg);
|
| DeoptimizeIf(equal, instr->environment());
|
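| + // sqrtsd would return -0 for an input of -0, but Math.pow(-0, 0.5) must
|
| + // be +0, so add +0 first to canonicalize the sign of zero.
|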
| + __ xorpd(xmm_scratch, xmm_scratch);
|
| + __ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
|
| __ sqrtsd(input_reg, input_reg);
|
| }
|
|
|
|
|
| void LCodeGen::DoPower(LPower* instr) {
|
| - LOperand* left = instr->left();
|
| - LOperand* right = instr->right();
|
| + LOperand* left = instr->InputAt(0);
|
| + LOperand* right = instr->InputAt(1);
|
| DoubleRegister result_reg = ToDoubleRegister(instr->result());
|
| Representation exponent_type = instr->hydrogen()->right()->representation();
|
| if (exponent_type.IsDouble()) {
|
| @@ -2353,7 +2564,7 @@
|
| ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
|
| TranscendentalCacheStub stub(TranscendentalCache::LOG,
|
| TranscendentalCacheStub::UNTAGGED);
|
| - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
|
| }
|
|
|
|
|
| @@ -2361,7 +2572,7 @@
|
| ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
|
| TranscendentalCacheStub stub(TranscendentalCache::COS,
|
| TranscendentalCacheStub::UNTAGGED);
|
| - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
|
| }
|
|
|
|
|
| @@ -2369,7 +2580,7 @@
|
| ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
|
| TranscendentalCacheStub stub(TranscendentalCache::SIN,
|
| TranscendentalCacheStub::UNTAGGED);
|
| - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
|
| }
|
|
|
|
|
| @@ -2407,45 +2618,46 @@
|
|
|
|
|
| void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(esi));
|
| + ASSERT(ToRegister(instr->key()).is(ecx));
|
| ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
| int arity = instr->arity();
|
| Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
|
| CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| void LCodeGen::DoCallNamed(LCallNamed* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(esi));
|
| ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
| int arity = instr->arity();
|
| Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
|
| __ mov(ecx, instr->name());
|
| CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| void LCodeGen::DoCallFunction(LCallFunction* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(esi));
|
| ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
| int arity = instr->arity();
|
| CallFunctionStub stub(arity, NOT_IN_LOOP, RECEIVER_MIGHT_BE_VALUE);
|
| CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| __ Drop(1);
|
| - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(esi));
|
| ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
| int arity = instr->arity();
|
| Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
|
| __ mov(ecx, instr->name());
|
| CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
|
| - __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| @@ -2457,7 +2669,8 @@
|
|
|
|
|
| void LCodeGen::DoCallNew(LCallNew* instr) {
|
| - ASSERT(ToRegister(instr->input()).is(edi));
|
| + ASSERT(ToRegister(instr->context()).is(esi));
|
| + ASSERT(ToRegister(instr->constructor()).is(edi));
|
| ASSERT(ToRegister(instr->result()).is(eax));
|
|
|
| Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
|
| @@ -2467,7 +2680,7 @@
|
|
|
|
|
| void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
|
| - CallRuntime(instr->function(), instr->arity(), instr);
|
| + CallRuntime(instr->function(), instr->arity(), instr, false);
|
| }
|
|
|
|
|
| @@ -2484,12 +2697,12 @@
|
| if (instr->is_in_object()) {
|
| __ mov(FieldOperand(object, offset), value);
|
| if (instr->needs_write_barrier()) {
|
| - Register temp = ToRegister(instr->temp());
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| // Update the write barrier for the object for in-object properties.
|
| __ RecordWrite(object, offset, value, temp, kSaveFPRegs);
|
| }
|
| } else {
|
| - Register temp = ToRegister(instr->temp());
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| __ mov(temp, FieldOperand(object, JSObject::kPropertiesOffset));
|
| __ mov(FieldOperand(temp, offset), value);
|
| if (instr->needs_write_barrier()) {
|
| @@ -2502,6 +2715,7 @@
|
|
|
|
|
| void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(esi));
|
| ASSERT(ToRegister(instr->object()).is(edx));
|
| ASSERT(ToRegister(instr->value()).is(eax));
|
|
|
| @@ -2530,20 +2744,27 @@
|
| ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
|
| __ mov(FieldOperand(elements, offset), value);
|
| } else {
|
| - __ mov(FieldOperand(elements, key, times_4, FixedArray::kHeaderSize),
|
| + __ mov(FieldOperand(elements,
|
| + key,
|
| + times_pointer_size,
|
| + FixedArray::kHeaderSize),
|
| value);
|
| }
|
|
|
| - // Update the write barrier unless we're certain that we're storing a smi.
|
| if (instr->hydrogen()->NeedsWriteBarrier()) {
|
| // Compute address of modified element and store it into key register.
|
| - __ lea(key, FieldOperand(elements, key, times_4, FixedArray::kHeaderSize));
|
| + __ lea(key,
|
| + FieldOperand(elements,
|
| + key,
|
| + times_pointer_size,
|
| + FixedArray::kHeaderSize));
|
| __ RecordWrite(elements, key, value, kSaveFPRegs);
|
| }
|
| }
|
|
|
|
|
| void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(esi));
|
| ASSERT(ToRegister(instr->object()).is(edx));
|
| ASSERT(ToRegister(instr->key()).is(ecx));
|
| ASSERT(ToRegister(instr->value()).is(eax));
|
| @@ -2553,8 +2774,154 @@
|
| }
|
|
|
|
|
| +void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
|
| + class DeferredStringCharCodeAt: public LDeferredCode {
|
| + public:
|
| + DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
|
| + : LDeferredCode(codegen), instr_(instr) { }
|
| + virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
|
| + private:
|
| + LStringCharCodeAt* instr_;
|
| + };
|
| +
|
| + Register string = ToRegister(instr->string());
|
| + Register index = no_reg;
|
| + int const_index = -1;
|
| + if (instr->index()->IsConstantOperand()) {
|
| + const_index = ToInteger32(LConstantOperand::cast(instr->index()));
|
| + STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
|
| + if (!Smi::IsValid(const_index)) {
|
| + // The index is guaranteed to be out of bounds because of the
|
| + // STATIC_ASSERT above, so the bounds check that dominates this
|
| + // instruction must already have deoptimized.
|
| + if (FLAG_debug_code) {
|
| + __ Abort("StringCharCodeAt: out of bounds index.");
|
| + }
|
| + // No code needs to be generated.
|
| + return;
|
| + }
|
| + } else {
|
| + index = ToRegister(instr->index());
|
| + }
|
| + Register result = ToRegister(instr->result());
|
| +
|
| + DeferredStringCharCodeAt* deferred =
|
| + new DeferredStringCharCodeAt(this, instr);
|
| +
|
| + NearLabel flat_string, ascii_string, done;
|
| +
|
| + // Fetch the instance type of the receiver into result register.
|
| + __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
|
| + __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
|
| +
|
| + // We need special handling for non-flat strings.
|
| + STATIC_ASSERT(kSeqStringTag == 0);
|
| + __ test(result, Immediate(kStringRepresentationMask));
|
| + __ j(zero, &flat_string);
|
| +
|
| + // Handle non-flat strings.
|
| + __ test(result, Immediate(kIsConsStringMask));
|
| + __ j(zero, deferred->entry());
|
| +
|
| + // ConsString.
|
| + // Check whether the right hand side is the empty string (i.e. if
|
| + // this is really a flat string in a cons string). If that is not
|
| + // the case, go to the runtime system to flatten the string first.
|
| + __ cmp(FieldOperand(string, ConsString::kSecondOffset),
|
| + Immediate(Factory::empty_string()));
|
| + __ j(not_equal, deferred->entry());
|
| + // Get the first of the two strings and load its instance type.
|
| + __ mov(string, FieldOperand(string, ConsString::kFirstOffset));
|
| + __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
|
| + __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
|
| + // If the first cons component is also non-flat, then go to runtime.
|
| + STATIC_ASSERT(kSeqStringTag == 0);
|
| + __ test(result, Immediate(kStringRepresentationMask));
|
| + __ j(not_zero, deferred->entry());
|
| +
|
| + // Check for 1-byte or 2-byte string.
|
| + __ bind(&flat_string);
|
| + STATIC_ASSERT(kAsciiStringTag != 0);
|
| + __ test(result, Immediate(kStringEncodingMask));
|
| + __ j(not_zero, &ascii_string);
|
| +
|
| + // 2-byte string.
|
| + // Load the 2-byte character code into the result register.
|
| + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
|
| + if (instr->index()->IsConstantOperand()) {
|
| + __ movzx_w(result,
|
| + FieldOperand(string,
|
| + SeqTwoByteString::kHeaderSize + 2 * const_index));
|
| + } else {
|
| + __ movzx_w(result, FieldOperand(string,
|
| + index,
|
| + times_2,
|
| + SeqTwoByteString::kHeaderSize));
|
| + }
|
| + __ jmp(&done);
|
| +
|
| + // ASCII string.
|
| + // Load the byte into the result register.
|
| + __ bind(&ascii_string);
|
| + if (instr->index()->IsConstantOperand()) {
|
| + __ movzx_b(result, FieldOperand(string,
|
| + SeqAsciiString::kHeaderSize + const_index));
|
| + } else {
|
| + __ movzx_b(result, FieldOperand(string,
|
| + index,
|
| + times_1,
|
| + SeqAsciiString::kHeaderSize));
|
| + }
|
| + __ bind(&done);
|
| + __ bind(deferred->exit());
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
|
| + Register string = ToRegister(instr->string());
|
| + Register result = ToRegister(instr->result());
|
| +
|
| + // TODO(3095996): Get rid of this. For now, we need to make the
|
| + // result register contain a valid pointer because it is already
|
| + // contained in the register pointer map.
|
| + __ Set(result, Immediate(0));
|
| +
|
| + __ PushSafepointRegisters();
|
| + __ push(string);
|
| + // Push the index as a smi. This is safe because of the checks in
|
| + // DoStringCharCodeAt above.
|
| + STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
|
| + if (instr->index()->IsConstantOperand()) {
|
| + int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
|
| + __ push(Immediate(Smi::FromInt(const_index)));
|
| + } else {
|
| + Register index = ToRegister(instr->index());
|
| + __ SmiTag(index);
|
| + __ push(index);
|
| + }
|
| + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| + __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
|
| + RecordSafepointWithRegisters(
|
| + instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotSmi(eax);
|
| + }
|
| + __ SmiUntag(eax);
|
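| + // Store the untagged char code into the spilled slot for |result| so that
|
| + // PopSafepointRegisters restores it into the result register.
|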
| + __ mov(Operand(esp, EspIndexForPushAll(result) * kPointerSize), eax);
|
| + __ PopSafepointRegisters();
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoStringLength(LStringLength* instr) {
|
| + Register string = ToRegister(instr->string());
|
| + Register result = ToRegister(instr->result());
|
| + __ mov(result, FieldOperand(string, String::kLengthOffset));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister() || input->IsStackSlot());
|
| LOperand* output = instr->result();
|
| ASSERT(output->IsDoubleRegister());
|
| @@ -2572,7 +2939,7 @@
|
| LNumberTagI* instr_;
|
| };
|
|
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
| Register reg = ToRegister(input);
|
|
|
| @@ -2585,7 +2952,7 @@
|
|
|
| void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
|
| Label slow;
|
| - Register reg = ToRegister(instr->input());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| Register tmp = reg.is(eax) ? ecx : eax;
|
|
|
| // Preserve the value of all registers.
|
| @@ -2611,6 +2978,7 @@
|
| // integer value.
|
| __ mov(Operand(esp, EspIndexForPushAll(reg) * kPointerSize), Immediate(0));
|
|
|
| + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
|
| RecordSafepointWithRegisters(
|
| instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
|
| @@ -2635,9 +3003,9 @@
|
| LNumberTagD* instr_;
|
| };
|
|
|
| - XMMRegister input_reg = ToDoubleRegister(instr->input());
|
| + XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
|
| Register reg = ToRegister(instr->result());
|
| - Register tmp = ToRegister(instr->temp());
|
| + Register tmp = ToRegister(instr->TempAt(0));
|
|
|
| DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
|
| if (FLAG_inline_new) {
|
| @@ -2658,6 +3026,7 @@
|
| __ Set(reg, Immediate(0));
|
|
|
| __ PushSafepointRegisters();
|
| + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
| __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
|
| RecordSafepointWithRegisters(
|
| instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
|
| @@ -2667,7 +3036,7 @@
|
|
|
|
|
| void LCodeGen::DoSmiTag(LSmiTag* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
| ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
|
| __ SmiTag(ToRegister(input));
|
| @@ -2675,7 +3044,7 @@
|
|
|
|
|
| void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
| if (instr->needs_check()) {
|
| __ test(ToRegister(input), Immediate(kSmiTagMask));
|
| @@ -2735,7 +3104,7 @@
|
|
|
| void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
|
| NearLabel done, heap_number;
|
| - Register input_reg = ToRegister(instr->input());
|
| + Register input_reg = ToRegister(instr->InputAt(0));
|
|
|
| // Heap number map check.
|
| __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
|
| @@ -2778,7 +3147,7 @@
|
| __ add(Operand(esp), Immediate(kDoubleSize));
|
| } else {
|
| NearLabel deopt;
|
| - XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
|
| + XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
|
| __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
|
| __ cvttsd2si(input_reg, Operand(xmm0));
|
| __ cmp(input_reg, 0x80000000u);
|
| @@ -2795,7 +3164,7 @@
|
| // Deoptimize if we don't have a heap number.
|
| DeoptimizeIf(not_equal, instr->environment());
|
|
|
| - XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
|
| + XMMRegister xmm_temp = ToDoubleRegister(instr->TempAt(0));
|
| __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
|
| __ cvttsd2si(input_reg, Operand(xmm0));
|
| __ cvtsi2sd(xmm_temp, Operand(input_reg));
|
| @@ -2815,7 +3184,7 @@
|
|
|
|
|
| void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister());
|
| ASSERT(input->Equals(instr->result()));
|
|
|
| @@ -2835,7 +3204,7 @@
|
|
|
|
|
| void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister());
|
| LOperand* result = instr->result();
|
| ASSERT(result->IsDoubleRegister());
|
| @@ -2848,7 +3217,7 @@
|
|
|
|
|
| void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsDoubleRegister());
|
| LOperand* result = instr->result();
|
| ASSERT(result->IsRegister());
|
| @@ -2885,9 +3254,60 @@
|
| __ add(Operand(esp), Immediate(kDoubleSize));
|
| __ bind(&done);
|
| } else {
|
| - // This will bail out if the input was not in the int32 range (or,
|
| - // unfortunately, if the input was 0x80000000).
|
| - DeoptimizeIf(equal, instr->environment());
|
| + NearLabel done;
|
| + Register temp_reg = ToRegister(instr->TempAt(0));
|
| + XMMRegister xmm_scratch = xmm0;
|
| +
|
| + // If cvttsd2si succeeded, we're done. Otherwise, we attempt
|
| + // manual conversion.
|
| + __ j(not_equal, &done);
|
| +
|
| + // Get high 32 bits of the input in result_reg and temp_reg.
|
| + __ pshufd(xmm_scratch, input_reg, 1);
|
| + __ movd(Operand(temp_reg), xmm_scratch);
|
| + __ mov(result_reg, temp_reg);
|
| +
|
| + // Prepare negation mask in temp_reg.
|
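| + // An arithmetic shift by 31 yields all ones for a negative input and zero
|
| + // otherwise.
|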
| + __ sar(temp_reg, kBitsPerInt - 1);
|
| +
|
| + // Extract the exponent from result_reg and subtract the adjusted
|
| + // bias from it. The adjustment is chosen so that a difference of
|
| + // zero means the answer is already in the low 32 bits of the input;
|
| + // otherwise a shift has to be performed.
|
| + __ shr(result_reg, HeapNumber::kExponentShift);
|
| + __ and_(result_reg,
|
| + HeapNumber::kExponentMask >> HeapNumber::kExponentShift);
|
| + __ sub(Operand(result_reg),
|
| + Immediate(HeapNumber::kExponentBias +
|
| + HeapNumber::kExponentBits +
|
| + HeapNumber::kMantissaBits));
|
| + // Don't handle exponents that are too big (> kMantissaBits +
|
| + // kExponentBits == 63) or special exponents (NaN, infinity).
|
| + DeoptimizeIf(greater, instr->environment());
|
| +
|
| + // Zero out the sign and the exponent in the input (by shifting
|
| + // it to the left) and restore the implicit mantissa bit,
|
| + // i.e. convert the input to unsigned int64 shifted left by
|
| + // kExponentBits.
|
| + ExternalReference minus_zero = ExternalReference::address_of_minus_zero();
|
| + // Minus zero has the most significant bit set and the other
|
| + // bits cleared.
|
| + __ movdbl(xmm_scratch, Operand::StaticVariable(minus_zero));
|
| + __ psllq(input_reg, HeapNumber::kExponentBits);
|
| + __ por(input_reg, xmm_scratch);
|
| +
|
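| + // result_reg now holds (unbiased exponent - 63); its negation is the
|
| + // right shift count that brings the integer value into the low 32 bits.
|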
| + // Get the amount to shift the input right in xmm_scratch.
|
| + __ neg(result_reg);
|
| + __ movd(xmm_scratch, Operand(result_reg));
|
| +
|
| + // Shift the input right and extract low 32 bits.
|
| + __ psrlq(input_reg, xmm_scratch);
|
| + __ movd(Operand(result_reg), input_reg);
|
| +
|
| + // Use the prepared mask in temp_reg to negate the result if necessary.
|
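| + // (x ^ mask) - mask yields -x when mask is all ones and x when it is zero.
|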
| + __ xor_(result_reg, Operand(temp_reg));
|
| + __ sub(result_reg, Operand(temp_reg));
|
| + __ bind(&done);
|
| }
|
| } else {
|
| NearLabel done;
|
| @@ -2914,7 +3334,7 @@
|
|
|
|
|
| void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister());
|
| __ test(ToRegister(input), Immediate(kSmiTagMask));
|
| DeoptimizeIf(instr->condition(), instr->environment());
|
| @@ -2922,19 +3342,25 @@
|
|
|
|
|
| void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
|
| - Register input = ToRegister(instr->input());
|
| - Register temp = ToRegister(instr->temp());
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| InstanceType first = instr->hydrogen()->first();
|
| InstanceType last = instr->hydrogen()->last();
|
|
|
| __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
|
| - __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
|
| - static_cast<int8_t>(first));
|
|
|
| // If there is only one type in the interval check for equality.
|
| if (first == last) {
|
| + __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
|
| + static_cast<int8_t>(first));
|
| DeoptimizeIf(not_equal, instr->environment());
|
| - } else {
|
| + } else if (first == FIRST_STRING_TYPE && last == LAST_STRING_TYPE) {
|
| + // String has a dedicated bit in instance type.
|
| + __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), kIsNotStringMask);
|
| + DeoptimizeIf(not_zero, instr->environment());
|
| + } else {
|
| + __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
|
| + static_cast<int8_t>(first));
|
| DeoptimizeIf(below, instr->environment());
|
| // Omit check for the last type.
|
| if (last != LAST_TYPE) {
|
| @@ -2947,15 +3373,15 @@
|
|
|
|
|
| void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
|
| - ASSERT(instr->input()->IsRegister());
|
| - Register reg = ToRegister(instr->input());
|
| + ASSERT(instr->InputAt(0)->IsRegister());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| __ cmp(reg, instr->hydrogen()->target());
|
| DeoptimizeIf(not_equal, instr->environment());
|
| }
|
|
|
|
|
| void LCodeGen::DoCheckMap(LCheckMap* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister());
|
| Register reg = ToRegister(input);
|
| __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
|
| @@ -2964,26 +3390,25 @@
|
| }
|
|
|
|
|
| -void LCodeGen::LoadPrototype(Register result, Handle<JSObject> prototype) {
|
| - if (Heap::InNewSpace(*prototype)) {
|
| +void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
|
| + if (Heap::InNewSpace(*object)) {
|
| Handle<JSGlobalPropertyCell> cell =
|
| - Factory::NewJSGlobalPropertyCell(prototype);
|
| + Factory::NewJSGlobalPropertyCell(object);
|
| __ mov(result, Operand::Cell(cell));
|
| } else {
|
| - __ mov(result, prototype);
|
| + __ mov(result, object);
|
| }
|
| }
|
|
|
|
|
| void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
|
| - Register reg = ToRegister(instr->temp());
|
| + Register reg = ToRegister(instr->TempAt(0));
|
|
|
| Handle<JSObject> holder = instr->holder();
|
| - Handle<Map> receiver_map = instr->receiver_map();
|
| - Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
|
| + Handle<JSObject> current_prototype = instr->prototype();
|
|
|
| // Load prototype object.
|
| - LoadPrototype(reg, current_prototype);
|
| + LoadHeapObject(reg, current_prototype);
|
|
|
| // Check prototype maps up to the holder.
|
| while (!current_prototype.is_identical_to(holder)) {
|
| @@ -2993,7 +3418,7 @@
|
| current_prototype =
|
| Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
|
| // Load next prototype object.
|
| - LoadPrototype(reg, current_prototype);
|
| + LoadHeapObject(reg, current_prototype);
|
| }
|
|
|
| // Check the holder map.
|
| @@ -3017,21 +3442,22 @@
|
| FastCloneShallowArrayStub::Mode mode =
|
| FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
|
| FastCloneShallowArrayStub stub(mode, length);
|
| - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
|
| } else if (instr->hydrogen()->depth() > 1) {
|
| - CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
|
| + CallRuntime(Runtime::kCreateArrayLiteral, 3, instr, false);
|
| } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
|
| - CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
|
| + CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr, false);
|
| } else {
|
| FastCloneShallowArrayStub::Mode mode =
|
| FastCloneShallowArrayStub::CLONE_ELEMENTS;
|
| FastCloneShallowArrayStub stub(mode, length);
|
| - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
|
| }
|
| }
|
|
|
|
|
| void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
|
| + ASSERT(ToRegister(instr->context()).is(esi));
|
| // Setup the parameters to the stub/runtime call.
|
| __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
|
| __ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
|
| @@ -3069,7 +3495,7 @@
|
| __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
|
| __ push(Immediate(instr->hydrogen()->pattern()));
|
| __ push(Immediate(instr->hydrogen()->flags()));
|
| - CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
|
| + CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr, false);
|
| __ mov(ebx, eax);
|
|
|
| __ bind(&materialized);
|
| @@ -3081,7 +3507,7 @@
|
| __ bind(&runtime_allocate);
|
| __ push(ebx);
|
| __ push(Immediate(Smi::FromInt(size)));
|
| - CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
|
| + CallRuntime(Runtime::kAllocateInNewSpace, 1, instr, false);
|
| __ pop(ebx);
|
|
|
| __ bind(&allocated);
|
| @@ -3104,35 +3530,35 @@
|
| // Use the fast case closure allocation code that allocates in new
|
| // space for nested functions that don't need literals cloning.
|
| Handle<SharedFunctionInfo> shared_info = instr->shared_info();
|
| - bool pretenure = !instr->hydrogen()->pretenure();
|
| + bool pretenure = instr->hydrogen()->pretenure();
|
| if (shared_info->num_literals() == 0 && !pretenure) {
|
| FastNewClosureStub stub;
|
| __ push(Immediate(shared_info));
|
| - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
|
| } else {
|
| - __ push(esi);
|
| + __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
|
| __ push(Immediate(shared_info));
|
| __ push(Immediate(pretenure
|
| ? Factory::true_value()
|
| : Factory::false_value()));
|
| - CallRuntime(Runtime::kNewClosure, 3, instr);
|
| + CallRuntime(Runtime::kNewClosure, 3, instr, false);
|
| }
|
| }
|
|
|
|
|
| void LCodeGen::DoTypeof(LTypeof* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| if (input->IsConstantOperand()) {
|
| __ push(ToImmediate(input));
|
| } else {
|
| __ push(ToOperand(input));
|
| }
|
| - CallRuntime(Runtime::kTypeof, 1, instr);
|
| + CallRuntime(Runtime::kTypeof, 1, instr, false);
|
| }
|
|
|
|
|
| void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
|
| - Register input = ToRegister(instr->input());
|
| + Register input = ToRegister(instr->InputAt(0));
|
| Register result = ToRegister(instr->result());
|
| Label true_label;
|
| Label false_label;
|
| @@ -3144,18 +3570,18 @@
|
| instr->type_literal());
|
| __ j(final_branch_condition, &true_label);
|
| __ bind(&false_label);
|
| - __ mov(result, Handle<Object>(Heap::false_value()));
|
| + __ mov(result, Factory::false_value());
|
| __ jmp(&done);
|
|
|
| __ bind(&true_label);
|
| - __ mov(result, Handle<Object>(Heap::true_value()));
|
| + __ mov(result, Factory::true_value());
|
|
|
| __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
|
| - Register input = ToRegister(instr->input());
|
| + Register input = ToRegister(instr->InputAt(0));
|
| int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| Label* true_label = chunk_->GetAssemblyLabel(true_block);
|
| @@ -3193,9 +3619,9 @@
|
| final_branch_condition = below;
|
|
|
| } else if (type_name->Equals(Heap::boolean_symbol())) {
|
| - __ cmp(input, Handle<Object>(Heap::true_value()));
|
| + __ cmp(input, Factory::true_value());
|
| __ j(equal, true_label);
|
| - __ cmp(input, Handle<Object>(Heap::false_value()));
|
| + __ cmp(input, Factory::false_value());
|
| final_branch_condition = equal;
|
|
|
| } else if (type_name->Equals(Heap::undefined_symbol())) {
|
| @@ -3246,6 +3672,53 @@
|
| }
|
|
|
|
|
| +void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
|
| + Register result = ToRegister(instr->result());
|
| + NearLabel true_label;
|
| + NearLabel done;
|
| +
|
| + EmitIsConstructCall(result);
|
| + __ j(equal, &true_label);
|
| +
|
| + __ mov(result, Factory::false_value());
|
| + __ jmp(&done);
|
| +
|
| + __ bind(&true_label);
|
| + __ mov(result, Factory::true_value());
|
| +
|
| + __ bind(&done);
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| + int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| + int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| +
|
| + EmitIsConstructCall(temp);
|
| + EmitBranch(true_block, false_block, equal);
|
| +}
|
| +
|
| +
|
| +void LCodeGen::EmitIsConstructCall(Register temp) {
|
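| + // Sets the 'equal' condition flag when the calling frame is a construct
|
| + // frame; callers branch on that condition.
|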
| + // Get the frame pointer for the calling frame.
|
| + __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
|
| +
|
| + // Skip the arguments adaptor frame if it exists.
|
| + NearLabel check_frame_marker;
|
| + __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
|
| + Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
| + __ j(not_equal, &check_frame_marker);
|
| + __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
|
| +
|
| + // Check the marker in the calling frame.
|
| + __ bind(&check_frame_marker);
|
| + __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
|
| + Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
|
| // No code for lazy bailout instruction. Used to capture environment after a
|
| // call for populating the safepoint data with deoptimization data.
|
| @@ -3266,10 +3739,20 @@
|
| } else {
|
| __ push(ToOperand(key));
|
| }
|
| - RecordPosition(instr->pointer_map()->position());
|
| + ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
|
| + LPointerMap* pointers = instr->pointer_map();
|
| + LEnvironment* env = instr->deoptimization_environment();
|
| + RecordPosition(pointers->position());
|
| + RegisterEnvironmentForDeoptimization(env);
|
| + // Create a safepoint generator that also ensures enough space in the
|
| + // reloc info for patching during deoptimization (since this invokes
|
| + // a builtin).
|
| SafepointGenerator safepoint_generator(this,
|
| - instr->pointer_map(),
|
| - Safepoint::kNoDeoptimizationIndex);
|
| + pointers,
|
| + env->deoptimization_index(),
|
| + true);
|
| + __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
|
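| + // Pass the strict mode flag to the DELETE builtin as an additional
|
| + // argument.
|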
| + __ push(Immediate(Smi::FromInt(strict_mode_flag())));
|
| __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION, &safepoint_generator);
|
| }
|
|
|
| @@ -3282,7 +3765,7 @@
|
| __ j(above_equal, &done);
|
|
|
| StackCheckStub stub;
|
| - CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr, false);
|
| __ bind(&done);
|
| }
|
|
|
| @@ -3307,3 +3790,5 @@
|
| #undef __
|
|
|
| } } // namespace v8::internal
|
| +
|
| +#endif // V8_TARGET_ARCH_IA32
|
|
|