| Index: src/arm/lithium-codegen-arm.cc
|
| ===================================================================
|
| --- src/arm/lithium-codegen-arm.cc (revision 6800)
|
| +++ src/arm/lithium-codegen-arm.cc (working copy)
|
| @@ -1,4 +1,4 @@
|
| -// Copyright 2010 the V8 project authors. All rights reserved.
|
| +// Copyright 2011 the V8 project authors. All rights reserved.
|
| // Redistribution and use in source and binary forms, with or without
|
| // modification, are permitted provided that the following conditions are
|
| // met:
|
| @@ -54,6 +54,157 @@
|
| };
|
|
|
|
|
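| +// A node in the move graph built by LGapResolver. Each node represents
|
| +// one operand; assigned_from() is the node whose value it receives.
|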
| +class LGapNode: public ZoneObject {
|
| + public:
|
| + explicit LGapNode(LOperand* operand)
|
| + : operand_(operand), resolved_(false), visited_id_(-1) { }
|
| +
|
| + LOperand* operand() const { return operand_; }
|
| + bool IsResolved() const { return !IsAssigned() || resolved_; }
|
| + void MarkResolved() {
|
| + ASSERT(!IsResolved());
|
| + resolved_ = true;
|
| + }
|
| + int visited_id() const { return visited_id_; }
|
| + void set_visited_id(int id) {
|
| + ASSERT(id > visited_id_);
|
| + visited_id_ = id;
|
| + }
|
| +
|
| + bool IsAssigned() const { return assigned_from_.is_set(); }
|
| + LGapNode* assigned_from() const { return assigned_from_.get(); }
|
| + void set_assigned_from(LGapNode* n) { assigned_from_.set(n); }
|
| +
|
| + private:
|
| + LOperand* operand_;
|
| + SetOncePointer<LGapNode> assigned_from_;
|
| + bool resolved_;
|
| + int visited_id_;
|
| +};
|
| +
|
| +
|
| +LGapResolver::LGapResolver()
|
| + : nodes_(32),
|
| + identified_cycles_(4),
|
| + result_(16),
|
| + next_visited_id_(0) {
|
| +}
|
| +
|
| +
|
| +const ZoneList<LMoveOperands>* LGapResolver::Resolve(
|
| + const ZoneList<LMoveOperands>* moves,
|
| + LOperand* marker_operand) {
|
| + nodes_.Rewind(0);
|
| + identified_cycles_.Rewind(0);
|
| + result_.Rewind(0);
|
| + next_visited_id_ = 0;
|
| +
|
| + for (int i = 0; i < moves->length(); ++i) {
|
| + LMoveOperands move = moves->at(i);
|
| + if (!move.IsRedundant()) RegisterMove(move);
|
| + }
|
| +
|
| + for (int i = 0; i < identified_cycles_.length(); ++i) {
|
| + ResolveCycle(identified_cycles_[i], marker_operand);
|
| + }
|
| +
|
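| + // All cycles were resolved above, so the remaining assignment graph is
|
| + // acyclic and each pass below resolves at least one more node.
|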
| + int unresolved_nodes;
|
| + do {
|
| + unresolved_nodes = 0;
|
| + for (int j = 0; j < nodes_.length(); j++) {
|
| + LGapNode* node = nodes_[j];
|
| + if (!node->IsResolved() && node->assigned_from()->IsResolved()) {
|
| + AddResultMove(node->assigned_from(), node);
|
| + node->MarkResolved();
|
| + }
|
| + if (!node->IsResolved()) ++unresolved_nodes;
|
| + }
|
| + } while (unresolved_nodes > 0);
|
| + return &result_;
|
| +}
|
| +
|
| +
|
| +void LGapResolver::AddResultMove(LGapNode* from, LGapNode* to) {
|
| + AddResultMove(from->operand(), to->operand());
|
| +}
|
| +
|
| +
|
| +void LGapResolver::AddResultMove(LOperand* from, LOperand* to) {
|
| + result_.Add(LMoveOperands(from, to));
|
| +}
|
| +
|
| +
|
| +void LGapResolver::ResolveCycle(LGapNode* start, LOperand* marker_operand) {
|
| + ZoneList<LOperand*> cycle_operands(8);
|
| + cycle_operands.Add(marker_operand);
|
| + LGapNode* cur = start;
|
| + do {
|
| + cur->MarkResolved();
|
| + cycle_operands.Add(cur->operand());
|
| + cur = cur->assigned_from();
|
| + } while (cur != start);
|
| + cycle_operands.Add(marker_operand);
|
| +
|
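| + // The marker operand bounds the cycle at both ends. Since the caller
|
| + // emits the result list in reverse order, the first move executed
|
| + // saves one cycle element into the marker and the last one restores it.
|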
| + for (int i = cycle_operands.length() - 1; i > 0; --i) {
|
| + LOperand* from = cycle_operands[i];
|
| + LOperand* to = cycle_operands[i - 1];
|
| + AddResultMove(from, to);
|
| + }
|
| +}
|
| +
|
| +
|
| +bool LGapResolver::CanReach(LGapNode* a, LGapNode* b, int visited_id) {
|
| + ASSERT(a != b);
|
| + LGapNode* cur = a;
|
| + while (cur != b && cur->visited_id() != visited_id && cur->IsAssigned()) {
|
| + cur->set_visited_id(visited_id);
|
| + cur = cur->assigned_from();
|
| + }
|
| +
|
| + return cur == b;
|
| +}
|
| +
|
| +
|
| +bool LGapResolver::CanReach(LGapNode* a, LGapNode* b) {
|
| + ASSERT(a != b);
|
| + return CanReach(a, b, next_visited_id_++);
|
| +}
|
| +
|
| +
|
| +void LGapResolver::RegisterMove(LMoveOperands move) {
|
| + if (move.source()->IsConstantOperand()) {
|
| + // Constant moves should come last in the emitted machine code, and the
|
| + // caller consumes the result list in reverse order, so add them first.
|
| + AddResultMove(move.source(), move.destination());
|
| + } else {
|
| + LGapNode* from = LookupNode(move.source());
|
| + LGapNode* to = LookupNode(move.destination());
|
| + if (to->IsAssigned() && to->assigned_from() == from) {
|
| + move.Eliminate();
|
| + return;
|
| + }
|
| + ASSERT(!to->IsAssigned());
|
| + if (CanReach(from, to)) {
|
| + // This move introduces a cycle. Save it for later resolution.
|
| + identified_cycles_.Add(from);
|
| + }
|
| + to->set_assigned_from(from);
|
| + }
|
| +}
|
| +
|
| +
|
| +LGapNode* LGapResolver::LookupNode(LOperand* operand) {
|
| + for (int i = 0; i < nodes_.length(); ++i) {
|
| + if (nodes_[i]->operand()->Equals(operand)) return nodes_[i];
|
| + }
|
| +
|
| + // No node found => create a new one.
|
| + LGapNode* result = new LGapNode(operand);
|
| + nodes_.Add(result);
|
| + return result;
|
| +}
|
| +
|
| +
|
| #define __ masm()->
|
|
|
| bool LCodeGen::GenerateCode() {
|
| @@ -72,7 +223,7 @@
|
| void LCodeGen::FinishCode(Handle<Code> code) {
|
| ASSERT(is_done());
|
| code->set_stack_slots(StackSlotCount());
|
| - code->set_safepoint_table_start(safepoints_.GetCodeOffset());
|
| + code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
|
| PopulateDeoptimizationData(code);
|
| }
|
|
|
| @@ -190,6 +341,11 @@
|
| __ jmp(code->exit());
|
| }
|
|
|
| + // Force constant pool emission at the end of deferred code to make
|
| + // sure that no constant pools are emitted after the official end of
|
| + // the instruction sequence.
|
| + masm()->CheckConstPool(true, false);
|
| +
|
| // Deferred code is the last part of the instruction sequence. Mark
|
| // the generated code as done unless we bailed out.
|
| if (!is_aborted()) status_ = DONE;
|
| @@ -324,6 +480,45 @@
|
| }
|
|
|
|
|
| +void LCodeGen::WriteTranslation(LEnvironment* environment,
|
| + Translation* translation) {
|
| + if (environment == NULL) return;
|
| +
|
| + // The translation includes one command per value in the environment.
|
| + int translation_size = environment->values()->length();
|
| + // The output frame height does not include the parameters.
|
| + int height = translation_size - environment->parameter_count();
|
| +
|
| + WriteTranslation(environment->outer(), translation);
|
| + int closure_id = DefineDeoptimizationLiteral(environment->closure());
|
| + translation->BeginFrame(environment->ast_id(), closure_id, height);
|
| + for (int i = 0; i < translation_size; ++i) {
|
| + LOperand* value = environment->values()->at(i);
|
| + // spilled_registers_ and spilled_double_registers_ are either
|
| + // both NULL or both set.
|
| + if (environment->spilled_registers() != NULL && value != NULL) {
|
| + if (value->IsRegister() &&
|
| + environment->spilled_registers()[value->index()] != NULL) {
|
| + translation->MarkDuplicate();
|
| + AddToTranslation(translation,
|
| + environment->spilled_registers()[value->index()],
|
| + environment->HasTaggedValueAt(i));
|
| + } else if (
|
| + value->IsDoubleRegister() &&
|
| + environment->spilled_double_registers()[value->index()] != NULL) {
|
| + translation->MarkDuplicate();
|
| + AddToTranslation(
|
| + translation,
|
| + environment->spilled_double_registers()[value->index()],
|
| + false);
|
| + }
|
| + }
|
| +
|
| + AddToTranslation(translation, value, environment->HasTaggedValueAt(i));
|
| + }
|
| +}
|
| +
|
| +
|
| void LCodeGen::AddToTranslation(Translation* translation,
|
| LOperand* op,
|
| bool is_tagged) {
|
| @@ -367,17 +562,11 @@
|
| void LCodeGen::CallCode(Handle<Code> code,
|
| RelocInfo::Mode mode,
|
| LInstruction* instr) {
|
| - if (instr != NULL) {
|
| - LPointerMap* pointers = instr->pointer_map();
|
| - RecordPosition(pointers->position());
|
| - __ Call(code, mode);
|
| - RegisterLazyDeoptimization(instr);
|
| - } else {
|
| - LPointerMap no_pointers(0);
|
| - RecordPosition(no_pointers.position());
|
| - __ Call(code, mode);
|
| - RecordSafepoint(&no_pointers, Safepoint::kNoDeoptimizationIndex);
|
| - }
|
| + ASSERT(instr != NULL);
|
| + LPointerMap* pointers = instr->pointer_map();
|
| + RecordPosition(pointers->position());
|
| + __ Call(code, mode);
|
| + RegisterLazyDeoptimization(instr);
|
| }
|
|
|
|
|
| @@ -390,15 +579,7 @@
|
| RecordPosition(pointers->position());
|
|
|
| __ CallRuntime(function, num_arguments);
|
| - // Runtime calls to Throw are not supposed to ever return at the
|
| - // call site, so don't register lazy deoptimization for these. We do
|
| - // however have to record a safepoint since throwing exceptions can
|
| - // cause garbage collections.
|
| - if (!instr->IsThrow()) {
|
| - RegisterLazyDeoptimization(instr);
|
| - } else {
|
| - RecordSafepoint(instr->pointer_map(), Safepoint::kNoDeoptimizationIndex);
|
| - }
|
| + RegisterLazyDeoptimization(instr);
|
| }
|
|
|
|
|
| @@ -439,7 +620,7 @@
|
| ++frame_count;
|
| }
|
| Translation translation(&translations_, frame_count);
|
| - environment->WriteTranslation(this, &translation);
|
| + WriteTranslation(environment, &translation);
|
| int deoptimization_index = deoptimizations_.length();
|
| environment->Register(deoptimization_index, translation.index());
|
| deoptimizations_.Add(environment);
|
| @@ -466,7 +647,7 @@
|
| return;
|
| }
|
|
|
| - if (cc == no_condition) {
|
| + if (cc == al) {
|
| if (FLAG_trap_on_deopt) __ stop("trap_on_deopt");
|
| __ Jump(entry, RelocInfo::RUNTIME_ENTRY);
|
| } else {
|
| @@ -541,40 +722,52 @@
|
| }
|
|
|
|
|
| -void LCodeGen::RecordSafepoint(LPointerMap* pointers,
|
| - int deoptimization_index) {
|
| +void LCodeGen::RecordSafepoint(
|
| + LPointerMap* pointers,
|
| + Safepoint::Kind kind,
|
| + int arguments,
|
| + int deoptimization_index) {
|
| const ZoneList<LOperand*>* operands = pointers->operands();
|
| Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
|
| - deoptimization_index);
|
| + kind, arguments, deoptimization_index);
|
| for (int i = 0; i < operands->length(); i++) {
|
| LOperand* pointer = operands->at(i);
|
| if (pointer->IsStackSlot()) {
|
| safepoint.DefinePointerSlot(pointer->index());
|
| + } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
|
| + safepoint.DefinePointerRegister(ToRegister(pointer));
|
| }
|
| }
|
| + if (kind & Safepoint::kWithRegisters) {
|
| + // Register cp always contains a pointer to the context.
|
| + safepoint.DefinePointerRegister(cp);
|
| + }
|
| }
|
|
|
|
|
| +void LCodeGen::RecordSafepoint(LPointerMap* pointers,
|
| + int deoptimization_index) {
|
| + RecordSafepoint(pointers, Safepoint::kSimple, 0, deoptimization_index);
|
| +}
|
| +
|
| +
|
| void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
|
| int arguments,
|
| int deoptimization_index) {
|
| - const ZoneList<LOperand*>* operands = pointers->operands();
|
| - Safepoint safepoint =
|
| - safepoints_.DefineSafepointWithRegisters(
|
| - masm(), arguments, deoptimization_index);
|
| - for (int i = 0; i < operands->length(); i++) {
|
| - LOperand* pointer = operands->at(i);
|
| - if (pointer->IsStackSlot()) {
|
| - safepoint.DefinePointerSlot(pointer->index());
|
| - } else if (pointer->IsRegister()) {
|
| - safepoint.DefinePointerRegister(ToRegister(pointer));
|
| - }
|
| - }
|
| - // Register cp always contains a pointer to the context.
|
| - safepoint.DefinePointerRegister(cp);
|
| + RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments,
|
| + deoptimization_index);
|
| }
|
|
|
|
|
| +void LCodeGen::RecordSafepointWithRegistersAndDoubles(
|
| + LPointerMap* pointers,
|
| + int arguments,
|
| + int deoptimization_index) {
|
| + RecordSafepoint(pointers, Safepoint::kWithRegistersAndDoubles, arguments,
|
| + deoptimization_index);
|
| +}
|
| +
|
| +
|
| void LCodeGen::RecordPosition(int position) {
|
| if (!FLAG_debug_info || position == RelocInfo::kNoPosition) return;
|
| masm()->positions_recorder()->RecordPosition(position);
|
| @@ -601,12 +794,12 @@
|
| Register core_scratch = scratch0();
|
| bool destroys_core_scratch = false;
|
|
|
| - LGapResolver resolver(move->move_operands(), &marker_operand);
|
| - const ZoneList<LMoveOperands>* moves = resolver.ResolveInReverseOrder();
|
| + const ZoneList<LMoveOperands>* moves =
|
| + resolver_.Resolve(move->move_operands(), &marker_operand);
|
| for (int i = moves->length() - 1; i >= 0; --i) {
|
| LMoveOperands move = moves->at(i);
|
| - LOperand* from = move.from();
|
| - LOperand* to = move.to();
|
| + LOperand* from = move.source();
|
| + LOperand* to = move.destination();
|
| ASSERT(!from->IsDoubleRegister() ||
|
| !ToDoubleRegister(from).is(dbl_scratch));
|
| ASSERT(!to->IsDoubleRegister() || !ToDoubleRegister(to).is(dbl_scratch));
|
| @@ -748,7 +941,8 @@
|
| break;
|
| }
|
| case CodeStub::StringCharAt: {
|
| - Abort("StringCharAtStub unimplemented.");
|
| + StringCharAtStub stub;
|
| + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| break;
|
| }
|
| case CodeStub::MathPow: {
|
| @@ -771,7 +965,9 @@
|
| break;
|
| }
|
| case CodeStub::TranscendentalCache: {
|
| - Abort("TranscendentalCache unimplemented.");
|
| + __ ldr(r0, MemOperand(sp, 0));
|
| + TranscendentalCacheStub stub(instr->transcendental_type());
|
| + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| break;
|
| }
|
| default:
|
| @@ -786,28 +982,199 @@
|
|
|
|
|
| void LCodeGen::DoModI(LModI* instr) {
|
| - Abort("DoModI unimplemented.");
|
| + class DeferredModI: public LDeferredCode {
|
| + public:
|
| + DeferredModI(LCodeGen* codegen, LModI* instr)
|
| + : LDeferredCode(codegen), instr_(instr) { }
|
| + virtual void Generate() {
|
| + codegen()->DoDeferredGenericBinaryStub(instr_, Token::MOD);
|
| + }
|
| + private:
|
| + LModI* instr_;
|
| + };
|
| + // These registers hold untagged 32-bit values.
|
| + Register left = ToRegister(instr->InputAt(0));
|
| + Register right = ToRegister(instr->InputAt(1));
|
| + Register result = ToRegister(instr->result());
|
| + Register scratch = scratch0();
|
| +
|
| + Label deoptimize, done;
|
| + // Check for x % 0.
|
| + if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
|
| + __ tst(right, Operand(right));
|
| + __ b(eq, &deoptimize);
|
| + }
|
| +
|
| + // Check for (0 % -x) that will produce negative zero.
|
| + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| + Label ok;
|
| + __ tst(left, Operand(left));
|
| + __ b(ne, &ok);
|
| + __ tst(right, Operand(right));
|
| + __ b(pl, &ok);
|
| + __ b(al, &deoptimize);
|
| + __ bind(&ok);
|
| + }
|
| +
|
| + // Try a few common cases before using the generic stub.
|
| + Label call_stub;
|
| + const int kUnfolds = 3;
|
| + // Skip if either side is negative.
|
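| + // (The second cmp is predicated on pl, so the single mi branch below
|
| + // triggers when either operand is negative.)
|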
| + __ cmp(left, Operand(0));
|
| + __ cmp(right, Operand(0), NegateCondition(mi));
|
| + __ b(mi, &call_stub);
|
| + // If the (nonnegative) left hand side is smaller than the right hand
|
| + // side, it is already the result. Else subtract the right hand side
|
| + // from it a few times.
|
| + __ mov(scratch, left);
|
| + for (int i = 0; i < kUnfolds; i++) {
|
| + // Check if the left hand side is strictly less than the right
|
| + // hand side.
|
| + __ cmp(scratch, right);
|
| + __ mov(result, scratch, LeaveCC, lt);
|
| + __ b(lt, &done);
|
| + // If not, reduce the left hand side by the right hand
|
| + // side and check again.
|
| + if (i < kUnfolds - 1) __ sub(scratch, scratch, right);
|
| + }
|
| +
|
| + // Check for power of two on the right hand side.
|
| + __ JumpIfNotPowerOfTwoOrZero(right, scratch, &call_stub);
|
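| + // For example, for right == 8 the mask in scratch is 7, and
|
| + // left & 7 == left % 8 for the nonnegative left checked above.
|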
| + // Perform modulo operation (scratch contains right - 1).
|
| + __ and_(result, scratch, Operand(left));
|
| +
|
| + __ bind(&call_stub);
|
| + // Call the generic stub. The numbers in r0 and r1 have
|
| + // to be tagged as Smis. If that is not possible, deoptimize.
|
| + DeferredModI* deferred = new DeferredModI(this, instr);
|
| + __ TrySmiTag(left, &deoptimize, scratch);
|
| + __ TrySmiTag(right, &deoptimize, scratch);
|
| +
|
| + __ b(al, deferred->entry());
|
| + __ bind(deferred->exit());
|
| +
|
| + // If the result in r0 is a Smi, untag it, else deoptimize.
|
| + __ JumpIfNotSmi(result, &deoptimize);
|
| + __ SmiUntag(result);
|
| +
|
| + __ b(al, &done);
|
| + __ bind(&deoptimize);
|
| + DeoptimizeIf(al, instr->environment());
|
| + __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoDivI(LDivI* instr) {
|
| - Abort("DoDivI unimplemented.");
|
| + class DeferredDivI: public LDeferredCode {
|
| + public:
|
| + DeferredDivI(LCodeGen* codegen, LDivI* instr)
|
| + : LDeferredCode(codegen), instr_(instr) { }
|
| + virtual void Generate() {
|
| + codegen()->DoDeferredGenericBinaryStub(instr_, Token::DIV);
|
| + }
|
| + private:
|
| + LDivI* instr_;
|
| + };
|
| +
|
| + const Register left = ToRegister(instr->InputAt(0));
|
| + const Register right = ToRegister(instr->InputAt(1));
|
| + const Register scratch = scratch0();
|
| + const Register result = ToRegister(instr->result());
|
| +
|
| + // Check for x / 0.
|
| + if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
|
| + __ tst(right, right);
|
| + DeoptimizeIf(eq, instr->environment());
|
| + }
|
| +
|
| + // Check for (0 / -x) that will produce negative zero.
|
| + if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| + Label left_not_zero;
|
| + __ tst(left, Operand(left));
|
| + __ b(ne, &left_not_zero);
|
| + __ tst(right, Operand(right));
|
| + DeoptimizeIf(mi, instr->environment());
|
| + __ bind(&left_not_zero);
|
| + }
|
| +
|
| + // Check for (kMinInt / -1).
|
| + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
|
| + Label left_not_min_int;
|
| + __ cmp(left, Operand(kMinInt));
|
| + __ b(ne, &left_not_min_int);
|
| + __ cmp(right, Operand(-1));
|
| + DeoptimizeIf(eq, instr->environment());
|
| + __ bind(&left_not_min_int);
|
| + }
|
| +
|
| + Label done, deoptimize;
|
| + // Test for a few common cases first.
|
| + __ cmp(right, Operand(1));
|
| + __ mov(result, left, LeaveCC, eq);
|
| + __ b(eq, &done);
|
| +
|
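| + // The tst below is predicated on eq, so eq survives only when the
|
| + // divisor matches and the dividend is exactly divisible, making the
|
| + // shift safe.
|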
| + __ cmp(right, Operand(2));
|
| + __ tst(left, Operand(1), eq);
|
| + __ mov(result, Operand(left, ASR, 1), LeaveCC, eq);
|
| + __ b(eq, &done);
|
| +
|
| + __ cmp(right, Operand(4));
|
| + __ tst(left, Operand(3), eq);
|
| + __ mov(result, Operand(left, ASR, 2), LeaveCC, eq);
|
| + __ b(eq, &done);
|
| +
|
| + // Call the generic stub. The numbers in r0 and r1 have
|
| + // to be tagged as Smis. If that is not possible, deoptimize.
|
| + DeferredDivI* deferred = new DeferredDivI(this, instr);
|
| +
|
| + __ TrySmiTag(left, &deoptimize, scratch);
|
| + __ TrySmiTag(right, &deoptimize, scratch);
|
| +
|
| + __ b(al, deferred->entry());
|
| + __ bind(deferred->exit());
|
| +
|
| + // If the result in r0 is a Smi, untag it, else deoptimize.
|
| + __ JumpIfNotSmi(result, &deoptimize);
|
| + __ SmiUntag(result);
|
| + __ b(&done);
|
| +
|
| + __ bind(&deoptimize);
|
| + DeoptimizeIf(al, instr->environment());
|
| + __ bind(&done);
|
| }
|
|
|
|
|
| +template<int T>
|
| +void LCodeGen::DoDeferredGenericBinaryStub(LTemplateInstruction<1, 2, T>* instr,
|
| + Token::Value op) {
|
| + Register left = ToRegister(instr->InputAt(0));
|
| + Register right = ToRegister(instr->InputAt(1));
|
| +
|
| + __ PushSafepointRegistersAndDoubles();
|
| + GenericBinaryOpStub stub(op, OVERWRITE_LEFT, left, right);
|
| + __ CallStub(&stub);
|
| + RecordSafepointWithRegistersAndDoubles(instr->pointer_map(),
|
| + 0,
|
| + Safepoint::kNoDeoptimizationIndex);
|
| + // Overwrite the stored value of r0 with the result of the stub.
|
| + __ StoreToSafepointRegistersAndDoublesSlot(r0);
|
| + __ PopSafepointRegistersAndDoubles();
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoMulI(LMulI* instr) {
|
| Register scratch = scratch0();
|
| - Register left = ToRegister(instr->left());
|
| - Register right = EmitLoadRegister(instr->right(), scratch);
|
| + Register left = ToRegister(instr->InputAt(0));
|
| + Register right = EmitLoadRegister(instr->InputAt(1), scratch);
|
|
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero) &&
|
| - !instr->right()->IsConstantOperand()) {
|
| - __ orr(ToRegister(instr->temp()), left, right);
|
| + !instr->InputAt(1)->IsConstantOperand()) {
|
| + __ orr(ToRegister(instr->TempAt(0)), left, right);
|
| }
|
|
|
| if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
|
| // scratch:left = left * right.
|
| - __ smull(scratch, left, left, right);
|
| + __ smull(left, scratch, left, right);
|
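| + // The product fits in 32 bits iff the high word (scratch) equals the
|
| + // sign extension of the low word (left).
|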
| __ mov(ip, Operand(left, ASR, 31));
|
| __ cmp(ip, Operand(scratch));
|
| DeoptimizeIf(ne, instr->environment());
|
| @@ -820,13 +1187,13 @@
|
| Label done;
|
| __ tst(left, Operand(left));
|
| __ b(ne, &done);
|
| - if (instr->right()->IsConstantOperand()) {
|
| - if (ToInteger32(LConstantOperand::cast(instr->right())) < 0) {
|
| - DeoptimizeIf(no_condition, instr->environment());
|
| + if (instr->InputAt(1)->IsConstantOperand()) {
|
| + if (ToInteger32(LConstantOperand::cast(instr->InputAt(1))) <= 0) {
|
| + DeoptimizeIf(al, instr->environment());
|
| }
|
| } else {
|
| // Test the non-zero operand for negative sign.
|
| - __ cmp(ToRegister(instr->temp()), Operand(0));
|
| + __ cmp(ToRegister(instr->TempAt(0)), Operand(0));
|
| DeoptimizeIf(mi, instr->environment());
|
| }
|
| __ bind(&done);
|
| @@ -835,8 +1202,8 @@
|
|
|
|
|
| void LCodeGen::DoBitI(LBitI* instr) {
|
| - LOperand* left = instr->left();
|
| - LOperand* right = instr->right();
|
| + LOperand* left = instr->InputAt(0);
|
| + LOperand* right = instr->InputAt(1);
|
| ASSERT(left->Equals(instr->result()));
|
| ASSERT(left->IsRegister());
|
| Register result = ToRegister(left);
|
| @@ -860,8 +1227,8 @@
|
|
|
| void LCodeGen::DoShiftI(LShiftI* instr) {
|
| Register scratch = scratch0();
|
| - LOperand* left = instr->left();
|
| - LOperand* right = instr->right();
|
| + LOperand* left = instr->InputAt(0);
|
| + LOperand* right = instr->InputAt(1);
|
| ASSERT(left->Equals(instr->result()));
|
| ASSERT(left->IsRegister());
|
| Register result = ToRegister(left);
|
| @@ -918,9 +1285,9 @@
|
|
|
|
|
| void LCodeGen::DoSubI(LSubI* instr) {
|
| - Register left = ToRegister(instr->left());
|
| - Register right = EmitLoadRegister(instr->right(), ip);
|
| - ASSERT(instr->left()->Equals(instr->result()));
|
| + Register left = ToRegister(instr->InputAt(0));
|
| + Register right = EmitLoadRegister(instr->InputAt(1), ip);
|
| + ASSERT(instr->InputAt(0)->Equals(instr->result()));
|
| __ sub(left, left, right, SetCC);
|
| if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
|
| DeoptimizeIf(vs, instr->environment());
|
| @@ -935,7 +1302,10 @@
|
|
|
|
|
| void LCodeGen::DoConstantD(LConstantD* instr) {
|
| - Abort("DoConstantD unimplemented.");
|
| + ASSERT(instr->result()->IsDoubleRegister());
|
| + DwVfpRegister result = ToDoubleRegister(instr->result());
|
| + double v = instr->value();
|
| + __ vmov(result, v);
|
| }
|
|
|
|
|
| @@ -947,34 +1317,54 @@
|
|
|
| void LCodeGen::DoJSArrayLength(LJSArrayLength* instr) {
|
| Register result = ToRegister(instr->result());
|
| - Register array = ToRegister(instr->input());
|
| + Register array = ToRegister(instr->InputAt(0));
|
| __ ldr(result, FieldMemOperand(array, JSArray::kLengthOffset));
|
| }
|
|
|
|
|
| +void LCodeGen::DoPixelArrayLength(LPixelArrayLength* instr) {
|
| + Register result = ToRegister(instr->result());
|
| + Register array = ToRegister(instr->InputAt(0));
|
| + __ ldr(result, FieldMemOperand(array, PixelArray::kLengthOffset));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoFixedArrayLength(LFixedArrayLength* instr) {
|
| Register result = ToRegister(instr->result());
|
| - Register array = ToRegister(instr->input());
|
| + Register array = ToRegister(instr->InputAt(0));
|
| __ ldr(result, FieldMemOperand(array, FixedArray::kLengthOffset));
|
| - Abort("DoFixedArrayLength untested.");
|
| }
|
|
|
|
|
| void LCodeGen::DoValueOf(LValueOf* instr) {
|
| - Abort("DoValueOf unimplemented.");
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + Register result = ToRegister(instr->result());
|
| + Register map = ToRegister(instr->TempAt(0));
|
| + ASSERT(input.is(result));
|
| + Label done;
|
| +
|
| + // If the object is a smi, return the object.
|
| + __ tst(input, Operand(kSmiTagMask));
|
| + __ b(eq, &done);
|
| +
|
| + // If the object is not a value type, return the object.
|
| + __ CompareObjectType(input, map, map, JS_VALUE_TYPE);
|
| + __ b(ne, &done);
|
| + __ ldr(result, FieldMemOperand(input, JSValue::kValueOffset));
|
| +
|
| + __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoBitNotI(LBitNotI* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->Equals(instr->result()));
|
| __ mvn(ToRegister(input), Operand(ToRegister(input)));
|
| - Abort("DoBitNotI untested.");
|
| }
|
|
|
|
|
| void LCodeGen::DoThrow(LThrow* instr) {
|
| - Register input_reg = EmitLoadRegister(instr->input(), ip);
|
| + Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
|
| __ push(input_reg);
|
| CallRuntime(Runtime::kThrow, 1, instr);
|
|
|
| @@ -985,8 +1375,8 @@
|
|
|
|
|
| void LCodeGen::DoAddI(LAddI* instr) {
|
| - LOperand* left = instr->left();
|
| - LOperand* right = instr->right();
|
| + LOperand* left = instr->InputAt(0);
|
| + LOperand* right = instr->InputAt(1);
|
| ASSERT(left->Equals(instr->result()));
|
|
|
| Register right_reg = EmitLoadRegister(right, ip);
|
| @@ -999,8 +1389,8 @@
|
|
|
|
|
| void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
|
| - DoubleRegister left = ToDoubleRegister(instr->left());
|
| - DoubleRegister right = ToDoubleRegister(instr->right());
|
| + DoubleRegister left = ToDoubleRegister(instr->InputAt(0));
|
| + DoubleRegister right = ToDoubleRegister(instr->InputAt(1));
|
| switch (instr->op()) {
|
| case Token::ADD:
|
| __ vadd(left, left, right);
|
| @@ -1015,7 +1405,18 @@
|
| __ vdiv(left, left, right);
|
| break;
|
| case Token::MOD: {
|
| - Abort("DoArithmeticD unimplemented for MOD.");
|
| + // Save r0-r3 on the stack.
|
| + __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
|
| +
|
| + __ PrepareCallCFunction(4, scratch0());
|
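| + // Pass the double arguments in core register pairs (r0/r1 and r2/r3),
|
| + // which is where the C function expects them.
|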
| + __ vmov(r0, r1, left);
|
| + __ vmov(r2, r3, right);
|
| + __ CallCFunction(ExternalReference::double_fp_operation(Token::MOD), 4);
|
| + // Move the result into the double result register.
|
| + __ vmov(ToDoubleRegister(instr->result()), r0, r1);
|
| +
|
| + // Restore r0-r3.
|
| + __ ldm(ia_w, sp, r0.bit() | r1.bit() | r2.bit() | r3.bit());
|
| break;
|
| }
|
| default:
|
| @@ -1026,8 +1427,8 @@
|
|
|
|
|
| void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
|
| - ASSERT(ToRegister(instr->left()).is(r1));
|
| - ASSERT(ToRegister(instr->right()).is(r0));
|
| + ASSERT(ToRegister(instr->InputAt(0)).is(r1));
|
| + ASSERT(ToRegister(instr->InputAt(1)).is(r0));
|
| ASSERT(ToRegister(instr->result()).is(r0));
|
|
|
| // TODO(regis): Implement TypeRecordingBinaryOpStub and replace current
|
| @@ -1071,16 +1472,20 @@
|
|
|
| Representation r = instr->hydrogen()->representation();
|
| if (r.IsInteger32()) {
|
| - Register reg = ToRegister(instr->input());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| __ cmp(reg, Operand(0));
|
| - EmitBranch(true_block, false_block, nz);
|
| + EmitBranch(true_block, false_block, ne);
|
| } else if (r.IsDouble()) {
|
| - DoubleRegister reg = ToDoubleRegister(instr->input());
|
| - __ vcmp(reg, 0.0);
|
| + DoubleRegister reg = ToDoubleRegister(instr->InputAt(0));
|
| + Register scratch = scratch0();
|
| +
|
| + // Test the double value. Zero and NaN are false.
|
| + __ VFPCompareAndLoadFlags(reg, 0.0, scratch);
|
| + __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
|
| EmitBranch(true_block, false_block, ne);
|
| } else {
|
| ASSERT(r.IsTagged());
|
| - Register reg = ToRegister(instr->input());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| if (instr->hydrogen()->type().IsBoolean()) {
|
| __ LoadRoot(ip, Heap::kTrueValueRootIndex);
|
| __ cmp(reg, ip);
|
| @@ -1103,7 +1508,7 @@
|
| __ tst(reg, Operand(kSmiTagMask));
|
| __ b(eq, true_label);
|
|
|
| - // Test for double values. Zero is false.
|
| + // Test double values. Zero and NaN are false.
|
| Label call_stub;
|
| DoubleRegister dbl_scratch = d0;
|
| Register scratch = scratch0();
|
| @@ -1113,8 +1518,9 @@
|
| __ b(ne, &call_stub);
|
| __ sub(ip, reg, Operand(kHeapObjectTag));
|
| __ vldr(dbl_scratch, ip, HeapNumber::kValueOffset);
|
| - __ vcmp(dbl_scratch, 0.0);
|
| - __ b(eq, false_label);
|
| + __ VFPCompareAndLoadFlags(dbl_scratch, 0.0, scratch);
|
| + __ tst(scratch, Operand(kVFPZConditionFlagBit | kVFPVConditionFlagBit));
|
| + __ b(ne, false_label);
|
| __ b(true_label);
|
|
|
| // The conversion stub doesn't cause garbage collections so it's
|
| @@ -1126,36 +1532,59 @@
|
| __ CallStub(&stub);
|
| __ cmp(reg, Operand(0));
|
| __ ldm(ia_w, sp, saved_regs);
|
| - EmitBranch(true_block, false_block, nz);
|
| + EmitBranch(true_block, false_block, ne);
|
| }
|
| }
|
| }
|
|
|
|
|
| void LCodeGen::EmitGoto(int block, LDeferredCode* deferred_stack_check) {
|
| - // TODO(srdjan): Perform stack overflow check if this goto needs it
|
| - // before jumping.
|
| block = chunk_->LookupDestination(block);
|
| int next_block = GetNextEmittedBlock(current_block_);
|
| if (block != next_block) {
|
| - __ jmp(chunk_->GetAssemblyLabel(block));
|
| + // Perform stack overflow check if this goto needs it before jumping.
|
| + if (deferred_stack_check != NULL) {
|
| + __ LoadRoot(ip, Heap::kStackLimitRootIndex);
|
| + __ cmp(sp, Operand(ip));
|
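| + // If sp is at or above the stack limit there is still headroom, so
|
| + // skip the deferred stack check.
|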
| + __ b(hs, chunk_->GetAssemblyLabel(block));
|
| + __ jmp(deferred_stack_check->entry());
|
| + deferred_stack_check->SetExit(chunk_->GetAssemblyLabel(block));
|
| + } else {
|
| + __ jmp(chunk_->GetAssemblyLabel(block));
|
| + }
|
| }
|
| }
|
|
|
|
|
| void LCodeGen::DoDeferredStackCheck(LGoto* instr) {
|
| - UNIMPLEMENTED();
|
| + __ PushSafepointRegisters();
|
| + __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
|
| + RecordSafepointWithRegisters(
|
| + instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
|
| + __ PopSafepointRegisters();
|
| }
|
|
|
|
|
| void LCodeGen::DoGoto(LGoto* instr) {
|
| - // TODO(srdjan): Implement deferred stack check.
|
| - EmitGoto(instr->block_id(), NULL);
|
| + class DeferredStackCheck: public LDeferredCode {
|
| + public:
|
| + DeferredStackCheck(LCodeGen* codegen, LGoto* instr)
|
| + : LDeferredCode(codegen), instr_(instr) { }
|
| + virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
|
| + private:
|
| + LGoto* instr_;
|
| + };
|
| +
|
| + DeferredStackCheck* deferred = NULL;
|
| + if (instr->include_stack_check()) {
|
| + deferred = new DeferredStackCheck(this, instr);
|
| + }
|
| + EmitGoto(instr->block_id(), deferred);
|
| }
|
|
|
|
|
| Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
|
| - Condition cond = no_condition;
|
| + Condition cond = kNoCondition;
|
| switch (op) {
|
| case Token::EQ:
|
| case Token::EQ_STRICT:
|
| @@ -1183,40 +1612,84 @@
|
|
|
|
|
| void LCodeGen::EmitCmpI(LOperand* left, LOperand* right) {
|
| - __ cmp(ToRegister(left), ToOperand(right));
|
| - Abort("EmitCmpI untested.");
|
| + __ cmp(ToRegister(left), ToRegister(right));
|
| }
|
|
|
|
|
| void LCodeGen::DoCmpID(LCmpID* instr) {
|
| - Abort("DoCmpID unimplemented.");
|
| + LOperand* left = instr->InputAt(0);
|
| + LOperand* right = instr->InputAt(1);
|
| + LOperand* result = instr->result();
|
| + Register scratch = scratch0();
|
| +
|
| + Label unordered, done;
|
| + if (instr->is_double()) {
|
| + // Compare left and right as doubles and load the
|
| + // resulting flags into the normal status register.
|
| + __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
|
| + // If a NaN is involved, i.e. the result is unordered (V set),
|
| + // jump to unordered to return false.
|
| + __ b(vs, &unordered);
|
| + } else {
|
| + EmitCmpI(left, right);
|
| + }
|
| +
|
| + Condition cc = TokenToCondition(instr->op(), instr->is_double());
|
| + __ LoadRoot(ToRegister(result), Heap::kTrueValueRootIndex);
|
| + __ b(cc, &done);
|
| +
|
| + __ bind(&unordered);
|
| + __ LoadRoot(ToRegister(result), Heap::kFalseValueRootIndex);
|
| + __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
|
| - Abort("DoCmpIDAndBranch unimplemented.");
|
| + LOperand* left = instr->InputAt(0);
|
| + LOperand* right = instr->InputAt(1);
|
| + int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| + int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| +
|
| + if (instr->is_double()) {
|
| + // Compare left and right as doubles and load the
|
| + // resulting flags into the normal status register.
|
| + __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
|
| + // If a NaN is involved, i.e. the result is unordered (V set),
|
| + // jump to false block label.
|
| + __ b(vs, chunk_->GetAssemblyLabel(false_block));
|
| + } else {
|
| + EmitCmpI(left, right);
|
| + }
|
| +
|
| + Condition cc = TokenToCondition(instr->op(), instr->is_double());
|
| + EmitBranch(true_block, false_block, cc);
|
| }
|
|
|
|
|
| void LCodeGen::DoCmpJSObjectEq(LCmpJSObjectEq* instr) {
|
| - Register left = ToRegister(instr->left());
|
| - Register right = ToRegister(instr->right());
|
| + Register left = ToRegister(instr->InputAt(0));
|
| + Register right = ToRegister(instr->InputAt(1));
|
| Register result = ToRegister(instr->result());
|
|
|
| __ cmp(left, Operand(right));
|
| __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
|
| __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
|
| - Abort("DoCmpJSObjectEq untested.");
|
| }
|
|
|
|
|
| void LCodeGen::DoCmpJSObjectEqAndBranch(LCmpJSObjectEqAndBranch* instr) {
|
| - Abort("DoCmpJSObjectEqAndBranch unimplemented.");
|
| + Register left = ToRegister(instr->InputAt(0));
|
| + Register right = ToRegister(instr->InputAt(1));
|
| + int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| + int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| +
|
| + __ cmp(left, Operand(right));
|
| + EmitBranch(true_block, false_block, eq);
|
| }
|
|
|
|
|
| void LCodeGen::DoIsNull(LIsNull* instr) {
|
| - Register reg = ToRegister(instr->input());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| Register result = ToRegister(instr->result());
|
|
|
| __ LoadRoot(ip, Heap::kNullValueRootIndex);
|
| @@ -1251,7 +1724,7 @@
|
|
|
| void LCodeGen::DoIsNullAndBranch(LIsNullAndBranch* instr) {
|
| Register scratch = scratch0();
|
| - Register reg = ToRegister(instr->input());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
|
|
| // TODO(fsc): If the expression is known to be a smi, then it's
|
| // definitely not null. Jump to the false block.
|
| @@ -1287,25 +1760,69 @@
|
| Register temp2,
|
| Label* is_not_object,
|
| Label* is_object) {
|
| - Abort("EmitIsObject unimplemented.");
|
| - return ne;
|
| + __ JumpIfSmi(input, is_not_object);
|
| +
|
| + __ LoadRoot(temp1, Heap::kNullValueRootIndex);
|
| + __ cmp(input, temp1);
|
| + __ b(eq, is_object);
|
| +
|
| + // Load map.
|
| + __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
|
| + // Undetectable objects behave like undefined.
|
| + __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
|
| + __ tst(temp2, Operand(1 << Map::kIsUndetectable));
|
| + __ b(ne, is_not_object);
|
| +
|
| + // Load instance type and check that it is in object type range.
|
| + __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
|
| + __ cmp(temp2, Operand(FIRST_JS_OBJECT_TYPE));
|
| + __ b(lt, is_not_object);
|
| + __ cmp(temp2, Operand(LAST_JS_OBJECT_TYPE));
|
| + return le;
|
| }
|
|
|
|
|
| void LCodeGen::DoIsObject(LIsObject* instr) {
|
| - Abort("DoIsObject unimplemented.");
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| + Register result = ToRegister(instr->result());
|
| + Register temp = scratch0();
|
| + Label is_false, is_true, done;
|
| +
|
| + Condition true_cond = EmitIsObject(reg, result, temp, &is_false, &is_true);
|
| + __ b(true_cond, &is_true);
|
| +
|
| + __ bind(&is_false);
|
| + __ LoadRoot(result, Heap::kFalseValueRootIndex);
|
| + __ b(&done);
|
| +
|
| + __ bind(&is_true);
|
| + __ LoadRoot(result, Heap::kTrueValueRootIndex);
|
| +
|
| + __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
|
| - Abort("DoIsObjectAndBranch unimplemented.");
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| + Register temp1 = ToRegister(instr->TempAt(0));
|
| + Register temp2 = scratch0();
|
| +
|
| + int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| + int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| + Label* true_label = chunk_->GetAssemblyLabel(true_block);
|
| + Label* false_label = chunk_->GetAssemblyLabel(false_block);
|
| +
|
| + Condition true_cond =
|
| + EmitIsObject(reg, temp1, temp2, false_label, true_label);
|
| +
|
| + EmitBranch(true_block, false_block, true_cond);
|
| }
|
|
|
|
|
| void LCodeGen::DoIsSmi(LIsSmi* instr) {
|
| ASSERT(instr->hydrogen()->value()->representation().IsTagged());
|
| Register result = ToRegister(instr->result());
|
| - Register input_reg = EmitLoadRegister(instr->input(), ip);
|
| + Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
|
| __ tst(input_reg, Operand(kSmiTagMask));
|
| __ LoadRoot(result, Heap::kTrueValueRootIndex);
|
| Label done;
|
| @@ -1319,24 +1836,24 @@
|
| int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| int false_block = chunk_->LookupDestination(instr->false_block_id());
|
|
|
| - Register input_reg = EmitLoadRegister(instr->input(), ip);
|
| + Register input_reg = EmitLoadRegister(instr->InputAt(0), ip);
|
| __ tst(input_reg, Operand(kSmiTagMask));
|
| EmitBranch(true_block, false_block, eq);
|
| }
|
|
|
|
|
| -InstanceType LHasInstanceType::TestType() {
|
| - InstanceType from = hydrogen()->from();
|
| - InstanceType to = hydrogen()->to();
|
| +static InstanceType TestType(HHasInstanceType* instr) {
|
| + InstanceType from = instr->from();
|
| + InstanceType to = instr->to();
|
| if (from == FIRST_TYPE) return to;
|
| ASSERT(from == to || to == LAST_TYPE);
|
| return from;
|
| }
|
|
|
|
|
| -Condition LHasInstanceType::BranchCondition() {
|
| - InstanceType from = hydrogen()->from();
|
| - InstanceType to = hydrogen()->to();
|
| +static Condition BranchCondition(HHasInstanceType* instr) {
|
| + InstanceType from = instr->from();
|
| + InstanceType to = instr->to();
|
| if (from == to) return eq;
|
| if (to == LAST_TYPE) return hs;
|
| if (from == FIRST_TYPE) return ls;
|
| @@ -1346,13 +1863,25 @@
|
|
|
|
|
| void LCodeGen::DoHasInstanceType(LHasInstanceType* instr) {
|
| - Abort("DoHasInstanceType unimplemented.");
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + Register result = ToRegister(instr->result());
|
| +
|
| + ASSERT(instr->hydrogen()->value()->representation().IsTagged());
|
| + Label done;
|
| + __ tst(input, Operand(kSmiTagMask));
|
| + __ LoadRoot(result, Heap::kFalseValueRootIndex, eq);
|
| + __ b(eq, &done);
|
| + __ CompareObjectType(input, result, result, TestType(instr->hydrogen()));
|
| + Condition cond = BranchCondition(instr->hydrogen());
|
| + __ LoadRoot(result, Heap::kTrueValueRootIndex, cond);
|
| + __ LoadRoot(result, Heap::kFalseValueRootIndex, NegateCondition(cond));
|
| + __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
|
| Register scratch = scratch0();
|
| - Register input = ToRegister(instr->input());
|
| + Register input = ToRegister(instr->InputAt(0));
|
|
|
| int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| @@ -1362,23 +1891,41 @@
|
| __ tst(input, Operand(kSmiTagMask));
|
| __ b(eq, false_label);
|
|
|
| - __ CompareObjectType(input, scratch, scratch, instr->TestType());
|
| - EmitBranch(true_block, false_block, instr->BranchCondition());
|
| + __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
|
| + EmitBranch(true_block, false_block, BranchCondition(instr->hydrogen()));
|
| }
|
|
|
|
|
| void LCodeGen::DoHasCachedArrayIndex(LHasCachedArrayIndex* instr) {
|
| - Abort("DoHasCachedArrayIndex unimplemented.");
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + Register result = ToRegister(instr->result());
|
| + Register scratch = scratch0();
|
| +
|
| + ASSERT(instr->hydrogen()->value()->representation().IsTagged());
|
| + __ ldr(scratch,
|
| + FieldMemOperand(input, String::kHashFieldOffset));
|
| + __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
|
| + __ LoadRoot(result, Heap::kTrueValueRootIndex, eq);
|
| + __ LoadRoot(result, Heap::kFalseValueRootIndex, ne);
|
| }
|
|
|
|
|
| void LCodeGen::DoHasCachedArrayIndexAndBranch(
|
| LHasCachedArrayIndexAndBranch* instr) {
|
| - Abort("DoHasCachedArrayIndexAndBranch unimplemented.");
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + Register scratch = scratch0();
|
| +
|
| + int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| + int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| +
|
| + __ ldr(scratch,
|
| + FieldMemOperand(input, String::kHashFieldOffset));
|
| + __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
|
| + EmitBranch(true_block, false_block, eq);
|
| }
|
|
|
|
|
| -// Branches to a label or falls through with the answer in the z flag. Trashes
|
| +// Branches to a label or falls through with the answer in flags. Trashes
|
| // the temp registers, but not the input. Only input and temp2 may alias.
|
| void LCodeGen::EmitClassOfTest(Label* is_true,
|
| Label* is_false,
|
| @@ -1386,23 +1933,97 @@
|
| Register input,
|
| Register temp,
|
| Register temp2) {
|
| - Abort("EmitClassOfTest unimplemented.");
|
| + ASSERT(!input.is(temp));
|
| + ASSERT(!temp.is(temp2)); // But input and temp2 may be the same register.
|
| + __ tst(input, Operand(kSmiTagMask));
|
| + __ b(eq, is_false);
|
| + __ CompareObjectType(input, temp, temp2, FIRST_JS_OBJECT_TYPE);
|
| + __ b(lt, is_false);
|
| +
|
| + // Map is now in temp.
|
| + // Functions have class 'Function'.
|
| + __ CompareInstanceType(temp, temp2, JS_FUNCTION_TYPE);
|
| + if (class_name->IsEqualTo(CStrVector("Function"))) {
|
| + __ b(eq, is_true);
|
| + } else {
|
| + __ b(eq, is_false);
|
| + }
|
| +
|
| + // Check if the constructor in the map is a function.
|
| + __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset));
|
| +
|
| + // As long as JS_FUNCTION_TYPE is the last instance type and it is
|
| + // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
|
| + // LAST_JS_OBJECT_TYPE.
|
| + ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
|
| + ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
|
| +
|
| + // Objects with a non-function constructor have class 'Object'.
|
| + __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
|
| + if (class_name->IsEqualTo(CStrVector("Object"))) {
|
| + __ b(ne, is_true);
|
| + } else {
|
| + __ b(ne, is_false);
|
| + }
|
| +
|
| + // temp now contains the constructor function. Grab the
|
| + // instance class name from there.
|
| + __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
|
| + __ ldr(temp, FieldMemOperand(temp,
|
| + SharedFunctionInfo::kInstanceClassNameOffset));
|
| + // The class name we are testing against is a symbol because it's a literal.
|
| + // The name in the constructor is a symbol because of the way the context is
|
| + // booted. This routine isn't expected to work for random API-created
|
| + // classes and it doesn't have to because you can't access it with natives
|
| + // syntax. Since both sides are symbols it is sufficient to use an identity
|
| + // comparison.
|
| + __ cmp(temp, Operand(class_name));
|
| + // End with the answer in flags.
|
| }
|
|
|
|
|
| void LCodeGen::DoClassOfTest(LClassOfTest* instr) {
|
| - Abort("DoClassOfTest unimplemented.");
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + Register result = ToRegister(instr->result());
|
| + ASSERT(input.is(result));
|
| + Handle<String> class_name = instr->hydrogen()->class_name();
|
| +
|
| + Label done, is_true, is_false;
|
| +
|
| + EmitClassOfTest(&is_true, &is_false, class_name, input, scratch0(), input);
|
| + __ b(ne, &is_false);
|
| +
|
| + __ bind(&is_true);
|
| + __ LoadRoot(result, Heap::kTrueValueRootIndex);
|
| + __ jmp(&done);
|
| +
|
| + __ bind(&is_false);
|
| + __ LoadRoot(result, Heap::kFalseValueRootIndex);
|
| + __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
|
| - Abort("DoClassOfTestAndBranch unimplemented.");
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + Register temp = scratch0();
|
| + Register temp2 = ToRegister(instr->TempAt(0));
|
| + Handle<String> class_name = instr->hydrogen()->class_name();
|
| +
|
| + int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| + int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| +
|
| + Label* true_label = chunk_->GetAssemblyLabel(true_block);
|
| + Label* false_label = chunk_->GetAssemblyLabel(false_block);
|
| +
|
| + EmitClassOfTest(true_label, false_label, class_name, input, temp, temp2);
|
| +
|
| + EmitBranch(true_block, false_block, eq);
|
| }
|
|
|
|
|
| void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
|
| - Register reg = ToRegister(instr->input());
|
| - Register temp = ToRegister(instr->temp());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| int true_block = instr->true_block_id();
|
| int false_block = instr->false_block_id();
|
|
|
| @@ -1413,8 +2034,8 @@
|
|
|
|
|
| void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
|
| - ASSERT(ToRegister(instr->left()).is(r0)); // Object is in r0.
|
| - ASSERT(ToRegister(instr->right()).is(r1)); // Function is in r1.
|
| + ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
|
| + ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
|
|
|
| InstanceofStub stub(InstanceofStub::kArgsInRegisters);
|
| CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| @@ -1427,11 +2048,136 @@
|
|
|
|
|
| void LCodeGen::DoInstanceOfAndBranch(LInstanceOfAndBranch* instr) {
|
| - Abort("DoInstanceOfAndBranch unimplemented.");
|
| + ASSERT(ToRegister(instr->InputAt(0)).is(r0)); // Object is in r0.
|
| + ASSERT(ToRegister(instr->InputAt(1)).is(r1)); // Function is in r1.
|
| +
|
| + int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| + int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| +
|
| + InstanceofStub stub(InstanceofStub::kArgsInRegisters);
|
| + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| + __ tst(r0, Operand(r0));
|
| + EmitBranch(true_block, false_block, eq);
|
| }
|
|
|
|
|
| +void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
|
| + class DeferredInstanceOfKnownGlobal: public LDeferredCode {
|
| + public:
|
| + DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
|
| + LInstanceOfKnownGlobal* instr)
|
| + : LDeferredCode(codegen), instr_(instr) { }
|
| + virtual void Generate() {
|
| + codegen()->DoDeferredLInstanceOfKnownGlobal(instr_, &map_check_);
|
| + }
|
| +
|
| + Label* map_check() { return &map_check_; }
|
| +
|
| + private:
|
| + LInstanceOfKnownGlobal* instr_;
|
| + Label map_check_;
|
| + };
|
| +
|
| + DeferredInstanceOfKnownGlobal* deferred;
|
| + deferred = new DeferredInstanceOfKnownGlobal(this, instr);
|
| +
|
| + Label done, false_result;
|
| + Register object = ToRegister(instr->InputAt(0));
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| + Register result = ToRegister(instr->result());
|
| +
|
| + ASSERT(object.is(r0));
|
| + ASSERT(result.is(r0));
|
| +
|
| + // A Smi is not an instance of anything.
|
| + __ JumpIfSmi(object, &false_result);
|
| +
|
| + // This is the inlined call site instanceof cache. The two occurrences of the
|
| + // hole value will be patched to the last map/result pair generated by the
|
| + // instanceof stub.
|
| + Label cache_miss;
|
| + Register map = temp;
|
| + __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
|
| + __ bind(deferred->map_check()); // Label for calculating code patching.
|
| + // We use Factory::the_hole_value() on purpose instead of loading from
|
| + // the root array to force relocation, so that the instruction can
|
| + // later be patched with the cached map.
|
| + __ mov(ip, Operand(Factory::the_hole_value()));
|
| + __ cmp(map, Operand(ip));
|
| + __ b(ne, &cache_miss);
|
| + // We use Factory::the_hole_value() on purpose instead of loading from
|
| + // the root array to force relocation, so that the instruction can
|
| + // later be patched with true or false.
|
| + __ mov(result, Operand(Factory::the_hole_value()));
|
| + __ b(&done);
|
| +
|
| + // The inlined call site cache did not match. Check null and string before
|
| + // calling the deferred code.
|
| + __ bind(&cache_miss);
|
| + // Null is not an instance of anything.
|
| + __ LoadRoot(ip, Heap::kNullValueRootIndex);
|
| + __ cmp(object, Operand(ip));
|
| + __ b(eq, &false_result);
|
| +
|
| + // String values are not instances of anything.
|
| + Condition is_string = masm_->IsObjectStringType(object, temp);
|
| + __ b(is_string, &false_result);
|
| +
|
| + // Go to the deferred code.
|
| + __ b(deferred->entry());
|
| +
|
| + __ bind(&false_result);
|
| + __ LoadRoot(result, Heap::kFalseValueRootIndex);
|
| +
|
| + // Here result holds either true or false. The deferred code also
|
| + // produces a true or false object.
|
| + __ bind(deferred->exit());
|
| + __ bind(&done);
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
|
| + Label* map_check) {
|
| + Register result = ToRegister(instr->result());
|
| + ASSERT(result.is(r0));
|
| +
|
| + InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
|
| + flags = static_cast<InstanceofStub::Flags>(
|
| + flags | InstanceofStub::kArgsInRegisters);
|
| + flags = static_cast<InstanceofStub::Flags>(
|
| + flags | InstanceofStub::kCallSiteInlineCheck);
|
| + flags = static_cast<InstanceofStub::Flags>(
|
| + flags | InstanceofStub::kReturnTrueFalseObject);
|
| + InstanceofStub stub(flags);
|
| +
|
| + __ PushSafepointRegisters();
|
| +
|
| + // Get the temp register reserved by the instruction. This must be r4
|
| + // because its safepoint register slot is used to communicate the
|
| + // offset to the location of the map check.
|
| + Register temp = ToRegister(instr->TempAt(0));
|
| + ASSERT(temp.is(r4));
|
| + __ mov(InstanceofStub::right(), Operand(instr->function()));
|
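| + // kAdditionalDelta counts the instructions generated between
|
| + // before_push_delta and the return address of the stub call; the
|
| + // ASSERT_EQ below verifies this count.
|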
| + static const int kAdditionalDelta = 4;
|
| + int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
|
| + Label before_push_delta;
|
| + __ bind(&before_push_delta);
|
| + __ BlockConstPoolFor(kAdditionalDelta);
|
| + __ mov(temp, Operand(delta * kPointerSize));
|
| + __ StoreToSafepointRegisterSlot(temp);
|
| + __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
|
| + ASSERT_EQ(kAdditionalDelta,
|
| + masm_->InstructionsGeneratedSince(&before_push_delta));
|
| + RecordSafepointWithRegisters(
|
| + instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
|
| + // Put the result value into the result register slot and
|
| + // restore all registers.
|
| + __ StoreToSafepointRegisterSlot(result);
|
| +
|
| + __ PopSafepointRegisters();
|
| +}
|
| +
|
| +
|
| static Condition ComputeCompareCondition(Token::Value op) {
|
| switch (op) {
|
| case Token::EQ_STRICT:
|
| @@ -1447,7 +2193,7 @@
|
| return ge;
|
| default:
|
| UNREACHABLE();
|
| - return no_condition;
|
| + return kNoCondition;
|
| }
|
| }
|
|
|
| @@ -1457,21 +2203,37 @@
|
|
|
| Handle<Code> ic = CompareIC::GetUninitialized(op);
|
| CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| + __ cmp(r0, Operand(0)); // This also signals that no smi code was inlined.
|
|
|
| Condition condition = ComputeCompareCondition(op);
|
| if (op == Token::GT || op == Token::LTE) {
|
| condition = ReverseCondition(condition);
|
| }
|
| - __ cmp(r0, Operand(0));
|
| - __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex,
|
| - condition);
|
| - __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex,
|
| - NegateCondition(condition));
|
| + __ LoadRoot(ToRegister(instr->result()),
|
| + Heap::kTrueValueRootIndex,
|
| + condition);
|
| + __ LoadRoot(ToRegister(instr->result()),
|
| + Heap::kFalseValueRootIndex,
|
| + NegateCondition(condition));
|
| }
|
|
|
|
|
| void LCodeGen::DoCmpTAndBranch(LCmpTAndBranch* instr) {
|
| - Abort("DoCmpTAndBranch unimplemented.");
|
| + Token::Value op = instr->op();
|
| + int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| + int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| +
|
| + Handle<Code> ic = CompareIC::GetUninitialized(op);
|
| + CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| +
|
| + // The compare stub expects the compare condition and the input
|
| + // operands reversed for GT and LTE.
|
| + Condition condition = ComputeCompareCondition(op);
|
| + if (op == Token::GT || op == Token::LTE) {
|
| + condition = ReverseCondition(condition);
|
| + }
|
| + __ cmp(r0, Operand(0));
|
| + EmitBranch(true_block, false_block, condition);
|
| }
|
|
|
|
|
| @@ -1503,14 +2265,54 @@
|
|
|
|
|
| void LCodeGen::DoStoreGlobal(LStoreGlobal* instr) {
|
| - Register value = ToRegister(instr->input());
|
| - __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell())));
|
| - __ str(value, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
|
| + Register value = ToRegister(instr->InputAt(0));
|
| + Register scratch = scratch0();
|
| +
|
| + // Load the cell.
|
| + __ mov(scratch, Operand(Handle<Object>(instr->hydrogen()->cell())));
|
| +
|
| + // If the cell we are storing to contains the hole it could have
|
| + // been deleted from the property dictionary. In that case, we need
|
| + // to update the property details in the property dictionary to mark
|
| + // it as no longer deleted.
|
| + if (instr->hydrogen()->check_hole_value()) {
|
| + Register scratch2 = ToRegister(instr->TempAt(0));
|
| + __ ldr(scratch2,
|
| + FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
|
| + __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
|
| + __ cmp(scratch2, ip);
|
| + DeoptimizeIf(eq, instr->environment());
|
| + }
|
| +
|
| + // Store the value.
|
| + __ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
|
| }
|
|
|
|
|
| +void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
|
| + Register context = ToRegister(instr->context());
|
| + Register result = ToRegister(instr->result());
|
| + __ ldr(result,
|
| + MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
|
| + __ ldr(result, ContextOperand(result, instr->slot_index()));
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
|
| + Register context = ToRegister(instr->context());
|
| + Register value = ToRegister(instr->value());
|
| + __ ldr(context,
|
| + MemOperand(context, Context::SlotOffset(Context::FCONTEXT_INDEX)));
|
| + __ str(value, ContextOperand(context, instr->slot_index()));
|
| + if (instr->needs_write_barrier()) {
|
| + int offset = Context::SlotOffset(instr->slot_index());
|
| + __ RecordWrite(context, Operand(offset), value, scratch0());
|
| + }
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
|
| - Register object = ToRegister(instr->input());
|
| + Register object = ToRegister(instr->InputAt(0));
|
| Register result = ToRegister(instr->result());
|
| if (instr->hydrogen()->is_in_object()) {
|
| __ ldr(result, FieldMemOperand(object, instr->hydrogen()->offset()));
|
| @@ -1577,20 +2379,82 @@
|
|
|
|
|
| void LCodeGen::DoLoadElements(LLoadElements* instr) {
|
| - Abort("DoLoadElements unimplemented.");
|
| + Register result = ToRegister(instr->result());
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + Register scratch = scratch0();
|
| +
|
| + __ ldr(result, FieldMemOperand(input, JSObject::kElementsOffset));
|
| + if (FLAG_debug_code) {
|
| + Label done;
|
| + __ ldr(scratch, FieldMemOperand(result, HeapObject::kMapOffset));
|
| + __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
|
| + __ cmp(scratch, ip);
|
| + __ b(eq, &done);
|
| + __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex);
|
| + __ cmp(scratch, ip);
|
| + __ b(eq, &done);
|
| + __ LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
|
| + __ cmp(scratch, ip);
|
| + __ Check(eq, "Check for fast elements failed.");
|
| + __ bind(&done);
|
| + }
|
| }
|
|
|
|
|
| +void LCodeGen::DoLoadPixelArrayExternalPointer(
|
| + LLoadPixelArrayExternalPointer* instr) {
|
| + Register to_reg = ToRegister(instr->result());
|
| + Register from_reg = ToRegister(instr->InputAt(0));
|
| + __ ldr(to_reg, FieldMemOperand(from_reg, PixelArray::kExternalPointerOffset));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
|
| - Abort("DoAccessArgumentsAt unimplemented.");
|
| + Register arguments = ToRegister(instr->arguments());
|
| + Register length = ToRegister(instr->length());
|
| + Register index = ToRegister(instr->index());
|
| + Register result = ToRegister(instr->result());
|
| +
|
| + // Bail out if index is not a valid argument index. The unsigned check
|
| + // also catches negative indices for free.
|
| + __ sub(length, length, index, SetCC);
|
| + DeoptimizeIf(ls, instr->environment());
|
| +
|
| + // There are two words between the frame pointer and the last argument.
|
| + // Subtracting index from length accounts for one of them; add one more.
|
| + __ add(length, length, Operand(1));
|
| + __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
|
| }
|
|
|
|
|
| void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
|
| - Abort("DoLoadKeyedFastElement unimplemented.");
|
| + Register elements = ToRegister(instr->elements());
|
| + Register key = EmitLoadRegister(instr->key(), scratch0());
|
| + Register result = ToRegister(instr->result());
|
| + Register scratch = scratch0();
|
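| + // Note that key may alias scratch0; it is last read by the address
|
| + // computation below, after which scratch can safely be reused.
|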
| + ASSERT(result.is(elements));
|
| +
|
| + // Load the result.
|
| + __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
|
| + __ ldr(result, FieldMemOperand(scratch, FixedArray::kHeaderSize));
|
| +
|
| + // Check for the hole value.
|
| + __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
|
| + __ cmp(result, scratch);
|
| + DeoptimizeIf(eq, instr->environment());
|
| }
|
|
|
|
|
| +void LCodeGen::DoLoadPixelArrayElement(LLoadPixelArrayElement* instr) {
|
| + Register external_elements = ToRegister(instr->external_pointer());
|
| + Register key = ToRegister(instr->key());
|
| + Register result = ToRegister(instr->result());
|
| +
|
| + // Load the result. Pixel array elements are bytes in the range 0..255.
|
| + __ ldrb(result, MemOperand(external_elements, key));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
|
| ASSERT(ToRegister(instr->object()).is(r1));
|
| ASSERT(ToRegister(instr->key()).is(r0));
|
| @@ -1601,22 +2465,119 @@
|
|
|
|
|
| void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
|
| - Abort("DoArgumentsElements unimplemented.");
|
| + Register scratch = scratch0();
|
| + Register result = ToRegister(instr->result());
|
| +
|
| + // Check if the calling frame is an arguments adaptor frame.
|
| + Label done, adapted;
|
| + __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| + __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
|
| + __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
| +
|
| + // Result is the frame pointer for the frame if not adapted and for the real
|
| + // frame below the adaptor frame if adapted.
|
| + __ mov(result, fp, LeaveCC, ne);
|
| + __ mov(result, scratch, LeaveCC, eq);
|
| }
|
|
|
|
|
| void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
|
| - Abort("DoArgumentsLength unimplemented.");
|
| + Register elem = ToRegister(instr->InputAt(0));
|
| + Register result = ToRegister(instr->result());
|
| +
|
| + Label done;
|
| +
|
| + // If there is no arguments adaptor frame, the number of arguments is fixed.
|
| + __ cmp(fp, elem);
|
| + __ mov(result, Operand(scope()->num_parameters()));
|
| + __ b(eq, &done);
|
| +
|
| + // Arguments adaptor frame present. Get argument length from there.
|
| + __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| + __ ldr(result,
|
| + MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
|
| + __ SmiUntag(result);
|
| +
|
| + // Argument length is in result register.
|
| + __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
|
| - Abort("DoApplyArguments unimplemented.");
|
| + Register receiver = ToRegister(instr->receiver());
|
| + Register function = ToRegister(instr->function());
|
| + Register length = ToRegister(instr->length());
|
| + Register elements = ToRegister(instr->elements());
|
| + Register scratch = scratch0();
|
| + ASSERT(receiver.is(r0)); // Used for parameter count.
|
| + ASSERT(function.is(r1)); // Required by InvokeFunction.
|
| + ASSERT(ToRegister(instr->result()).is(r0));
|
| +
|
| + // If the receiver is null or undefined, we have to pass the global object
|
| + // as a receiver.
|
| + Label global_object, receiver_ok;
|
| + __ LoadRoot(scratch, Heap::kNullValueRootIndex);
|
| + __ cmp(receiver, scratch);
|
| + __ b(eq, &global_object);
|
| + __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
|
| + __ cmp(receiver, scratch);
|
| + __ b(eq, &global_object);
|
| +
|
| + // Deoptimize if the receiver is not a JS object.
|
| + __ tst(receiver, Operand(kSmiTagMask));
|
| + DeoptimizeIf(eq, instr->environment());
|
| + __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_OBJECT_TYPE);
|
| + DeoptimizeIf(lo, instr->environment());
|
| + __ jmp(&receiver_ok);
|
| +
|
| + __ bind(&global_object);
|
| + __ ldr(receiver, GlobalObjectOperand());
|
| + __ bind(&receiver_ok);
|
| +
|
| + // Copy the arguments to this function possibly from the
|
| + // adaptor frame below it.
|
| + const uint32_t kArgumentsLimit = 1 * KB;
|
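| + // Deoptimize for overly long argument lists; the unoptimized code
|
| + // handles those instead.
|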
| + __ cmp(length, Operand(kArgumentsLimit));
|
| + DeoptimizeIf(hi, instr->environment());
|
| +
|
| + // Push the receiver and use the register to keep the original
|
| + // number of arguments.
|
| + __ push(receiver);
|
| + __ mov(receiver, length);
|
| + // The arguments start at an offset of one pointer size from elements.
|
| + __ add(elements, elements, Operand(1 * kPointerSize));
|
| +
|
| + // Loop through the arguments pushing them onto the execution
|
| + // stack.
|
| + Label invoke, loop;
|
| + // length is a small non-negative integer, due to the test above.
|
| + __ tst(length, Operand(length));
|
| + __ b(eq, &invoke);
|
| + __ bind(&loop);
|
| + __ ldr(scratch, MemOperand(elements, length, LSL, 2));
|
| + __ push(scratch);
|
| + __ sub(length, length, Operand(1), SetCC);
|
| + __ b(ne, &loop);
|
| +
|
| + __ bind(&invoke);
|
| + ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
|
| + LPointerMap* pointers = instr->pointer_map();
|
| + LEnvironment* env = instr->deoptimization_environment();
|
| + RecordPosition(pointers->position());
|
| + RegisterEnvironmentForDeoptimization(env);
|
| + SafepointGenerator safepoint_generator(this,
|
| + pointers,
|
| + env->deoptimization_index());
|
| + // The number of arguments is stored in receiver, which is r0, as
|
| + // expected by InvokeFunction.
|
| + v8::internal::ParameterCount actual(receiver);
|
| + __ InvokeFunction(function, actual, CALL_FUNCTION, &safepoint_generator);
|
| + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| void LCodeGen::DoPushArgument(LPushArgument* instr) {
|
| - LOperand* argument = instr->input();
|
| + LOperand* argument = instr->InputAt(0);
|
| if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
|
| Abort("DoPushArgument not implemented for double type.");
|
| } else {
|
| @@ -1626,16 +2587,32 @@
|
| }
|
|
|
|
|
| +void LCodeGen::DoContext(LContext* instr) {
|
| + Register result = ToRegister(instr->result());
|
| + __ mov(result, cp);
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoOuterContext(LOuterContext* instr) {
|
| + Register context = ToRegister(instr->context());
|
| + Register result = ToRegister(instr->result());
|
| + __ ldr(result,
|
| + MemOperand(context, Context::SlotOffset(Context::CLOSURE_INDEX)));
|
| + __ ldr(result, FieldMemOperand(result, JSFunction::kContextOffset));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
|
| + Register context = ToRegister(instr->context());
|
| Register result = ToRegister(instr->result());
|
| __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
|
| }
|
|
|
|
|
| void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
|
| + Register global = ToRegister(instr->global());
|
| Register result = ToRegister(instr->result());
|
| - __ ldr(result, ContextOperand(cp, Context::GLOBAL_INDEX));
|
| - __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
|
| + __ ldr(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
|
| }
|
|
|
|
|
| @@ -1680,22 +2657,186 @@
|
|
|
|
|
| void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
|
| - Abort("DoDeferredMathAbsTaggedHeapNumber unimplemented.");
|
| + ASSERT(instr->InputAt(0)->Equals(instr->result()));
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + Register scratch = scratch0();
|
| +
|
| + // Deoptimize if not a heap number.
|
| + __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
|
| + __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
|
| + __ cmp(scratch, Operand(ip));
|
| + DeoptimizeIf(ne, instr->environment());
|
| +
|
| + Label done;
|
| + Register exponent = scratch0();
|
| + scratch = no_reg;
|
| + __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
|
| + // Check the sign of the argument. If the argument is positive, just
|
| + // return it. We do not need to patch the stack since |input| and
|
| + // |result| are the same register and |input| would be restored
|
| + // unchanged by popping safepoint registers.
|
| + __ tst(exponent, Operand(HeapNumber::kSignMask));
|
| + __ b(eq, &done);
|
| +
|
| + // Input is negative. Reverse its sign.
|
| + // Preserve the value of all registers.
|
| + __ PushSafepointRegisters();
|
| +
|
| + // Registers were saved at the safepoint, so we can use
|
| + // many scratch registers.
|
| + Register tmp1 = input.is(r1) ? r0 : r1;
|
| + Register tmp2 = input.is(r2) ? r0 : r2;
|
| + Register tmp3 = input.is(r3) ? r0 : r3;
|
| + Register tmp4 = input.is(r4) ? r0 : r4;
|
| +
|
| + // exponent: floating point exponent value.
|
| +
|
| + Label allocated, slow;
|
| + __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
|
| + __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
|
| + __ b(&allocated);
|
| +
|
| + // Slow case: Call the runtime system to do the number allocation.
|
| + __ bind(&slow);
|
| +
|
| + __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
|
| + RecordSafepointWithRegisters(
|
| + instr->pointer_map(), 0, Safepoint::kNoDeoptimizationIndex);
|
| + // Set the pointer to the new heap number in tmp1.
|
| + if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
|
| + // Restore input after the call to the runtime.
|
| + __ LoadFromSafepointRegisterSlot(input);
|
| + __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
|
| +
|
| + __ bind(&allocated);
|
| + // exponent: floating point exponent value.
|
| + // tmp1: allocated heap number.
|
| + __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
|
| + __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
|
| + __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
|
| + __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
|
| +
|
| + __ str(tmp1, masm()->SafepointRegisterSlot(input));
|
| + __ PopSafepointRegisters();
|
| +
|
| + __ bind(&done);
|
| }
|
|
|
|
|
| +void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + __ cmp(input, Operand(0));
|
| + // We can make rsb conditional because the previous cmp instruction
|
| + // clears the V (overflow) flag, and the conditional rsb is skipped
|
| + // (leaving V clear) when input is positive.
|
| + __ rsb(input, input, Operand(0), SetCC, mi);
|
| + // Deoptimize on overflow.
|
| + DeoptimizeIf(vs, instr->environment());
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
|
| - Abort("DoMathAbs unimplemented.");
|
| + // Class for deferred case.
|
| + class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
|
| + public:
|
| + DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
|
| + LUnaryMathOperation* instr)
|
| + : LDeferredCode(codegen), instr_(instr) { }
|
| + virtual void Generate() {
|
| + codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
|
| + }
|
| + private:
|
| + LUnaryMathOperation* instr_;
|
| + };
|
| +
|
| + ASSERT(instr->InputAt(0)->Equals(instr->result()));
|
| + Representation r = instr->hydrogen()->value()->representation();
|
| + if (r.IsDouble()) {
|
| + DwVfpRegister input = ToDoubleRegister(instr->InputAt(0));
|
| + __ vabs(input, input);
|
| + } else if (r.IsInteger32()) {
|
| + EmitIntegerMathAbs(instr);
|
| + } else {
|
| + // Representation is tagged.
|
| + DeferredMathAbsTaggedHeapNumber* deferred =
|
| + new DeferredMathAbsTaggedHeapNumber(this, instr);
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + // Smi check.
|
| + __ JumpIfNotSmi(input, deferred->entry());
|
| + // If smi, handle it directly.
|
| + EmitIntegerMathAbs(instr);
|
| + __ bind(deferred->exit());
|
| + }
|
| }
|
|
|
|
|
| +// Converts a double to a 32-bit integer using the given VFP rounding mode.
|
| +// Clears the Z flag (ne condition) if a VFP exception is raised during the
|
| +// conversion.
|
| +void LCodeGen::EmitVFPTruncate(VFPRoundingMode rounding_mode,
|
| + SwVfpRegister result,
|
| + DwVfpRegister double_input,
|
| + Register scratch1,
|
| + Register scratch2) {
|
| + Register prev_fpscr = scratch1;
|
| + Register scratch = scratch2;
|
| +
|
| + // Set custom FPSCR:
|
| + // - Set rounding mode.
|
| + // - Clear vfp cumulative exception flags.
|
| + // - Make sure Flush-to-zero mode control bit is unset.
|
| + __ vmrs(prev_fpscr);
|
| + __ bic(scratch, prev_fpscr, Operand(kVFPExceptionMask |
|
| + kVFPRoundingModeMask |
|
| + kVFPFlushToZeroMask));
|
| + __ orr(scratch, scratch, Operand(rounding_mode));
|
| + __ vmsr(scratch);
|
| +
|
| + // Convert the argument to an integer.
|
| + __ vcvt_s32_f64(result,
|
| + double_input,
|
| + kFPSCRRounding);
|
| +
|
| + // Retrieve FPSCR.
|
| + __ vmrs(scratch);
|
| + // Restore FPSCR.
|
| + __ vmsr(prev_fpscr);
|
| + // Check for vfp exceptions.
|
| + __ tst(scratch, Operand(kVFPExceptionMask));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
|
| - Abort("DoMathFloor unimplemented.");
|
| + DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
|
| + Register result = ToRegister(instr->result());
|
| + SwVfpRegister single_scratch = double_scratch0().low();
|
| + Register scratch1 = scratch0();
|
| + Register scratch2 = ToRegister(instr->TempAt(0));
|
| +
|
| + EmitVFPTruncate(kRoundToMinusInf,
|
| + single_scratch,
|
| + input,
|
| + scratch1,
|
| + scratch2);
|
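| + // Deoptimize if the conversion raised a VFP exception.
|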
| + DeoptimizeIf(ne, instr->environment());
|
| +
|
| + // Move the result back to a general purpose register.
|
| + __ vmov(result, single_scratch);
|
| +
|
| + // Test for -0: a zero result with a negative input means the true result
|
| + // is -0, which cannot be represented as an integer, so deoptimize.
|
| + Label done;
|
| + __ cmp(result, Operand(0));
|
| + __ b(ne, &done);
|
| + __ vmov(scratch1, input.high());
|
| + __ tst(scratch1, Operand(HeapNumber::kSignMask));
|
| + DeoptimizeIf(ne, instr->environment());
|
| + __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
|
| - Abort("DoMathSqrt unimplemented.");
|
| + DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
|
| + ASSERT(ToDoubleRegister(instr->result()).is(input));
|
| + __ vsqrt(input, input);
|
| }
|
|
|
|
|
| @@ -1718,7 +2859,12 @@
|
|
|
|
|
| void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
|
| - Abort("DoCallKeyed unimplemented.");
|
| + ASSERT(ToRegister(instr->result()).is(r0));
|
| +
|
| + int arity = instr->arity();
|
| + Handle<Code> ic = StubCache::ComputeKeyedCallInitialize(arity, NOT_IN_LOOP);
|
| + CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
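| + // Restore the context register, which the call may have clobbered.
|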
| + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| @@ -1746,7 +2892,13 @@
|
|
|
|
|
| void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
|
| - Abort("DoCallGlobal unimplemented.");
|
| + ASSERT(ToRegister(instr->result()).is(r0));
|
| +
|
| + int arity = instr->arity();
|
| + Handle<Code> ic = StubCache::ComputeCallInitialize(arity, NOT_IN_LOOP);
|
| + __ mov(r2, Operand(instr->name()));
|
| + CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
|
| + __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| }
|
|
|
|
|
| @@ -1758,7 +2910,7 @@
|
|
|
|
|
| void LCodeGen::DoCallNew(LCallNew* instr) {
|
| - ASSERT(ToRegister(instr->input()).is(r1));
|
| + ASSERT(ToRegister(instr->InputAt(0)).is(r1));
|
| ASSERT(ToRegister(instr->result()).is(r0));
|
|
|
| Handle<Code> builtin(Builtins::builtin(Builtins::JSConstructCall));
|
| @@ -1773,7 +2925,34 @@
|
|
|
|
|
| void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
| - Abort("DoStoreNamedField unimplemented.");
|
| + Register object = ToRegister(instr->object());
|
| + Register value = ToRegister(instr->value());
|
| + Register scratch = scratch0();
|
| + int offset = instr->offset();
|
| +
|
| + ASSERT(!object.is(value));
|
| +
|
| + if (!instr->transition().is_null()) {
|
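| + // The store transitions the object to a new map, so install the
|
| + // transition target map first.
|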
| + __ mov(scratch, Operand(instr->transition()));
|
| + __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
|
| + }
|
| +
|
| + // Do the store.
|
| + if (instr->is_in_object()) {
|
| + __ str(value, FieldMemOperand(object, offset));
|
| + if (instr->needs_write_barrier()) {
|
| + // Update the write barrier for the object for in-object properties.
|
| + __ RecordWrite(object, Operand(offset), value, scratch);
|
| + }
|
| + } else {
|
| + __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
|
| + __ str(value, FieldMemOperand(scratch, offset));
|
| + if (instr->needs_write_barrier()) {
|
| + // Update the write barrier for the properties array.
|
| + // object is used as a scratch register.
|
| + __ RecordWrite(scratch, Operand(offset), value, object);
|
| + }
|
| + }
|
| }
|
|
|
|
|
| @@ -1783,19 +2962,42 @@
|
|
|
| // Name is always in r2.
|
| __ mov(r2, Operand(instr->name()));
|
| - Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
|
| + Handle<Code> ic(Builtins::builtin(info_->is_strict()
|
| + ? Builtins::StoreIC_Initialize_Strict
|
| + : Builtins::StoreIC_Initialize));
|
| CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| }
|
|
|
|
|
| void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
|
| - __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
|
| + __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
|
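| + // The unsigned hs comparison also catches negative indices.
|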
| DeoptimizeIf(hs, instr->environment());
|
| }
|
|
|
|
|
| void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
|
| - Abort("DoStoreKeyedFastElement unimplemented.");
|
| + Register value = ToRegister(instr->value());
|
| + Register elements = ToRegister(instr->object());
|
| + Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
|
| + Register scratch = scratch0();
|
| +
|
| + // Do the store.
|
| + if (instr->key()->IsConstantOperand()) {
|
| + ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
|
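| + // A store with a constant key never needs the write barrier, so the
|
| + // barrier code below only has to handle the register-key case.
|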
| + LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
|
| + int offset =
|
| + ToInteger32(const_operand) * kPointerSize + FixedArray::kHeaderSize;
|
| + __ str(value, FieldMemOperand(elements, offset));
|
| + } else {
|
| + __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
|
| + __ str(value, FieldMemOperand(scratch, FixedArray::kHeaderSize));
|
| + }
|
| +
|
| + if (instr->hydrogen()->NeedsWriteBarrier()) {
|
| + // Compute address of modified element and store it into key register.
|
| + __ add(key, scratch, Operand(FixedArray::kHeaderSize));
|
| + __ RecordWrite(elements, key, value);
|
| + }
|
| }
|
|
|
|
|
| @@ -1809,8 +3011,169 @@
|
| }
|
|
|
|
|
| +void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
|
| + class DeferredStringCharCodeAt: public LDeferredCode {
|
| + public:
|
| + DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
|
| + : LDeferredCode(codegen), instr_(instr) { }
|
| + virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
|
| + private:
|
| + LStringCharCodeAt* instr_;
|
| + };
|
| +
|
| + Register scratch = scratch0();
|
| + Register string = ToRegister(instr->string());
|
| + Register index = no_reg;
|
| + int const_index = -1;
|
| + if (instr->index()->IsConstantOperand()) {
|
| + const_index = ToInteger32(LConstantOperand::cast(instr->index()));
|
| + STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
|
| + if (!Smi::IsValid(const_index)) {
|
| + // Guaranteed to be out of bounds because of the assert above, so the
|
| + // bounds check that dominates this instruction must already have
|
| + // deoptimized.
|
| + if (FLAG_debug_code) {
|
| + __ Abort("StringCharCodeAt: out of bounds index.");
|
| + }
|
| + // No code needs to be generated.
|
| + return;
|
| + }
|
| + } else {
|
| + index = ToRegister(instr->index());
|
| + }
|
| + Register result = ToRegister(instr->result());
|
| +
|
| + DeferredStringCharCodeAt* deferred =
|
| + new DeferredStringCharCodeAt(this, instr);
|
| +
|
| + Label flat_string, ascii_string, done;
|
| +
|
| + // Fetch the instance type of the receiver into result register.
|
| + __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
|
| + __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
|
| +
|
| + // We need special handling for non-flat strings.
|
| + STATIC_ASSERT(kSeqStringTag == 0);
|
| + __ tst(result, Operand(kStringRepresentationMask));
|
| + __ b(eq, &flat_string);
|
| +
|
| + // Handle non-flat strings.
|
| + __ tst(result, Operand(kIsConsStringMask));
|
| + __ b(eq, deferred->entry());
|
| +
|
| + // ConsString.
|
| + // Check whether the right hand side is the empty string (i.e. if
|
| + // this is really a flat string in a cons string). If that is not
|
| + // the case we would rather go to the runtime system now to flatten
|
| + // the string.
|
| + __ ldr(scratch, FieldMemOperand(string, ConsString::kSecondOffset));
|
| + __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
|
| + __ cmp(scratch, ip);
|
| + __ b(ne, deferred->entry());
|
| + // Get the first of the two strings and load its instance type.
|
| + __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
|
| + __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
|
| + __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
|
| + // If the first cons component is also non-flat, then go to runtime.
|
| + STATIC_ASSERT(kSeqStringTag == 0);
|
| + __ tst(result, Operand(kStringRepresentationMask));
|
| + __ b(ne, deferred->entry());
|
| +
|
| + // Check for 1-byte or 2-byte string.
|
| + __ bind(&flat_string);
|
| + STATIC_ASSERT(kAsciiStringTag != 0);
|
| + __ tst(result, Operand(kStringEncodingMask));
|
| + __ b(ne, &ascii_string);
|
| +
|
| + // 2-byte string.
|
| + // Load the 2-byte character code into the result register.
|
| + STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
|
| + if (instr->index()->IsConstantOperand()) {
|
| + __ ldrh(result,
|
| + FieldMemOperand(string,
|
| + SeqTwoByteString::kHeaderSize + 2 * const_index));
|
| + } else {
|
| + __ add(scratch,
|
| + string,
|
| + Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
|
| + __ ldrh(result, MemOperand(scratch, index, LSL, 1));
|
| + }
|
| + __ jmp(&done);
|
| +
|
| + // ASCII string.
|
| + // Load the byte into the result register.
|
| + __ bind(&ascii_string);
|
| + if (instr->index()->IsConstantOperand()) {
|
| + __ ldrb(result, FieldMemOperand(string,
|
| + SeqAsciiString::kHeaderSize + const_index));
|
| + } else {
|
| + __ add(scratch,
|
| + string,
|
| + Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
|
| + __ ldrb(result, MemOperand(scratch, index));
|
| + }
|
| + __ bind(&done);
|
| + __ bind(deferred->exit());
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
|
| + Register string = ToRegister(instr->string());
|
| + Register result = ToRegister(instr->result());
|
| + Register scratch = scratch0();
|
| +
|
| + // TODO(3095996): Get rid of this. For now, we need to make the
|
| + // result register contain a valid pointer because it is already
|
| + // contained in the register pointer map.
|
| + __ mov(result, Operand(0));
|
| +
|
| + __ PushSafepointRegisters();
|
| + __ push(string);
|
| + // Push the index as a smi. This is safe because of the checks in
|
| + // DoStringCharCodeAt above.
|
| + if (instr->index()->IsConstantOperand()) {
|
| + int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
|
| + __ mov(scratch, Operand(Smi::FromInt(const_index)));
|
| + __ push(scratch);
|
| + } else {
|
| + Register index = ToRegister(instr->index());
|
| + __ SmiTag(index);
|
| + __ push(index);
|
| + }
|
| + __ CallRuntimeSaveDoubles(Runtime::kStringCharCodeAt);
|
| + RecordSafepointWithRegisters(
|
| + instr->pointer_map(), 2, Safepoint::kNoDeoptimizationIndex);
|
| + if (FLAG_debug_code) {
|
| + __ AbortIfNotSmi(r0);
|
| + }
|
| + __ SmiUntag(r0);
|
| + MemOperand result_stack_slot = masm()->SafepointRegisterSlot(result);
|
| + __ str(r0, result_stack_slot);
|
| + __ PopSafepointRegisters();
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoStringLength(LStringLength* instr) {
|
| + Register string = ToRegister(instr->InputAt(0));
|
| + Register result = ToRegister(instr->result());
|
| + __ ldr(result, FieldMemOperand(string, String::kLengthOffset));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
|
| - Abort("DoInteger32ToDouble unimplemented.");
|
| + LOperand* input = instr->InputAt(0);
|
| + ASSERT(input->IsRegister() || input->IsStackSlot());
|
| + LOperand* output = instr->result();
|
| + ASSERT(output->IsDoubleRegister());
|
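| + // Move the integer via a single precision VFP register and use vcvt
|
| + // to convert it to a double.
|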
| + SwVfpRegister single_scratch = double_scratch0().low();
|
| + if (input->IsStackSlot()) {
|
| + Register scratch = scratch0();
|
| + __ ldr(scratch, ToMemOperand(input));
|
| + __ vmov(single_scratch, scratch);
|
| + } else {
|
| + __ vmov(single_scratch, ToRegister(input));
|
| + }
|
| + __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
|
| }
|
|
|
|
|
| @@ -1824,7 +3187,7 @@
|
| LNumberTagI* instr_;
|
| };
|
|
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
| Register reg = ToRegister(input);
|
|
|
| @@ -1837,7 +3200,7 @@
|
|
|
| void LCodeGen::DoDeferredNumberTagI(LNumberTagI* instr) {
|
| Label slow;
|
| - Register reg = ToRegister(instr->input());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| DoubleRegister dbl_scratch = d0;
|
| SwVfpRegister flt_scratch = s0;
|
|
|
| @@ -1894,11 +3257,11 @@
|
| LNumberTagD* instr_;
|
| };
|
|
|
| - DoubleRegister input_reg = ToDoubleRegister(instr->input());
|
| + DoubleRegister input_reg = ToDoubleRegister(instr->InputAt(0));
|
| Register scratch = scratch0();
|
| Register reg = ToRegister(instr->result());
|
| - Register temp1 = ToRegister(instr->temp1());
|
| - Register temp2 = ToRegister(instr->temp2());
|
| + Register temp1 = ToRegister(instr->TempAt(0));
|
| + Register temp2 = ToRegister(instr->TempAt(1));
|
|
|
| DeferredNumberTagD* deferred = new DeferredNumberTagD(this, instr);
|
| if (FLAG_inline_new) {
|
| @@ -1931,7 +3294,7 @@
|
|
|
|
|
| void LCodeGen::DoSmiTag(LSmiTag* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
| ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
|
| __ SmiTag(ToRegister(input));
|
| @@ -1939,7 +3302,13 @@
|
|
|
|
|
| void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
|
| - Abort("DoSmiUntag unimplemented.");
|
| + LOperand* input = instr->InputAt(0);
|
| + ASSERT(input->IsRegister() && input->Equals(instr->result()));
|
| + if (instr->needs_check()) {
|
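| + // The input may not be a smi: deoptimize if the tag bit is set.
|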
| + __ tst(ToRegister(input), Operand(kSmiTagMask));
|
| + DeoptimizeIf(ne, instr->environment());
|
| + }
|
| + __ SmiUntag(ToRegister(input));
|
| }
|
|
|
|
|
| @@ -2000,11 +3369,11 @@
|
|
|
| void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
|
| Label done;
|
| - Register input_reg = ToRegister(instr->input());
|
| + Register input_reg = ToRegister(instr->InputAt(0));
|
| Register scratch = scratch0();
|
| DoubleRegister dbl_scratch = d0;
|
| SwVfpRegister flt_scratch = s0;
|
| - DoubleRegister dbl_tmp = ToDoubleRegister(instr->temp());
|
| + DoubleRegister dbl_tmp = ToDoubleRegister(instr->TempAt(0));
|
|
|
| // Heap number map check.
|
| __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
|
| @@ -2025,7 +3394,7 @@
|
| __ bind(&heap_number);
|
| __ sub(ip, input_reg, Operand(kHeapObjectTag));
|
| __ vldr(dbl_tmp, ip, HeapNumber::kValueOffset);
|
| - __ vcmp(dbl_tmp, 0.0); // Sets overflow bit if NaN.
|
| + __ vcmp(dbl_tmp, 0.0); // Sets overflow bit in FPSCR flags if NaN.
|
| __ vcvt_s32_f64(flt_scratch, dbl_tmp);
|
| __ vmov(input_reg, flt_scratch); // 32-bit result of conversion.
|
| __ vmrs(pc); // Move vector status bits to normal status bits.
|
| @@ -2046,8 +3415,7 @@
|
| // back to check; note that using non-overlapping s and d regs would be
|
| // slightly faster.
|
| __ vcvt_f64_s32(dbl_scratch, flt_scratch);
|
| - __ vcmp(dbl_scratch, dbl_tmp);
|
| - __ vmrs(pc); // Move vector status bits to normal status bits.
|
| + __ VFPCompareAndSetFlags(dbl_scratch, dbl_tmp);
|
| DeoptimizeIf(ne, instr->environment()); // Not equal or unordered.
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| __ tst(input_reg, Operand(input_reg));
|
| @@ -2062,7 +3430,7 @@
|
|
|
|
|
| void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister());
|
| ASSERT(input->Equals(instr->result()));
|
|
|
| @@ -2082,7 +3450,7 @@
|
|
|
|
|
| void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister());
|
| LOperand* result = instr->result();
|
| ASSERT(result->IsDoubleRegister());
|
| @@ -2095,12 +3463,47 @@
|
|
|
|
|
| void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
|
| - Abort("DoDoubleToI unimplemented.");
|
| + LOperand* input = instr->InputAt(0);
|
| + ASSERT(input->IsDoubleRegister());
|
| + LOperand* result = instr->result();
|
| + ASSERT(result->IsRegister());
|
| +
|
| + DoubleRegister double_input = ToDoubleRegister(input);
|
| + Register result_reg = ToRegister(result);
|
| + SwVfpRegister single_scratch = double_scratch0().low();
|
| + Register scratch1 = scratch0();
|
| + Register scratch2 = ToRegister(instr->TempAt(0));
|
| +
|
| + VFPRoundingMode rounding_mode = instr->truncating() ? kRoundToMinusInf
|
| + : kRoundToNearest;
|
| +
|
| + EmitVFPTruncate(rounding_mode,
|
| + single_scratch,
|
| + double_input,
|
| + scratch1,
|
| + scratch2);
|
| + // Deoptimize if we had a vfp invalid exception.
|
| + DeoptimizeIf(ne, instr->environment());
|
| + // Retrieve the result.
|
| + __ vmov(result_reg, single_scratch);
|
| +
|
| + if (instr->truncating() &&
|
| + instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| + Label done;
|
| + __ cmp(result_reg, Operand(0));
|
| + __ b(ne, &done);
|
| + // Check for -0.
|
| + __ vmov(scratch1, double_input.high());
|
| + __ tst(scratch1, Operand(HeapNumber::kSignMask));
|
| + DeoptimizeIf(ne, instr->environment());
|
| +
|
| + __ bind(&done);
|
| + }
|
| }
|
|
|
|
|
| void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister());
|
| __ tst(ToRegister(input), Operand(kSmiTagMask));
|
| DeoptimizeIf(instr->condition(), instr->environment());
|
| @@ -2108,13 +3511,32 @@
|
|
|
|
|
| void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
|
| - Abort("DoCheckInstanceType unimplemented.");
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + Register scratch = scratch0();
|
| + InstanceType first = instr->hydrogen()->first();
|
| + InstanceType last = instr->hydrogen()->last();
|
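| + // Deoptimize unless the instance type lies in the interval
|
| + // [first, last].
|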
| +
|
| + __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
|
| + __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
|
| + __ cmp(scratch, Operand(first));
|
| +
|
| + // If there is only one type in the interval, check for equality.
|
| + if (first == last) {
|
| + DeoptimizeIf(ne, instr->environment());
|
| + } else {
|
| + DeoptimizeIf(lo, instr->environment());
|
| + // Omit check for the last type.
|
| + if (last != LAST_TYPE) {
|
| + __ cmp(scratch, Operand(last));
|
| + DeoptimizeIf(hi, instr->environment());
|
| + }
|
| + }
|
| }
|
|
|
|
|
| void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
|
| - ASSERT(instr->input()->IsRegister());
|
| - Register reg = ToRegister(instr->input());
|
| + ASSERT(instr->InputAt(0)->IsRegister());
|
| + Register reg = ToRegister(instr->InputAt(0));
|
| __ cmp(reg, Operand(instr->hydrogen()->target()));
|
| DeoptimizeIf(ne, instr->environment());
|
| }
|
| @@ -2122,7 +3544,7 @@
|
|
|
| void LCodeGen::DoCheckMap(LCheckMap* instr) {
|
| Register scratch = scratch0();
|
| - LOperand* input = instr->input();
|
| + LOperand* input = instr->InputAt(0);
|
| ASSERT(input->IsRegister());
|
| Register reg = ToRegister(input);
|
| __ ldr(scratch, FieldMemOperand(reg, HeapObject::kMapOffset));
|
| @@ -2131,28 +3553,28 @@
|
| }
|
|
|
|
|
| -void LCodeGen::LoadPrototype(Register result,
|
| - Handle<JSObject> prototype) {
|
| - if (Heap::InNewSpace(*prototype)) {
|
| +void LCodeGen::LoadHeapObject(Register result,
|
| + Handle<HeapObject> object) {
|
| + if (Heap::InNewSpace(*object)) {
|
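| + // Objects in new space may move during GC, so embed a global property
|
| + // cell and load the object from it instead of embedding the pointer
|
| + // directly in the code.
|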
| Handle<JSGlobalPropertyCell> cell =
|
| - Factory::NewJSGlobalPropertyCell(prototype);
|
| + Factory::NewJSGlobalPropertyCell(object);
|
| __ mov(result, Operand(cell));
|
| + __ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
|
| } else {
|
| - __ mov(result, Operand(prototype));
|
| + __ mov(result, Operand(object));
|
| }
|
| }
|
|
|
|
|
| void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
|
| - Register temp1 = ToRegister(instr->temp1());
|
| - Register temp2 = ToRegister(instr->temp2());
|
| + Register temp1 = ToRegister(instr->TempAt(0));
|
| + Register temp2 = ToRegister(instr->TempAt(1));
|
|
|
| Handle<JSObject> holder = instr->holder();
|
| - Handle<Map> receiver_map = instr->receiver_map();
|
| - Handle<JSObject> current_prototype(JSObject::cast(receiver_map->prototype()));
|
| + Handle<JSObject> current_prototype = instr->prototype();
|
|
|
| // Load prototype object.
|
| - LoadPrototype(temp1, current_prototype);
|
| + LoadHeapObject(temp1, current_prototype);
|
|
|
| // Check prototype maps up to the holder.
|
| while (!current_prototype.is_identical_to(holder)) {
|
| @@ -2162,7 +3584,7 @@
|
| current_prototype =
|
| Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
|
| // Load next prototype object.
|
| - LoadPrototype(temp1, current_prototype);
|
| + LoadHeapObject(temp1, current_prototype);
|
| }
|
|
|
| // Check the holder map.
|
| @@ -2218,27 +3640,113 @@
|
|
|
|
|
| void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
|
| - Abort("DoRegExpLiteral unimplemented.");
|
| + Label materialized;
|
| + // Registers will be used as follows:
|
| + // r3 = JS function.
|
| + // r7 = literals array.
|
| + // r1 = regexp literal.
|
| + // r0 = regexp literal clone.
|
| + // r2 and r4-r6 are used as temporaries.
|
| + __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
| + __ ldr(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
|
| + int literal_offset = FixedArray::kHeaderSize +
|
| + instr->hydrogen()->literal_index() * kPointerSize;
|
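| + // Load the cached literal; undefined means it has not been
|
| + // materialized yet.
|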
| + __ ldr(r1, FieldMemOperand(r7, literal_offset));
|
| + __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
|
| + __ cmp(r1, ip);
|
| + __ b(ne, &materialized);
|
| +
|
| + // Create the regexp literal using the runtime function.
|
| + // The result will be in r0.
|
| + __ mov(r6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
|
| + __ mov(r5, Operand(instr->hydrogen()->pattern()));
|
| + __ mov(r4, Operand(instr->hydrogen()->flags()));
|
| + __ Push(r7, r6, r5, r4);
|
| + CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
|
| + __ mov(r1, r0);
|
| +
|
| + __ bind(&materialized);
|
| + int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
|
| + Label allocated, runtime_allocate;
|
| +
|
| + __ AllocateInNewSpace(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
|
| + __ jmp(&allocated);
|
| +
|
| + __ bind(&runtime_allocate);
|
| + __ mov(r0, Operand(Smi::FromInt(size)));
|
| + __ Push(r1, r0);
|
| + CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
|
| + __ pop(r1);
|
| +
|
| + __ bind(&allocated);
|
| + // Copy the content into the newly allocated memory.
|
| + // (Unroll the copy loop once for better throughput.)
|
| + for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
|
| + __ ldr(r3, FieldMemOperand(r1, i));
|
| + __ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
|
| + __ str(r3, FieldMemOperand(r0, i));
|
| + __ str(r2, FieldMemOperand(r0, i + kPointerSize));
|
| + }
|
| + if ((size % (2 * kPointerSize)) != 0) {
|
| + __ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
|
| + __ str(r3, FieldMemOperand(r0, size - kPointerSize));
|
| + }
|
| }
|
|
|
|
|
| void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
|
| - Abort("DoFunctionLiteral unimplemented.");
|
| + // Use the fast case closure allocation code that allocates in new
|
| + // space for nested functions that don't need literals cloning.
|
| + Handle<SharedFunctionInfo> shared_info = instr->shared_info();
|
| + bool pretenure = instr->hydrogen()->pretenure();
|
| + if (shared_info->num_literals() == 0 && !pretenure) {
|
| + FastNewClosureStub stub;
|
| + __ mov(r1, Operand(shared_info));
|
| + __ push(r1);
|
| + CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| + } else {
|
| + __ mov(r2, Operand(shared_info));
|
| + __ mov(r1, Operand(pretenure
|
| + ? Factory::true_value()
|
| + : Factory::false_value()));
|
| + __ Push(cp, r2, r1);
|
| + CallRuntime(Runtime::kNewClosure, 3, instr);
|
| + }
|
| }
|
|
|
|
|
| void LCodeGen::DoTypeof(LTypeof* instr) {
|
| - Abort("DoTypeof unimplemented.");
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + __ push(input);
|
| + CallRuntime(Runtime::kTypeof, 1, instr);
|
| }
|
|
|
|
|
| void LCodeGen::DoTypeofIs(LTypeofIs* instr) {
|
| - Abort("DoTypeofIs unimplemented.");
|
| + Register input = ToRegister(instr->InputAt(0));
|
| + Register result = ToRegister(instr->result());
|
| + Label true_label;
|
| + Label false_label;
|
| + Label done;
|
| +
|
| + Condition final_branch_condition = EmitTypeofIs(&true_label,
|
| + &false_label,
|
| + input,
|
| + instr->type_literal());
|
| + __ b(final_branch_condition, &true_label);
|
| + __ bind(&false_label);
|
| + __ LoadRoot(result, Heap::kFalseValueRootIndex);
|
| + __ b(&done);
|
| +
|
| + __ bind(&true_label);
|
| + __ LoadRoot(result, Heap::kTrueValueRootIndex);
|
| +
|
| + __ bind(&done);
|
| }
|
|
|
|
|
| void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
|
| - Register input = ToRegister(instr->input());
|
| + Register input = ToRegister(instr->InputAt(0));
|
| int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| Label* true_label = chunk_->GetAssemblyLabel(true_block);
|
| @@ -2257,7 +3765,7 @@
|
| Label* false_label,
|
| Register input,
|
| Handle<String> type_name) {
|
| - Condition final_branch_condition = no_condition;
|
| + Condition final_branch_condition = kNoCondition;
|
| Register scratch = scratch0();
|
| if (type_name->Equals(Heap::number_symbol())) {
|
| __ tst(input, Operand(kSmiTagMask));
|
| @@ -2335,6 +3843,55 @@
|
| }
|
|
|
|
|
| +void LCodeGen::DoIsConstructCall(LIsConstructCall* instr) {
|
| + Register result = ToRegister(instr->result());
|
| + Label true_label;
|
| + Label false_label;
|
| + Label done;
|
| +
|
| + EmitIsConstructCall(result, scratch0());
|
| + __ b(eq, &true_label);
|
| +
|
| + __ LoadRoot(result, Heap::kFalseValueRootIndex);
|
| + __ b(&done);
|
| +
|
| + __ bind(&true_label);
|
| + __ LoadRoot(result, Heap::kTrueValueRootIndex);
|
| +
|
| + __ bind(&done);
|
| +}
|
| +
|
| +
|
| +void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
|
| + Register temp1 = ToRegister(instr->TempAt(0));
|
| + int true_block = chunk_->LookupDestination(instr->true_block_id());
|
| + int false_block = chunk_->LookupDestination(instr->false_block_id());
|
| +
|
| + EmitIsConstructCall(temp1, scratch0());
|
| + EmitBranch(true_block, false_block, eq);
|
| +}
|
| +
|
| +
|
| +void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
|
| + ASSERT(!temp1.is(temp2));
|
| + // Get the frame pointer for the calling frame.
|
| + __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| +
|
| + // Skip the arguments adaptor frame if it exists.
|
| + Label check_frame_marker;
|
| + __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
|
| + __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
|
| + __ b(ne, &check_frame_marker);
|
| + __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
|
| +
|
| + // Check the marker in the calling frame.
|
| + __ bind(&check_frame_marker);
|
| + __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
|
| + __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
|
| // No code for lazy bailout instruction. Used to capture environment after a
|
| // call for populating the safepoint data with deoptimization data.
|
| @@ -2342,12 +3899,25 @@
|
|
|
|
|
| void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
|
| - DeoptimizeIf(no_condition, instr->environment());
|
| + DeoptimizeIf(al, instr->environment());
|
| }
|
|
|
|
|
| void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
|
| - Abort("DoDeleteProperty unimplemented.");
|
| + Register object = ToRegister(instr->object());
|
| + Register key = ToRegister(instr->key());
|
| + Register strict = scratch0();
|
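| + // The strict mode flag is passed to the DELETE builtin as an extra
|
| + // smi argument.
|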
| + __ mov(strict, Operand(Smi::FromInt(strict_mode_flag())));
|
| + __ Push(object, key, strict);
|
| + ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
|
| + LPointerMap* pointers = instr->pointer_map();
|
| + LEnvironment* env = instr->deoptimization_environment();
|
| + RecordPosition(pointers->position());
|
| + RegisterEnvironmentForDeoptimization(env);
|
| + SafepointGenerator safepoint_generator(this,
|
| + pointers,
|
| + env->deoptimization_index());
|
| + __ InvokeBuiltin(Builtins::DELETE, CALL_JS, &safepoint_generator);
|
| }
|
|
|
|
|
| @@ -2364,7 +3934,19 @@
|
|
|
|
|
| void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
|
| - Abort("DoOsrEntry unimplemented.");
|
| + // This is a pseudo-instruction that ensures that the environment here is
|
| + // properly registered for deoptimization and records the assembler's PC
|
| + // offset.
|
| + LEnvironment* environment = instr->environment();
|
| + environment->SetSpilledRegisters(instr->SpilledRegisterArray(),
|
| + instr->SpilledDoubleRegisterArray());
|
| +
|
| + // If the environment were already registered, we would have no way of
|
| + // backpatching it with the spill slot operands.
|
| + ASSERT(!environment->HasBeenRegistered());
|
| + RegisterEnvironmentForDeoptimization(environment);
|
| + ASSERT(osr_pc_offset_ == -1);
|
| + osr_pc_offset_ = masm()->pc_offset();
|
| }
|
|
|
|
|
|
|