| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 324 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 335 if (op->IsStackSlot()) { | 335 if (op->IsStackSlot()) { |
| 336 if (is_tagged) { | 336 if (is_tagged) { |
| 337 translation->StoreStackSlot(op->index()); | 337 translation->StoreStackSlot(op->index()); |
| 338 } else if (is_uint32) { | 338 } else if (is_uint32) { |
| 339 translation->StoreUint32StackSlot(op->index()); | 339 translation->StoreUint32StackSlot(op->index()); |
| 340 } else { | 340 } else { |
| 341 translation->StoreInt32StackSlot(op->index()); | 341 translation->StoreInt32StackSlot(op->index()); |
| 342 } | 342 } |
| 343 } else if (op->IsDoubleStackSlot()) { | 343 } else if (op->IsDoubleStackSlot()) { |
| 344 translation->StoreDoubleStackSlot(op->index()); | 344 translation->StoreDoubleStackSlot(op->index()); |
| 345 } else if (op->IsArgument()) { | |
| 346 ASSERT(is_tagged); | |
| 347 int src_index = GetStackSlotCount() + op->index(); | |
| 348 translation->StoreStackSlot(src_index); | |
| 349 } else if (op->IsRegister()) { | 345 } else if (op->IsRegister()) { |
| 350 Register reg = ToRegister(op); | 346 Register reg = ToRegister(op); |
| 351 if (is_tagged) { | 347 if (is_tagged) { |
| 352 translation->StoreRegister(reg); | 348 translation->StoreRegister(reg); |
| 353 } else if (is_uint32) { | 349 } else if (is_uint32) { |
| 354 translation->StoreUint32Register(reg); | 350 translation->StoreUint32Register(reg); |
| 355 } else { | 351 } else { |
| 356 translation->StoreInt32Register(reg); | 352 translation->StoreInt32Register(reg); |
| 357 } | 353 } |
| 358 } else if (op->IsDoubleRegister()) { | 354 } else if (op->IsDoubleRegister()) { |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 407 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); | 403 CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); |
| 408 } | 404 } |
| 409 | 405 |
| 410 | 406 |
| 411 void LCodeGen::CallCodeGeneric(Handle<Code> code, | 407 void LCodeGen::CallCodeGeneric(Handle<Code> code, |
| 412 RelocInfo::Mode mode, | 408 RelocInfo::Mode mode, |
| 413 LInstruction* instr, | 409 LInstruction* instr, |
| 414 SafepointMode safepoint_mode) { | 410 SafepointMode safepoint_mode) { |
| 415 ASSERT(instr != NULL); | 411 ASSERT(instr != NULL); |
| 416 | 412 |
| 417 Assembler::BlockConstPoolScope scope(masm_); | 413 Assembler::BlockPoolsScope scope(masm_); |
| 418 __ Call(code, mode); | 414 __ Call(code, mode); |
| 419 RecordSafepointWithLazyDeopt(instr, safepoint_mode); | 415 RecordSafepointWithLazyDeopt(instr, safepoint_mode); |
| 420 | 416 |
| 421 if ((code->kind() == Code::BINARY_OP_IC) || | 417 if ((code->kind() == Code::BINARY_OP_IC) || |
| 422 (code->kind() == Code::COMPARE_IC)) { | 418 (code->kind() == Code::COMPARE_IC)) { |
| 423 // Signal that we don't inline smi code before these stubs in the | 419 // Signal that we don't inline smi code before these stubs in the |
| 424 // optimizing code generator. | 420 // optimizing code generator. |
| 425 InlineSmiCheckInfo::EmitNotInlined(masm()); | 421 InlineSmiCheckInfo::EmitNotInlined(masm()); |
| 426 } | 422 } |
| 427 } | 423 } |
| (...skipping 10 matching lines...) Expand all Loading... |
| 438 } | 434 } |
| 439 | 435 |
| 440 | 436 |
| 441 void LCodeGen::DoCallNew(LCallNew* instr) { | 437 void LCodeGen::DoCallNew(LCallNew* instr) { |
| 442 ASSERT(ToRegister(instr->context()).is(cp)); | 438 ASSERT(ToRegister(instr->context()).is(cp)); |
| 443 ASSERT(instr->IsMarkedAsCall()); | 439 ASSERT(instr->IsMarkedAsCall()); |
| 444 ASSERT(ToRegister(instr->constructor()).is(x1)); | 440 ASSERT(ToRegister(instr->constructor()).is(x1)); |
| 445 | 441 |
| 446 __ Mov(x0, instr->arity()); | 442 __ Mov(x0, instr->arity()); |
| 447 // No cell in x2 for construct type feedback in optimized code. | 443 // No cell in x2 for construct type feedback in optimized code. |
| 448 Handle<Object> undefined_value(isolate()->factory()->undefined_value()); | 444 Handle<Object> megamorphic_symbol = |
| 449 __ Mov(x2, Operand(undefined_value)); | 445 TypeFeedbackInfo::MegamorphicSentinel(isolate()); |
| 446 __ Mov(x2, Operand(megamorphic_symbol)); |
| 450 | 447 |
| 451 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); | 448 CallConstructStub stub(NO_CALL_FUNCTION_FLAGS); |
| 452 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); | 449 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| 453 | 450 |
| 454 ASSERT(ToRegister(instr->result()).is(x0)); | 451 ASSERT(ToRegister(instr->result()).is(x0)); |
| 455 } | 452 } |
| 456 | 453 |
| 457 | 454 |
| 458 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { | 455 void LCodeGen::DoCallNewArray(LCallNewArray* instr) { |
| 459 ASSERT(instr->IsMarkedAsCall()); | 456 ASSERT(instr->IsMarkedAsCall()); |
| 460 ASSERT(ToRegister(instr->context()).is(cp)); | 457 ASSERT(ToRegister(instr->context()).is(cp)); |
| 461 ASSERT(ToRegister(instr->constructor()).is(x1)); | 458 ASSERT(ToRegister(instr->constructor()).is(x1)); |
| 462 | 459 |
| 463 __ Mov(x0, Operand(instr->arity())); | 460 __ Mov(x0, Operand(instr->arity())); |
| 464 __ Mov(x2, Operand(factory()->undefined_value())); | 461 __ Mov(x2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate()))); |
| 465 | 462 |
| 466 ElementsKind kind = instr->hydrogen()->elements_kind(); | 463 ElementsKind kind = instr->hydrogen()->elements_kind(); |
| 467 AllocationSiteOverrideMode override_mode = | 464 AllocationSiteOverrideMode override_mode = |
| 468 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) | 465 (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) |
| 469 ? DISABLE_ALLOCATION_SITES | 466 ? DISABLE_ALLOCATION_SITES |
| 470 : DONT_OVERRIDE; | 467 : DONT_OVERRIDE; |
| 471 | 468 |
| 472 if (instr->arity() == 0) { | 469 if (instr->arity() == 0) { |
| 473 ArrayNoArgumentConstructorStub stub(kind, override_mode); | 470 ArrayNoArgumentConstructorStub stub(kind, override_mode); |
| 474 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); | 471 CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| (...skipping 189 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 664 | 661 |
| 665 | 662 |
| 666 bool LCodeGen::GeneratePrologue() { | 663 bool LCodeGen::GeneratePrologue() { |
| 667 ASSERT(is_generating()); | 664 ASSERT(is_generating()); |
| 668 | 665 |
| 669 if (info()->IsOptimizing()) { | 666 if (info()->IsOptimizing()) { |
| 670 ProfileEntryHookStub::MaybeCallEntryHook(masm_); | 667 ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
| 671 | 668 |
| 672 // TODO(all): Add support for stop_t FLAG in DEBUG mode. | 669 // TODO(all): Add support for stop_t FLAG in DEBUG mode. |
| 673 | 670 |
| 674 // Classic mode functions and builtins need to replace the receiver with the | 671 // Sloppy mode functions and builtins need to replace the receiver with the |
| 675 // global proxy when called as functions (without an explicit receiver | 672 // global proxy when called as functions (without an explicit receiver |
| 676 // object). | 673 // object). |
| 677 if (info_->this_has_uses() && | 674 if (info_->this_has_uses() && |
| 678 info_->is_classic_mode() && | 675 info_->strict_mode() == SLOPPY && |
| 679 !info_->is_native()) { | 676 !info_->is_native()) { |
| 680 Label ok; | 677 Label ok; |
| 681 int receiver_offset = info_->scope()->num_parameters() * kXRegSizeInBytes; | 678 int receiver_offset = info_->scope()->num_parameters() * kXRegSize; |
| 682 __ Peek(x10, receiver_offset); | 679 __ Peek(x10, receiver_offset); |
| 683 __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok); | 680 __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok); |
| 684 | 681 |
| 685 __ Ldr(x10, GlobalObjectMemOperand()); | 682 __ Ldr(x10, GlobalObjectMemOperand()); |
| 686 __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset)); | 683 __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset)); |
| 687 __ Poke(x10, receiver_offset); | 684 __ Poke(x10, receiver_offset); |
| 688 | 685 |
| 689 __ Bind(&ok); | 686 __ Bind(&ok); |
| 690 } | 687 } |
| 691 } | 688 } |
| (...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 767 osr_pc_offset_ = masm()->pc_offset(); | 764 osr_pc_offset_ = masm()->pc_offset(); |
| 768 | 765 |
| 769 // Adjust the frame size, subsuming the unoptimized frame into the | 766 // Adjust the frame size, subsuming the unoptimized frame into the |
| 770 // optimized frame. | 767 // optimized frame. |
| 771 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); | 768 int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); |
| 772 ASSERT(slots >= 0); | 769 ASSERT(slots >= 0); |
| 773 __ Claim(slots); | 770 __ Claim(slots); |
| 774 } | 771 } |
| 775 | 772 |
| 776 | 773 |
| 774 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) { |
| 775 if (!instr->IsLazyBailout() && !instr->IsGap()) { |
| 776 safepoints_.BumpLastLazySafepointIndex(); |
| 777 } |
| 778 } |
| 779 |
| 780 |
| 777 bool LCodeGen::GenerateDeferredCode() { | 781 bool LCodeGen::GenerateDeferredCode() { |
| 778 ASSERT(is_generating()); | 782 ASSERT(is_generating()); |
| 779 if (deferred_.length() > 0) { | 783 if (deferred_.length() > 0) { |
| 780 for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) { | 784 for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) { |
| 781 LDeferredCode* code = deferred_[i]; | 785 LDeferredCode* code = deferred_[i]; |
| 782 | 786 |
| 783 HValue* value = | 787 HValue* value = |
| 784 instructions_->at(code->instruction_index())->hydrogen_value(); | 788 instructions_->at(code->instruction_index())->hydrogen_value(); |
| 785 RecordAndWritePosition( | 789 RecordAndWritePosition( |
| 786 chunk()->graph()->SourcePositionToScriptPosition(value->position())); | 790 chunk()->graph()->SourcePositionToScriptPosition(value->position())); |
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 830 | 834 |
| 831 | 835 |
| 832 bool LCodeGen::GenerateDeoptJumpTable() { | 836 bool LCodeGen::GenerateDeoptJumpTable() { |
| 833 if (deopt_jump_table_.length() > 0) { | 837 if (deopt_jump_table_.length() > 0) { |
| 834 Comment(";;; -------------------- Jump table --------------------"); | 838 Comment(";;; -------------------- Jump table --------------------"); |
| 835 } | 839 } |
| 836 Label table_start; | 840 Label table_start; |
| 837 __ bind(&table_start); | 841 __ bind(&table_start); |
| 838 Label needs_frame; | 842 Label needs_frame; |
| 839 for (int i = 0; i < deopt_jump_table_.length(); i++) { | 843 for (int i = 0; i < deopt_jump_table_.length(); i++) { |
| 840 __ Bind(&deopt_jump_table_[i].label); | 844 __ Bind(&deopt_jump_table_[i]->label); |
| 841 Address entry = deopt_jump_table_[i].address; | 845 Address entry = deopt_jump_table_[i]->address; |
| 842 Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; | 846 Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type; |
| 843 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); | 847 int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type); |
| 844 if (id == Deoptimizer::kNotDeoptimizationEntry) { | 848 if (id == Deoptimizer::kNotDeoptimizationEntry) { |
| 845 Comment(";;; jump table entry %d.", i); | 849 Comment(";;; jump table entry %d.", i); |
| 846 } else { | 850 } else { |
| 847 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); | 851 Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id); |
| 848 } | 852 } |
| 849 if (deopt_jump_table_[i].needs_frame) { | 853 if (deopt_jump_table_[i]->needs_frame) { |
| 850 ASSERT(!info()->saves_caller_doubles()); | 854 ASSERT(!info()->saves_caller_doubles()); |
| 851 __ Mov(__ Tmp0(), Operand(ExternalReference::ForDeoptEntry(entry))); | 855 |
| 856 UseScratchRegisterScope temps(masm()); |
| 857 Register stub_deopt_entry = temps.AcquireX(); |
| 858 Register stub_marker = temps.AcquireX(); |
| 859 |
| 860 __ Mov(stub_deopt_entry, |
| 861 Operand(ExternalReference::ForDeoptEntry(entry))); |
| 852 if (needs_frame.is_bound()) { | 862 if (needs_frame.is_bound()) { |
| 853 __ B(&needs_frame); | 863 __ B(&needs_frame); |
| 854 } else { | 864 } else { |
| 855 __ Bind(&needs_frame); | 865 __ Bind(&needs_frame); |
| 856 // This variant of deopt can only be used with stubs. Since we don't | 866 // This variant of deopt can only be used with stubs. Since we don't |
| 857 // have a function pointer to install in the stack frame that we're | 867 // have a function pointer to install in the stack frame that we're |
| 858 // building, install a special marker there instead. | 868 // building, install a special marker there instead. |
| 859 // TODO(jochen): Revisit the use of TmpX(). | |
| 860 ASSERT(info()->IsStub()); | 869 ASSERT(info()->IsStub()); |
| 861 __ Mov(__ Tmp1(), Operand(Smi::FromInt(StackFrame::STUB))); | 870 __ Mov(stub_marker, Operand(Smi::FromInt(StackFrame::STUB))); |
| 862 __ Push(lr, fp, cp, __ Tmp1()); | 871 __ Push(lr, fp, cp, stub_marker); |
| 863 __ Add(fp, __ StackPointer(), 2 * kPointerSize); | 872 __ Add(fp, __ StackPointer(), 2 * kPointerSize); |
| 864 __ Call(__ Tmp0()); | 873 __ Call(stub_deopt_entry); |
| 865 } | 874 } |
| 866 } else { | 875 } else { |
| 867 if (info()->saves_caller_doubles()) { | 876 if (info()->saves_caller_doubles()) { |
| 868 ASSERT(info()->IsStub()); | 877 ASSERT(info()->IsStub()); |
| 869 RestoreCallerDoubles(); | 878 RestoreCallerDoubles(); |
| 870 } | 879 } |
| 871 __ Call(entry, RelocInfo::RUNTIME_ENTRY); | 880 __ Call(entry, RelocInfo::RUNTIME_ENTRY); |
| 872 } | 881 } |
| 873 masm()->CheckConstPool(false, false); | 882 masm()->CheckConstPool(false, false); |
| 874 } | 883 } |
| (...skipping 119 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 994 ExternalReference count = ExternalReference::stress_deopt_count(isolate()); | 1003 ExternalReference count = ExternalReference::stress_deopt_count(isolate()); |
| 995 | 1004 |
| 996 __ Push(x0, x1, x2); | 1005 __ Push(x0, x1, x2); |
| 997 __ Mrs(x2, NZCV); | 1006 __ Mrs(x2, NZCV); |
| 998 __ Mov(x0, Operand(count)); | 1007 __ Mov(x0, Operand(count)); |
| 999 __ Ldr(w1, MemOperand(x0)); | 1008 __ Ldr(w1, MemOperand(x0)); |
| 1000 __ Subs(x1, x1, 1); | 1009 __ Subs(x1, x1, 1); |
| 1001 __ B(gt, ¬_zero); | 1010 __ B(gt, ¬_zero); |
| 1002 __ Mov(w1, FLAG_deopt_every_n_times); | 1011 __ Mov(w1, FLAG_deopt_every_n_times); |
| 1003 __ Str(w1, MemOperand(x0)); | 1012 __ Str(w1, MemOperand(x0)); |
| 1004 __ Pop(x0, x1, x2); | 1013 __ Pop(x2, x1, x0); |
| 1005 ASSERT(frame_is_built_); | 1014 ASSERT(frame_is_built_); |
| 1006 __ Call(entry, RelocInfo::RUNTIME_ENTRY); | 1015 __ Call(entry, RelocInfo::RUNTIME_ENTRY); |
| 1007 __ Unreachable(); | 1016 __ Unreachable(); |
| 1008 | 1017 |
| 1009 __ Bind(¬_zero); | 1018 __ Bind(¬_zero); |
| 1010 __ Str(w1, MemOperand(x0)); | 1019 __ Str(w1, MemOperand(x0)); |
| 1011 __ Msr(NZCV, x2); | 1020 __ Msr(NZCV, x2); |
| 1012 __ Pop(x0, x1, x2); | 1021 __ Pop(x2, x1, x0); |
| 1013 } | 1022 } |
| 1014 | 1023 |
| 1015 if (info()->ShouldTrapOnDeopt()) { | 1024 if (info()->ShouldTrapOnDeopt()) { |
| 1016 Label dont_trap; | 1025 Label dont_trap; |
| 1017 __ B(&dont_trap, InvertBranchType(branch_type), reg, bit); | 1026 __ B(&dont_trap, InvertBranchType(branch_type), reg, bit); |
| 1018 __ Debug("trap_on_deopt", __LINE__, BREAK); | 1027 __ Debug("trap_on_deopt", __LINE__, BREAK); |
| 1019 __ Bind(&dont_trap); | 1028 __ Bind(&dont_trap); |
| 1020 } | 1029 } |
| 1021 | 1030 |
| 1022 ASSERT(info()->IsStub() || frame_is_built_); | 1031 ASSERT(info()->IsStub() || frame_is_built_); |
| 1023 // Go through jump table if we need to build frame, or restore caller doubles. | 1032 // Go through jump table if we need to build frame, or restore caller doubles. |
| 1024 if (frame_is_built_ && !info()->saves_caller_doubles()) { | 1033 if (branch_type == always && |
| 1025 Label dont_deopt; | 1034 frame_is_built_ && !info()->saves_caller_doubles()) { |
| 1026 __ B(&dont_deopt, InvertBranchType(branch_type), reg, bit); | |
| 1027 __ Call(entry, RelocInfo::RUNTIME_ENTRY); | 1035 __ Call(entry, RelocInfo::RUNTIME_ENTRY); |
| 1028 __ Bind(&dont_deopt); | |
| 1029 } else { | 1036 } else { |
| 1030 // We often have several deopts to the same entry, reuse the last | 1037 // We often have several deopts to the same entry, reuse the last |
| 1031 // jump entry if this is the case. | 1038 // jump entry if this is the case. |
| 1032 if (deopt_jump_table_.is_empty() || | 1039 if (deopt_jump_table_.is_empty() || |
| 1033 (deopt_jump_table_.last().address != entry) || | 1040 (deopt_jump_table_.last()->address != entry) || |
| 1034 (deopt_jump_table_.last().bailout_type != bailout_type) || | 1041 (deopt_jump_table_.last()->bailout_type != bailout_type) || |
| 1035 (deopt_jump_table_.last().needs_frame != !frame_is_built_)) { | 1042 (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) { |
| 1036 Deoptimizer::JumpTableEntry table_entry(entry, | 1043 Deoptimizer::JumpTableEntry* table_entry = |
| 1037 bailout_type, | 1044 new(zone()) Deoptimizer::JumpTableEntry(entry, |
| 1038 !frame_is_built_); | 1045 bailout_type, |
| 1046 !frame_is_built_); |
| 1039 deopt_jump_table_.Add(table_entry, zone()); | 1047 deopt_jump_table_.Add(table_entry, zone()); |
| 1040 } | 1048 } |
| 1041 __ B(&deopt_jump_table_.last().label, | 1049 __ B(&deopt_jump_table_.last()->label, |
| 1042 branch_type, reg, bit); | 1050 branch_type, reg, bit); |
| 1043 } | 1051 } |
| 1044 } | 1052 } |
| 1045 | 1053 |
| 1046 | 1054 |
| 1047 void LCodeGen::Deoptimize(LEnvironment* environment, | 1055 void LCodeGen::Deoptimize(LEnvironment* environment, |
| 1048 Deoptimizer::BailoutType* override_bailout_type) { | 1056 Deoptimizer::BailoutType* override_bailout_type) { |
| 1049 DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type); | 1057 DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type); |
| 1050 } | 1058 } |
| 1051 | 1059 |
| 1052 | 1060 |
| 1053 void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) { | 1061 void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) { |
| 1054 DeoptimizeBranch(environment, static_cast<BranchType>(cond)); | 1062 DeoptimizeBranch(environment, static_cast<BranchType>(cond)); |
| 1055 } | 1063 } |
| 1056 | 1064 |
| 1057 | 1065 |
| 1058 void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) { | 1066 void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) { |
| 1059 DeoptimizeBranch(environment, reg_zero, rt); | 1067 DeoptimizeBranch(environment, reg_zero, rt); |
| 1060 } | 1068 } |
| 1061 | 1069 |
| 1062 | 1070 |
| 1071 void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) { |
| 1072 DeoptimizeBranch(environment, reg_not_zero, rt); |
| 1073 } |
| 1074 |
| 1075 |
| 1063 void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) { | 1076 void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) { |
| 1064 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit; | 1077 int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit; |
| 1065 DeoptimizeBranch(environment, reg_bit_set, rt, sign_bit); | 1078 DeoptimizeBranch(environment, reg_bit_set, rt, sign_bit); |
| 1066 } | 1079 } |
| 1067 | 1080 |
| 1068 | 1081 |
| 1069 void LCodeGen::DeoptimizeIfSmi(Register rt, | 1082 void LCodeGen::DeoptimizeIfSmi(Register rt, |
| 1070 LEnvironment* environment) { | 1083 LEnvironment* environment) { |
| 1071 DeoptimizeBranch(environment, reg_bit_clear, rt, MaskToBit(kSmiTagMask)); | 1084 DeoptimizeBranch(environment, reg_bit_clear, rt, MaskToBit(kSmiTagMask)); |
| 1072 } | 1085 } |
| (...skipping 427 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1500 } | 1513 } |
| 1501 } else { | 1514 } else { |
| 1502 Register size = ToRegister32(instr->size()); | 1515 Register size = ToRegister32(instr->size()); |
| 1503 __ Sxtw(size.X(), size); | 1516 __ Sxtw(size.X(), size); |
| 1504 __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags); | 1517 __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags); |
| 1505 } | 1518 } |
| 1506 | 1519 |
| 1507 __ Bind(deferred->exit()); | 1520 __ Bind(deferred->exit()); |
| 1508 | 1521 |
| 1509 if (instr->hydrogen()->MustPrefillWithFiller()) { | 1522 if (instr->hydrogen()->MustPrefillWithFiller()) { |
| 1523 Register filler_count = temp1; |
| 1524 Register filler = temp2; |
| 1525 Register untagged_result = ToRegister(instr->temp3()); |
| 1526 |
| 1510 if (instr->size()->IsConstantOperand()) { | 1527 if (instr->size()->IsConstantOperand()) { |
| 1511 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); | 1528 int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); |
| 1512 __ Mov(temp1, size - kPointerSize); | 1529 __ Mov(filler_count, size / kPointerSize); |
| 1513 } else { | 1530 } else { |
| 1514 __ Sub(temp1.W(), ToRegister32(instr->size()), kPointerSize); | 1531 __ Lsr(filler_count.W(), ToRegister32(instr->size()), kPointerSizeLog2); |
| 1515 } | 1532 } |
| 1516 __ Sub(result, result, kHeapObjectTag); | |
| 1517 | 1533 |
| 1518 // TODO(jbramley): Optimize this loop using stp. | 1534 __ Sub(untagged_result, result, kHeapObjectTag); |
| 1519 Label loop; | 1535 __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map())); |
| 1520 __ Bind(&loop); | 1536 __ FillFields(untagged_result, filler_count, filler); |
| 1521 __ Mov(temp2, Operand(isolate()->factory()->one_pointer_filler_map())); | 1537 } else { |
| 1522 __ Str(temp2, MemOperand(result, temp1)); | 1538 ASSERT(instr->temp3() == NULL); |
| 1523 __ Subs(temp1, temp1, kPointerSize); | |
| 1524 __ B(ge, &loop); | |
| 1525 | |
| 1526 __ Add(result, result, kHeapObjectTag); | |
| 1527 } | 1539 } |
| 1528 } | 1540 } |
| 1529 | 1541 |
| 1530 | 1542 |
| 1531 void LCodeGen::DoDeferredAllocate(LAllocate* instr) { | 1543 void LCodeGen::DoDeferredAllocate(LAllocate* instr) { |
| 1532 // TODO(3095996): Get rid of this. For now, we need to make the | 1544 // TODO(3095996): Get rid of this. For now, we need to make the |
| 1533 // result register contain a valid pointer because it is already | 1545 // result register contain a valid pointer because it is already |
| 1534 // contained in the register pointer map. | 1546 // contained in the register pointer map. |
| 1535 __ Mov(ToRegister(instr->result()), Operand(Smi::FromInt(0))); | 1547 __ Mov(ToRegister(instr->result()), Operand(Smi::FromInt(0))); |
| 1536 | 1548 |
| (...skipping 687 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2224 __ Bind(&is_heap_number); | 2236 __ Bind(&is_heap_number); |
| 2225 DoubleRegister dbl_scratch = double_scratch(); | 2237 DoubleRegister dbl_scratch = double_scratch(); |
| 2226 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2()); | 2238 DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2()); |
| 2227 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); | 2239 __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset)); |
| 2228 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); | 2240 __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2); |
| 2229 | 2241 |
| 2230 __ Bind(&done); | 2242 __ Bind(&done); |
| 2231 } | 2243 } |
| 2232 | 2244 |
| 2233 | 2245 |
| 2246 void LCodeGen::DoDoubleBits(LDoubleBits* instr) { |
| 2247 DoubleRegister value_reg = ToDoubleRegister(instr->value()); |
| 2248 Register result_reg = ToRegister(instr->result()); |
| 2249 if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { |
| 2250 __ Fmov(result_reg, value_reg); |
| 2251 __ Mov(result_reg, Operand(result_reg, LSR, 32)); |
| 2252 } else { |
| 2253 __ Fmov(result_reg.W(), value_reg.S()); |
| 2254 } |
| 2255 } |
| 2256 |
| 2257 |
| 2258 void LCodeGen::DoConstructDouble(LConstructDouble* instr) { |
| 2259 Register hi_reg = ToRegister(instr->hi()); |
| 2260 Register lo_reg = ToRegister(instr->lo()); |
| 2261 Register temp = ToRegister(instr->temp()); |
| 2262 DoubleRegister result_reg = ToDoubleRegister(instr->result()); |
| 2263 |
| 2264 __ And(temp, lo_reg, Operand(0xffffffff)); |
| 2265 __ Orr(temp, temp, Operand(hi_reg, LSL, 32)); |
| 2266 __ Fmov(result_reg, temp); |
| 2267 } |
| 2268 |
| 2269 |
| 2234 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { | 2270 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { |
| 2235 Handle<String> class_name = instr->hydrogen()->class_name(); | 2271 Handle<String> class_name = instr->hydrogen()->class_name(); |
| 2236 Label* true_label = instr->TrueLabel(chunk_); | 2272 Label* true_label = instr->TrueLabel(chunk_); |
| 2237 Label* false_label = instr->FalseLabel(chunk_); | 2273 Label* false_label = instr->FalseLabel(chunk_); |
| 2238 Register input = ToRegister(instr->value()); | 2274 Register input = ToRegister(instr->value()); |
| 2239 Register scratch1 = ToRegister(instr->temp1()); | 2275 Register scratch1 = ToRegister(instr->temp1()); |
| 2240 Register scratch2 = ToRegister(instr->temp2()); | 2276 Register scratch2 = ToRegister(instr->temp2()); |
| 2241 | 2277 |
| 2242 __ JumpIfSmi(input, false_label); | 2278 __ JumpIfSmi(input, false_label); |
| 2243 | 2279 |
| (...skipping 320 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2564 // the special case below. | 2600 // the special case below. |
| 2565 if (info()->IsStub() && (type == Deoptimizer::EAGER)) { | 2601 if (info()->IsStub() && (type == Deoptimizer::EAGER)) { |
| 2566 type = Deoptimizer::LAZY; | 2602 type = Deoptimizer::LAZY; |
| 2567 } | 2603 } |
| 2568 | 2604 |
| 2569 Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); | 2605 Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); |
| 2570 Deoptimize(instr->environment(), &type); | 2606 Deoptimize(instr->environment(), &type); |
| 2571 } | 2607 } |
| 2572 | 2608 |
| 2573 | 2609 |
| 2610 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 2611 Register dividend = ToRegister32(instr->dividend()); |
| 2612 int32_t divisor = instr->divisor(); |
| 2613 Register result = ToRegister32(instr->result()); |
| 2614 ASSERT(divisor == kMinInt || (divisor != 0 && IsPowerOf2(Abs(divisor)))); |
| 2615 ASSERT(!result.is(dividend)); |
| 2616 |
| 2617 // Check for (0 / -x) that will produce negative zero. |
| 2618 HDiv* hdiv = instr->hydrogen(); |
| 2619 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 2620 __ Cmp(dividend, 0); |
| 2621 DeoptimizeIf(eq, instr->environment()); |
| 2622 } |
| 2623 // Check for (kMinInt / -1). |
| 2624 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
| 2625 __ Cmp(dividend, kMinInt); |
| 2626 DeoptimizeIf(eq, instr->environment()); |
| 2627 } |
| 2628 // Deoptimize if remainder will not be 0. |
| 2629 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
| 2630 divisor != 1 && divisor != -1) { |
| 2631 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 2632 __ Tst(dividend, mask); |
| 2633 DeoptimizeIf(ne, instr->environment()); |
| 2634 } |
| 2635 |
| 2636 if (divisor == -1) { // Nice shortcut, not needed for correctness. |
| 2637 __ Neg(result, dividend); |
| 2638 return; |
| 2639 } |
| 2640 int32_t shift = WhichPowerOf2Abs(divisor); |
| 2641 if (shift == 0) { |
| 2642 __ Mov(result, dividend); |
| 2643 } else if (shift == 1) { |
| 2644 __ Add(result, dividend, Operand(dividend, LSR, 31)); |
| 2645 } else { |
| 2646 __ Mov(result, Operand(dividend, ASR, 31)); |
| 2647 __ Add(result, dividend, Operand(result, LSR, 32 - shift)); |
| 2648 } |
| 2649 if (shift > 0) __ Mov(result, Operand(result, ASR, shift)); |
| 2650 if (divisor < 0) __ Neg(result, result); |
| 2651 } |
| 2652 |
| 2653 |
| 2654 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
| 2655 Register dividend = ToRegister32(instr->dividend()); |
| 2656 int32_t divisor = instr->divisor(); |
| 2657 Register result = ToRegister32(instr->result()); |
| 2658 ASSERT(!AreAliased(dividend, result)); |
| 2659 |
| 2660 if (divisor == 0) { |
| 2661 Deoptimize(instr->environment()); |
| 2662 return; |
| 2663 } |
| 2664 |
| 2665 // Check for (0 / -x) that will produce negative zero. |
| 2666 HDiv* hdiv = instr->hydrogen(); |
| 2667 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 2668 DeoptimizeIfZero(dividend, instr->environment()); |
| 2669 } |
| 2670 |
| 2671 __ FlooringDiv(result, dividend, Abs(divisor)); |
| 2672 __ Add(result, result, Operand(dividend, LSR, 31)); |
| 2673 if (divisor < 0) __ Neg(result, result); |
| 2674 |
| 2675 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 2676 Register temp = ToRegister32(instr->temp()); |
| 2677 ASSERT(!AreAliased(dividend, result, temp)); |
| 2678 __ Sxtw(dividend.X(), dividend); |
| 2679 __ Mov(temp, divisor); |
| 2680 __ Smsubl(temp.X(), result, temp, dividend.X()); |
| 2681 DeoptimizeIfNotZero(temp, instr->environment()); |
| 2682 } |
| 2683 } |
| 2684 |
| 2685 |
| 2574 void LCodeGen::DoDivI(LDivI* instr) { | 2686 void LCodeGen::DoDivI(LDivI* instr) { |
| 2575 if (!instr->is_flooring() && instr->hydrogen()->RightIsPowerOf2()) { | 2687 HBinaryOperation* hdiv = instr->hydrogen(); |
| 2576 HDiv* hdiv = instr->hydrogen(); | |
| 2577 Register dividend = ToRegister32(instr->left()); | |
| 2578 int32_t divisor = hdiv->right()->GetInteger32Constant(); | |
| 2579 Register result = ToRegister32(instr->result()); | |
| 2580 ASSERT(!result.is(dividend)); | |
| 2581 | |
| 2582 // Check for (0 / -x) that will produce negative zero. | |
| 2583 if (hdiv->left()->RangeCanInclude(0) && divisor < 0 && | |
| 2584 hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | |
| 2585 __ Cmp(dividend, 0); | |
| 2586 DeoptimizeIf(eq, instr->environment()); | |
| 2587 } | |
| 2588 // Check for (kMinInt / -1). | |
| 2589 if (hdiv->left()->RangeCanInclude(kMinInt) && divisor == -1 && | |
| 2590 hdiv->CheckFlag(HValue::kCanOverflow)) { | |
| 2591 __ Cmp(dividend, kMinInt); | |
| 2592 DeoptimizeIf(eq, instr->environment()); | |
| 2593 } | |
| 2594 // Deoptimize if remainder will not be 0. | |
| 2595 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | |
| 2596 Abs(divisor) != 1) { | |
| 2597 __ Tst(dividend, Abs(divisor) - 1); | |
| 2598 DeoptimizeIf(ne, instr->environment()); | |
| 2599 } | |
| 2600 if (divisor == -1) { // Nice shortcut, not needed for correctness. | |
| 2601 __ Neg(result, dividend); | |
| 2602 return; | |
| 2603 } | |
| 2604 int32_t shift = WhichPowerOf2(Abs(divisor)); | |
| 2605 if (shift == 0) { | |
| 2606 __ Mov(result, dividend); | |
| 2607 } else if (shift == 1) { | |
| 2608 __ Add(result, dividend, Operand(dividend, LSR, 31)); | |
| 2609 } else { | |
| 2610 __ Mov(result, Operand(dividend, ASR, 31)); | |
| 2611 __ Add(result, dividend, Operand(result, LSR, 32 - shift)); | |
| 2612 } | |
| 2613 if (shift > 0) __ Mov(result, Operand(result, ASR, shift)); | |
| 2614 if (divisor < 0) __ Neg(result, result); | |
| 2615 return; | |
| 2616 } | |
| 2617 | |
| 2618 Register dividend = ToRegister32(instr->left()); | 2688 Register dividend = ToRegister32(instr->left()); |
| 2619 Register divisor = ToRegister32(instr->right()); | 2689 Register divisor = ToRegister32(instr->right()); |
| 2620 Register result = ToRegister32(instr->result()); | 2690 Register result = ToRegister32(instr->result()); |
| 2621 HValue* hdiv = instr->hydrogen_value(); | |
| 2622 | 2691 |
| 2623 // Issue the division first, and then check for any deopt cases whilst the | 2692 // Issue the division first, and then check for any deopt cases whilst the |
| 2624 // result is computed. | 2693 // result is computed. |
| 2625 __ Sdiv(result, dividend, divisor); | 2694 __ Sdiv(result, dividend, divisor); |
| 2626 | 2695 |
| 2627 if (hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 2696 if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 2628 ASSERT_EQ(NULL, instr->temp()); | 2697 ASSERT_EQ(NULL, instr->temp()); |
| 2629 return; | 2698 return; |
| 2630 } | 2699 } |
| 2631 | 2700 |
| 2632 Label deopt; | 2701 Label deopt; |
| 2633 // Check for x / 0. | 2702 // Check for x / 0. |
| 2634 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 2703 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 2635 __ Cbz(divisor, &deopt); | 2704 __ Cbz(divisor, &deopt); |
| 2636 } | 2705 } |
| 2637 | 2706 |
| (...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2709 | 2778 |
| 2710 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { | 2779 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { |
| 2711 ASSERT(ToRegister(instr->context()).is(cp)); | 2780 ASSERT(ToRegister(instr->context()).is(cp)); |
| 2712 // FunctionLiteral instruction is marked as call, we can trash any register. | 2781 // FunctionLiteral instruction is marked as call, we can trash any register. |
| 2713 ASSERT(instr->IsMarkedAsCall()); | 2782 ASSERT(instr->IsMarkedAsCall()); |
| 2714 | 2783 |
| 2715 // Use the fast case closure allocation code that allocates in new | 2784 // Use the fast case closure allocation code that allocates in new |
| 2716 // space for nested functions that don't need literals cloning. | 2785 // space for nested functions that don't need literals cloning. |
| 2717 bool pretenure = instr->hydrogen()->pretenure(); | 2786 bool pretenure = instr->hydrogen()->pretenure(); |
| 2718 if (!pretenure && instr->hydrogen()->has_no_literals()) { | 2787 if (!pretenure && instr->hydrogen()->has_no_literals()) { |
| 2719 FastNewClosureStub stub(instr->hydrogen()->language_mode(), | 2788 FastNewClosureStub stub(instr->hydrogen()->strict_mode(), |
| 2720 instr->hydrogen()->is_generator()); | 2789 instr->hydrogen()->is_generator()); |
| 2721 __ Mov(x2, Operand(instr->hydrogen()->shared_info())); | 2790 __ Mov(x2, Operand(instr->hydrogen()->shared_info())); |
| 2722 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); | 2791 CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr); |
| 2723 } else { | 2792 } else { |
| 2724 __ Mov(x2, Operand(instr->hydrogen()->shared_info())); | 2793 __ Mov(x2, Operand(instr->hydrogen()->shared_info())); |
| 2725 __ Mov(x1, Operand(pretenure ? factory()->true_value() | 2794 __ Mov(x1, Operand(pretenure ? factory()->true_value() |
| 2726 : factory()->false_value())); | 2795 : factory()->false_value())); |
| 2727 __ Push(cp, x2, x1); | 2796 __ Push(cp, x2, x1); |
| 2728 CallRuntime(Runtime::kNewClosure, 3, instr); | 2797 CallRuntime(Runtime::kNewClosure, 3, instr); |
| 2729 } | 2798 } |
| (...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2793 } | 2862 } |
| 2794 | 2863 |
| 2795 | 2864 |
| 2796 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { | 2865 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { |
| 2797 Register input = ToRegister(instr->value()); | 2866 Register input = ToRegister(instr->value()); |
| 2798 Register result = ToRegister(instr->result()); | 2867 Register result = ToRegister(instr->result()); |
| 2799 | 2868 |
| 2800 __ AssertString(input); | 2869 __ AssertString(input); |
| 2801 | 2870 |
| 2802 // Assert that we can use a W register load to get the hash. | 2871 // Assert that we can use a W register load to get the hash. |
| 2803 ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSize); | 2872 ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits); |
| 2804 __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset)); | 2873 __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset)); |
| 2805 __ IndexFromHash(result, result); | 2874 __ IndexFromHash(result, result); |
| 2806 } | 2875 } |
| 2807 | 2876 |
| 2808 | 2877 |
| 2809 void LCodeGen::EmitGoto(int block) { | 2878 void LCodeGen::EmitGoto(int block) { |
| 2810 // Do not emit jump if we are emitting a goto to the next block. | 2879 // Do not emit jump if we are emitting a goto to the next block. |
| 2811 if (!IsNextEmittedBlock(block)) { | 2880 if (!IsNextEmittedBlock(block)) { |
| 2812 __ B(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2881 __ B(chunk_->GetAssemblyLabel(LookupDestination(block))); |
| 2813 } | 2882 } |
| (...skipping 214 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3028 } | 3097 } |
| 3029 | 3098 |
| 3030 | 3099 |
| 3031 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { | 3100 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
| 3032 Register value = ToRegister32(instr->value()); | 3101 Register value = ToRegister32(instr->value()); |
| 3033 DoubleRegister result = ToDoubleRegister(instr->result()); | 3102 DoubleRegister result = ToDoubleRegister(instr->result()); |
| 3034 __ Scvtf(result, value); | 3103 __ Scvtf(result, value); |
| 3035 } | 3104 } |
| 3036 | 3105 |
| 3037 | 3106 |
// Tags a 32-bit integer as a smi. Removed in the NEW side of this diff;
// kept here as it appears in the OLD revision.
void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
  // A64 smis can represent all Integer32 values, so this cannot deoptimize.
  ASSERT(!instr->hydrogen()->value()->HasRange() ||
         instr->hydrogen()->value()->range()->IsInSmiRange());

  Register value = ToRegister32(instr->value());
  Register result = ToRegister(instr->result());
  // Tag via the X-sized view of the value register (kSmiShift == 32 on A64).
  __ SmiTag(result, value.X());
}
| 3047 | |
| 3048 | |
| 3049 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { | 3107 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { |
| 3050 ASSERT(ToRegister(instr->context()).is(cp)); | 3108 ASSERT(ToRegister(instr->context()).is(cp)); |
| 3051 // The function is required to be in x1. | 3109 // The function is required to be in x1. |
| 3052 ASSERT(ToRegister(instr->function()).is(x1)); | 3110 ASSERT(ToRegister(instr->function()).is(x1)); |
| 3053 ASSERT(instr->HasPointerMap()); | 3111 ASSERT(instr->HasPointerMap()); |
| 3054 | 3112 |
| 3055 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); | 3113 Handle<JSFunction> known_function = instr->hydrogen()->known_function(); |
| 3056 if (known_function.is_null()) { | 3114 if (known_function.is_null()) { |
| 3057 LPointerMap* pointers = instr->pointer_map(); | 3115 LPointerMap* pointers = instr->pointer_map(); |
| 3058 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); | 3116 SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); |
| (...skipping 343 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3402 case FLOAT64_ELEMENTS: | 3460 case FLOAT64_ELEMENTS: |
| 3403 case EXTERNAL_FLOAT32_ELEMENTS: | 3461 case EXTERNAL_FLOAT32_ELEMENTS: |
| 3404 case EXTERNAL_FLOAT64_ELEMENTS: | 3462 case EXTERNAL_FLOAT64_ELEMENTS: |
| 3405 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3463 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 3406 case FAST_HOLEY_ELEMENTS: | 3464 case FAST_HOLEY_ELEMENTS: |
| 3407 case FAST_HOLEY_SMI_ELEMENTS: | 3465 case FAST_HOLEY_SMI_ELEMENTS: |
| 3408 case FAST_DOUBLE_ELEMENTS: | 3466 case FAST_DOUBLE_ELEMENTS: |
| 3409 case FAST_ELEMENTS: | 3467 case FAST_ELEMENTS: |
| 3410 case FAST_SMI_ELEMENTS: | 3468 case FAST_SMI_ELEMENTS: |
| 3411 case DICTIONARY_ELEMENTS: | 3469 case DICTIONARY_ELEMENTS: |
| 3412 case NON_STRICT_ARGUMENTS_ELEMENTS: | 3470 case SLOPPY_ARGUMENTS_ELEMENTS: |
| 3413 UNREACHABLE(); | 3471 UNREACHABLE(); |
| 3414 break; | 3472 break; |
| 3415 } | 3473 } |
| 3416 } | 3474 } |
| 3417 } | 3475 } |
| 3418 | 3476 |
| 3419 | 3477 |
| 3420 void LCodeGen::CalcKeyedArrayBaseRegister(Register base, | 3478 void LCodeGen::CalcKeyedArrayBaseRegister(Register base, |
| 3421 Register elements, | 3479 Register elements, |
| 3422 Register key, | 3480 Register key, |
| (...skipping 242 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3665 __ Bind(&runtime_allocation); | 3723 __ Bind(&runtime_allocation); |
| 3666 if (FLAG_debug_code) { | 3724 if (FLAG_debug_code) { |
| 3667 // Because result is in the pointer map, we need to make sure it has a valid | 3725 // Because result is in the pointer map, we need to make sure it has a valid |
| 3668 // tagged value before we call the runtime. We speculatively set it to the | 3726 // tagged value before we call the runtime. We speculatively set it to the |
| 3669 // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already | 3727 // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already |
| 3670 // be valid. | 3728 // be valid. |
| 3671 Label result_ok; | 3729 Label result_ok; |
| 3672 Register input = ToRegister(instr->value()); | 3730 Register input = ToRegister(instr->value()); |
| 3673 __ JumpIfSmi(result, &result_ok); | 3731 __ JumpIfSmi(result, &result_ok); |
| 3674 __ Cmp(input, result); | 3732 __ Cmp(input, result); |
| 3675 // TODO(all): Shouldn't we assert here? | 3733 __ Assert(eq, kUnexpectedValue); |
| 3676 DeoptimizeIf(ne, instr->environment()); | |
| 3677 __ Bind(&result_ok); | 3734 __ Bind(&result_ok); |
| 3678 } | 3735 } |
| 3679 | 3736 |
| 3680 { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); | 3737 { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters); |
| 3681 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, | 3738 CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, |
| 3682 instr->context()); | 3739 instr->context()); |
| 3683 __ StoreToSafepointRegisterSlot(x0, result); | 3740 __ StoreToSafepointRegisterSlot(x0, result); |
| 3684 } | 3741 } |
| 3685 // The inline (non-deferred) code will store result_bits into result. | 3742 // The inline (non-deferred) code will store result_bits into result. |
| 3686 } | 3743 } |
| (...skipping 29 matching lines...) Expand all Loading... |
| 3716 Register result_bits = ToRegister(instr->temp3()); | 3773 Register result_bits = ToRegister(instr->temp3()); |
| 3717 Register result = ToRegister(instr->result()); | 3774 Register result = ToRegister(instr->result()); |
| 3718 Label done; | 3775 Label done; |
| 3719 | 3776 |
| 3720 // Handle smis inline. | 3777 // Handle smis inline. |
| 3721 // We can treat smis as 64-bit integers, since the (low-order) tag bits will | 3778 // We can treat smis as 64-bit integers, since the (low-order) tag bits will |
| 3722 // never get set by the negation. This is therefore the same as the Integer32 | 3779 // never get set by the negation. This is therefore the same as the Integer32 |
| 3723 // case in DoMathAbs, except that it operates on 64-bit values. | 3780 // case in DoMathAbs, except that it operates on 64-bit values. |
| 3724 STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0)); | 3781 STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0)); |
| 3725 | 3782 |
| 3726 // TODO(jbramley): We can't use JumpIfNotSmi here because the tbz it uses | 3783 __ JumpIfNotSmi(input, deferred->entry()); |
| 3727 // doesn't always have enough range. Consider making a variant of it, or a | |
| 3728 // TestIsSmi helper. | |
| 3729 STATIC_ASSERT(kSmiTag == 0); | |
| 3730 __ Tst(input, kSmiTagMask); | |
| 3731 __ B(ne, deferred->entry()); | |
| 3732 | 3784 |
| 3733 __ Abs(result, input, NULL, &done); | 3785 __ Abs(result, input, NULL, &done); |
| 3734 | 3786 |
| 3735 // The result is the magnitude (abs) of the smallest value a smi can | 3787 // The result is the magnitude (abs) of the smallest value a smi can |
| 3736 // represent, encoded as a double. | 3788 // represent, encoded as a double. |
| 3737 __ Mov(result_bits, double_to_rawbits(0x80000000)); | 3789 __ Mov(result_bits, double_to_rawbits(0x80000000)); |
| 3738 __ B(deferred->allocation_entry()); | 3790 __ B(deferred->allocation_entry()); |
| 3739 | 3791 |
| 3740 __ Bind(deferred->exit()); | 3792 __ Bind(deferred->exit()); |
| 3741 __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset)); | 3793 __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset)); |
| (...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3780 __ Fccmp(input, input, NoFlag, eq); | 3832 __ Fccmp(input, input, NoFlag, eq); |
| 3781 __ B(&done, eq); | 3833 __ B(&done, eq); |
| 3782 | 3834 |
| 3783 __ Bind(&deopt); | 3835 __ Bind(&deopt); |
| 3784 Deoptimize(instr->environment()); | 3836 Deoptimize(instr->environment()); |
| 3785 | 3837 |
| 3786 __ Bind(&done); | 3838 __ Bind(&done); |
| 3787 } | 3839 } |
| 3788 | 3840 |
| 3789 | 3841 |
| 3790 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) { | 3842 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { |
| 3843 Register dividend = ToRegister32(instr->dividend()); |
| 3791 Register result = ToRegister32(instr->result()); | 3844 Register result = ToRegister32(instr->result()); |
| 3792 Register left = ToRegister32(instr->left()); | 3845 int32_t divisor = instr->divisor(); |
| 3793 Register right = ToRegister32(instr->right()); | 3846 |
| 3847 // If the divisor is positive, things are easy: There can be no deopts and we |
| 3848 // can simply do an arithmetic right shift. |
| 3849 if (divisor == 1) return; |
| 3850 int32_t shift = WhichPowerOf2Abs(divisor); |
| 3851 if (divisor > 1) { |
| 3852 __ Mov(result, Operand(dividend, ASR, shift)); |
| 3853 return; |
| 3854 } |
| 3855 |
| 3856 // If the divisor is negative, we have to negate and handle edge cases. |
| 3857 Label not_kmin_int, done; |
| 3858 __ Negs(result, dividend); |
| 3859 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3860 DeoptimizeIf(eq, instr->environment()); |
| 3861 } |
| 3862 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 3863 // Note that we could emit branch-free code, but that would need one more |
| 3864 // register. |
| 3865 if (divisor == -1) { |
| 3866 DeoptimizeIf(vs, instr->environment()); |
| 3867 } else { |
| 3868 __ B(vc, ¬_kmin_int); |
| 3869 __ Mov(result, kMinInt / divisor); |
| 3870 __ B(&done); |
| 3871 } |
| 3872 } |
| 3873 __ bind(¬_kmin_int); |
| 3874 __ Mov(result, Operand(dividend, ASR, shift)); |
| 3875 __ bind(&done); |
| 3876 } |
| 3877 |
| 3878 |
// Flooring integer division of a register dividend by an arbitrary
// compile-time constant. The division itself is expanded by
// MacroAssembler::FlooringDiv (defined elsewhere — presumably a
// multiply-by-reciprocal sequence; confirm in macro-assembler-arm64).
void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  ASSERT(!AreAliased(dividend, result));

  // Division by zero always deopts; nothing else to emit.
  if (divisor == 0) {
    Deoptimize(instr->environment());
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp(dividend, 0);
    DeoptimizeIf(eq, instr->environment());
  }

  __ FlooringDiv(result, dividend, divisor);
}
| 3899 |
| 3900 |
// Flooring integer division with a register divisor: compute the truncating
// Sdiv quotient, then subtract 1 when the operands have opposite signs and
// the division was inexact.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  Register divisor = ToRegister32(instr->divisor());
  Register remainder = ToRegister32(instr->temp());
  Register result = ToRegister32(instr->result());

  // This can't cause an exception on ARM, so we can speculatively
  // execute it already now.
  __ Sdiv(result, dividend, divisor);

  // Check for x / 0. (Unconditional: this instruction is only selected when
  // a zero divisor is possible.)
  DeoptimizeIfZero(divisor, instr->environment());

  // Check for (kMinInt / -1).
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    // The V flag will be set iff dividend == kMinInt.
    __ Cmp(dividend, 1);
    __ Ccmp(divisor, -1, NoFlag, vs);
    DeoptimizeIf(eq, instr->environment());
  }

  // Check for (0 / -x) that will produce negative zero.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ Cmp(divisor, 0);
    __ Ccmp(dividend, 0, ZFlag, mi);
    // "divisor" can't be null because the code would have already been
    // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
    // In this case we need to deoptimize to produce a -0.
    DeoptimizeIf(eq, instr->environment());
  }

  Label done;
  // If both operands have the same sign then we are done.
  __ Eor(remainder, dividend, divisor);
  __ Tbz(remainder, kWSignBit, &done);

  // Check if the result needs to be corrected.
  __ Msub(remainder, result, divisor, dividend);
  __ Cbz(remainder, &done);
  // Opposite signs and a non-zero remainder: round toward -infinity.
  __ Sub(result, result, 1);

  __ Bind(&done);
}
| 3833 | 3944 |
| 3834 | 3945 |
| 3835 void LCodeGen::DoMathLog(LMathLog* instr) { | 3946 void LCodeGen::DoMathLog(LMathLog* instr) { |
| 3836 ASSERT(instr->IsMarkedAsCall()); | 3947 ASSERT(instr->IsMarkedAsCall()); |
| 3837 ASSERT(ToDoubleRegister(instr->value()).is(d0)); | 3948 ASSERT(ToDoubleRegister(instr->value()).is(d0)); |
| (...skipping 20 matching lines...) Expand all Loading... |
| 3858 // Math.pow(-0.0, 0.5) == +0.0 | 3969 // Math.pow(-0.0, 0.5) == +0.0 |
| 3859 | 3970 |
| 3860 // Catch -infinity inputs first. | 3971 // Catch -infinity inputs first. |
| 3861 // TODO(jbramley): A constant infinity register would be helpful here. | 3972 // TODO(jbramley): A constant infinity register would be helpful here. |
| 3862 __ Fmov(double_scratch(), kFP64NegativeInfinity); | 3973 __ Fmov(double_scratch(), kFP64NegativeInfinity); |
| 3863 __ Fcmp(double_scratch(), input); | 3974 __ Fcmp(double_scratch(), input); |
| 3864 __ Fabs(result, input); | 3975 __ Fabs(result, input); |
| 3865 __ B(&done, eq); | 3976 __ B(&done, eq); |
| 3866 | 3977 |
| 3867 // Add +0.0 to convert -0.0 to +0.0. | 3978 // Add +0.0 to convert -0.0 to +0.0. |
| 3868 // TODO(jbramley): A constant zero register would be helpful here. | 3979 __ Fadd(double_scratch(), input, fp_zero); |
| 3869 __ Fmov(double_scratch(), 0.0); | |
| 3870 __ Fadd(double_scratch(), input, double_scratch()); | |
| 3871 __ Fsqrt(result, double_scratch()); | 3980 __ Fsqrt(result, double_scratch()); |
| 3872 | 3981 |
| 3873 __ Bind(&done); | 3982 __ Bind(&done); |
| 3874 } | 3983 } |
| 3875 | 3984 |
| 3876 | 3985 |
| 3877 void LCodeGen::DoPower(LPower* instr) { | 3986 void LCodeGen::DoPower(LPower* instr) { |
| 3878 Representation exponent_type = instr->hydrogen()->right()->representation(); | 3987 Representation exponent_type = instr->hydrogen()->right()->representation(); |
| 3879 // Having marked this as a call, we can use any registers. | 3988 // Having marked this as a call, we can use any registers. |
| 3880 // Just make sure that the input/output registers are the expected ones. | 3989 // Just make sure that the input/output registers are the expected ones. |
| (...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4006 if (op == HMathMinMax::kMathMax) { | 4115 if (op == HMathMinMax::kMathMax) { |
| 4007 __ Fmax(result, left, right); | 4116 __ Fmax(result, left, right); |
| 4008 } else { | 4117 } else { |
| 4009 ASSERT(op == HMathMinMax::kMathMin); | 4118 ASSERT(op == HMathMinMax::kMathMin); |
| 4010 __ Fmin(result, left, right); | 4119 __ Fmin(result, left, right); |
| 4011 } | 4120 } |
| 4012 } | 4121 } |
| 4013 } | 4122 } |
| 4014 | 4123 |
| 4015 | 4124 |
// Modulus of a register dividend by a compile-time power-of-2 divisor.
// Operates in place: the result register must alias the dividend.
// (The '&dividend...' label references below were rendered as '÷nd...'
// by HTML-entity garbling in the review tool; restored here.)
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  ASSERT(dividend.is(ToRegister32(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  // mask == |divisor| - 1 for both signs of divisor.
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ Cmp(dividend, 0);
    __ B(pl, &dividend_is_not_negative);
    // Note that this is correct even for kMinInt operands.
    __ Neg(dividend, dividend);
    __ And(dividend, dividend, Operand(mask));
    // Negs sets Z when the remainder is 0, i.e. the -0 case.
    __ Negs(dividend, dividend);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr->environment());
    }
    __ B(&done);
  }

  __ bind(&dividend_is_not_negative);
  __ And(dividend, dividend, Operand(mask));
  __ bind(&done);
}
| 4156 |
| 4157 |
// Modulus of a register dividend by an arbitrary compile-time constant:
// compute the truncating quotient (FlooringDiv plus sign-bit correction),
// then remainder = dividend - quotient * |divisor| via Smsubl.
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister32(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister32(instr->result());
  Register temp = ToRegister32(instr->temp());
  ASSERT(!AreAliased(dividend, result, temp));

  // x % 0 always deopts.
  if (divisor == 0) {
    Deoptimize(instr->environment());
    return;
  }

  // Adding the sign bit (dividend >>> 31) converts the flooring quotient
  // into the truncating quotient required for C-style modulus.
  __ FlooringDiv(result, dividend, Abs(divisor));
  __ Add(result, result, Operand(dividend, LSR, 31));
  __ Sxtw(dividend.X(), dividend);
  __ Mov(temp, Abs(divisor));
  __ Smsubl(result.X(), result, temp, dividend.X());

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ Cbnz(result, &remainder_not_zero);
    // Remainder is 0: -0 iff the dividend was negative.
    DeoptimizeIfNegative(dividend, instr->environment());
    __ bind(&remainder_not_zero);
  }
}
| 4185 |
| 4186 |
// Modulus with a register divisor: modulo = dividend - (dividend / divisor)
// * divisor, using Sdiv + Msub. Deopts on division by zero (when possible)
// and on a -0 result (when required).
void LCodeGen::DoModI(LModI* instr) {
  Register dividend = ToRegister32(instr->left());
  Register divisor = ToRegister32(instr->right());
  Register result = ToRegister32(instr->result());

  Label deopt, done;
  // modulo = dividend - quotient * divisor
  __ Sdiv(result, dividend, divisor);
  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
    // Combine the deoptimization sites.
    Label ok;
    __ Cbnz(divisor, &ok);
    __ Bind(&deopt);
    Deoptimize(instr->environment());
    __ Bind(&ok);
  }
  __ Msub(result, result, divisor, dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Remainder 0 with a negative dividend is -0.
    __ Cbnz(result, &done);
    if (deopt.is_bound()) {  // TODO(all) This is a hack, remove this...
      // Reuse the already-emitted deopt entry point for the -0 bailout.
      __ Tbnz(dividend, kWSignBit, &deopt);
    } else {
      DeoptimizeIfNegative(dividend, instr->environment());
    }
  }
  __ Bind(&done);
}
| 4072 | 4214 |
| 4073 | 4215 |
| 4074 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { | 4216 void LCodeGen::DoMulConstIS(LMulConstIS* instr) { |
| 4075 ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32()); | 4217 ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32()); |
| 4076 bool is_smi = instr->hydrogen()->representation().IsSmi(); | 4218 bool is_smi = instr->hydrogen()->representation().IsSmi(); |
| 4077 Register result = | 4219 Register result = |
| (...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4167 | 4309 |
// 32-bit integer multiply. Optionally deopts on overflow (checked by
// comparing the 64-bit Smull product against its sign-extended low word)
// and on a -0 result.
void LCodeGen::DoMulI(LMulI* instr) {
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Register right = ToRegister32(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  // left.Is(right) means the operand is squared, which can never be -0,
  // so the check is skipped in that case.
  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
    DeoptimizeIf(mi, instr->environment());
  }

  if (can_overflow) {
    // The product fits in 32 bits iff the full 64-bit product equals its
    // own low 32 bits sign-extended.
    __ Smull(result.X(), left, right);
    __ Cmp(result.X(), Operand(result, SXTW));
    DeoptimizeIf(ne, instr->environment());
  } else {
    __ Mul(result, left, right);
  }
}
| 4196 | 4338 |
| 4197 | 4339 |
// Smi multiply: produces a tagged result by untagging exactly one operand
// before the multiply (kSmiShift == 32 on A64, so smi * int32 == smi).
// The non-overflow path is a case analysis on register aliasing.
void LCodeGen::DoMulS(LMulS* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  bool bailout_on_minus_zero =
    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);

  // A squared operand (left aliases right) can never be -0.
  if (bailout_on_minus_zero && !left.Is(right)) {
    // If one operand is zero and the other is negative, the result is -0.
    //  - Set Z (eq) if either left or right, or both, are 0.
    __ Cmp(left, 0);
    __ Ccmp(right, 0, ZFlag, ne);
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
    DeoptimizeIf(mi, instr->environment());
  }

  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
  if (can_overflow) {
    // Smulh yields the high 64 bits of the 128-bit product; because smis
    // are int32 values shifted left 32, this is exactly the tagged product's
    // value, and it fits iff it equals its low word sign-extended.
    __ Smulh(result, left, right);
    __ Cmp(result, Operand(result.W(), SXTW));
    __ SmiTag(result);
    DeoptimizeIf(ne, instr->environment());
  } else {
    if (AreAliased(result, left, right)) {
      // All three registers are the same: half untag the input and then
      // multiply, giving a tagged result.
      STATIC_ASSERT((kSmiShift % 2) == 0);
      __ Asr(result, left, kSmiShift / 2);
      __ Mul(result, result, result);
    } else if (result.Is(left) && !left.Is(right)) {
      // Registers result and left alias, right is distinct: untag left into
      // result, and then multiply by right, giving a tagged result.
      __ SmiUntag(result, left);
      __ Mul(result, result, right);
    } else {
      ASSERT(!left.Is(result));
      // Registers result and right alias, left is distinct, or all registers
      // are distinct: untag right into result, and then multiply by left,
      // giving a tagged result.
      __ SmiUntag(result, right);
      __ Mul(result, left, result);
    }
  }
}
| 4231 | 4388 |
| 4232 | 4389 |
| 4233 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { | 4390 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
| 4234 // TODO(3095996): Get rid of this. For now, we need to make the | 4391 // TODO(3095996): Get rid of this. For now, we need to make the |
| 4235 // result register contain a valid pointer because it is already | 4392 // result register contain a valid pointer because it is already |
| 4236 // contained in the register pointer map. | 4393 // contained in the register pointer map. |
| 4237 Register result = ToRegister(instr->result()); | 4394 Register result = ToRegister(instr->result()); |
| 4238 __ Mov(result, 0); | 4395 __ Mov(result, 0); |
| (...skipping 242 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4481 LOperand* index, | 4638 LOperand* index, |
| 4482 String::Encoding encoding) { | 4639 String::Encoding encoding) { |
| 4483 if (index->IsConstantOperand()) { | 4640 if (index->IsConstantOperand()) { |
| 4484 int offset = ToInteger32(LConstantOperand::cast(index)); | 4641 int offset = ToInteger32(LConstantOperand::cast(index)); |
| 4485 if (encoding == String::TWO_BYTE_ENCODING) { | 4642 if (encoding == String::TWO_BYTE_ENCODING) { |
| 4486 offset *= kUC16Size; | 4643 offset *= kUC16Size; |
| 4487 } | 4644 } |
| 4488 STATIC_ASSERT(kCharSize == 1); | 4645 STATIC_ASSERT(kCharSize == 1); |
| 4489 return FieldMemOperand(string, SeqString::kHeaderSize + offset); | 4646 return FieldMemOperand(string, SeqString::kHeaderSize + offset); |
| 4490 } | 4647 } |
| 4491 ASSERT(!temp.is(string)); | 4648 |
| 4492 ASSERT(!temp.is(ToRegister(index))); | |
| 4493 if (encoding == String::ONE_BYTE_ENCODING) { | 4649 if (encoding == String::ONE_BYTE_ENCODING) { |
| 4494 __ Add(temp, string, Operand(ToRegister32(index), SXTW)); | 4650 __ Add(temp, string, Operand(ToRegister32(index), SXTW)); |
| 4495 } else { | 4651 } else { |
| 4496 STATIC_ASSERT(kUC16Size == 2); | 4652 STATIC_ASSERT(kUC16Size == 2); |
| 4497 __ Add(temp, string, Operand(ToRegister32(index), SXTW, 1)); | 4653 __ Add(temp, string, Operand(ToRegister32(index), SXTW, 1)); |
| 4498 } | 4654 } |
| 4499 return FieldMemOperand(temp, SeqString::kHeaderSize); | 4655 return FieldMemOperand(temp, SeqString::kHeaderSize); |
| 4500 } | 4656 } |
| 4501 | 4657 |
| 4502 | 4658 |
| 4503 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { | 4659 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { |
| 4504 String::Encoding encoding = instr->hydrogen()->encoding(); | 4660 String::Encoding encoding = instr->hydrogen()->encoding(); |
| 4505 Register string = ToRegister(instr->string()); | 4661 Register string = ToRegister(instr->string()); |
| 4506 Register result = ToRegister(instr->result()); | 4662 Register result = ToRegister(instr->result()); |
| 4507 Register temp = ToRegister(instr->temp()); | 4663 Register temp = ToRegister(instr->temp()); |
| 4508 | 4664 |
| 4509 if (FLAG_debug_code) { | 4665 if (FLAG_debug_code) { |
| 4510 __ Ldr(temp, FieldMemOperand(string, HeapObject::kMapOffset)); | 4666 // Even though this lithium instruction comes with a temp register, we |
| 4511 __ Ldrb(temp, FieldMemOperand(temp, Map::kInstanceTypeOffset)); | 4667 // can't use it here because we want to use "AtStart" constraints on the |
| 4668 // inputs and the debug code here needs a scratch register. |
| 4669 UseScratchRegisterScope temps(masm()); |
| 4670 Register dbg_temp = temps.AcquireX(); |
| 4512 | 4671 |
| 4513 __ And(temp, temp, | 4672 __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset)); |
| 4673 __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset)); |
| 4674 |
| 4675 __ And(dbg_temp, dbg_temp, |
| 4514 Operand(kStringRepresentationMask | kStringEncodingMask)); | 4676 Operand(kStringRepresentationMask | kStringEncodingMask)); |
| 4515 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; | 4677 static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; |
| 4516 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; | 4678 static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; |
| 4517 __ Cmp(temp, Operand(encoding == String::ONE_BYTE_ENCODING | 4679 __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING |
| 4518 ? one_byte_seq_type : two_byte_seq_type)); | 4680 ? one_byte_seq_type : two_byte_seq_type)); |
| 4519 __ Check(eq, kUnexpectedStringType); | 4681 __ Check(eq, kUnexpectedStringType); |
| 4520 } | 4682 } |
| 4521 | 4683 |
| 4522 MemOperand operand = | 4684 MemOperand operand = |
| 4523 BuildSeqStringOperand(string, temp, instr->index(), encoding); | 4685 BuildSeqStringOperand(string, temp, instr->index(), encoding); |
| 4524 if (encoding == String::ONE_BYTE_ENCODING) { | 4686 if (encoding == String::ONE_BYTE_ENCODING) { |
| 4525 __ Ldrb(result, operand); | 4687 __ Ldrb(result, operand); |
| 4526 } else { | 4688 } else { |
| 4527 __ Ldrh(result, operand); | 4689 __ Ldrh(result, operand); |
| 4528 } | 4690 } |
| (...skipping 21 matching lines...) Expand all Loading... |
| 4550 BuildSeqStringOperand(string, temp, instr->index(), encoding); | 4712 BuildSeqStringOperand(string, temp, instr->index(), encoding); |
| 4551 if (encoding == String::ONE_BYTE_ENCODING) { | 4713 if (encoding == String::ONE_BYTE_ENCODING) { |
| 4552 __ Strb(value, operand); | 4714 __ Strb(value, operand); |
| 4553 } else { | 4715 } else { |
| 4554 __ Strh(value, operand); | 4716 __ Strh(value, operand); |
| 4555 } | 4717 } |
| 4556 } | 4718 } |
| 4557 | 4719 |
| 4558 | 4720 |
| 4559 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4721 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 4560 ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)); | 4722 HChange* hchange = instr->hydrogen(); |
| 4561 __ SmiTag(ToRegister(instr->result()), ToRegister(instr->value())); | 4723 Register input = ToRegister(instr->value()); |
| 4724 Register output = ToRegister(instr->result()); |
| 4725 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4726 hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4727 DeoptimizeIfNegative(input.W(), instr->environment()); |
| 4728 } |
| 4729 __ SmiTag(output, input); |
| 4562 } | 4730 } |
| 4563 | 4731 |
| 4564 | 4732 |
| 4565 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4733 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 4566 Register input = ToRegister(instr->value()); | 4734 Register input = ToRegister(instr->value()); |
| 4567 Register result = ToRegister(instr->result()); | 4735 Register result = ToRegister(instr->result()); |
| 4568 Label done, untag; | 4736 Label done, untag; |
| 4569 | 4737 |
| 4570 if (instr->needs_check()) { | 4738 if (instr->needs_check()) { |
| 4571 DeoptimizeIfNotSmi(input, instr->environment()); | 4739 DeoptimizeIfNotSmi(input, instr->environment()); |
| (...skipping 341 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4913 case FLOAT64_ELEMENTS: | 5081 case FLOAT64_ELEMENTS: |
| 4914 case EXTERNAL_FLOAT32_ELEMENTS: | 5082 case EXTERNAL_FLOAT32_ELEMENTS: |
| 4915 case EXTERNAL_FLOAT64_ELEMENTS: | 5083 case EXTERNAL_FLOAT64_ELEMENTS: |
| 4916 case FAST_DOUBLE_ELEMENTS: | 5084 case FAST_DOUBLE_ELEMENTS: |
| 4917 case FAST_ELEMENTS: | 5085 case FAST_ELEMENTS: |
| 4918 case FAST_SMI_ELEMENTS: | 5086 case FAST_SMI_ELEMENTS: |
| 4919 case FAST_HOLEY_DOUBLE_ELEMENTS: | 5087 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 4920 case FAST_HOLEY_ELEMENTS: | 5088 case FAST_HOLEY_ELEMENTS: |
| 4921 case FAST_HOLEY_SMI_ELEMENTS: | 5089 case FAST_HOLEY_SMI_ELEMENTS: |
| 4922 case DICTIONARY_ELEMENTS: | 5090 case DICTIONARY_ELEMENTS: |
| 4923 case NON_STRICT_ARGUMENTS_ELEMENTS: | 5091 case SLOPPY_ARGUMENTS_ELEMENTS: |
| 4924 UNREACHABLE(); | 5092 UNREACHABLE(); |
| 4925 break; | 5093 break; |
| 4926 } | 5094 } |
| 4927 } | 5095 } |
| 4928 } | 5096 } |
| 4929 | 5097 |
| 4930 | 5098 |
| 4931 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) { | 5099 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) { |
| 4932 Register elements = ToRegister(instr->elements()); | 5100 Register elements = ToRegister(instr->elements()); |
| 4933 DoubleRegister value = ToDoubleRegister(instr->value()); | 5101 DoubleRegister value = ToDoubleRegister(instr->value()); |
| 4934 Register store_base = ToRegister(instr->temp()); | 5102 Register store_base = no_reg; |
| 4935 int offset = 0; | 5103 int offset = 0; |
| 4936 | 5104 |
| 4937 if (instr->key()->IsConstantOperand()) { | 5105 if (instr->key()->IsConstantOperand()) { |
| 4938 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); | 5106 int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
| 4939 if (constant_key & 0xf0000000) { | 5107 if (constant_key & 0xf0000000) { |
| 4940 Abort(kArrayIndexConstantValueTooBig); | 5108 Abort(kArrayIndexConstantValueTooBig); |
| 4941 } | 5109 } |
| 4942 offset = FixedDoubleArray::OffsetOfElementAt(constant_key + | 5110 offset = FixedDoubleArray::OffsetOfElementAt(constant_key + |
| 4943 instr->additional_index()); | 5111 instr->additional_index()); |
| 4944 store_base = elements; | 5112 store_base = elements; |
| 4945 } else { | 5113 } else { |
| 5114 store_base = ToRegister(instr->temp()); |
| 4946 Register key = ToRegister(instr->key()); | 5115 Register key = ToRegister(instr->key()); |
| 4947 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); | 5116 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); |
| 4948 CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged, | 5117 CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged, |
| 4949 instr->hydrogen()->elements_kind()); | 5118 instr->hydrogen()->elements_kind()); |
| 4950 offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index()); | 5119 offset = FixedDoubleArray::OffsetOfElementAt(instr->additional_index()); |
| 4951 } | 5120 } |
| 4952 | 5121 |
| 4953 if (instr->NeedsCanonicalization()) { | 5122 if (instr->NeedsCanonicalization()) { |
| 4954 DoubleRegister dbl_scratch = double_scratch(); | 5123 DoubleRegister dbl_scratch = double_scratch(); |
| 4955 __ Fmov(dbl_scratch, | 5124 __ Fmov(dbl_scratch, |
| 4956 FixedDoubleArray::canonical_not_the_hole_nan_as_double()); | 5125 FixedDoubleArray::canonical_not_the_hole_nan_as_double()); |
| 4957 __ Fmaxnm(dbl_scratch, dbl_scratch, value); | 5126 __ Fmaxnm(dbl_scratch, dbl_scratch, value); |
| 4958 __ Str(dbl_scratch, FieldMemOperand(store_base, offset)); | 5127 __ Str(dbl_scratch, FieldMemOperand(store_base, offset)); |
| 4959 } else { | 5128 } else { |
| 4960 __ Str(value, FieldMemOperand(store_base, offset)); | 5129 __ Str(value, FieldMemOperand(store_base, offset)); |
| 4961 } | 5130 } |
| 4962 } | 5131 } |
| 4963 | 5132 |
| 4964 | 5133 |
| 4965 void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) { | 5134 void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) { |
| 4966 Register value = ToRegister(instr->value()); | 5135 Register value = ToRegister(instr->value()); |
| 4967 Register elements = ToRegister(instr->elements()); | 5136 Register elements = ToRegister(instr->elements()); |
| 4968 Register store_base = ToRegister(instr->temp()); | 5137 Register scratch = no_reg; |
| 5138 Register store_base = no_reg; |
| 4969 Register key = no_reg; | 5139 Register key = no_reg; |
| 4970 int offset = 0; | 5140 int offset = 0; |
| 4971 | 5141 |
| 5142 if (!instr->key()->IsConstantOperand() || |
| 5143 instr->hydrogen()->NeedsWriteBarrier()) { |
| 5144 scratch = ToRegister(instr->temp()); |
| 5145 } |
| 5146 |
| 4972 if (instr->key()->IsConstantOperand()) { | 5147 if (instr->key()->IsConstantOperand()) { |
| 4973 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); | |
| 4974 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); | 5148 LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); |
| 4975 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + | 5149 offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) + |
| 4976 instr->additional_index()); | 5150 instr->additional_index()); |
| 4977 store_base = elements; | 5151 store_base = elements; |
| 4978 } else { | 5152 } else { |
| 5153 store_base = scratch; |
| 4979 key = ToRegister(instr->key()); | 5154 key = ToRegister(instr->key()); |
| 4980 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); | 5155 bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi(); |
| 4981 CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged, | 5156 CalcKeyedArrayBaseRegister(store_base, elements, key, key_is_tagged, |
| 4982 instr->hydrogen()->elements_kind()); | 5157 instr->hydrogen()->elements_kind()); |
| 4983 offset = FixedArray::OffsetOfElementAt(instr->additional_index()); | 5158 offset = FixedArray::OffsetOfElementAt(instr->additional_index()); |
| 4984 } | 5159 } |
| 4985 Representation representation = instr->hydrogen()->value()->representation(); | 5160 Representation representation = instr->hydrogen()->value()->representation(); |
| 4986 if (representation.IsInteger32()) { | 5161 if (representation.IsInteger32()) { |
| 4987 ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); | 5162 ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); |
| 4988 ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); | 5163 ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS); |
| 4989 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0); | 5164 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0); |
| 4990 __ Store(value, UntagSmiFieldMemOperand(store_base, offset), | 5165 __ Store(value, UntagSmiFieldMemOperand(store_base, offset), |
| 4991 Representation::Integer32()); | 5166 Representation::Integer32()); |
| 4992 } else { | 5167 } else { |
| 4993 __ Store(value, FieldMemOperand(store_base, offset), representation); | 5168 __ Store(value, FieldMemOperand(store_base, offset), representation); |
| 4994 } | 5169 } |
| 4995 | 5170 |
| 4996 if (instr->hydrogen()->NeedsWriteBarrier()) { | 5171 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 5172 ASSERT(representation.IsTagged()); |
| 5173 // This assignment may cause element_addr to alias store_base. |
| 5174 Register element_addr = scratch; |
| 4997 SmiCheck check_needed = | 5175 SmiCheck check_needed = |
| 4998 instr->hydrogen()->value()->IsHeapObject() | 5176 instr->hydrogen()->value()->IsHeapObject() |
| 4999 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 5177 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| 5000 // Compute address of modified element and store it into key register. | 5178 // Compute address of modified element and store it into key register. |
| 5001 __ Add(key, store_base, offset - kHeapObjectTag); | 5179 __ Add(element_addr, store_base, offset - kHeapObjectTag); |
| 5002 __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs, | 5180 __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(), |
| 5003 EMIT_REMEMBERED_SET, check_needed); | 5181 kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed); |
| 5004 } | 5182 } |
| 5005 } | 5183 } |
| 5006 | 5184 |
| 5007 | 5185 |
| 5008 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { | 5186 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { |
| 5009 ASSERT(ToRegister(instr->context()).is(cp)); | 5187 ASSERT(ToRegister(instr->context()).is(cp)); |
| 5010 ASSERT(ToRegister(instr->object()).Is(x2)); | 5188 ASSERT(ToRegister(instr->object()).Is(x2)); |
| 5011 ASSERT(ToRegister(instr->key()).Is(x1)); | 5189 ASSERT(ToRegister(instr->key()).Is(x1)); |
| 5012 ASSERT(ToRegister(instr->value()).Is(x0)); | 5190 ASSERT(ToRegister(instr->value()).Is(x0)); |
| 5013 | 5191 |
| 5014 Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode) | 5192 Handle<Code> ic = instr->strict_mode() == STRICT |
| 5015 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() | 5193 ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() |
| 5016 : isolate()->builtins()->KeyedStoreIC_Initialize(); | 5194 : isolate()->builtins()->KeyedStoreIC_Initialize(); |
| 5017 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 5195 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 5018 } | 5196 } |
| 5019 | 5197 |
| 5020 | 5198 |
| 5021 // TODO(jbramley): Once the merge is done and we're tracking bleeding_edge, try | |
| 5022 // to tidy up this function. | |
| 5023 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { | 5199 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { |
| 5024 Representation representation = instr->representation(); | 5200 Representation representation = instr->representation(); |
| 5025 | 5201 |
| 5026 Register object = ToRegister(instr->object()); | 5202 Register object = ToRegister(instr->object()); |
| 5027 Register temp0 = ToRegister(instr->temp0()); | |
| 5028 Register temp1 = ToRegister(instr->temp1()); | |
| 5029 HObjectAccess access = instr->hydrogen()->access(); | 5203 HObjectAccess access = instr->hydrogen()->access(); |
| 5204 Handle<Map> transition = instr->transition(); |
| 5030 int offset = access.offset(); | 5205 int offset = access.offset(); |
| 5031 | 5206 |
| 5032 if (access.IsExternalMemory()) { | 5207 if (access.IsExternalMemory()) { |
| 5208 ASSERT(transition.is_null()); |
| 5209 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); |
| 5033 Register value = ToRegister(instr->value()); | 5210 Register value = ToRegister(instr->value()); |
| 5034 __ Store(value, MemOperand(object, offset), representation); | 5211 __ Store(value, MemOperand(object, offset), representation); |
| 5035 return; | 5212 return; |
| 5036 } | |
| 5037 | |
| 5038 Handle<Map> transition = instr->transition(); | |
| 5039 SmiCheck check_needed = | |
| 5040 instr->hydrogen()->value()->IsHeapObject() | |
| 5041 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | |
| 5042 | |
| 5043 if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { | |
| 5044 Register value = ToRegister(instr->value()); | |
| 5045 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | |
| 5046 DeoptimizeIfSmi(value, instr->environment()); | |
| 5047 | |
| 5048 // We know that value is a smi now, so we can omit the check below. | |
| 5049 check_needed = OMIT_SMI_CHECK; | |
| 5050 } | |
| 5051 } else if (representation.IsDouble()) { | 5213 } else if (representation.IsDouble()) { |
| 5052 ASSERT(transition.is_null()); | 5214 ASSERT(transition.is_null()); |
| 5053 ASSERT(access.IsInobject()); | 5215 ASSERT(access.IsInobject()); |
| 5054 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); | 5216 ASSERT(!instr->hydrogen()->NeedsWriteBarrier()); |
| 5055 FPRegister value = ToDoubleRegister(instr->value()); | 5217 FPRegister value = ToDoubleRegister(instr->value()); |
| 5056 __ Str(value, FieldMemOperand(object, offset)); | 5218 __ Str(value, FieldMemOperand(object, offset)); |
| 5057 return; | 5219 return; |
| 5058 } | 5220 } |
| 5059 | 5221 |
| 5222 Register value = ToRegister(instr->value()); |
| 5223 |
| 5224 SmiCheck check_needed = instr->hydrogen()->value()->IsHeapObject() |
| 5225 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| 5226 |
| 5227 if (representation.IsHeapObject() && |
| 5228 !instr->hydrogen()->value()->type().IsHeapObject()) { |
| 5229 DeoptimizeIfSmi(value, instr->environment()); |
| 5230 |
| 5231 // We know that value is a smi now, so we can omit the check below. |
| 5232 check_needed = OMIT_SMI_CHECK; |
| 5233 } |
| 5234 |
| 5060 if (!transition.is_null()) { | 5235 if (!transition.is_null()) { |
| 5061 // Store the new map value. | 5236 // Store the new map value. |
| 5062 Register new_map_value = temp0; | 5237 Register new_map_value = ToRegister(instr->temp0()); |
| 5063 __ Mov(new_map_value, Operand(transition)); | 5238 __ Mov(new_map_value, Operand(transition)); |
| 5064 __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset)); | 5239 __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5065 if (instr->hydrogen()->NeedsWriteBarrierForMap()) { | 5240 if (instr->hydrogen()->NeedsWriteBarrierForMap()) { |
| 5066 // Update the write barrier for the map field. | 5241 // Update the write barrier for the map field. |
| 5067 __ RecordWriteField(object, | 5242 __ RecordWriteField(object, |
| 5068 HeapObject::kMapOffset, | 5243 HeapObject::kMapOffset, |
| 5069 new_map_value, | 5244 new_map_value, |
| 5070 temp1, | 5245 ToRegister(instr->temp1()), |
| 5071 GetLinkRegisterState(), | 5246 GetLinkRegisterState(), |
| 5072 kSaveFPRegs, | 5247 kSaveFPRegs, |
| 5073 OMIT_REMEMBERED_SET, | 5248 OMIT_REMEMBERED_SET, |
| 5074 OMIT_SMI_CHECK); | 5249 OMIT_SMI_CHECK); |
| 5075 } | 5250 } |
| 5076 } | 5251 } |
| 5077 | 5252 |
| 5078 // Do the store. | 5253 // Do the store. |
| 5079 Register value = ToRegister(instr->value()); | |
| 5080 Register destination; | 5254 Register destination; |
| 5081 if (access.IsInobject()) { | 5255 if (access.IsInobject()) { |
| 5082 destination = object; | 5256 destination = object; |
| 5083 } else { | 5257 } else { |
| 5258 Register temp0 = ToRegister(instr->temp0()); |
| 5084 __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset)); | 5259 __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
| 5085 destination = temp0; | 5260 destination = temp0; |
| 5086 } | 5261 } |
| 5087 | 5262 |
| 5088 if (representation.IsSmi() && | 5263 if (representation.IsSmi() && |
| 5089 instr->hydrogen()->value()->representation().IsInteger32()) { | 5264 instr->hydrogen()->value()->representation().IsInteger32()) { |
| 5090 ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); | 5265 ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY); |
| 5091 #ifdef DEBUG | 5266 #ifdef DEBUG |
| 5092 __ Ldr(temp1, FieldMemOperand(destination, offset)); | 5267 Register temp0 = ToRegister(instr->temp0()); |
| 5093 __ AssertSmi(temp1); | 5268 __ Ldr(temp0, FieldMemOperand(destination, offset)); |
| 5269 __ AssertSmi(temp0); |
| 5270 // If destination aliased temp0, restore it to the address calculated |
| 5271 // earlier. |
| 5272 if (destination.Is(temp0)) { |
| 5273 ASSERT(!access.IsInobject()); |
| 5274 __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
| 5275 } |
| 5094 #endif | 5276 #endif |
| 5095 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0); | 5277 STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0); |
| 5096 __ Store(value, UntagSmiFieldMemOperand(destination, offset), | 5278 __ Store(value, UntagSmiFieldMemOperand(destination, offset), |
| 5097 Representation::Integer32()); | 5279 Representation::Integer32()); |
| 5098 } else { | 5280 } else { |
| 5099 __ Store(value, FieldMemOperand(destination, offset), representation); | 5281 __ Store(value, FieldMemOperand(destination, offset), representation); |
| 5100 } | 5282 } |
| 5101 if (instr->hydrogen()->NeedsWriteBarrier()) { | 5283 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 5102 __ RecordWriteField(destination, | 5284 __ RecordWriteField(destination, |
| 5103 offset, | 5285 offset, |
| 5104 value, // Clobbered. | 5286 value, // Clobbered. |
| 5105 temp1, // Clobbered. | 5287 ToRegister(instr->temp1()), // Clobbered. |
| 5106 GetLinkRegisterState(), | 5288 GetLinkRegisterState(), |
| 5107 kSaveFPRegs, | 5289 kSaveFPRegs, |
| 5108 EMIT_REMEMBERED_SET, | 5290 EMIT_REMEMBERED_SET, |
| 5109 check_needed); | 5291 check_needed); |
| 5110 } | 5292 } |
| 5111 } | 5293 } |
| 5112 | 5294 |
| 5113 | 5295 |
| 5114 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { | 5296 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { |
| 5115 ASSERT(ToRegister(instr->context()).is(cp)); | 5297 ASSERT(ToRegister(instr->context()).is(cp)); |
| 5116 ASSERT(ToRegister(instr->value()).is(x0)); | 5298 ASSERT(ToRegister(instr->value()).is(x0)); |
| 5117 ASSERT(ToRegister(instr->object()).is(x1)); | 5299 ASSERT(ToRegister(instr->object()).is(x1)); |
| 5118 | 5300 |
| 5119 // Name must be in x2. | 5301 // Name must be in x2. |
| 5120 __ Mov(x2, Operand(instr->name())); | 5302 __ Mov(x2, Operand(instr->name())); |
| 5121 Handle<Code> ic = StoreIC::initialize_stub(isolate(), | 5303 Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); |
| 5122 instr->strict_mode_flag()); | |
| 5123 CallCode(ic, RelocInfo::CODE_TARGET, instr); | 5304 CallCode(ic, RelocInfo::CODE_TARGET, instr); |
| 5124 } | 5305 } |
| 5125 | 5306 |
| 5126 | 5307 |
| 5127 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 5308 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| 5128 ASSERT(ToRegister(instr->context()).is(cp)); | 5309 ASSERT(ToRegister(instr->context()).is(cp)); |
| 5129 ASSERT(ToRegister(instr->left()).Is(x1)); | 5310 ASSERT(ToRegister(instr->left()).Is(x1)); |
| 5130 ASSERT(ToRegister(instr->right()).Is(x0)); | 5311 ASSERT(ToRegister(instr->right()).Is(x0)); |
| 5131 StringAddStub stub(instr->hydrogen()->flags(), | 5312 StringAddStub stub(instr->hydrogen()->flags(), |
| 5132 instr->hydrogen()->pretenure_flag()); | 5313 instr->hydrogen()->pretenure_flag()); |
| (...skipping 440 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5573 __ B(false_label); | 5754 __ B(false_label); |
| 5574 } | 5755 } |
| 5575 } | 5756 } |
| 5576 | 5757 |
| 5577 | 5758 |
| 5578 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { | 5759 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { |
| 5579 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value())); | 5760 __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value())); |
| 5580 } | 5761 } |
| 5581 | 5762 |
| 5582 | 5763 |
| 5583 void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) { | |
| 5584 Register value = ToRegister(instr->value()); | |
| 5585 Register result = ToRegister(instr->result()); | |
| 5586 | |
| 5587 if (!instr->hydrogen()->value()->HasRange() || | |
| 5588 !instr->hydrogen()->value()->range()->IsInSmiRange() || | |
| 5589 instr->hydrogen()->value()->range()->upper() == kMaxInt) { | |
| 5590 // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32] | |
| 5591 // interval, so we treat kMaxInt as a sentinel for this entire interval. | |
| 5592 DeoptimizeIfNegative(value.W(), instr->environment()); | |
| 5593 } | |
| 5594 __ SmiTag(result, value); | |
| 5595 } | |
| 5596 | |
| 5597 | |
| 5598 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5764 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| 5599 Register object = ToRegister(instr->value()); | 5765 Register object = ToRegister(instr->value()); |
| 5600 Register map = ToRegister(instr->map()); | 5766 Register map = ToRegister(instr->map()); |
| 5601 Register temp = ToRegister(instr->temp()); | 5767 Register temp = ToRegister(instr->temp()); |
| 5602 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); | 5768 __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5603 __ Cmp(map, temp); | 5769 __ Cmp(map, temp); |
| 5604 DeoptimizeIf(ne, instr->environment()); | 5770 DeoptimizeIf(ne, instr->environment()); |
| 5605 } | 5771 } |
| 5606 | 5772 |
| 5607 | 5773 |
| (...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5673 __ Bind(&out_of_object); | 5839 __ Bind(&out_of_object); |
| 5674 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); | 5840 __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
| 5675 // Index is equal to negated out of object property index plus 1. | 5841 // Index is equal to negated out of object property index plus 1. |
| 5676 __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2)); | 5842 __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2)); |
| 5677 __ Ldr(result, FieldMemOperand(result, | 5843 __ Ldr(result, FieldMemOperand(result, |
| 5678 FixedArray::kHeaderSize - kPointerSize)); | 5844 FixedArray::kHeaderSize - kPointerSize)); |
| 5679 __ Bind(&done); | 5845 __ Bind(&done); |
| 5680 } | 5846 } |
| 5681 | 5847 |
| 5682 } } // namespace v8::internal | 5848 } } // namespace v8::internal |
| OLD | NEW |