Chromium Code Reviews

Diff: src/arm/code-stubs-arm.cc (lines removed by the patch are prefixed with -, added lines with +, unchanged context is unprefixed)

Issue 7945009: Merge experimental/gc branch to the bleeding_edge. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 9 years, 3 months ago
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 856 matching lines...)
  if (the_int_.is(r2) && the_heap_number_.is(r0) && scratch_.is(r3)) {
    return true;
  }
  // Other register combinations are generated as and when they are needed,
  // so it is unsafe to call them from stubs (we can't generate a stub while
  // we are generating a stub).
  return false;
}


-void WriteInt32ToHeapNumberStub::GenerateStubsAheadOfTime() {
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime() {
  WriteInt32ToHeapNumberStub stub1(r1, r0, r2);
  WriteInt32ToHeapNumberStub stub2(r2, r0, r3);
  Handle<Code> code1 = stub1.GetCode();
  Handle<Code> code2 = stub2.GetCode();
}

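As the comment above notes, stubs cannot be generated while another stub is being generated, so the register combinations that stubs themselves need are compiled once, ahead of time. A hedged sketch of that pre-population pattern in plain C++ (Code, RegCombo and stub_cache are hypothetical stand-ins, and the two combinations mirror (r1, r0, r2) and (r2, r0, r3) above):

#include <map>
#include <tuple>

struct Code {};  // stand-in for a compiled stub

// (int_reg, heap_number_reg, scratch_reg), a hypothetical encoding.
typedef std::tuple<int, int, int> RegCombo;
static std::map<RegCombo, Code*> stub_cache;

static Code* Compile(const RegCombo& /* combo */) {
  return new Code();  // placeholder for the real code generator
}

// Run once at startup, while generating code is still safe.
void GenerateFixedRegStubsAheadOfTime() {
  const RegCombo fixed[] = { RegCombo(1, 0, 2), RegCombo(2, 0, 3) };
  for (const RegCombo& combo : fixed) {
    stub_cache[combo] = Compile(combo);  // later lookups hit the cache
  }
}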
// See comment for class.
void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
  Label max_negative_int;
(...skipping 847 matching lines...)
  __ Push(r3, r2, r1);
  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
      3,
      1);
}

+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  __ stm(db_w, sp, kCallerSaved | lr.bit());
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(VFP3);
+    __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      __ vstr(reg, MemOperand(sp, i * kDoubleSize));
+    }
+  }
+  const int argument_count = 1;
+  const int fp_argument_count = 0;
+  const Register scratch = r1;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+  __ mov(r0, Operand(ExternalReference::isolate_address()));
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(masm->isolate()),
+      argument_count);
+  if (save_doubles_ == kSaveFPRegs) {
+    CpuFeatures::Scope scope(VFP3);
+    for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
+      DwVfpRegister reg = DwVfpRegister::from_code(i);
+      __ vldr(reg, MemOperand(sp, i * kDoubleSize));
+    }
+    __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kNumRegisters));
+  }
+  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
+}
+
+
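The stub above saves every caller-saved core register (plus lr), optionally the full VFP register file, and calls the runtime's store_buffer_overflow_function inside an AllowExternalCallThatCantCauseGC scope. What that C function does is outside this file; as a rough mental model (a hedged sketch, not V8's actual implementation), a store buffer is a bounded log of recently written slot addresses, and the overflow handler compacts it:

#include <algorithm>
#include <cstddef>
#include <vector>

class StoreBuffer {
 public:
  void Record(void** slot) {
    log_.push_back(slot);
    if (log_.size() >= kLimit) Overflow();  // what the stub's C call reaches
  }

 private:
  // Drop duplicate slot addresses so the buffer shrinks without losing
  // any remembered-set information.
  void Overflow() {
    std::sort(log_.begin(), log_.end());
    log_.erase(std::unique(log_.begin(), log_.end()), log_.end());
  }

  static const size_t kLimit = 1024;
  std::vector<void**> log_;
};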
void UnaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name = NULL;  // Make g++ happy.
  switch (mode_) {
    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
  }
  stream->Add("UnaryOpStub_%s_%s_%s",
              op_name,
              overwrite_name,
(...skipping 1604 matching lines...)
}


bool CEntryStub::CompilingCallsToThisStubIsGCSafe() {
  return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
         result_size_ == 1;
}


void CodeStub::GenerateStubsAheadOfTime() {
-  WriteInt32ToHeapNumberStub::GenerateStubsAheadOfTime();
+  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime();
}


void CodeStub::GenerateFPStubs() {
-  CEntryStub save_doubles(1);
-  save_doubles.SaveDoubles();
+  CEntryStub save_doubles(1, kSaveFPRegs);
  Handle<Code> code = save_doubles.GetCode();
  code->GetIsolate()->set_fp_stubs_generated(true);
}


void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
  __ Throw(r0);
}


(...skipping 1243 matching lines...)
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
  __ add(r1, r1, Operand(2));  // r1 was a smi.

  // r1: number of capture registers
  // r4: subject string
  // Store the capture count.
  __ mov(r2, Operand(r1, LSL, kSmiTagSize + kSmiShiftSize));  // To smi.
  __ str(r2, FieldMemOperand(last_match_info_elements,
                             RegExpImpl::kLastCaptureCountOffset));
  // Store last subject and last input.
-  __ mov(r3, last_match_info_elements);  // Moved up to reduce latency.
  __ str(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastSubjectOffset));
-  __ RecordWrite(r3, Operand(RegExpImpl::kLastSubjectOffset), r2, r7);
+  __ mov(r2, subject);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastSubjectOffset,
+                      r2,
+                      r7,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs);
  __ str(subject,
         FieldMemOperand(last_match_info_elements,
                         RegExpImpl::kLastInputOffset));
-  __ mov(r3, last_match_info_elements);
-  __ RecordWrite(r3, Operand(RegExpImpl::kLastInputOffset), r2, r7);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastInputOffset,
+                      subject,
+                      r7,
+                      kLRHasNotBeenSaved,
+                      kDontSaveFPRegs);

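The switch from RecordWrite to RecordWriteField above routes these stores through the new GC's write barrier, which updates the remembered set and (as the stubs later in this file show) also informs the incremental marker. A minimal sketch of the remembered-set half of that barrier, with stand-in types that are illustrative rather than V8's:

#include <vector>

struct HeapObject { bool in_new_space; };
static std::vector<HeapObject**> store_buffer;  // remembered slots

// After storing `value` into a field (`slot`) of `object`, record the slot
// if it may now hold an old-to-new pointer.
void WriteField(HeapObject* object, HeapObject** slot, HeapObject* value) {
  *slot = value;  // the __ str above
  if (!object->in_new_space && value->in_new_space) {
    store_buffer.push_back(slot);  // what RecordWriteField arranges to do
  }
}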
  // Get the static offsets vector filled by the native regexp code.
  ExternalReference address_of_static_offsets_vector =
      ExternalReference::address_of_static_offsets_vector(isolate);
  __ mov(r2, Operand(address_of_static_offsets_vector));

  // r1: number of capture registers
  // r2: offsets vector
  Label next_capture, done;
  // Capture register counter starts from number of capture registers and
(...skipping 2100 matching lines...)
  __ bind(&in_dictionary);
  __ mov(result, Operand(1));
  __ Ret();

  __ bind(&not_in_dictionary);
  __ mov(result, Operand::Zero());
  __ Ret();
}


+struct AheadOfTimeWriteBarrierStubList {
+  Register object, value, address;
+  RememberedSetAction action;
+};
+
+
+struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
+  // TODO(1696): Fill this in for ARM.
+  // Null termination.
+  { no_reg, no_reg, no_reg, EMIT_REMEMBERED_SET}
+};
+
+
+bool RecordWriteStub::CompilingCallsToThisStubIsGCSafe() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    if (object_.is(entry->object) &&
+        value_.is(entry->value) &&
+        address_.is(entry->address) &&
+        remembered_set_action_ == entry->action &&
+        save_fp_regs_mode_ == kDontSaveFPRegs) {
+      return true;
+    }
+  }
+  return true;  // TODO(1696): Should be false.
+}
+
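Since the kAheadOfTime table is still empty for ARM (TODO 1696), the predicate above conservatively returns true after scanning the null-terminated list. The scan itself is the usual sentinel-terminated table walk; a self-contained sketch with plain ints standing in for Register values:

#include <cstdio>

struct Entry { int object, value, address; };
const int kNoReg = -1;

Entry kTable[] = {
  {0, 1, 2},
  {3, 4, 5},
  {kNoReg, 0, 0},  // null termination, mirrors { no_reg, ... } above
};

bool Contains(int object, int value, int address) {
  for (Entry* e = kTable; e->object != kNoReg; e++) {
    if (e->object == object && e->value == value && e->address == address)
      return true;
  }
  return false;
}

int main() {
  std::printf("%d\n", Contains(3, 4, 5));  // prints 1
}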
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
+  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
+  stub1.GetCode();
+  StoreBufferOverflowStub stub2(kSaveFPRegs);
+  stub2.GetCode();
+}
+
+
+void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
+  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
+       !entry->object.is(no_reg);
+       entry++) {
+    RecordWriteStub stub(entry->object,
+                         entry->value,
+                         entry->address,
+                         entry->action,
+                         kDontSaveFPRegs);
+    stub.GetCode();
+  }
+}
+
+
+// Takes the input in 3 registers: address_, value_ and object_. A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed. The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two instructions are generated with labels so as to get the
+  // offset fixed up correctly by the bind(Label*) call. We patch it back and
+  // forth between a compare instruction (a nop in this position) and the
+  // real branch when we start and stop incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+  __ b(&skip_to_incremental_noncompacting);
+  __ b(&skip_to_incremental_compacting);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(
+        address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
+  }
+  __ Ret();
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
+  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
+  PatchBranchIntoNop(masm, 0);
+  PatchBranchIntoNop(masm, Assembler::kInstrSize);
+}
+
+
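The two leading branches give the stub three effective modes. As the comment in Generate explains, they are assembled as branches so the label offsets get fixed up, then immediately patched into nops: the stub starts in STORE_BUFFER_ONLY mode, and incremental marking later patches a branch back in to activate one of the incremental paths. A hedged sketch of that life cycle, modeling the two patchable instruction slots as a single mode field (stand-in types, not V8's):

#include <cassert>

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

struct Stub {
  // In the generated code these are two instruction slots holding either
  // nops or branches to the incremental entry points.
  Mode mode;
  Stub() : mode(STORE_BUFFER_ONLY) {}
};

// Mirrors the activation step checked by the ASSERTs above: the stub must
// still be in its initial state, then one nop is turned back into a branch.
void ActivateGeneratedStub(Stub* stub, Mode new_mode) {
  assert(stub->mode == STORE_BUFFER_ONLY);
  stub->mode = new_mode;
}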
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     ne,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm, mode);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(
+        address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm, mode);
+  regs_.Restore(masm);
+  __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  Register address =
+      r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+  ASSERT(!address.is(regs_.object()));
+  ASSERT(!address.is(r0));
+  __ Move(address, regs_.address());
+  __ Move(r0, regs_.object());
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ Move(r1, address);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ ldr(r1, MemOperand(address, 0));
+  }
+  __ mov(r2, Operand(ExternalReference::isolate_address()));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  if (mode == INCREMENTAL_COMPACTION) {
+    __ CallCFunction(
+        ExternalReference::incremental_evacuation_record_write_function(
+            masm->isolate()),
+        argument_count);
+  } else {
+    ASSERT(mode == INCREMENTAL);
+    __ CallCFunction(
+        ExternalReference::incremental_marking_record_write_function(
+            masm->isolate()),
+        argument_count);
+  }
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_scratch;
+
+  // Let's look at the color of the object: if it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(
+        address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     eq,
+                     &ensure_not_white);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     eq,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need extra registers for this, so we push the object and the address
+  // register temporarily.
+  __ Push(regs_.object(), regs_.address());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),    // Scratch.
+                    regs_.address(),   // Scratch.
+                    &need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(
+        address_, value_, save_fp_regs_mode_, MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
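CheckNeedsToInformIncrementalMarker is the tri-color part of the barrier: a store is only interesting when the object is already black, and then only if the value is still white, in which case EnsureNotWhite greys it. A minimal sketch of that invariant (a Dijkstra-style insertion barrier; stand-in types, not V8's):

#include <deque>

enum Color { WHITE, GREY, BLACK };

struct Obj { Color color; Obj() : color(WHITE) {} };
static std::deque<Obj*> marking_worklist;

// After *slot = value inside `object`: a black object must never point
// directly at a white one, so grey the value and queue it for scanning.
void InformMarkerIfNeeded(Obj* object, Obj* value) {
  if (object->color != BLACK) return;  // the JumpIfBlack test above
  if (value->color == WHITE) {         // the EnsureNotWhite step above
    value->color = GREY;
    marking_worklist.push_back(value);
  }
}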
#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM
