Chromium Code Reviews

Unified diff: src/a64/code-stubs-a64.cc

Issue 164793003: A64: Use a scope utility to allocate scratch registers. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebase. Created 6 years, 9 months ago
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 1204 matching lines...)
  // where base is -INFINITY or -0.

  // Add +0 to base. This has no effect other than turning -0 into +0.
  __ Fmov(zero_double, 0.0);
  __ Fadd(base_double, base_double, zero_double);
  // The operation -0+0 results in +0 in all cases except where the
  // FPCR rounding mode is 'round towards minus infinity' (RM). The
  // A64 simulator does not currently simulate FPCR (where the rounding
  // mode is set), so test the operation with some debug code.
  if (masm->emit_debug_code()) {
-   Register temp = masm->Tmp1();
+   UseScratchRegisterScope temps(masm);
+   Register temp = temps.AcquireX();
    //  d5  zero_double   The value +0.0 as a double.
    __ Fneg(scratch0_double, zero_double);
    // Verify that we correctly generated +0.0 and -0.0.
    //  bits(+0.0) = 0x0000000000000000
    //  bits(-0.0) = 0x8000000000000000
    __ Fmov(temp, zero_double);
    __ CheckRegisterIsClear(temp, kCouldNotGenerateZero);
    __ Fmov(temp, scratch0_double);
    __ Eor(temp, temp, kDSignMask);
    __ CheckRegisterIsClear(temp, kCouldNotGenerateNegativeZero);
(...skipping 257 matching lines...)
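The hunk above leans on an IEEE-754 property: the sum of two zeros of opposite sign is +0.0 under every rounding mode except round-toward-minus-infinity (the FPCR 'RM' mode named in the comment), where it is -0.0. Below is a minimal host-side C++ sketch of that behaviour, independent of the V8 code under review; whether the compiler honours the runtime rounding-mode switch may require strict FENV_ACCESS support.

#include <cfenv>
#include <cmath>
#include <cstdio>

int main() {
  // volatile keeps the compiler from folding the additions at compile time.
  volatile double neg_zero = -0.0;
  volatile double pos_zero = +0.0;

  // Default rounding (to nearest): -0.0 + +0.0 gives +0.0, so signbit is 0.
  std::printf("to-nearest: signbit = %d\n", (int)std::signbit(neg_zero + pos_zero));

  // Round towards minus infinity (FPCR 'RM'): the same sum gives -0.0.
  std::fesetround(FE_DOWNWARD);
  std::printf("downward:   signbit = %d\n", (int)std::signbit(neg_zero + pos_zero));

  std::fesetround(FE_TONEAREST);
  return 0;
}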

  // Store the return address on the stack, in the space previously allocated
  // by EnterExitFrame. The return address is queried by
  // ExitFrame::GetStateForFramePointer.
  Label return_location;
  __ Adr(x12, &return_location);
  __ Poke(x12, 0);
  if (__ emit_debug_code()) {
    // Verify that the slot below fp[kSPOffset]-8 points to the return location
    // (currently in x12).
-   Register temp = masm->Tmp1();
+   UseScratchRegisterScope temps(masm);
+   Register temp = temps.AcquireX();
    __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
    __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSizeInBytes)));
    __ Cmp(temp, x12);
    __ Check(eq, kReturnAddressNotFoundInFrame);
  }

  // Call the builtin.
  __ Blr(target);
  __ Bind(&return_location);
  const Register& result = x0;
(...skipping 3199 matching lines...)
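Both debug-code hunks above replace the fixed masm->Tmp1() scratch with a temporary obtained from a UseScratchRegisterScope, an RAII helper that hands out scratch registers for the lifetime of a C++ scope and returns them to the assembler's pool when the scope ends. The sketch below is a simplified, self-contained model of that pattern, not V8's actual implementation; the type names and the choice of x16/x17 as the scratch pool are illustrative assumptions.

#include <cassert>
#include <cstdint>

struct Register { int code; };  // Illustrative stand-in for the real type.

// The assembler owns a pool of scratch registers, modelled here as a bitmask
// (bits 16 and 17 standing in for the A64 IP registers x16/x17).
struct Assembler {
  uint32_t scratch_pool = (1u << 16) | (1u << 17);
};

// RAII scope: registers acquired here are handed back automatically when the
// scope is destroyed, so nested code cannot silently keep using a scratch
// register that an enclosing scope has already claimed.
class ScratchRegisterScope {
 public:
  explicit ScratchRegisterScope(Assembler* masm)
      : masm_(masm), saved_pool_(masm->scratch_pool) {}
  ~ScratchRegisterScope() { masm_->scratch_pool = saved_pool_; }

  Register AcquireX() {
    assert(masm_->scratch_pool != 0 && "no scratch register available");
    int code = 0;
    while (((masm_->scratch_pool >> code) & 1u) == 0) ++code;
    masm_->scratch_pool &= ~(1u << code);  // Reserved until the scope ends.
    return Register{code};
  }

 private:
  Assembler* masm_;
  uint32_t saved_pool_;
};

int main() {
  Assembler masm;
  {
    ScratchRegisterScope temps(&masm);
    Register temp = temps.AcquireX();       // e.g. x16, now unavailable.
    (void)temp;
  }                                         // Scope ends: x16 is released.
  assert(masm.scratch_pool == ((1u << 16) | (1u << 17)));
  return 0;
}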
                       value,
                       1 << MemoryChunk::SCAN_ON_SCAVENGE,
                       &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm);
    regs_.Restore(masm);  // Restore the extra scratch registers we used.
+
    __ RememberedSetHelper(object_,
                           address_,
-                          value_,
+                          value_,  // scratch1
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);

    __ Bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm);
  regs_.Restore(masm);  // Restore the extra scratch registers we used.
(...skipping 40 matching lines...)
      MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
  __ B(mi, &need_incremental);

  // If the object is not black we don't have to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

  regs_.Restore(masm);  // Restore the extra scratch registers we used.
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
-                          value_,
+                          value_,  // scratch1
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ Bind(&on_black);
  // Get the value from the slot.
  Register value = regs_.scratch0();
  __ Ldr(value, MemOperand(regs_.address()));
(...skipping 22 matching lines...)
                 regs_.object(),   // Scratch.
                 regs_.address(),  // Scratch.
                 regs_.scratch2(), // Scratch.
                 &need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  regs_.Restore(masm);  // Restore the extra scratch registers we used.
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
-                          value_,
+                          value_,  // scratch1
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ Ret();
  }

  __ Bind(&need_incremental_pop_scratch);
  __ Pop(regs_.object(), regs_.address());

  __ Bind(&need_incremental);
(...skipping 12 matching lines...)
  // See RecordWriteStub::Patch for details.
  {
    InstructionAccurateScope scope(masm, 2);
    __ adr(xzr, &skip_to_incremental_noncompacting);
    __ adr(xzr, &skip_to_incremental_compacting);
  }

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object_,
                           address_,
-                          value_,
+                          value_,  // scratch1
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  }
  __ Ret();

  __ Bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ Bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
(...skipping 210 matching lines...)
      __ Add(scratch2, scratch2, Operand(
          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
    }
    __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));

    // Scale the index by multiplying by the element size.
    ASSERT(NameDictionary::kEntrySize == 3);
    __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));

    // Check if the key is identical to the name.
+   UseScratchRegisterScope temps(masm);
+   Register scratch3 = temps.AcquireX();
    __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
-   // TODO(jbramley): We need another scratch here, but some callers can't
-   // provide a scratch3 so we have to use Tmp1(). We should find a clean way
-   // to make it unavailable to the MacroAssembler for a short time.
-   __ Ldr(__ Tmp1(), FieldMemOperand(scratch2, kElementsStartOffset));
-   __ Cmp(name, __ Tmp1());
+   __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
+   __ Cmp(name, scratch3);
    __ B(eq, done);
  }

  // The inlined probes didn't find the entry.
  // Call the complete stub to scan the whole dictionary.

  CPURegList spill_list(CPURegister::kRegister, kXRegSize, 0, 6);
  spill_list.Combine(lr);
  spill_list.Remove(scratch1);
  spill_list.Remove(scratch2);
(...skipping 693 matching lines...)
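In the probe hunk above, an entry index becomes a byte offset in three steps: the probe-adjusted hash is shifted down by Name::kHashShift and masked (scratch1 holds the mask, set up in the elided lines), the index is multiplied by NameDictionary::kEntrySize == 3 with the shift-and-add index + (index << 1), and the result is scaled by the pointer size to reach the key slot. The following is a small C++ restatement of that arithmetic; the mask, hash, and 8-byte pointer size are illustrative assumptions, not values taken from the stub.

#include <cassert>
#include <cstdint>

int main() {
  // Illustrative values only; the real mask comes from the dictionary's
  // capacity and the real hash from the Name being looked up.
  const uint64_t kCapacityMask    = 128 - 1;   // capacity is a power of two
  const uint64_t kEntrySize       = 3;         // NameDictionary::kEntrySize
  const uint64_t kPointerSizeLog2 = 3;         // 8-byte pointers on A64
  const uint64_t hash_with_probe  = 0x12345;   // already shifted down by kHashShift

  // __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
  uint64_t entry = hash_with_probe & kCapacityMask;

  // __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));   // entry *= 3
  uint64_t index = entry + (entry << 1);
  assert(index == entry * kEntrySize);

  // __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
  uint64_t byte_offset = index << kPointerSizeLog2;
  assert(byte_offset == entry * kEntrySize * 8);

  // The key for this probe is then loaded from
  // elements + byte_offset + kElementsStartOffset.
  (void)byte_offset;
  return 0;
}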
                      MemOperand(fp, 6 * kPointerSize),
                      NULL);
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64