Chromium Code Reviews

Unified Diff: src/a64/code-stubs-a64.h

Issue 148293020: Merge experimental/a64 to bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove ARM from OWNERS | Created 6 years, 10 months ago
Index: src/a64/code-stubs-a64.h
diff --git a/src/arm/code-stubs-arm.h b/src/a64/code-stubs-a64.h
similarity index 57%
copy from src/arm/code-stubs-arm.h
copy to src/a64/code-stubs-a64.h
index 7a371f169490954dea366b6cde955d4936174989..0709bfc5118e28cb92e66c195d19452411bdfee0 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/a64/code-stubs-a64.h
@@ -1,4 +1,4 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_ARM_CODE_STUBS_ARM_H_
-#define V8_ARM_CODE_STUBS_ARM_H_
+#ifndef V8_A64_CODE_STUBS_A64_H_
+#define V8_A64_CODE_STUBS_A64_H_
#include "ic-inl.h"
@@ -40,7 +40,7 @@ void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) {}
+ : save_doubles_(save_fp) { }
void Generate(MacroAssembler* masm);
@@ -57,20 +57,7 @@ class StoreBufferOverflowStub: public PlatformCodeStub {
class StringHelper : public AllStatic {
public:
- // Generate code for copying a large number of characters. This function
- // is allowed to spend extra time setting up conditions to make copying
- // faster. Copying of overlapping regions is not supported.
- // Dest register ends at the position after the last character written.
- static void GenerateCopyCharactersLong(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4,
- int flags);
-
+ // TODO(all): These don't seem to be used any more. Delete them.
// Generate string hash.
static void GenerateHashInit(MacroAssembler* masm,
@@ -82,101 +69,19 @@ class StringHelper : public AllStatic {
Register character);
static void GenerateHashGetHash(MacroAssembler* masm,
- Register hash);
+ Register hash,
+ Register scratch);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
};
-class SubStringStub: public PlatformCodeStub {
- public:
- SubStringStub() {}
-
- private:
- Major MajorKey() { return SubString; }
- int MinorKey() { return 0; }
-
- void Generate(MacroAssembler* masm);
-};
-
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
- StringCompareStub() { }
-
- // Compares two flat ASCII strings and returns result in r0.
- static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Register scratch4);
-
- // Compares two flat ASCII strings for equality and returns result
- // in r0.
- static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
- Register left,
- Register right,
- Register scratch1,
- Register scratch2,
- Register scratch3);
-
- private:
- virtual Major MajorKey() { return StringCompare; }
- virtual int MinorKey() { return 0; }
- virtual void Generate(MacroAssembler* masm);
-
- static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
- Register left,
- Register right,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* chars_not_equal);
-};
-
-
-// This stub can convert a signed int32 to a heap number (double). It does
-// not work for int32s that are in Smi range! No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
- public:
- WriteInt32ToHeapNumberStub(Register the_int,
- Register the_heap_number,
- Register scratch)
- : the_int_(the_int),
- the_heap_number_(the_heap_number),
- scratch_(scratch) { }
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-
- private:
- Register the_int_;
- Register the_heap_number_;
- Register scratch_;
-
- // Minor key encoding in 16 bits.
- class IntRegisterBits: public BitField<int, 0, 4> {};
- class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
- class ScratchRegisterBits: public BitField<int, 8, 4> {};
-
- Major MajorKey() { return WriteInt32ToHeapNumber; }
- int MinorKey() {
- // Encode the parameters in a unique 16 bit value.
- return IntRegisterBits::encode(the_int_.code())
- | HeapNumberRegisterBits::encode(the_heap_number_.code())
- | ScratchRegisterBits::encode(scratch_.code());
- }
-
- void Generate(MacroAssembler* masm);
-};
-
-
class RecordWriteStub: public PlatformCodeStub {
public:
+ // Stub to record the write of 'value' at 'address' in 'object'.
+ // Typically 'address' = 'object' + <some offset>.
+ // See MacroAssembler::RecordWriteField() for example.
RecordWriteStub(Register object,
Register value,
Register address,
@@ -200,126 +105,190 @@ class RecordWriteStub: public PlatformCodeStub {
virtual bool SometimesSetsUpAFrame() { return false; }
- static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
- ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
- }
-
- static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
- masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
- ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
- }
-
static Mode GetMode(Code* stub) {
- Instr first_instruction = Assembler::instr_at(stub->instruction_start());
- Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
- Assembler::kInstrSize);
+ // Find the mode depending on the first two instructions.
+ Instruction* instr1 =
+ reinterpret_cast<Instruction*>(stub->instruction_start());
+ Instruction* instr2 = instr1->following();
- if (Assembler::IsBranch(first_instruction)) {
+ if (instr1->IsUncondBranchImm()) {
+ ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
return INCREMENTAL;
}
- ASSERT(Assembler::IsTstImmediate(first_instruction));
+ ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
- if (Assembler::IsBranch(second_instruction)) {
+ if (instr2->IsUncondBranchImm()) {
return INCREMENTAL_COMPACTION;
}
- ASSERT(Assembler::IsTstImmediate(second_instruction));
+ ASSERT(instr2->IsPCRelAddressing());
return STORE_BUFFER_ONLY;
}
+ // We patch the first two instructions of the stub back and forth between an
+ // adr and a branch when we start and stop incremental heap marking.
+ // The branch is
+ // b label
+ // The adr is
+ // adr xzr label
+ // so effectively a nop.
static void Patch(Code* stub, Mode mode) {
- MacroAssembler masm(NULL,
- stub->instruction_start(),
- stub->instruction_size());
+ // We are going to patch the first two instructions of the stub.
+ PatchingAssembler patcher(
+ reinterpret_cast<Instruction*>(stub->instruction_start()), 2);
+ Instruction* instr1 = patcher.InstructionAt(0);
+ Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
+ // Instructions must be either 'adr' or 'b'.
+ ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
+ ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
+ // Retrieve the offsets to the labels.
+ int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
+ int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
+
switch (mode) {
case STORE_BUFFER_ONLY:
ASSERT(GetMode(stub) == INCREMENTAL ||
GetMode(stub) == INCREMENTAL_COMPACTION);
- PatchBranchIntoNop(&masm, 0);
- PatchBranchIntoNop(&masm, Assembler::kInstrSize);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, 0);
+ patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
+ patcher.adr(xzr, offset_to_incremental_compacting);
break;
case INCREMENTAL_COMPACTION:
ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
- PatchNopIntoBranch(&masm, Assembler::kInstrSize);
+ patcher.adr(xzr, offset_to_incremental_noncompacting);
+ patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
break;
}
ASSERT(GetMode(stub) == mode);
- CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
}
private:
- // This is a helper class for freeing up 3 scratch registers. The input is
- // two registers that must be preserved and one scratch register provided by
- // the caller.
+ // This is a helper class to manage the registers associated with the stub.
+ // The 'object' and 'address' registers must be preserved.
class RegisterAllocation {
public:
RegisterAllocation(Register object,
Register address,
- Register scratch0)
+ Register scratch)
: object_(object),
address_(address),
- scratch0_(scratch0) {
- ASSERT(!AreAliased(scratch0, object, address, no_reg));
- scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
+ scratch0_(scratch),
+ saved_regs_(kCallerSaved) {
+ ASSERT(!AreAliased(scratch, object, address));
+
+ // We would like to require more scratch registers for this stub,
+ // but the number of registers comes down to the ones used in
+ // FullCodeGen::SetVar(), which is architecture independent.
+ // We allocate 2 extra scratch registers that we'll save on the stack.
+ CPURegList pool_available = GetValidRegistersForAllocation();
+ CPURegList used_regs(object, address, scratch);
+ pool_available.Remove(used_regs);
+ scratch1_ = Register(pool_available.PopLowestIndex());
+ scratch2_ = Register(pool_available.PopLowestIndex());
+
+ // The SaveCallerSaveRegisters() method needs to save caller-saved registers,
+ // but we don't bother saving ip0 and ip1 because they are used as scratch
+ // registers by the MacroAssembler.
+ saved_regs_.Remove(ip0);
+ saved_regs_.Remove(ip1);
+
+ // The scratch registers will be restored by other means so we don't need
+ // to save them with the other caller saved registers.
+ saved_regs_.Remove(scratch0_);
+ saved_regs_.Remove(scratch1_);
+ saved_regs_.Remove(scratch2_);
}
void Save(MacroAssembler* masm) {
- ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
// We don't have to save scratch0_ because it was given to us as
// a scratch register.
- masm->push(scratch1_);
+ masm->Push(scratch1_, scratch2_);
}
void Restore(MacroAssembler* masm) {
- masm->pop(scratch1_);
+ masm->Pop(scratch2_, scratch1_);
}
// If we have to call into C then we need to save and restore all caller-
- // saved registers that were not already preserved. The scratch registers
- // will be restored by other means so we don't bother pushing them here.
+ // saved registers that were not already preserved.
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
- masm->stm(db_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+ // TODO(all): This can be very expensive, and it is likely that not every
+ // register will need to be preserved. Can we improve this?
+ masm->PushCPURegList(saved_regs_);
if (mode == kSaveFPRegs) {
- masm->SaveFPRegs(sp, scratch0_);
+ masm->PushCPURegList(kCallerSavedFP);
}
}
- inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
- SaveFPRegsMode mode) {
+ void RestoreCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+ // TODO(all): This can be very expensive, and it is likely that not every
+ // register will need to be preserved. Can we improve this?
if (mode == kSaveFPRegs) {
- masm->RestoreFPRegs(sp, scratch0_);
+ masm->PopCPURegList(kCallerSavedFP);
}
- masm->ldm(ia_w, sp, (kCallerSaved | lr.bit()) & ~scratch1_.bit());
+ masm->PopCPURegList(saved_regs_);
}
- inline Register object() { return object_; }
- inline Register address() { return address_; }
- inline Register scratch0() { return scratch0_; }
- inline Register scratch1() { return scratch1_; }
+ Register object() { return object_; }
+ Register address() { return address_; }
+ Register scratch0() { return scratch0_; }
+ Register scratch1() { return scratch1_; }
+ Register scratch2() { return scratch2_; }
private:
Register object_;
Register address_;
Register scratch0_;
Register scratch1_;
+ Register scratch2_;
+ CPURegList saved_regs_;
+
+ // TODO(all): We should consider moving this somewhere else.
+ static CPURegList GetValidRegistersForAllocation() {
+ // The list of valid registers for allocation is defined as all the
+ // registers except those with a special meaning.
+ //
+ // The default list excludes registers x26 to x31 because they are
+ // reserved for the following purpose:
+ // - x26 root register
+ // - x27 context pointer register
+ // - x28 jssp
+ // - x29 frame pointer
+ // - x30 link register (lr)
+ // - x31 xzr/stack pointer
+ CPURegList list(CPURegister::kRegister, kXRegSize, 0, 25);
+
+ // We also remove MacroAssembler's scratch registers.
+ list.Remove(ip0);
+ list.Remove(ip1);
+ list.Remove(x8);
+ list.Remove(x9);
+
+ return list;
+ }
friend class RecordWriteStub;
};
+ // A list of stub variants which are pregenerated.
+ // The variants are stored in the same format as the minor key, so
+ // MinorKeyFor() can be used to populate and check this list.
+ static const int kAheadOfTime[];
+
+ void Generate(MacroAssembler* masm);
+ void GenerateIncremental(MacroAssembler* masm, Mode mode);
+
enum OnNoNeedToInformIncrementalMarker {
kReturnOnNoNeedToInformIncrementalMarker,
kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
};
- void Generate(MacroAssembler* masm);
- void GenerateIncremental(MacroAssembler* masm, Mode mode);
void CheckNeedsToInformIncrementalMarker(
MacroAssembler* masm,
OnNoNeedToInformIncrementalMarker on_no_need,
@@ -329,22 +298,34 @@ class RecordWriteStub: public PlatformCodeStub {
Major MajorKey() { return RecordWrite; }
int MinorKey() {
- return ObjectBits::encode(object_.code()) |
- ValueBits::encode(value_.code()) |
- AddressBits::encode(address_.code()) |
- RememberedSetActionBits::encode(remembered_set_action_) |
- SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+ return MinorKeyFor(object_, value_, address_, remembered_set_action_,
+ save_fp_regs_mode_);
+ }
+
+ static int MinorKeyFor(Register object,
+ Register value,
+ Register address,
+ RememberedSetAction action,
+ SaveFPRegsMode fp_mode) {
+ ASSERT(object.Is64Bits());
+ ASSERT(value.Is64Bits());
+ ASSERT(address.Is64Bits());
+ return ObjectBits::encode(object.code()) |
+ ValueBits::encode(value.code()) |
+ AddressBits::encode(address.code()) |
+ RememberedSetActionBits::encode(action) |
+ SaveFPRegsModeBits::encode(fp_mode);
}
void Activate(Code* code) {
code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
}
- class ObjectBits: public BitField<int, 0, 4> {};
- class ValueBits: public BitField<int, 4, 4> {};
- class AddressBits: public BitField<int, 8, 4> {};
- class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
- class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
+ class ObjectBits: public BitField<int, 0, 5> {};
+ class ValueBits: public BitField<int, 5, 5> {};
+ class AddressBits: public BitField<int, 10, 5> {};
+ class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
+ class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
Register object_;
Register value_;
@@ -356,11 +337,8 @@ class RecordWriteStub: public PlatformCodeStub {
};
-// Trampoline stub to call into native code. To call safely into native code
-// in the presence of compacting GC (which can move code objects) we need to
-// keep the code which called into native pinned in the memory. Currently the
-// simplest approach is to generate such stub early enough so it can never be
-// moved by GC
+// Helper to call C++ functions from generated code. The caller must prepare
+// the exit frame before doing the call with GenerateCall.
class DirectCEntryStub: public PlatformCodeStub {
public:
DirectCEntryStub() {}
@@ -396,8 +374,8 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
Label* done,
Register elements,
Register name,
- Register r0,
- Register r1);
+ Register scratch1,
+ Register scratch2);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -425,6 +403,55 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
};
+class SubStringStub: public PlatformCodeStub {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public PlatformCodeStub {
+ public:
+ StringCompareStub() { }
+
+ // Compares two flat ASCII strings and returns result in x0.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3,
+ Register scratch4);
+
+ // Compares two flat ASCII strings for equality and returns the result
+ // in x0.
+ static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ virtual Major MajorKey() { return StringCompare; }
+ virtual int MinorKey() { return 0; }
+ virtual void Generate(MacroAssembler* masm);
+
+ static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* chars_not_equal);
+};
+
+
struct PlatformCallInterfaceDescriptor {
explicit PlatformCallInterfaceDescriptor(
TargetAddressStorageMode storage_mode)
@@ -439,4 +466,4 @@ struct PlatformCallInterfaceDescriptor {
} } // namespace v8::internal
-#endif // V8_ARM_CODE_STUBS_ARM_H_
+#endif // V8_A64_CODE_STUBS_A64_H_
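The mode-switching scheme in RecordWriteStub above (GetMode() inspecting the first two instructions, Patch() rewriting them) can be pictured with a short standalone sketch. This is not V8 code: SlotKind, StubHeader, GetMode and Patch below are hypothetical stand-ins for the real instruction tests (IsUncondBranchImm(), IsPCRelAddressing()) and for the PatchingAssembler; they only illustrate how the choice between 'b label' and the nop-like 'adr xzr, label' in each of the two slots encodes the stub's mode.

// Self-contained sketch (not V8 code) of the mode encoding described in the
// patch above. Instruction encodings are modeled with a simple enum instead
// of real A64 opcodes.
#include <cassert>

enum SlotKind { ADR_NOP, BRANCH };   // stand-in for IsPCRelAddressing()/IsUncondBranchImm()
enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

struct StubHeader { SlotKind slot1, slot2; };

Mode GetMode(const StubHeader& stub) {
  if (stub.slot1 == BRANCH) {
    assert(stub.slot2 == ADR_NOP);
    return INCREMENTAL;                // b ...; adr xzr, ...
  }
  if (stub.slot2 == BRANCH) {
    return INCREMENTAL_COMPACTION;     // adr xzr, ...; b ...
  }
  return STORE_BUFFER_ONLY;            // adr xzr, ...; adr xzr, ...
}

void Patch(StubHeader* stub, Mode mode) {
  stub->slot1 = (mode == INCREMENTAL) ? BRANCH : ADR_NOP;
  stub->slot2 = (mode == INCREMENTAL_COMPACTION) ? BRANCH : ADR_NOP;
  assert(GetMode(*stub) == mode);      // mirrors the ASSERT at the end of Patch()
}

int main() {
  StubHeader stub = { ADR_NOP, ADR_NOP };   // freshly generated stub
  assert(GetMode(stub) == STORE_BUFFER_ONLY);
  Patch(&stub, INCREMENTAL);                // incremental marking starts
  assert(GetMode(stub) == INCREMENTAL);
  Patch(&stub, STORE_BUFFER_ONLY);          // marking stops again
  assert(GetMode(stub) == STORE_BUFFER_ONLY);
  return 0;
}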

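In the same spirit, the widened MinorKey() layout above (5 bits per register code, since A64 has 32 general-purpose registers, with the RememberedSetAction and SaveFPRegsMode flags moved up to bits 15 and 16) can be illustrated with a minimal self-contained sketch. BitFieldSketch and the two enums below are simplified stand-ins for V8's BitField template and the real enums; the layout matches the patched ObjectBits/ValueBits/AddressBits/RememberedSetActionBits/SaveFPRegsModeBits classes, but this is an illustration of the packing scheme, not the actual implementation.

// Minimal sketch (not V8 code) of how the patched minor-key bit fields pack
// three 5-bit register codes plus two 1-bit flags into one integer.
#include <cassert>
#include <cstdint>

template <typename T, int kShift, int kSize>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) {
    assert(static_cast<uint32_t>(value) < (1u << kSize));
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

enum RememberedSetAction { EMIT_REMEMBERED_SET = 0, OMIT_REMEMBERED_SET = 1 };
enum SaveFPRegsMode { kDontSaveFPRegs = 0, kSaveFPRegs = 1 };

// Same layout as in the patch: 5 bits per register code, then two flag bits.
typedef BitFieldSketch<int, 0, 5> ObjectBits;
typedef BitFieldSketch<int, 5, 5> ValueBits;
typedef BitFieldSketch<int, 10, 5> AddressBits;
typedef BitFieldSketch<RememberedSetAction, 15, 1> RememberedSetActionBits;
typedef BitFieldSketch<SaveFPRegsMode, 16, 1> SaveFPRegsModeBits;

int main() {
  // Example: object in x1, value in x2, address in x3.
  uint32_t key = ObjectBits::encode(1) |
                 ValueBits::encode(2) |
                 AddressBits::encode(3) |
                 RememberedSetActionBits::encode(EMIT_REMEMBERED_SET) |
                 SaveFPRegsModeBits::encode(kSaveFPRegs);
  assert(ObjectBits::decode(key) == 1);
  assert(ValueBits::decode(key) == 2);
  assert(AddressBits::decode(key) == 3);
  assert(RememberedSetActionBits::decode(key) == EMIT_REMEMBERED_SET);
  assert(SaveFPRegsModeBits::decode(key) == kSaveFPRegs);
  return 0;
}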