Index: src/x64/code-stubs-x64.h
===================================================================
--- src/x64/code-stubs-x64.h (revision 9327)
+++ src/x64/code-stubs-x64.h (working copy)
@@ -59,6 +59,32 @@
 };
+class StoreBufferOverflowStub: public CodeStub {
+ public:
+  explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
+      : save_doubles_(save_fp) { }
+
+  void Generate(MacroAssembler* masm);
+
+  virtual bool CompilingCallsToThisStubIsGCSafe() { return true; }
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  SaveFPRegsMode save_doubles_;
+
+  Major MajorKey() { return StoreBufferOverflow; }
+  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
+};
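A minimal usage sketch (illustrative only, not part of this patch): the stub above is keyed solely on whether the XMM registers are saved, so a caller in generated code would typically reach it through the usual CodeStub machinery, here assumed to be MacroAssembler::CallStub() as used elsewhere in this revision.

    void CallStoreBufferOverflow(MacroAssembler* masm, SaveFPRegsMode fp_mode) {
      StoreBufferOverflowStub stub(fp_mode);  // MinorKey() is 1 iff kSaveFPRegs.
      masm->CallStub(&stub);                  // The stub never sets up a frame itself.
    }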
+
+
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
+enum GenericBinaryFlags {
+  NO_GENERIC_BINARY_FLAGS = 0,
+  NO_SMI_CODE_IN_STUB = 1 << 0  // Omit smi code in stub.
+};
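A short sketch of how these flags are meant to be combined and tested (illustrative only; GenerateBinaryOp is a hypothetical helper, not part of this patch):

    void GenerateBinaryOp(MacroAssembler* masm, int flags) {
      if ((flags & NO_SMI_CODE_IN_STUB) == 0) {
        // Emit the fast smi-smi path only when it was not explicitly omitted.
      }
      // Emit the generic (heap number / string) path.
    }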
+
+
 class UnaryOpStub: public CodeStub {
  public:
   UnaryOpStub(Token::Value op,
@@ -448,6 +474,253 @@
 };
+class RecordWriteStub: public CodeStub {
+ public:
+  RecordWriteStub(Register object,
+                  Register value,
+                  Register address,
+                  RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : object_(object),
+        value_(value),
+        address_(address),
+        remembered_set_action_(remembered_set_action),
+        save_fp_regs_mode_(fp_mode),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+  }
+
+  enum Mode {
+    STORE_BUFFER_ONLY,
+    INCREMENTAL,
+    INCREMENTAL_COMPACTION
+  };
+
+  virtual bool CompilingCallsToThisStubIsGCSafe();
+  static void GenerateFixedRegStubsAheadOfTime();
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
+  static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.
+
+  static const byte kFiveByteNopInstruction = 0x3d;  // Cmpl eax, #imm32.
+  static const byte kFiveByteJumpInstruction = 0xe9;  // Jmp #imm32.
+
+  static Mode GetMode(Code* stub) {
+    byte first_instruction = stub->instruction_start()[0];
+    byte second_instruction = stub->instruction_start()[2];
+
+    if (first_instruction == kTwoByteJumpInstruction) {
+      return INCREMENTAL;
+    }
+
+    ASSERT(first_instruction == kTwoByteNopInstruction);
+
+    if (second_instruction == kFiveByteJumpInstruction) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    ASSERT(second_instruction == kFiveByteNopInstruction);
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        ASSERT(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteNopInstruction;
+        break;
+      case INCREMENTAL:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteJumpInstruction;
+        break;
+      case INCREMENTAL_COMPACTION:
+        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        stub->instruction_start()[0] = kTwoByteNopInstruction;
+        stub->instruction_start()[2] = kFiveByteJumpInstruction;
+        break;
+    }
+    ASSERT(GetMode(stub) == mode);
+    CPU::FlushICache(stub->instruction_start(), 7);
+  }
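For reference, GetMode() and Patch() above rely on the generated stub beginning with a fixed 7-byte sequence: a 2-byte instruction at offset 0 (0x3c cmpb al, #imm8 or 0xeb short jmp) followed by a 5-byte instruction at offset 2 (0x3d cmpl eax, #imm32 or 0xe9 near jmp), which is why exactly 7 bytes of instruction cache are flushed. A sketch of how the collector might drive the patching (illustrative only; SwitchRecordWriteStub is a hypothetical helper, not part of this patch):

    void SwitchRecordWriteStub(Code* stub, bool compacting) {
      if (RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY) {
        RecordWriteStub::Patch(stub,
                               compacting ? RecordWriteStub::INCREMENTAL_COMPACTION
                                          : RecordWriteStub::INCREMENTAL);
      }
    }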
+
+ private:
+  // This is a helper class for freeing up 3 scratch registers, where the
+  // third is always rcx (needed for shift operations). The input is two
+  // registers that must be preserved and one scratch register provided by
+  // the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object,
+                       Register address,
+                       Register scratch0)
+        : object_orig_(object),
+          address_orig_(address),
+          scratch0_orig_(scratch0),
+          object_(object),
+          address_(address),
+          scratch0_(scratch0) {
+      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_);
+      if (scratch0.is(rcx)) {
+        scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_);
+      }
+      if (object.is(rcx)) {
+        object_ = GetRegThatIsNotRcxOr(address_, scratch0_, scratch1_);
+      }
+      if (address.is(rcx)) {
+        address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_);
+      }
+      ASSERT(!AreAliased(scratch0_, object_, address_, rcx));
+    }
+
+    void Save(MacroAssembler* masm) {
+      ASSERT(!address_orig_.is(object_));
+      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
+      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+      ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+      // We don't have to save scratch0_orig_ because it was given to us as
+      // a scratch register. But if we had to switch to a different reg then
+      // we should save the new scratch0_.
+      if (!scratch0_.is(scratch0_orig_)) masm->push(scratch0_);
+      if (!rcx.is(scratch0_orig_) &&
+          !rcx.is(object_orig_) &&
+          !rcx.is(address_orig_)) {
+        masm->push(rcx);
+      }
+      masm->push(scratch1_);
+      if (!address_.is(address_orig_)) {
+        masm->push(address_);
+        masm->movq(address_, address_orig_);
+      }
+      if (!object_.is(object_orig_)) {
+        masm->push(object_);
+        masm->movq(object_, object_orig_);
+      }
+    }
+
+    void Restore(MacroAssembler* masm) {
+      // These will have been preserved the entire time, so we just need to
+      // move them back. Only in one case is the orig_ reg different from the
+      // plain one, since only one of them can alias with rcx.
+      if (!object_.is(object_orig_)) {
+        masm->movq(object_orig_, object_);
+        masm->pop(object_);
+      }
+      if (!address_.is(address_orig_)) {
+        masm->movq(address_orig_, address_);
+        masm->pop(address_);
+      }
+      masm->pop(scratch1_);
+      if (!rcx.is(scratch0_orig_) &&
+          !rcx.is(object_orig_) &&
+          !rcx.is(address_orig_)) {
+        masm->pop(rcx);
+      }
+      if (!scratch0_.is(scratch0_orig_)) masm->pop(scratch0_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.
+
+    // The three scratch registers (incl. rcx) will be restored by other
+    // means, so we don't bother pushing them here. Rbx, rbp, and r12-r15
+    // are callee-saved and don't need to be preserved.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      masm->PushCallerSaved(mode, scratch0_, scratch1_, rcx);
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      masm->PopCallerSaved(mode, scratch0_, scratch1_, rcx);
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_orig_;
+    Register address_orig_;
+    Register scratch0_orig_;
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+    // Third scratch register is always rcx.
+
+    Register GetRegThatIsNotRcxOr(Register r1,
+                                  Register r2,
+                                  Register r3) {
+      for (int i = 0; i < Register::kNumAllocatableRegisters; i++) {
+        Register candidate = Register::FromAllocationIndex(i);
+        if (candidate.is(rcx)) continue;
+        if (candidate.is(r1)) continue;
+        if (candidate.is(r2)) continue;
+        if (candidate.is(r3)) continue;
+        return candidate;
+      }
+      UNREACHABLE();
+      return no_reg;
+    }
+    friend class RecordWriteStub;
+  };
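A worked example of the allocation above (illustrative only, not part of the patch; the concrete register choices depend on the allocation order, so rdi and r8 below are just plausible picks): with object = rdx, value/scratch0 = rbx, and address = rcx, the constructor keeps rdx and rbx, picks a free register such as rdi for scratch1_, and moves address_ off rcx onto another free register such as r8, so that rcx itself remains usable as the implicit third scratch. Save() and Restore() would then emit roughly:

    // Save():
    //   push rdi        ; scratch1_
    //   push r8         ; substitute for address, which aliased rcx
    //   movq r8, rcx    ; address_ now holds the address value
    // Restore():
    //   movq rcx, r8    ; move the (possibly updated) address value back
    //   pop r8
    //   pop rdi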
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+
+  void Generate(MacroAssembler* masm);
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm,
+      OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm, Mode mode);
+
+  Major MajorKey() { return RecordWrite; }
+
+  int MinorKey() {
+    return ObjectBits::encode(object_.code()) |
+           ValueBits::encode(value_.code()) |
+           AddressBits::encode(address_.code()) |
+           RememberedSetActionBits::encode(remembered_set_action_) |
+           SaveFPRegsModeBits::encode(save_fp_regs_mode_);
+  }
+
+  bool MustBeInStubCache() {
+    // All stubs must be registered in the stub cache,
+    // otherwise the incremental marker would not be able
+    // to find and patch them.
+    return true;
+  }
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  class ObjectBits: public BitField<int, 0, 4> {};
+  class ValueBits: public BitField<int, 4, 4> {};
+  class AddressBits: public BitField<int, 8, 4> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
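The bit fields above pack MinorKey() into 14 bits: three 4-bit register codes (x64 has 16 general-purpose registers) plus one bit each for the remembered-set action and the FP-save mode. A decoding sketch (illustrative only, not part of the patch; it assumes the usual BitField::decode() helpers from this revision):

    int key = /* a RecordWriteStub MinorKey() value */ 0;
    int object_code  = ObjectBits::decode(key);   // 0..15
    int value_code   = ValueBits::decode(key);
    int address_code = AddressBits::decode(key);
    RememberedSetAction action = RememberedSetActionBits::decode(key);
    SaveFPRegsMode fp_mode = SaveFPRegsModeBits::decode(key);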
+
+  Register object_;
+  Register value_;
+  Register address_;
+  RememberedSetAction remembered_set_action_;
+  SaveFPRegsMode save_fp_regs_mode_;
+  Label slow_;
+  RegisterAllocation regs_;
+};
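A sketch of how write-barrier code in the macro assembler might create and call this stub (illustrative only, not part of the patch; EmitRecordWrite is a hypothetical wrapper, and EMIT_REMEMBERED_SET is assumed to be one of the RememberedSetAction values used in this revision):

    void EmitRecordWrite(MacroAssembler* masm,
                         Register object,
                         Register address,
                         Register value,
                         SaveFPRegsMode fp_mode) {
      // As the constructor comment notes, 'value' doubles as the stub's
      // scratch register.
      RecordWriteStub stub(object, value, address, EMIT_REMEMBERED_SET, fp_mode);
      masm->CallStub(&stub);
    }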
+
+
 } }  // namespace v8::internal
 #endif  // V8_X64_CODE_STUBS_X64_H_