| Index: runtime/vm/code_patcher_ia32.cc
|
| diff --git a/runtime/vm/code_patcher_ia32.cc b/runtime/vm/code_patcher_ia32.cc
|
| index e1f54839c44a723a23eb81fb46670aded5a5d961..95d64e08e59c73a8175017f3b67af494df5627e5 100644
|
| --- a/runtime/vm/code_patcher_ia32.cc
|
| +++ b/runtime/vm/code_patcher_ia32.cc
|
| @@ -18,38 +18,52 @@ namespace dart {
|
|
|
| // The expected pattern of a Dart unoptimized call (static and instance):
|
| // mov ECX, ic-data
|
| -// mov EDI, target-code-object
|
| // call target_address (stub)
|
| // <- return address
|
| class UnoptimizedCall : public ValueObject {
|
| public:
|
| explicit UnoptimizedCall(uword return_address)
|
| - : start_(return_address - kPatternSize) {
|
| - ASSERT(IsValid());
|
| + : start_(return_address - (kNumInstructions * kInstructionSize)) {
|
| + ASSERT(IsValid(return_address));
|
| + ASSERT(kInstructionSize == Assembler::kCallExternalLabelSize);
|
| + }
|
| +
|
| + static bool IsValid(uword return_address) {
|
| + uint8_t* code_bytes =
|
| + reinterpret_cast<uint8_t*>(
|
| + return_address - (kNumInstructions * kInstructionSize));
|
| + return (code_bytes[0] == 0xB9) &&
|
| + (code_bytes[1 * kInstructionSize] == 0xE8);
|
| + }
|
| +
|
| + uword target() const {
|
| + const uword offset = *reinterpret_cast<uword*>(call_address() + 1);
|
| + return return_address() + offset;
|
| + }
|
| +
|
| + void set_target(uword target) const {
|
| + uword* target_addr = reinterpret_cast<uword*>(call_address() + 1);
|
| + uword offset = target - return_address();
|
| + WritableInstructionsScope writable(reinterpret_cast<uword>(target_addr),
|
| + sizeof(offset));
|
| + *target_addr = offset;
|
| + CPU::FlushICache(call_address(), kInstructionSize);
|
| }
|
|
|
| RawObject* ic_data() const {
|
| return *reinterpret_cast<RawObject**>(start_ + 1);
|
| }
|
|
|
| - static const int kMovInstructionSize = 5;
|
| - static const int kCallInstructionSize = 3;
|
| - static const int kPatternSize =
|
| - 2 * kMovInstructionSize + kCallInstructionSize;
|
| + static const int kNumInstructions = 2;
|
| + static const int kInstructionSize = 5; // All instructions have the same length.
|
|
|
| private:
|
| - bool IsValid() {
|
| - uint8_t* code_bytes = reinterpret_cast<uint8_t*>(start_);
|
| - return (code_bytes[0] == 0xB9) &&
|
| - (code_bytes[2 * kMovInstructionSize] == 0xFF);
|
| - }
|
| -
|
| uword return_address() const {
|
| - return start_ + kPatternSize;
|
| + return start_ + kNumInstructions * kInstructionSize;
|
| }
|
|
|
| uword call_address() const {
|
| - return start_ + 2 * kMovInstructionSize;
|
| + return start_ + 1 * kInstructionSize;
|
| }
|
|
|
| protected:
|
| @@ -113,43 +127,45 @@ class UnoptimizedStaticCall : public UnoptimizedCall {
|
|
|
| // The expected pattern of a Dart static call:
|
| // mov EDX, arguments_descriptor_array (optional in polymorphic calls)
|
| -// mov EDI, Immediate(code_object)
|
| -// call [EDI + entry_point_offset]
|
| +// call target_address
|
| // <- return address
|
| class StaticCall : public ValueObject {
|
| public:
|
| explicit StaticCall(uword return_address)
|
| - : start_(return_address - (kMovInstructionSize + kCallInstructionSize)) {
|
| - ASSERT(IsValid());
|
| + : start_(return_address - (kNumInstructions * kInstructionSize)) {
|
| + ASSERT(IsValid(return_address));
|
| + ASSERT(kInstructionSize == Assembler::kCallExternalLabelSize);
|
| }
|
|
|
| - bool IsValid() {
|
| - uint8_t* code_bytes = reinterpret_cast<uint8_t*>(start_);
|
| - return (code_bytes[0] == 0xBF) && (code_bytes[5] == 0xFF);
|
| + static bool IsValid(uword return_address) {
|
| + uint8_t* code_bytes =
|
| + reinterpret_cast<uint8_t*>(
|
| + return_address - (kNumInstructions * kInstructionSize));
|
| + return (code_bytes[0] == 0xE8);
|
| }
|
|
|
| - RawCode* target() const {
|
| - const uword imm = *reinterpret_cast<uword*>(start_ + 1);
|
| - return reinterpret_cast<RawCode*>(imm);
|
| + uword target() const {
|
| + const uword offset = *reinterpret_cast<uword*>(call_address() + 1);
|
| + return return_address() + offset;
|
| }
|
|
|
| - void set_target(const Code& target) const {
|
| - uword* target_addr = reinterpret_cast<uword*>(start_ + 1);
|
| - uword imm = reinterpret_cast<uword>(target.raw());
|
| - *target_addr = imm;
|
| - CPU::FlushICache(start_ + 1, sizeof(imm));
|
| + void set_target(uword target) const {
|
| + uword* target_addr = reinterpret_cast<uword*>(call_address() + 1);
|
| + uword offset = target - return_address();
|
| + *target_addr = offset;
|
| + CPU::FlushICache(call_address(), kInstructionSize);
|
| }
|
|
|
| - static const int kMovInstructionSize = 5;
|
| - static const int kCallInstructionSize = 3;
|
| + static const int kNumInstructions = 1;
|
| + static const int kInstructionSize = 5; // All instructions have the same length.
|
|
|
| private:
|
| uword return_address() const {
|
| - return start_ + kMovInstructionSize + kCallInstructionSize;
|
| + return start_ + kNumInstructions * kInstructionSize;
|
| }
|
|
|
| uword call_address() const {
|
| - return start_ + kMovInstructionSize;
|
| + return start_;
|
| }
|
|
|
| uword start_;
|
| @@ -158,8 +174,8 @@ class StaticCall : public ValueObject {
|
| };
|
|
|
|
|
| -RawCode* CodePatcher::GetStaticCallTargetAt(uword return_address,
|
| - const Code& code) {
|
| +uword CodePatcher::GetStaticCallTargetAt(uword return_address,
|
| + const Code& code) {
|
| ASSERT(code.ContainsInstructionAt(return_address));
|
| StaticCall call(return_address);
|
| return call.target();
|
| @@ -168,16 +184,23 @@ RawCode* CodePatcher::GetStaticCallTargetAt(uword return_address,
|
|
|
| void CodePatcher::PatchStaticCallAt(uword return_address,
|
| const Code& code,
|
| - const Code& new_target) {
|
| - const Instructions& instrs = Instructions::Handle(code.instructions());
|
| - WritableInstructionsScope writable(instrs.EntryPoint(), instrs.size());
|
| + uword new_target) {
|
| ASSERT(code.ContainsInstructionAt(return_address));
|
| StaticCall call(return_address);
|
| call.set_target(new_target);
|
| }
|
|
|
|
|
| -void CodePatcher::InsertDeoptimizationCallAt(uword start, uword target) {
|
| +void CodePatcher::PatchInstanceCallAt(uword return_address,
|
| + const Code& code,
|
| + uword new_target) {
|
| + ASSERT(code.ContainsInstructionAt(return_address));
|
| + InstanceCall call(return_address);
|
| + call.set_target(new_target);
|
| +}
|
| +
|
| +
|
| +void CodePatcher::InsertCallAt(uword start, uword target) {
|
| // The inserted call should not overlap the lazy deopt jump code.
|
| ASSERT(start + CallPattern::pattern_length_in_bytes() <= target);
|
| *reinterpret_cast<uint8_t*>(start) = 0xE8;
|
| @@ -187,14 +210,14 @@ void CodePatcher::InsertDeoptimizationCallAt(uword start, uword target) {
|
| }
|
|
|
|
|
| -RawCode* CodePatcher::GetInstanceCallAt(
|
| +uword CodePatcher::GetInstanceCallAt(
|
| uword return_address, const Code& code, ICData* ic_data) {
|
| ASSERT(code.ContainsInstructionAt(return_address));
|
| InstanceCall call(return_address);
|
| if (ic_data != NULL) {
|
| *ic_data ^= call.ic_data();
|
| }
|
| - return Code::null();
|
| + return call.target();
|
| }
|
|
|
|
|
| @@ -215,21 +238,26 @@ void CodePatcher::PatchNativeCallAt(uword return_address,
|
| const Code& code,
|
| NativeFunction target,
|
| const Code& trampoline) {
|
| - UNREACHABLE();
|
| + ASSERT(code.ContainsInstructionAt(return_address));
|
| + NativeCall call(return_address);
|
| + call.set_target(trampoline.EntryPoint());
|
| + call.set_native_function(target);
|
| }
|
|
|
|
|
| -RawCode* CodePatcher::GetNativeCallAt(uword return_address,
|
| - const Code& code,
|
| - NativeFunction* target) {
|
| - UNREACHABLE();
|
| - return NULL;
|
| +uword CodePatcher::GetNativeCallAt(uword return_address,
|
| + const Code& code,
|
| + NativeFunction* target) {
|
| + ASSERT(code.ContainsInstructionAt(return_address));
|
| + NativeCall call(return_address);
|
| + *target = call.native_function();
|
| + return call.target();
|
| }
|
|
|
|
|
|
|
| intptr_t CodePatcher::InstanceCallSizeInBytes() {
|
| - return InstanceCall::kPatternSize;
|
| + return InstanceCall::kNumInstructions * InstanceCall::kInstructionSize;
|
| }
|
|
|
|
|
|
|
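Note (editorial, not part of the patch): after this change the unoptimized and static call sites handled here end in a plain 5-byte IA-32 "call rel32", i.e. the 0xE8 opcode followed by a 32-bit offset relative to the return address. Recognizing and retargeting a call therefore reduces to a byte compare plus a single 4-byte store and an instruction-cache flush. The standalone helpers below are only a sketch of that encoding; the names kCallLength, LooksLikeUnoptimizedCall, ReadCallTarget and WriteCallTarget are hypothetical, and the real code in the UnoptimizedCall/StaticCall classes above additionally makes the instruction pages writable (WritableInstructionsScope) and calls CPU::FlushICache.

// Editorial sketch, not part of the patch. All names below are hypothetical.
#include <stdint.h>
#include <stddef.h>

// "call rel32" on IA-32 is 5 bytes: the 0xE8 opcode plus a 32-bit offset.
static const size_t kCallLength = 5;

// The new unoptimized-call pattern: "mov ECX, imm32" (0xB9) immediately
// followed by "call rel32" (0xE8), each 5 bytes long.
static bool LooksLikeUnoptimizedCall(uintptr_t return_address) {
  const uint8_t* bytes =
      reinterpret_cast<const uint8_t*>(return_address - 2 * kCallLength);
  return (bytes[0] == 0xB9) && (bytes[kCallLength] == 0xE8);
}

// The stored offset is relative to the next instruction, i.e. the return
// address, so target = call_address + 5 + offset.
static uintptr_t ReadCallTarget(uintptr_t call_address) {
  const int32_t offset = *reinterpret_cast<const int32_t*>(call_address + 1);
  return call_address + kCallLength + offset;
}

// Retargeting rewrites only the 4 offset bytes; the 0xE8 opcode is untouched.
// A real patcher must also unprotect the page and flush the instruction
// cache, as the diff does.
static void WriteCallTarget(uintptr_t call_address, uintptr_t new_target) {
  const int32_t offset =
      static_cast<int32_t>(new_target - (call_address + kCallLength));
  *reinterpret_cast<int32_t*>(call_address + 1) = offset;
}

The renamed InsertCallAt above builds the same shape from scratch, starting by storing the 0xE8 opcode byte at the insertion point before the relative offset is written.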