| Index: src/ppc/assembler-ppc-inl.h
|
| diff --git a/src/arm/assembler-arm-inl.h b/src/ppc/assembler-ppc-inl.h
|
| similarity index 55%
|
| copy from src/arm/assembler-arm-inl.h
|
| copy to src/ppc/assembler-ppc-inl.h
|
| index 1cfe34b241fc1ab804032d2babe2126afec9eab2..1465f6df0b0f56a9f6874260d495b15ae53b1dfb 100644
|
| --- a/src/arm/assembler-arm-inl.h
|
| +++ b/src/ppc/assembler-ppc-inl.h
|
| @@ -34,10 +34,14 @@
|
| // significantly by Google Inc.
|
| // Copyright 2012 the V8 project authors. All rights reserved.
|
|
|
| -#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
|
| -#define V8_ARM_ASSEMBLER_ARM_INL_H_
|
| +//
|
| +// Copyright IBM Corp. 2012, 2013. All rights reserved.
|
| +//
|
| +
|
| +#ifndef V8_PPC_ASSEMBLER_PPC_INL_H_
|
| +#define V8_PPC_ASSEMBLER_PPC_INL_H_
|
|
|
| -#include "src/arm/assembler-arm.h"
|
| +#include "src/ppc/assembler-ppc.h"
|
|
|
| #include "src/assembler.h"
|
| #include "src/debug.h"
|
| @@ -47,57 +51,17 @@ namespace v8 {
|
| namespace internal {
|
|
|
|
|
| -bool CpuFeatures::SupportsCrankshaft() { return IsSupported(VFP3); }
|
| -
|
| -
|
| -int Register::NumAllocatableRegisters() {
|
| - return kMaxNumAllocatableRegisters;
|
| -}
|
| -
|
| -
|
| -int DwVfpRegister::NumRegisters() {
|
| - return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
|
| -}
|
| -
|
| -
|
| -int DwVfpRegister::NumReservedRegisters() {
|
| - return kNumReservedRegisters;
|
| -}
|
| -
|
| -
|
| -int DwVfpRegister::NumAllocatableRegisters() {
|
| - return NumRegisters() - kNumReservedRegisters;
|
| -}
|
| -
|
| -
|
| -int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
|
| - DCHECK(!reg.is(kDoubleRegZero));
|
| - DCHECK(!reg.is(kScratchDoubleReg));
|
| - if (reg.code() > kDoubleRegZero.code()) {
|
| - return reg.code() - kNumReservedRegisters;
|
| - }
|
| - return reg.code();
|
| -}
|
| -
|
| -
|
| -DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
|
| - DCHECK(index >= 0 && index < NumAllocatableRegisters());
|
| - DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
|
| - kNumReservedRegisters - 1);
|
| - if (index >= kDoubleRegZero.code()) {
|
| - return from_code(index + kNumReservedRegisters);
|
| - }
|
| - return from_code(index);
|
| -}
|
| +bool CpuFeatures::SupportsCrankshaft() { return true; }
|
|
|
|
|
| void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
|
| +#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
|
| if (RelocInfo::IsInternalReference(rmode_)) {
|
| // absolute code pointer inside code object moves with the code object.
|
| - int32_t* p = reinterpret_cast<int32_t*>(pc_);
|
| - *p += delta; // relocate entry
|
| + Assembler::RelocateInternalReference(pc_, delta, 0, icache_flush_mode);
|
| }
|
| - // We do not use pc relative addressing on ARM, so there is
|
| +#endif
|
| + // We do not use pc relative addressing on PPC, so there is
|
| // nothing else to do.
|
| }
|
|
|
| @@ -112,26 +76,41 @@ Address RelocInfo::target_address_address() {
|
| DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
|
| || rmode_ == EMBEDDED_OBJECT
|
| || rmode_ == EXTERNAL_REFERENCE);
|
| - if (FLAG_enable_ool_constant_pool ||
|
| - Assembler::IsMovW(Memory::int32_at(pc_))) {
|
| +
|
| +#if V8_OOL_CONSTANT_POOL
|
| + if (Assembler::IsConstantPoolLoadStart(pc_)) {
|
| // We return the PC for ool constant pool since this function is used by the
|
| // serializer and expects the address to reside within the code object.
|
| return reinterpret_cast<Address>(pc_);
|
| - } else {
|
| - DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
|
| - return constant_pool_entry_address();
|
| }
|
| +#endif
|
| +
|
| + // Read the address of the word containing the target_address in an
|
| + // instruction stream.
|
| + // The only architecture-independent user of this function is the serializer.
|
| + // The serializer uses it to find out how many raw bytes of instruction to
|
| + // output before the next target.
|
| + // For an instruction like LIS/ORI where the target bits are mixed into the
|
| + // instruction bits, the size of the target will be zero, indicating that the
|
| + // serializer should not step forward in memory after a target is resolved
|
| + // and written.
|
| + return reinterpret_cast<Address>(pc_);
|
| }
|
|
|
|
|
| Address RelocInfo::constant_pool_entry_address() {
|
| - DCHECK(IsInConstantPool());
|
| - return Assembler::constant_pool_entry_address(pc_, host_->constant_pool());
|
| +#if V8_OOL_CONSTANT_POOL
|
| + return Assembler::target_constant_pool_address_at(pc_,
|
| + host_->constant_pool());
|
| +#else
|
| + UNREACHABLE();
|
| + return NULL;
|
| +#endif
|
| }
|
|
|
|
|
| int RelocInfo::target_address_size() {
|
| - return kPointerSize;
|
| + return Assembler::kSpecialTargetSize;
|
| }
|
|
|
|
|
| @@ -149,6 +128,38 @@ void RelocInfo::set_target_address(Address target,
|
| }
|
|
|
|
|
| +Address Assembler::break_address_from_return_address(Address pc) {
|
| + return target_address_from_return_address(pc);
|
| +}
|
| +
|
| +
|
| +Address Assembler::target_address_from_return_address(Address pc) {
|
| + // Returns the address of the call target from the return address that will
|
| + // be returned to after a call.
|
| + // Call sequence is:
|
| + // mov ip, @ call address
|
| + // mtlr ip
|
| + // blrl
|
| + // @ return address
|
| +#if V8_OOL_CONSTANT_POOL
|
| + if (IsConstantPoolLoadEnd(pc - 3 * kInstrSize)) {
|
| + return pc - (kMovInstructionsConstantPool + 2) * kInstrSize;
|
| + }
|
| +#endif
|
| + return pc - (kMovInstructionsNoConstantPool + 2) * kInstrSize;
|
| +}
|
| +
|
| +
|
| +Address Assembler::return_address_from_call_start(Address pc) {
|
| +#if V8_OOL_CONSTANT_POOL
|
| + Address load_address = pc + (kMovInstructionsConstantPool - 1) * kInstrSize;
|
| + if (IsConstantPoolLoadEnd(load_address))
|
| + return pc + (kMovInstructionsConstantPool + 2) * kInstrSize;
|
| +#endif
|
| + return pc + (kMovInstructionsNoConstantPool + 2) * kInstrSize;
|
| +}
|
| +
|
| +
|
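Note: the offset arithmetic in the two helpers above falls out of the call
sequence in the comment: a fixed-length mov of the call address, then mtlr and
blrl, with the return address immediately after the blrl. A minimal standalone
sketch, assuming PPC32 values (kInstrSize == 4 and a 2-instruction lis/ori mov,
i.e. kMovInstructionsNoConstantPool == 2); the function names here are
illustrative, not the assembler's:

    #include <stdint.h>

    static const int kInstrSize = 4;                      // fixed-width PPC instructions
    static const int kMovInstructionsNoConstantPool = 2;  // assumed PPC32 value (lis + ori)

    // mov ip, <addr>; mtlr ip; blrl  =>  mov length + 2 instructions.
    uintptr_t ReturnAddressFromCallStart(uintptr_t call_start) {
      return call_start + (kMovInstructionsNoConstantPool + 2) * kInstrSize;
    }

    uintptr_t CallStartFromReturnAddress(uintptr_t return_address) {
      return return_address - (kMovInstructionsNoConstantPool + 2) * kInstrSize;
    }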
| Object* RelocInfo::target_object() {
|
| DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
|
| return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
|
| @@ -227,11 +238,29 @@ void RelocInfo::set_target_cell(Cell* cell,
|
| }
|
|
|
|
|
| -static const int kNoCodeAgeSequenceLength = 3 * Assembler::kInstrSize;
|
| +#if V8_OOL_CONSTANT_POOL
|
| +static const int kNoCodeAgeInstructions = 7;
|
| +#else
|
| +static const int kNoCodeAgeInstructions = 6;
|
| +#endif
|
| +static const int kCodeAgingInstructions =
|
| + Assembler::kMovInstructionsNoConstantPool + 3;
|
| +static const int kNoCodeAgeSequenceInstructions =
|
| + ((kNoCodeAgeInstructions >= kCodeAgingInstructions) ?
|
| + kNoCodeAgeInstructions : kCodeAgingInstructions);
|
| +static const int kNoCodeAgeSequenceNops = (kNoCodeAgeSequenceInstructions -
|
| + kNoCodeAgeInstructions);
|
| +static const int kCodeAgingSequenceNops = (kNoCodeAgeSequenceInstructions -
|
| + kCodeAgingInstructions);
|
| +static const int kCodeAgingTargetDelta = 1 * Assembler::kInstrSize;
|
| +static const int kCodeAgingPatchDelta = (kCodeAgingInstructions *
|
| + Assembler::kInstrSize);
|
| +static const int kNoCodeAgeSequenceLength = (kNoCodeAgeSequenceInstructions *
|
| + Assembler::kInstrSize);
|
|
|
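Note: the constants above pad the shorter of the two sequences with nops so
that either sequence can be patched over the other in place. A worked example
under assumed values (the V8_OOL_CONSTANT_POOL case, with a 2-instruction mov
as on PPC32):

    static const int kNoCodeAgeInstructions = 7;          // OOL case above
    static const int kCodeAgingInstructions = 2 + 3;      // kMovInstructionsNoConstantPool + 3
    static const int kNoCodeAgeSequenceInstructions = 7;  // max(7, 5)
    static const int kNoCodeAgeSequenceNops = 0;   // 7 - 7: young sequence needs no padding
    static const int kCodeAgingSequenceNops = 2;   // 7 - 5: aging sequence gets two nops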
|
|
| Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
|
| - UNREACHABLE(); // This should never be reached on Arm.
|
| + UNREACHABLE(); // This should never be reached on PPC.
|
| return Handle<Object>();
|
| }
|
|
|
| @@ -239,33 +268,34 @@ Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
|
| Code* RelocInfo::code_age_stub() {
|
| DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
|
| return Code::GetCodeFromTargetAddress(
|
| - Memory::Address_at(pc_ +
|
| - (kNoCodeAgeSequenceLength - Assembler::kInstrSize)));
|
| + Assembler::target_address_at(pc_ + kCodeAgingTargetDelta, host_));
|
| }
|
|
|
|
|
| void RelocInfo::set_code_age_stub(Code* stub,
|
| ICacheFlushMode icache_flush_mode) {
|
| DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
|
| - Memory::Address_at(pc_ +
|
| - (kNoCodeAgeSequenceLength - Assembler::kInstrSize)) =
|
| - stub->instruction_start();
|
| + Assembler::set_target_address_at(pc_ + kCodeAgingTargetDelta,
|
| + host_,
|
| + stub->instruction_start(),
|
| + icache_flush_mode);
|
| }
|
|
|
|
|
| Address RelocInfo::call_address() {
|
| - // The 2 instructions offset assumes patched debug break slot or return
|
| - // sequence.
|
| DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
|
| (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
|
| - return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
|
| + // The pc_ offset of 0 assumes patched return sequence per
|
| + // BreakLocationIterator::SetDebugBreakAtReturn(), or debug break
|
| + // slot per BreakLocationIterator::SetDebugBreakAtSlot().
|
| + return Assembler::target_address_at(pc_, host_);
|
| }
|
|
|
|
|
| void RelocInfo::set_call_address(Address target) {
|
| DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
|
| (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
|
| - Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
|
| + Assembler::set_target_address_at(pc_, host_, target);
|
| if (host() != NULL) {
|
| Object* target_code = Code::GetCodeFromTargetAddress(target);
|
| host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
|
| @@ -301,13 +331,30 @@ void RelocInfo::WipeOut() {
|
|
|
|
|
| bool RelocInfo::IsPatchedReturnSequence() {
|
| - Instr current_instr = Assembler::instr_at(pc_);
|
| - Instr next_instr = Assembler::instr_at(pc_ + Assembler::kInstrSize);
|
| - // A patched return sequence is:
|
| - // ldr ip, [pc, #0]
|
| - // blx ip
|
| - return Assembler::IsLdrPcImmediateOffset(current_instr) &&
|
| - Assembler::IsBlxReg(next_instr);
|
| + // The patched return sequence is defined by
|
| + // BreakLocationIterator::SetDebugBreakAtReturn().
|
| + // FIXED_SEQUENCE
|
| +
|
| + Instr instr0 = Assembler::instr_at(pc_);
|
| + Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
|
| +#if V8_TARGET_ARCH_PPC64
|
| + Instr instr3 = Assembler::instr_at(pc_ + (3 * Assembler::kInstrSize));
|
| + Instr instr4 = Assembler::instr_at(pc_ + (4 * Assembler::kInstrSize));
|
| + Instr binstr = Assembler::instr_at(pc_ + (7 * Assembler::kInstrSize));
|
| +#else
|
| + Instr binstr = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize);
|
| +#endif
|
| + bool patched_return = ((instr0 & kOpcodeMask) == ADDIS &&
|
| + (instr1 & kOpcodeMask) == ORI &&
|
| +#if V8_TARGET_ARCH_PPC64
|
| + (instr3 & kOpcodeMask) == ORIS &&
|
| + (instr4 & kOpcodeMask) == ORI &&
|
| +#endif
|
| + (binstr == 0x7d821008)); // twge r2, r2
|
| + return patched_return;
|
| }
|
|
|
|
|
| @@ -335,7 +382,7 @@ void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
|
| IsPatchedDebugBreakSlotSequence())) &&
|
| isolate->debug()->has_break_points()) {
|
| visitor->VisitDebugTarget(this);
|
| - } else if (RelocInfo::IsRuntimeEntry(mode)) {
|
| + } else if (IsRuntimeEntry(mode)) {
|
| visitor->VisitRuntimeEntry(this);
|
| }
|
| }
|
| @@ -360,224 +407,210 @@ void RelocInfo::Visit(Heap* heap) {
|
| (RelocInfo::IsDebugBreakSlot(mode) &&
|
| IsPatchedDebugBreakSlotSequence()))) {
|
| StaticVisitor::VisitDebugTarget(heap, this);
|
| - } else if (RelocInfo::IsRuntimeEntry(mode)) {
|
| + } else if (IsRuntimeEntry(mode)) {
|
| StaticVisitor::VisitRuntimeEntry(this);
|
| }
|
| }
|
|
|
| -
|
| -Operand::Operand(int32_t immediate, RelocInfo::Mode rmode) {
|
| +Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
|
| rm_ = no_reg;
|
| - imm32_ = immediate;
|
| + imm_ = immediate;
|
| rmode_ = rmode;
|
| }
|
|
|
| -
|
| Operand::Operand(const ExternalReference& f) {
|
| rm_ = no_reg;
|
| - imm32_ = reinterpret_cast<int32_t>(f.address());
|
| + imm_ = reinterpret_cast<intptr_t>(f.address());
|
| rmode_ = RelocInfo::EXTERNAL_REFERENCE;
|
| }
|
|
|
| -
|
| Operand::Operand(Smi* value) {
|
| rm_ = no_reg;
|
| - imm32_ = reinterpret_cast<intptr_t>(value);
|
| - rmode_ = RelocInfo::NONE32;
|
| + imm_ = reinterpret_cast<intptr_t>(value);
|
| + rmode_ = kRelocInfo_NONEPTR;
|
| }
|
|
|
| -
|
| Operand::Operand(Register rm) {
|
| rm_ = rm;
|
| - rs_ = no_reg;
|
| - shift_op_ = LSL;
|
| - shift_imm_ = 0;
|
| -}
|
| -
|
| -
|
| -bool Operand::is_reg() const {
|
| - return rm_.is_valid() &&
|
| - rs_.is(no_reg) &&
|
| - shift_op_ == LSL &&
|
| - shift_imm_ == 0;
|
| + rmode_ = kRelocInfo_NONEPTR; // PPC: set rmode_ here (the ARM version leaves it unset).
|
| }
|
|
|
| -
|
| void Assembler::CheckBuffer() {
|
| if (buffer_space() <= kGap) {
|
| GrowBuffer();
|
| }
|
| +}
|
| +
|
| +void Assembler::CheckTrampolinePoolQuick() {
|
| if (pc_offset() >= next_buffer_check_) {
|
| - CheckConstPool(false, true);
|
| + CheckTrampolinePool();
|
| }
|
| }
|
|
|
| -
|
| void Assembler::emit(Instr x) {
|
| CheckBuffer();
|
| *reinterpret_cast<Instr*>(pc_) = x;
|
| pc_ += kInstrSize;
|
| + CheckTrampolinePoolQuick();
|
| +}
|
| +
|
| +bool Operand::is_reg() const {
|
| + return rm_.is_valid();
|
| }
|
|
|
|
|
| -Address Assembler::target_address_from_return_address(Address pc) {
|
| - // Returns the address of the call target from the return address that will
|
| - // be returned to after a call.
|
| - // Call sequence on V7 or later is :
|
| - // movw ip, #... @ call address low 16
|
| - // movt ip, #... @ call address high 16
|
| - // blx ip
|
| - // @ return address
|
| - // Or pre-V7 or cases that need frequent patching, the address is in the
|
| - // constant pool. It could be a small constant pool load:
|
| - // ldr ip, [pc / pp, #...] @ call address
|
| - // blx ip
|
| - // @ return address
|
| - // Or an extended constant pool load:
|
| - // movw ip, #...
|
| - // movt ip, #...
|
| - // ldr ip, [pc, ip] @ call address
|
| - // blx ip
|
| - // @ return address
|
| - Address candidate = pc - 2 * Assembler::kInstrSize;
|
| - Instr candidate_instr(Memory::int32_at(candidate));
|
| - if (IsLdrPcImmediateOffset(candidate_instr) |
|
| - IsLdrPpImmediateOffset(candidate_instr)) {
|
| - return candidate;
|
| - } else if (IsLdrPpRegOffset(candidate_instr)) {
|
| - candidate = pc - 4 * Assembler::kInstrSize;
|
| - DCHECK(IsMovW(Memory::int32_at(candidate)) &&
|
| - IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize)));
|
| - return candidate;
|
| - } else {
|
| - candidate = pc - 3 * Assembler::kInstrSize;
|
| - DCHECK(IsMovW(Memory::int32_at(candidate)) &&
|
| - IsMovT(Memory::int32_at(candidate + kInstrSize)));
|
| - return candidate;
|
| +// Fetch the 32-bit value (or, on PPC64, the 64-bit value) from the FIXED_SEQUENCE lis/ori.
|
| +Address Assembler::target_address_at(Address pc,
|
| + ConstantPoolArray* constant_pool) {
|
| + Instr instr1 = instr_at(pc);
|
| + Instr instr2 = instr_at(pc + kInstrSize);
|
| + // Interpret 2 instructions generated by lis/ori
|
| + if (IsLis(instr1) && IsOri(instr2)) {
|
| +#if V8_TARGET_ARCH_PPC64
|
| + Instr instr4 = instr_at(pc + (3 * kInstrSize));
|
| + Instr instr5 = instr_at(pc + (4 * kInstrSize));
|
| + // Assemble the 64-bit value.
|
| + uint64_t hi = (static_cast<uint32_t>((instr1 & kImm16Mask) << 16) |
|
| + static_cast<uint32_t>(instr2 & kImm16Mask));
|
| + uint64_t lo = (static_cast<uint32_t>((instr4 & kImm16Mask) << 16) |
|
| + static_cast<uint32_t>(instr5 & kImm16Mask));
|
| + return reinterpret_cast<Address>((hi << 32) | lo);
|
| +#else
|
| + // Assemble the 32-bit value.
|
| + return reinterpret_cast<Address>(
|
| + ((instr1 & kImm16Mask) << 16) | (instr2 & kImm16Mask));
|
| +#endif
|
| }
|
| +#if V8_OOL_CONSTANT_POOL
|
| + return Memory::Address_at(
|
| + target_constant_pool_address_at(pc, constant_pool));
|
| +#else
|
| + DCHECK(false);
|
| + return NULL;
|
| +#endif
|
| }
|
|
|
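Note: the decode above, reduced to a minimal 32-bit sketch. It assumes
kImm16Mask == 0xFFFF (the usual PPC immediate-field mask); the function name
is illustrative:

    #include <stdint.h>

    static const uint32_t kImm16Mask = 0xFFFF;  // assumed mask value

    // lis rD, hi puts hi in the upper halfword of rD; ori rD, rD, lo ORs lo
    // into the lower halfword, so the value is recovered from the two
    // 16-bit immediate fields.
    uint32_t DecodeLisOri(uint32_t lis_instr, uint32_t ori_instr) {
      return ((lis_instr & kImm16Mask) << 16) | (ori_instr & kImm16Mask);
    }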
|
|
| -Address Assembler::break_address_from_return_address(Address pc) {
|
| - return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
|
| +#if V8_OOL_CONSTANT_POOL
|
| +bool Assembler::IsConstantPoolLoadStart(Address pc) {
|
| +#if V8_TARGET_ARCH_PPC64
|
| + if (!IsLi(instr_at(pc))) return false;
|
| + pc += kInstrSize;
|
| +#endif
|
| + return GetRA(instr_at(pc)).is(kConstantPoolRegister);
|
| }
|
|
|
|
|
| -Address Assembler::return_address_from_call_start(Address pc) {
|
| - if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) |
|
| - IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
|
| - // Load from constant pool, small section.
|
| - return pc + kInstrSize * 2;
|
| - } else {
|
| - DCHECK(IsMovW(Memory::int32_at(pc)));
|
| - DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
|
| - if (IsLdrPpRegOffset(Memory::int32_at(pc + kInstrSize))) {
|
| - // Load from constant pool, extended section.
|
| - return pc + kInstrSize * 4;
|
| - } else {
|
| - // A movw / movt load immediate.
|
| - return pc + kInstrSize * 3;
|
| - }
|
| - }
|
| +bool Assembler::IsConstantPoolLoadEnd(Address pc) {
|
| +#if V8_TARGET_ARCH_PPC64
|
| + pc -= kInstrSize;
|
| +#endif
|
| + return IsConstantPoolLoadStart(pc);
|
| }
|
|
|
|
|
| -void Assembler::deserialization_set_special_target_at(
|
| - Address constant_pool_entry, Code* code, Address target) {
|
| - if (FLAG_enable_ool_constant_pool) {
|
| - set_target_address_at(constant_pool_entry, code, target);
|
| - } else {
|
| - Memory::Address_at(constant_pool_entry) = target;
|
| - }
|
| +int Assembler::GetConstantPoolOffset(Address pc) {
|
| + DCHECK(IsConstantPoolLoadStart(pc));
|
| + Instr instr = instr_at(pc);
|
| + int offset = SIGN_EXT_IMM16((instr & kImm16Mask));
|
| + return offset;
|
| }
|
|
|
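Note: SIGN_EXT_IMM16 recovers the signed 16-bit displacement field of the
constant pool load. A minimal equivalent, assuming the field sits in the low
16 bits of the instruction word:

    #include <stdint.h>

    // Narrowing to int16_t sign-extends the 16-bit displacement, matching
    // what SIGN_EXT_IMM16 does with the masked immediate.
    inline int SignExtImm16(uint32_t instr) {
      return static_cast<int16_t>(instr & 0xFFFF);
    }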
|
|
| -bool Assembler::is_constant_pool_load(Address pc) {
|
| - return !Assembler::IsMovW(Memory::int32_at(pc)) ||
|
| - (FLAG_enable_ool_constant_pool &&
|
| - Assembler::IsLdrPpRegOffset(
|
| - Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
|
| -}
|
| -
|
| -
|
| -Address Assembler::constant_pool_entry_address(
|
| - Address pc, ConstantPoolArray* constant_pool) {
|
| - if (FLAG_enable_ool_constant_pool) {
|
| - DCHECK(constant_pool != NULL);
|
| - int cp_offset;
|
| - if (IsMovW(Memory::int32_at(pc))) {
|
| - DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)) &&
|
| - IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize)));
|
| - // This is an extended constant pool lookup.
|
| - Instruction* movw_instr = Instruction::At(pc);
|
| - Instruction* movt_instr = Instruction::At(pc + kInstrSize);
|
| - cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) |
|
| - movw_instr->ImmedMovwMovtValue();
|
| - } else {
|
| - // This is a small constant pool lookup.
|
| - DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
|
| - cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc));
|
| - }
|
| - return reinterpret_cast<Address>(constant_pool) + cp_offset;
|
| - } else {
|
| - DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
|
| - Instr instr = Memory::int32_at(pc);
|
| - return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
|
| - }
|
| +void Assembler::SetConstantPoolOffset(Address pc, int offset) {
|
| + DCHECK(IsConstantPoolLoadStart(pc));
|
| + DCHECK(is_int16(offset));
|
| + Instr instr = instr_at(pc);
|
| + instr &= ~kImm16Mask;
|
| + instr |= (offset & kImm16Mask);
|
| + instr_at_put(pc, instr);
|
| }
|
|
|
|
|
| -Address Assembler::target_address_at(Address pc,
|
| - ConstantPoolArray* constant_pool) {
|
| - if (is_constant_pool_load(pc)) {
|
| - // This is a constant pool lookup. Return the value in the constant pool.
|
| - return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
|
| - } else {
|
| - // This is an movw_movt immediate load. Return the immediate.
|
| - DCHECK(IsMovW(Memory::int32_at(pc)) &&
|
| - IsMovT(Memory::int32_at(pc + kInstrSize)));
|
| - Instruction* movw_instr = Instruction::At(pc);
|
| - Instruction* movt_instr = Instruction::At(pc + kInstrSize);
|
| - return reinterpret_cast<Address>(
|
| - (movt_instr->ImmedMovwMovtValue() << 16) |
|
| - movw_instr->ImmedMovwMovtValue());
|
| - }
|
| +Address Assembler::target_constant_pool_address_at(
|
| + Address pc, ConstantPoolArray* constant_pool) {
|
| + Address addr = reinterpret_cast<Address>(constant_pool);
|
| + DCHECK(addr);
|
| + addr += GetConstantPoolOffset(pc);
|
| + return addr;
|
| }
|
| +#endif
|
|
|
|
|
| +// This sets the branch destination (which gets loaded at the call address).
|
| +// This is for calls and branches within generated code. The serializer
|
| +// has already deserialized the mov instructions etc.
|
| +// There is a FIXED_SEQUENCE assumption here
|
| +void Assembler::deserialization_set_special_target_at(
|
| + Address instruction_payload, Code* code, Address target) {
|
| + set_target_address_at(instruction_payload, code, target);
|
| +}
|
| +
|
| +// This code assumes the FIXED_SEQUENCE of lis/ori
|
| void Assembler::set_target_address_at(Address pc,
|
| ConstantPoolArray* constant_pool,
|
| Address target,
|
| ICacheFlushMode icache_flush_mode) {
|
| - if (is_constant_pool_load(pc)) {
|
| - // This is a constant pool lookup. Update the entry in the constant pool.
|
| - Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
|
| - // Intuitively, we would think it is necessary to always flush the
|
| - // instruction cache after patching a target address in the code as follows:
|
| - // CpuFeatures::FlushICache(pc, sizeof(target));
|
| - // However, on ARM, no instruction is actually patched in the case
|
| - // of embedded constants of the form:
|
| - // ldr ip, [pp, #...]
|
| - // since the instruction accessing this address in the constant pool remains
|
| - // unchanged.
|
| - } else {
|
| - // This is an movw_movt immediate load. Patch the immediate embedded in the
|
| - // instructions.
|
| - DCHECK(IsMovW(Memory::int32_at(pc)));
|
| - DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
|
| - uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
|
| - uint32_t immediate = reinterpret_cast<uint32_t>(target);
|
| - instr_ptr[0] = PatchMovwImmediate(instr_ptr[0], immediate & 0xFFFF);
|
| - instr_ptr[1] = PatchMovwImmediate(instr_ptr[1], immediate >> 16);
|
| - DCHECK(IsMovW(Memory::int32_at(pc)));
|
| - DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
|
| + Instr instr1 = instr_at(pc);
|
| + Instr instr2 = instr_at(pc + kInstrSize);
|
| + // Interpret 2 instructions generated by lis/ori
|
| + if (IsLis(instr1) && IsOri(instr2)) {
|
| +#if V8_TARGET_ARCH_PPC64
|
| + Instr instr4 = instr_at(pc + (3 * kInstrSize));
|
| + Instr instr5 = instr_at(pc + (4 * kInstrSize));
|
| + // Must be fixed up if the mov sequence for 64-bit values changes.
|
| + uint32_t* p = reinterpret_cast<uint32_t*>(pc);
|
| + uintptr_t itarget = reinterpret_cast<uintptr_t>(target);
|
| +
|
| + instr5 &= ~kImm16Mask;
|
| + instr5 |= itarget & kImm16Mask;
|
| + itarget = itarget >> 16;
|
| +
|
| + instr4 &= ~kImm16Mask;
|
| + instr4 |= itarget & kImm16Mask;
|
| + itarget = itarget >> 16;
|
| +
|
| + instr2 &= ~kImm16Mask;
|
| + instr2 |= itarget & kImm16Mask;
|
| + itarget = itarget >> 16;
|
| +
|
| + instr1 &= ~kImm16Mask;
|
| + instr1 |= itarget & kImm16Mask;
|
| + itarget = itarget >> 16;
|
| +
|
| + *p = instr1;
|
| + *(p+1) = instr2;
|
| + *(p+3) = instr4;
|
| + *(p+4) = instr5;
|
| + if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
|
| + CpuFeatures::FlushICache(p, 5 * kInstrSize);
|
| + }
|
| +#else
|
| + uint32_t* p = reinterpret_cast<uint32_t*>(pc);
|
| + uint32_t itarget = reinterpret_cast<uint32_t>(target);
|
| + int lo_word = itarget & kImm16Mask;
|
| + int hi_word = itarget >> 16;
|
| + instr1 &= ~kImm16Mask;
|
| + instr1 |= hi_word;
|
| + instr2 &= ~kImm16Mask;
|
| + instr2 |= lo_word;
|
| +
|
| + *p = instr1;
|
| + *(p+1) = instr2;
|
| if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
|
| - CpuFeatures::FlushICache(pc, 2 * kInstrSize);
|
| + CpuFeatures::FlushICache(p, 2 * kInstrSize);
|
| }
|
| +#endif
|
| + } else {
|
| +#if V8_OOL_CONSTANT_POOL
|
| + Memory::Address_at(
|
| + target_constant_pool_address_at(pc, constant_pool)) = target;
|
| +#else
|
| + UNREACHABLE();
|
| +#endif
|
| }
|
| }
|
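Note: the non-constant-pool path above is the inverse of the lis/ori decode
earlier in this file. A minimal 32-bit sketch (same kImm16Mask == 0xFFFF
assumption; the icache flush that the real code performs is omitted):

    #include <stdint.h>

    static const uint32_t kImm16Mask = 0xFFFF;  // assumed mask value

    // Rewrite the immediate fields of an existing lis/ori pair so the
    // sequence materializes a new 32-bit target.
    void PatchLisOri(uint32_t* instrs, uint32_t target) {
      instrs[0] = (instrs[0] & ~kImm16Mask) | (target >> 16);         // lis: high half
      instrs[1] = (instrs[1] & ~kImm16Mask) | (target & kImm16Mask);  // ori: low half
    }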
|
|
| -
|
| } } // namespace v8::internal
|
|
|
| -#endif // V8_ARM_ASSEMBLER_ARM_INL_H_
|
| +#endif // V8_PPC_ASSEMBLER_PPC_INL_H_
|
|
|