| Index: src/crankshaft/s390/lithium-codegen-s390.cc
|
| diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.cc b/src/crankshaft/s390/lithium-codegen-s390.cc
|
| similarity index 77%
|
| copy from src/crankshaft/ppc/lithium-codegen-ppc.cc
|
| copy to src/crankshaft/s390/lithium-codegen-s390.cc
|
| index cf06108660a7951533facb778dce569a2d8c097a..10edf3130476caee501407abf1064428b16a0fe3 100644
|
| --- a/src/crankshaft/ppc/lithium-codegen-ppc.cc
|
| +++ b/src/crankshaft/s390/lithium-codegen-s390.cc
|
| @@ -1,14 +1,15 @@
|
| // Copyright 2014 the V8 project authors. All rights reserved.
|
| +//
|
| // Use of this source code is governed by a BSD-style license that can be
|
| // found in the LICENSE file.
|
|
|
| -#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
|
| +#include "src/crankshaft/s390/lithium-codegen-s390.h"
|
|
|
| #include "src/base/bits.h"
|
| #include "src/code-factory.h"
|
| #include "src/code-stubs.h"
|
| #include "src/crankshaft/hydrogen-osr.h"
|
| -#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
|
| +#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
|
| #include "src/ic/ic.h"
|
| #include "src/ic/stub-cache.h"
|
| #include "src/profiler/cpu-profiler.h"
|
| @@ -16,7 +17,6 @@
|
| namespace v8 {
|
| namespace internal {
|
|
|
| -
|
| class SafepointGenerator final : public CallWrapper {
|
| public:
|
| SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
|
| @@ -36,7 +36,6 @@ class SafepointGenerator final : public CallWrapper {
|
| Safepoint::DeoptMode deopt_mode_;
|
| };
|
|
|
| -
|
| #define __ masm()->
|
|
|
| bool LCodeGen::GenerateCode() {
|
| @@ -49,15 +48,10 @@ bool LCodeGen::GenerateCode() {
|
| // the frame (that is done in GeneratePrologue).
|
| FrameScope frame_scope(masm_, StackFrame::NONE);
|
|
|
| - bool rc = GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
|
| - GenerateJumpTable() && GenerateSafepointTable();
|
| - if (FLAG_enable_embedded_constant_pool && !rc) {
|
| - masm()->AbortConstantPoolBuilding();
|
| - }
|
| - return rc;
|
| + return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
|
| + GenerateJumpTable() && GenerateSafepointTable();
|
| }
|
|
|
| -
|
| void LCodeGen::FinishCode(Handle<Code> code) {
|
| DCHECK(is_done());
|
| code->set_stack_slots(GetTotalFrameSlotCount());
|
| @@ -65,7 +59,6 @@ void LCodeGen::FinishCode(Handle<Code> code) {
|
| PopulateDeoptimizationData(code);
|
| }
|
|
|
| -
|
| void LCodeGen::SaveCallerDoubles() {
|
| DCHECK(info()->saves_caller_doubles());
|
| DCHECK(NeedsEagerFrame());
|
| @@ -74,14 +67,13 @@ void LCodeGen::SaveCallerDoubles() {
|
| BitVector* doubles = chunk()->allocated_double_registers();
|
| BitVector::Iterator save_iterator(doubles);
|
| while (!save_iterator.Done()) {
|
| - __ stfd(DoubleRegister::from_code(save_iterator.Current()),
|
| - MemOperand(sp, count * kDoubleSize));
|
| + __ std(DoubleRegister::from_code(save_iterator.Current()),
|
| + MemOperand(sp, count * kDoubleSize));
|
| save_iterator.Advance();
|
| count++;
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::RestoreCallerDoubles() {
|
| DCHECK(info()->saves_caller_doubles());
|
| DCHECK(NeedsEagerFrame());
|
| @@ -90,23 +82,21 @@ void LCodeGen::RestoreCallerDoubles() {
|
| BitVector::Iterator save_iterator(doubles);
|
| int count = 0;
|
| while (!save_iterator.Done()) {
|
| - __ lfd(DoubleRegister::from_code(save_iterator.Current()),
|
| - MemOperand(sp, count * kDoubleSize));
|
| + __ ld(DoubleRegister::from_code(save_iterator.Current()),
|
| + MemOperand(sp, count * kDoubleSize));
|
| save_iterator.Advance();
|
| count++;
|
| }
|
| }
|
|
|
| -
|
| bool LCodeGen::GeneratePrologue() {
|
| DCHECK(is_generating());
|
|
|
| if (info()->IsOptimizing()) {
|
| ProfileEntryHookStub::MaybeCallEntryHook(masm_);
|
|
|
| - // r4: Callee's JS function.
|
| + // r3: Callee's JS function.
|
| // cp: Callee's context.
|
| - // pp: Callee's constant pool pointer (if enabled)
|
| // fp: Caller's frame pointer.
|
| // lr: Caller's pc.
|
| // ip: Our own function entry (required by the prologue)
|
| @@ -115,10 +105,10 @@ bool LCodeGen::GeneratePrologue() {
|
| int prologue_offset = masm_->pc_offset();
|
|
|
| if (prologue_offset) {
|
| - // Prologue logic requires it's starting address in ip and the
|
| - // corresponding offset from the function entry.
|
| - prologue_offset += Instruction::kInstrSize;
|
| - __ addi(ip, ip, Operand(prologue_offset));
|
| + // Prologue logic requires its starting address in ip and the
|
| + // corresponding offset from the function entry. Need to add
|
| + // 4 bytes for the size of AHI/AGHI that AddP expands into.
|
| + __ AddP(ip, ip, Operand(prologue_offset + sizeof(FourByteInstr)));
|
| }
|
| info()->set_prologue_offset(prologue_offset);
|
| if (NeedsEagerFrame()) {
|
| @@ -133,18 +123,18 @@ bool LCodeGen::GeneratePrologue() {
|
| // Reserve space for the stack slots needed by the code.
|
| int slots = GetStackSlotCount();
|
| if (slots > 0) {
|
| - __ subi(sp, sp, Operand(slots * kPointerSize));
|
| + __ lay(sp, MemOperand(sp, -(slots * kPointerSize)));
|
| if (FLAG_debug_code) {
|
| - __ Push(r3, r4);
|
| - __ li(r0, Operand(slots));
|
| - __ mtctr(r0);
|
| - __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
|
| - __ mov(r4, Operand(kSlotsZapValue));
|
| + __ Push(r2, r3);
|
| + __ mov(r2, Operand(slots * kPointerSize));
|
| + __ mov(r3, Operand(kSlotsZapValue));
|
| Label loop;
|
| __ bind(&loop);
|
| - __ StorePU(r4, MemOperand(r3, -kPointerSize));
|
| - __ bdnz(&loop);
|
| - __ Pop(r3, r4);
|
| + __ StoreP(r3, MemOperand(sp, r2, kPointerSize));
|
| + __ lay(r2, MemOperand(r2, -kPointerSize));
|
| + __ CmpP(r2, Operand::Zero());
|
| + __ bne(&loop);
|
| + __ Pop(r2, r3);
|
| }
|
| }
|
|
|
| @@ -154,7 +144,6 @@ bool LCodeGen::GeneratePrologue() {
|
| return !is_aborted();
|
| }
|
|
|
| -
|
| void LCodeGen::DoPrologue(LPrologue* instr) {
|
| Comment(";;; Prologue begin");
|
|
|
| @@ -162,11 +151,11 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
|
| if (info()->scope()->num_heap_slots() > 0) {
|
| Comment(";;; Allocate local context");
|
| bool need_write_barrier = true;
|
| - // Argument to NewContext is the function, which is in r4.
|
| + // Argument to NewContext is the function, which is in r3.
|
| int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
|
| Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
|
| if (info()->scope()->is_script_scope()) {
|
| - __ push(r4);
|
| + __ push(r3);
|
| __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
|
| __ CallRuntime(Runtime::kNewScriptContext);
|
| deopt_mode = Safepoint::kLazyDeopt;
|
| @@ -176,15 +165,15 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
|
| // Result of FastNewContextStub is always in new space.
|
| need_write_barrier = false;
|
| } else {
|
| - __ push(r4);
|
| + __ push(r3);
|
| __ CallRuntime(Runtime::kNewFunctionContext);
|
| }
|
| RecordSafepoint(deopt_mode);
|
|
|
| - // Context is returned in both r3 and cp. It replaces the context
|
| + // Context is returned in both r2 and cp. It replaces the context
|
| // passed to us. It's saved in the stack and kept live in cp.
|
| - __ mr(cp, r3);
|
| - __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| + __ LoadRR(cp, r2);
|
| + __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| // Copy any necessary parameters into the context.
|
| int num_parameters = scope()->num_parameters();
|
| int first_parameter = scope()->has_this_declaration() ? -1 : 0;
|
| @@ -194,17 +183,17 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
|
| int parameter_offset = StandardFrameConstants::kCallerSPOffset +
|
| (num_parameters - 1 - i) * kPointerSize;
|
| // Load parameter from stack.
|
| - __ LoadP(r3, MemOperand(fp, parameter_offset));
|
| + __ LoadP(r2, MemOperand(fp, parameter_offset));
|
| // Store it in the context.
|
| MemOperand target = ContextMemOperand(cp, var->index());
|
| - __ StoreP(r3, target, r0);
|
| - // Update the write barrier. This clobbers r6 and r3.
|
| + __ StoreP(r2, target);
|
| + // Update the write barrier. This clobbers r5 and r2.
|
| if (need_write_barrier) {
|
| - __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
|
| + __ RecordWriteContextSlot(cp, target.offset(), r2, r5,
|
| GetLinkRegisterState(), kSaveFPRegs);
|
| } else if (FLAG_debug_code) {
|
| Label done;
|
| - __ JumpIfInNewSpace(cp, r3, &done);
|
| + __ JumpIfInNewSpace(cp, r2, &done);
|
| __ Abort(kExpectedNewSpaceObject);
|
| __ bind(&done);
|
| }
|
| @@ -216,7 +205,6 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
|
| Comment(";;; Prologue end");
|
| }
|
|
|
| -
|
| void LCodeGen::GenerateOsrPrologue() {
|
| // Generate the OSR entry prologue at the first unknown OSR value, or if there
|
| // are none, at the OSR entrypoint instruction.
|
| @@ -228,10 +216,9 @@ void LCodeGen::GenerateOsrPrologue() {
|
| // optimized frame.
|
| int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
|
| DCHECK(slots >= 0);
|
| - __ subi(sp, sp, Operand(slots * kPointerSize));
|
| + __ lay(sp, MemOperand(sp, -slots * kPointerSize));
|
| }
|
|
|
| -
|
| void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
|
| if (instr->IsCall()) {
|
| EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
|
| @@ -241,7 +228,6 @@ void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
|
| }
|
| }
|
|
|
| -
|
| bool LCodeGen::GenerateDeferredCode() {
|
| DCHECK(is_generating());
|
| if (deferred_.length() > 0) {
|
| @@ -266,7 +252,8 @@ bool LCodeGen::GenerateDeferredCode() {
|
| frame_is_built_ = true;
|
| __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
|
| __ PushFixedFrame(scratch0());
|
| - __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
|
| + __ la(fp,
|
| + MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
|
| Comment(";;; Deferred code");
|
| }
|
| code->Generate();
|
| @@ -283,17 +270,16 @@ bool LCodeGen::GenerateDeferredCode() {
|
| return !is_aborted();
|
| }
|
|
|
| -
|
| bool LCodeGen::GenerateJumpTable() {
|
| // Check that the jump table is accessible from everywhere in the function
|
| - // code, i.e. that offsets to the table can be encoded in the 24bit signed
|
| - // immediate of a branch instruction.
|
| + // code, i.e. that offsets in halfwords to the table can be encoded in the
|
| + // 32-bit signed immediate of a branch instruction.
|
| // To simplify we consider the code size from the first instruction to the
|
| // end of the jump table. We also don't consider the pc load delta.
|
| // Each entry in the jump table generates one instruction and inlines one
|
| // 32bit data after it.
|
| - if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
|
| - jump_table_.length() * 7)) {
|
| + // TODO(joransiu): The Int24 condition can likely be relaxed for S390
|
| + if (!is_int24(masm()->pc_offset() + jump_table_.length() * 7)) {
|
| Abort(kGeneratedCodeIsTooLarge);
|
| }
|
|
|
| @@ -323,9 +309,9 @@ bool LCodeGen::GenerateJumpTable() {
|
| DCHECK(!info()->saves_caller_doubles());
|
| Comment(";;; call deopt with frame");
|
| __ PushFixedFrame();
|
| - __ b(&needs_frame, SetLK);
|
| + __ b(r14, &needs_frame);
|
| } else {
|
| - __ b(&call_deopt_entry, SetLK);
|
| + __ b(r14, &call_deopt_entry);
|
| }
|
| info()->LogDeoptCallPosition(masm()->pc_offset(),
|
| table_entry->deopt_info.inlining_id);
|
| @@ -339,7 +325,7 @@ bool LCodeGen::GenerateJumpTable() {
|
| DCHECK(info()->IsStub());
|
| __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
|
| __ push(ip);
|
| - __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
|
| + __ lay(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
|
| }
|
|
|
| Comment(";;; call deopt");
|
| @@ -352,7 +338,7 @@ bool LCodeGen::GenerateJumpTable() {
|
|
|
| // Add the base address to the offset previously loaded in entry_offset.
|
| __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
|
| - __ add(ip, entry_offset, ip);
|
| + __ AddP(ip, entry_offset, ip);
|
| __ Jump(ip);
|
| }
|
|
|
| @@ -362,30 +348,25 @@ bool LCodeGen::GenerateJumpTable() {
|
| return !is_aborted();
|
| }
|
|
|
| -
|
| bool LCodeGen::GenerateSafepointTable() {
|
| DCHECK(is_done());
|
| safepoints_.Emit(masm(), GetTotalFrameSlotCount());
|
| return !is_aborted();
|
| }
|
|
|
| -
|
| Register LCodeGen::ToRegister(int code) const {
|
| return Register::from_code(code);
|
| }
|
|
|
| -
|
| DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
|
| return DoubleRegister::from_code(code);
|
| }
|
|
|
| -
|
| Register LCodeGen::ToRegister(LOperand* op) const {
|
| DCHECK(op->IsRegister());
|
| return ToRegister(op->index());
|
| }
|
|
|
| -
|
| Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
|
| if (op->IsRegister()) {
|
| return ToRegister(op->index());
|
| @@ -413,7 +394,6 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
|
| return scratch;
|
| }
|
|
|
| -
|
| void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
|
| Register dst) {
|
| DCHECK(IsInteger32(const_op));
|
| @@ -426,35 +406,29 @@ void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
|
| }
|
| }
|
|
|
| -
|
| DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
|
| DCHECK(op->IsDoubleRegister());
|
| return ToDoubleRegister(op->index());
|
| }
|
|
|
| -
|
| Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
|
| HConstant* constant = chunk_->LookupConstant(op);
|
| DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
|
| return constant->handle(isolate());
|
| }
|
|
|
| -
|
| bool LCodeGen::IsInteger32(LConstantOperand* op) const {
|
| return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
|
| }
|
|
|
| -
|
| bool LCodeGen::IsSmi(LConstantOperand* op) const {
|
| return chunk_->LookupLiteralRepresentation(op).IsSmi();
|
| }
|
|
|
| -
|
| int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
|
| return ToRepresentation(op, Representation::Integer32());
|
| }
|
|
|
| -
|
| intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
|
| const Representation& r) const {
|
| HConstant* constant = chunk_->LookupConstant(op);
|
| @@ -464,20 +438,17 @@ intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
|
| return reinterpret_cast<intptr_t>(Smi::FromInt(value));
|
| }
|
|
|
| -
|
| Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
|
| HConstant* constant = chunk_->LookupConstant(op);
|
| return Smi::FromInt(constant->Integer32Value());
|
| }
|
|
|
| -
|
| double LCodeGen::ToDouble(LConstantOperand* op) const {
|
| HConstant* constant = chunk_->LookupConstant(op);
|
| DCHECK(constant->HasDoubleValue());
|
| return constant->DoubleValue();
|
| }
|
|
|
| -
|
| Operand LCodeGen::ToOperand(LOperand* op) {
|
| if (op->IsConstantOperand()) {
|
| LConstantOperand* const_op = LConstantOperand::cast(op);
|
| @@ -505,13 +476,11 @@ Operand LCodeGen::ToOperand(LOperand* op) {
|
| return Operand::Zero();
|
| }
|
|
|
| -
|
| static int ArgumentsOffsetWithoutFrame(int index) {
|
| DCHECK(index < 0);
|
| return -(index + 1) * kPointerSize;
|
| }
|
|
|
| -
|
| MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
|
| DCHECK(!op->IsRegister());
|
| DCHECK(!op->IsDoubleRegister());
|
| @@ -525,7 +494,6 @@ MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
|
| }
|
| }
|
|
|
| -
|
| MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
|
| DCHECK(op->IsDoubleStackSlot());
|
| if (NeedsEagerFrame()) {
|
| @@ -538,7 +506,6 @@ MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::WriteTranslation(LEnvironment* environment,
|
| Translation* translation) {
|
| if (environment == NULL) return;
|
| @@ -559,7 +526,6 @@ void LCodeGen::WriteTranslation(LEnvironment* environment,
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::AddToTranslation(LEnvironment* environment,
|
| Translation* translation, LOperand* op,
|
| bool is_tagged, bool is_uint32,
|
| @@ -624,13 +590,11 @@ void LCodeGen::AddToTranslation(LEnvironment* environment,
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
|
| LInstruction* instr) {
|
| CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
|
| }
|
|
|
| -
|
| void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
|
| LInstruction* instr,
|
| SafepointMode safepoint_mode) {
|
| @@ -645,7 +609,6 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
|
| LInstruction* instr, SaveFPRegsMode save_doubles) {
|
| DCHECK(instr != NULL);
|
| @@ -655,7 +618,6 @@ void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
|
| RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
|
| }
|
|
|
| -
|
| void LCodeGen::LoadContextFromDeferred(LOperand* context) {
|
| if (context->IsRegister()) {
|
| __ Move(cp, ToRegister(context));
|
| @@ -670,7 +632,6 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
|
| LInstruction* instr, LOperand* context) {
|
| LoadContextFromDeferred(context);
|
| @@ -679,7 +640,6 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
|
| Safepoint::kNoLazyDeopt);
|
| }
|
|
|
| -
|
| void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
|
| Safepoint::DeoptMode mode) {
|
| environment->set_has_been_used();
|
| @@ -715,7 +675,6 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
|
| Deoptimizer::DeoptReason deopt_reason,
|
| Deoptimizer::BailoutType bailout_type,
|
| @@ -732,25 +691,56 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
|
| }
|
|
|
| if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
|
| - CRegister alt_cr = cr6;
|
| Register scratch = scratch0();
|
| ExternalReference count = ExternalReference::stress_deopt_count(isolate());
|
| Label no_deopt;
|
| - DCHECK(!alt_cr.is(cr));
|
| - __ Push(r4, scratch);
|
| +
|
| + // Store the condition on the stack if necessary
|
| + if (cond != al) {
|
| + Label done;
|
| + __ LoadImmP(scratch, Operand::Zero());
|
| + __ b(NegateCondition(cond), &done, Label::kNear);
|
| + __ LoadImmP(scratch, Operand(1));
|
| + __ bind(&done);
|
| + __ push(scratch);
|
| + }
|
| +
|
| + Label done;
|
| + __ Push(r3);
|
| __ mov(scratch, Operand(count));
|
| - __ lwz(r4, MemOperand(scratch));
|
| - __ subi(r4, r4, Operand(1));
|
| - __ cmpi(r4, Operand::Zero(), alt_cr);
|
| - __ bne(&no_deopt, alt_cr);
|
| - __ li(r4, Operand(FLAG_deopt_every_n_times));
|
| - __ stw(r4, MemOperand(scratch));
|
| - __ Pop(r4, scratch);
|
| + __ LoadW(r3, MemOperand(scratch));
|
| + __ Sub32(r3, r3, Operand(1));
|
| + __ Cmp32(r3, Operand::Zero());
|
| + __ bne(&no_deopt, Label::kNear);
|
| +
|
| + __ LoadImmP(r3, Operand(FLAG_deopt_every_n_times));
|
| + __ StoreW(r3, MemOperand(scratch));
|
| + __ Pop(r3);
|
| +
|
| + if (cond != al) {
|
| + // Clean up the stack before the deoptimizer call
|
| + __ pop(scratch);
|
| + }
|
|
|
| __ Call(entry, RelocInfo::RUNTIME_ENTRY);
|
| +
|
| + __ b(&done);
|
| +
|
| __ bind(&no_deopt);
|
| - __ stw(r4, MemOperand(scratch));
|
| - __ Pop(r4, scratch);
|
| + __ StoreW(r3, MemOperand(scratch));
|
| + __ Pop(r3);
|
| +
|
| + if (cond != al) {
|
| + // Clean up the stack before the deoptimizer call
|
| + __ pop(scratch);
|
| + }
|
| +
|
| + __ bind(&done);
|
| +
|
| + if (cond != al) {
|
| + cond = ne;
|
| + __ CmpP(scratch, Operand::Zero());
|
| + }
|
| }
|
|
|
| if (info()->ShouldTrapOnDeopt()) {
|
| @@ -763,7 +753,6 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
|
| // Go through jump table if we need to handle condition, build frame, or
|
| // restore caller doubles.
|
| if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
|
| - DeoptComment(deopt_info);
|
| __ Call(entry, RelocInfo::RUNTIME_ENTRY);
|
| info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
|
| } else {
|
| @@ -776,20 +765,18 @@ void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
|
| !table_entry.IsEquivalentTo(jump_table_.last())) {
|
| jump_table_.Add(table_entry, zone());
|
| }
|
| - __ b(cond, &jump_table_.last().label, cr);
|
| + __ b(cond, &jump_table_.last().label /*, cr*/);
|
| }
|
| }
|
|
|
| -
|
| -void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
|
| +void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
|
| Deoptimizer::DeoptReason deopt_reason,
|
| CRegister cr) {
|
| Deoptimizer::BailoutType bailout_type =
|
| info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
|
| - DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
|
| + DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr);
|
| }
|
|
|
| -
|
| void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
|
| SafepointMode safepoint_mode) {
|
| if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
|
| @@ -801,7 +788,6 @@ void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
|
| int arguments, Safepoint::DeoptMode deopt_mode) {
|
| DCHECK(expected_safepoint_kind_ == kind);
|
| @@ -819,40 +805,34 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::RecordSafepoint(LPointerMap* pointers,
|
| Safepoint::DeoptMode deopt_mode) {
|
| RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
|
| }
|
|
|
| -
|
| void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
|
| LPointerMap empty_pointers(zone());
|
| RecordSafepoint(&empty_pointers, deopt_mode);
|
| }
|
|
|
| -
|
| void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
|
| int arguments,
|
| Safepoint::DeoptMode deopt_mode) {
|
| RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
|
| }
|
|
|
| -
|
| void LCodeGen::RecordAndWritePosition(int position) {
|
| if (position == RelocInfo::kNoPosition) return;
|
| masm()->positions_recorder()->RecordPosition(position);
|
| masm()->positions_recorder()->WriteRecordedPositions();
|
| }
|
|
|
| -
|
| static const char* LabelType(LLabel* label) {
|
| if (label->is_loop_header()) return " (loop header)";
|
| if (label->is_osr_entry()) return " (OSR entry)";
|
| return "";
|
| }
|
|
|
| -
|
| void LCodeGen::DoLabel(LLabel* label) {
|
| Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
|
| current_instruction_, label->hydrogen_value()->id(),
|
| @@ -862,10 +842,8 @@ void LCodeGen::DoLabel(LLabel* label) {
|
| DoGap(label);
|
| }
|
|
|
| -
|
| void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
|
|
|
| -
|
| void LCodeGen::DoGap(LGap* gap) {
|
| for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
|
| i++) {
|
| @@ -875,20 +853,16 @@ void LCodeGen::DoGap(LGap* gap) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
|
|
|
| -
|
| void LCodeGen::DoParameter(LParameter* instr) {
|
| // Nothing to do.
|
| }
|
|
|
| -
|
| void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
|
| GenerateOsrPrologue();
|
| }
|
|
|
| -
|
| void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
|
| Register dividend = ToRegister(instr->dividend());
|
| int32_t divisor = instr->divisor();
|
| @@ -904,34 +878,33 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
|
| int32_t shift = WhichPowerOf2Abs(divisor);
|
| Label dividend_is_not_negative, done;
|
| if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
|
| - __ cmpwi(dividend, Operand::Zero());
|
| - __ bge(÷nd_is_not_negative);
|
| + __ CmpP(dividend, Operand::Zero());
|
| + __ bge(÷nd_is_not_negative, Label::kNear);
|
| if (shift) {
|
| // Note that this is correct even for kMinInt operands.
|
| - __ neg(dividend, dividend);
|
| + __ LoadComplementRR(dividend, dividend);
|
| __ ExtractBitRange(dividend, dividend, shift - 1, 0);
|
| - __ neg(dividend, dividend, LeaveOE, SetRC);
|
| + __ LoadComplementRR(dividend, dividend);
|
| if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| }
|
| } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - __ li(dividend, Operand::Zero());
|
| + __ mov(dividend, Operand::Zero());
|
| } else {
|
| DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
|
| }
|
| - __ b(&done);
|
| + __ b(&done, Label::kNear);
|
| }
|
|
|
| __ bind(÷nd_is_not_negative);
|
| if (shift) {
|
| __ ExtractBitRange(dividend, dividend, shift - 1, 0);
|
| } else {
|
| - __ li(dividend, Operand::Zero());
|
| + __ mov(dividend, Operand::Zero());
|
| }
|
| __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::DoModByConstI(LModByConstI* instr) {
|
| Register dividend = ToRegister(instr->dividend());
|
| int32_t divisor = instr->divisor();
|
| @@ -945,76 +918,71 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
|
|
|
| __ TruncatingDiv(result, dividend, Abs(divisor));
|
| __ mov(ip, Operand(Abs(divisor)));
|
| - __ mullw(result, result, ip);
|
| - __ sub(result, dividend, result, LeaveOE, SetRC);
|
| + __ Mul(result, result, ip);
|
| + __ SubP(result, dividend, result /*, LeaveOE, SetRC*/);
|
|
|
| // Check for negative zero.
|
| HMod* hmod = instr->hydrogen();
|
| if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| Label remainder_not_zero;
|
| - __ bne(&remainder_not_zero, cr0);
|
| - __ cmpwi(dividend, Operand::Zero());
|
| + __ bne(&remainder_not_zero, Label::kNear /*, cr0*/);
|
| + __ Cmp32(dividend, Operand::Zero());
|
| DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| __ bind(&remainder_not_zero);
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoModI(LModI* instr) {
|
| HMod* hmod = instr->hydrogen();
|
| Register left_reg = ToRegister(instr->left());
|
| Register right_reg = ToRegister(instr->right());
|
| Register result_reg = ToRegister(instr->result());
|
| - Register scratch = scratch0();
|
| - bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
|
| Label done;
|
|
|
| - if (can_overflow) {
|
| - __ li(r0, Operand::Zero()); // clear xer
|
| - __ mtxer(r0);
|
| - }
|
| -
|
| - __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
|
| -
|
| // Check for x % 0.
|
| if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
|
| - __ cmpwi(right_reg, Operand::Zero());
|
| + __ Cmp32(right_reg, Operand::Zero());
|
| DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
|
| }
|
|
|
| - // Check for kMinInt % -1, divw will return undefined, which is not what we
|
| + // Check for kMinInt % -1, dr will return undefined, which is not what we
|
| // want. We have to deopt if we care about -0, because we can't return that.
|
| - if (can_overflow) {
|
| + if (hmod->CheckFlag(HValue::kCanOverflow)) {
|
| + Label no_overflow_possible;
|
| + __ Cmp32(left_reg, Operand(kMinInt));
|
| + __ bne(&no_overflow_possible, Label::kNear);
|
| + __ Cmp32(right_reg, Operand(-1));
|
| if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| } else {
|
| - if (CpuFeatures::IsSupported(ISELECT)) {
|
| - __ isel(overflow, result_reg, r0, result_reg, cr0);
|
| - __ boverflow(&done, cr0);
|
| - } else {
|
| - Label no_overflow_possible;
|
| - __ bnooverflow(&no_overflow_possible, cr0);
|
| - __ li(result_reg, Operand::Zero());
|
| - __ b(&done);
|
| - __ bind(&no_overflow_possible);
|
| - }
|
| + __ b(ne, &no_overflow_possible, Label::kNear);
|
| + __ mov(result_reg, Operand::Zero());
|
| + __ b(&done, Label::kNear);
|
| }
|
| + __ bind(&no_overflow_possible);
|
| }
|
|
|
| - __ mullw(scratch, right_reg, scratch);
|
| - __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);
|
| + // Divide instruction dr will implicitly use register pair
|
| + // r0 & r1 below.
|
| + DCHECK(!left_reg.is(r1));
|
| + DCHECK(!right_reg.is(r1));
|
| + DCHECK(!result_reg.is(r1));
|
| + __ LoadRR(r0, left_reg);
|
| + __ srda(r0, Operand(32));
|
| + __ dr(r0, right_reg); // R0:R1 = R1 / divisor; R0 = remainder, R1 = quotient
|
| +
|
| + __ LoadAndTestP_ExtendSrc(result_reg, r0); // Copy remainder to result reg
|
|
|
| // If we care about -0, test if the dividend is <0 and the result is 0.
|
| if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - __ bne(&done, cr0);
|
| - __ cmpwi(left_reg, Operand::Zero());
|
| + __ bne(&done, Label::kNear);
|
| + __ Cmp32(left_reg, Operand::Zero());
|
| DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| }
|
|
|
| __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
|
| Register dividend = ToRegister(instr->dividend());
|
| int32_t divisor = instr->divisor();
|
| @@ -1025,13 +993,12 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
|
| // Check for (0 / -x) that will produce negative zero.
|
| HDiv* hdiv = instr->hydrogen();
|
| if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
|
| - __ cmpwi(dividend, Operand::Zero());
|
| + __ Cmp32(dividend, Operand::Zero());
|
| DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| }
|
| // Check for (kMinInt / -1).
|
| if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
|
| - __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
|
| - __ cmpw(dividend, r0);
|
| + __ Cmp32(dividend, Operand(0x80000000));
|
| DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
|
| }
|
|
|
| @@ -1044,25 +1011,27 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
|
| }
|
|
|
| if (divisor == -1) { // Nice shortcut, not needed for correctness.
|
| - __ neg(result, dividend);
|
| + __ LoadComplementRR(result, dividend);
|
| return;
|
| }
|
| if (shift == 0) {
|
| - __ mr(result, dividend);
|
| + __ LoadRR(result, dividend);
|
| } else {
|
| if (shift == 1) {
|
| - __ srwi(result, dividend, Operand(31));
|
| + __ ShiftRight(result, dividend, Operand(31));
|
| } else {
|
| - __ srawi(result, dividend, 31);
|
| - __ srwi(result, result, Operand(32 - shift));
|
| + __ ShiftRightArith(result, dividend, Operand(31));
|
| + __ ShiftRight(result, result, Operand(32 - shift));
|
| }
|
| - __ add(result, dividend, result);
|
| - __ srawi(result, result, shift);
|
| + __ AddP(result, dividend, result);
|
| + __ ShiftRightArith(result, result, Operand(shift));
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ lgfr(result, result);
|
| +#endif
|
| }
|
| - if (divisor < 0) __ neg(result, result);
|
| + if (divisor < 0) __ LoadComplementRR(result, result);
|
| }
|
|
|
| -
|
| void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
|
| Register dividend = ToRegister(instr->dividend());
|
| int32_t divisor = instr->divisor();
|
| @@ -1077,84 +1046,71 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
|
| // Check for (0 / -x) that will produce negative zero.
|
| HDiv* hdiv = instr->hydrogen();
|
| if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
|
| - __ cmpwi(dividend, Operand::Zero());
|
| + __ Cmp32(dividend, Operand::Zero());
|
| DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| }
|
|
|
| __ TruncatingDiv(result, dividend, Abs(divisor));
|
| - if (divisor < 0) __ neg(result, result);
|
| + if (divisor < 0) __ LoadComplementRR(result, result);
|
|
|
| if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
|
| Register scratch = scratch0();
|
| __ mov(ip, Operand(divisor));
|
| - __ mullw(scratch, result, ip);
|
| - __ cmpw(scratch, dividend);
|
| + __ Mul(scratch, result, ip);
|
| + __ Cmp32(scratch, dividend);
|
| DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
|
| }
|
| }
|
|
|
| -
|
| // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
|
| void LCodeGen::DoDivI(LDivI* instr) {
|
| HBinaryOperation* hdiv = instr->hydrogen();
|
| const Register dividend = ToRegister(instr->dividend());
|
| const Register divisor = ToRegister(instr->divisor());
|
| Register result = ToRegister(instr->result());
|
| - bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
|
|
|
| DCHECK(!dividend.is(result));
|
| DCHECK(!divisor.is(result));
|
|
|
| - if (can_overflow) {
|
| - __ li(r0, Operand::Zero()); // clear xer
|
| - __ mtxer(r0);
|
| - }
|
| -
|
| - __ divw(result, dividend, divisor, SetOE, SetRC);
|
| -
|
| // Check for x / 0.
|
| if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
|
| - __ cmpwi(divisor, Operand::Zero());
|
| + __ Cmp32(divisor, Operand::Zero());
|
| DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
|
| }
|
|
|
| // Check for (0 / -x) that will produce negative zero.
|
| if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| Label dividend_not_zero;
|
| - __ cmpwi(dividend, Operand::Zero());
|
| - __ bne(÷nd_not_zero);
|
| - __ cmpwi(divisor, Operand::Zero());
|
| + __ Cmp32(dividend, Operand::Zero());
|
| + __ bne(÷nd_not_zero, Label::kNear);
|
| + __ Cmp32(divisor, Operand::Zero());
|
| DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| __ bind(÷nd_not_zero);
|
| }
|
|
|
| // Check for (kMinInt / -1).
|
| - if (can_overflow) {
|
| - if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
|
| - DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
|
| - } else {
|
| - // When truncating, we want kMinInt / -1 = kMinInt.
|
| - if (CpuFeatures::IsSupported(ISELECT)) {
|
| - __ isel(overflow, result, dividend, result, cr0);
|
| - } else {
|
| - Label no_overflow_possible;
|
| - __ bnooverflow(&no_overflow_possible, cr0);
|
| - __ mr(result, dividend);
|
| - __ bind(&no_overflow_possible);
|
| - }
|
| - }
|
| + if (hdiv->CheckFlag(HValue::kCanOverflow)) {
|
| + Label dividend_not_min_int;
|
| + __ Cmp32(dividend, Operand(kMinInt));
|
| + __ bne(÷nd_not_min_int, Label::kNear);
|
| + __ Cmp32(divisor, Operand(-1));
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
|
| + __ bind(÷nd_not_min_int);
|
| }
|
|
|
| + __ LoadRR(r0, dividend);
|
| + __ srda(r0, Operand(32));
|
| + __ dr(r0, divisor); // R0:R1 = R1 / divisor - R0 remainder - R1 quotient
|
| +
|
| + __ LoadAndTestP_ExtendSrc(result, r1); // Move quotient to result register
|
| +
|
| if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
|
| // Deoptimize if remainder is not 0.
|
| - Register scratch = scratch0();
|
| - __ mullw(scratch, divisor, result);
|
| - __ cmpw(dividend, scratch);
|
| + __ Cmp32(r0, Operand::Zero());
|
| DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
|
| HBinaryOperation* hdiv = instr->hydrogen();
|
| Register dividend = ToRegister(instr->dividend());
|
| @@ -1167,41 +1123,36 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
|
| int32_t shift = WhichPowerOf2Abs(divisor);
|
| if (divisor > 0) {
|
| if (shift || !result.is(dividend)) {
|
| - __ srawi(result, dividend, shift);
|
| + __ ShiftRightArith(result, dividend, Operand(shift));
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ lgfr(result, result);
|
| +#endif
|
| }
|
| return;
|
| }
|
|
|
| - // If the divisor is negative, we have to negate and handle edge cases.
|
| - OEBit oe = LeaveOE;
|
| -#if V8_TARGET_ARCH_PPC64
|
| +// If the divisor is negative, we have to negate and handle edge cases.
|
| +#if V8_TARGET_ARCH_S390X
|
| if (divisor == -1 && can_overflow) {
|
| - __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
|
| - __ cmpw(dividend, r0);
|
| + __ Cmp32(dividend, Operand(0x80000000));
|
| DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
|
| }
|
| -#else
|
| - if (can_overflow) {
|
| - __ li(r0, Operand::Zero()); // clear xer
|
| - __ mtxer(r0);
|
| - oe = SetOE;
|
| - }
|
| #endif
|
|
|
| - __ neg(result, dividend, oe, SetRC);
|
| + __ LoadComplementRR(result, dividend);
|
| if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
|
| }
|
|
|
| // If the negation could not overflow, simply shifting is OK.
|
| -#if !V8_TARGET_ARCH_PPC64
|
| +#if !V8_TARGET_ARCH_S390X
|
| if (!can_overflow) {
|
| #endif
|
| if (shift) {
|
| - __ ShiftRightArithImm(result, result, shift);
|
| + __ ShiftRightArithP(result, result, Operand(shift));
|
| }
|
| return;
|
| -#if !V8_TARGET_ARCH_PPC64
|
| +#if !V8_TARGET_ARCH_S390X
|
| }
|
|
|
| // Dividing by -1 is basically negation, unless we overflow.
|
| @@ -1210,17 +1161,19 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
|
| return;
|
| }
|
|
|
| - Label overflow, done;
|
| - __ boverflow(&overflow, cr0);
|
| - __ srawi(result, result, shift);
|
| - __ b(&done);
|
| - __ bind(&overflow);
|
| + Label overflow_label, done;
|
| + __ b(overflow, &overflow_label, Label::kNear);
|
| + __ ShiftRightArith(result, result, Operand(shift));
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ lgfr(result, result);
|
| +#endif
|
| + __ b(&done, Label::kNear);
|
| + __ bind(&overflow_label);
|
| __ mov(result, Operand(kMinInt / divisor));
|
| __ bind(&done);
|
| #endif
|
| }
|
|
|
| -
|
| void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
|
| Register dividend = ToRegister(instr->dividend());
|
| int32_t divisor = instr->divisor();
|
| @@ -1235,7 +1188,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
|
| // Check for (0 / -x) that will produce negative zero.
|
| HMathFloorOfDiv* hdiv = instr->hydrogen();
|
| if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
|
| - __ cmpwi(dividend, Operand::Zero());
|
| + __ Cmp32(dividend, Operand::Zero());
|
| DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| }
|
|
|
| @@ -1244,7 +1197,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
|
| if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
|
| (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
|
| __ TruncatingDiv(result, dividend, Abs(divisor));
|
| - if (divisor < 0) __ neg(result, result);
|
| + if (divisor < 0) __ LoadComplementRR(result, result);
|
| return;
|
| }
|
|
|
| @@ -1253,114 +1206,110 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
|
| Register temp = ToRegister(instr->temp());
|
| DCHECK(!temp.is(dividend) && !temp.is(result));
|
| Label needs_adjustment, done;
|
| - __ cmpwi(dividend, Operand::Zero());
|
| + __ Cmp32(dividend, Operand::Zero());
|
| __ b(divisor > 0 ? lt : gt, &needs_adjustment);
|
| __ TruncatingDiv(result, dividend, Abs(divisor));
|
| - if (divisor < 0) __ neg(result, result);
|
| - __ b(&done);
|
| + if (divisor < 0) __ LoadComplementRR(result, result);
|
| + __ b(&done, Label::kNear);
|
| __ bind(&needs_adjustment);
|
| - __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
|
| + __ AddP(temp, dividend, Operand(divisor > 0 ? 1 : -1));
|
| __ TruncatingDiv(result, temp, Abs(divisor));
|
| - if (divisor < 0) __ neg(result, result);
|
| - __ subi(result, result, Operand(1));
|
| + if (divisor < 0) __ LoadComplementRR(result, result);
|
| + __ SubP(result, result, Operand(1));
|
| __ bind(&done);
|
| }
|
|
|
| -
|
| // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
|
| void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
|
| HBinaryOperation* hdiv = instr->hydrogen();
|
| const Register dividend = ToRegister(instr->dividend());
|
| const Register divisor = ToRegister(instr->divisor());
|
| Register result = ToRegister(instr->result());
|
| - bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
|
|
|
| DCHECK(!dividend.is(result));
|
| DCHECK(!divisor.is(result));
|
|
|
| - if (can_overflow) {
|
| - __ li(r0, Operand::Zero()); // clear xer
|
| - __ mtxer(r0);
|
| - }
|
| -
|
| - __ divw(result, dividend, divisor, SetOE, SetRC);
|
| -
|
| // Check for x / 0.
|
| if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
|
| - __ cmpwi(divisor, Operand::Zero());
|
| + __ Cmp32(divisor, Operand::Zero());
|
| DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
|
| }
|
|
|
| // Check for (0 / -x) that will produce negative zero.
|
| if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| Label dividend_not_zero;
|
| - __ cmpwi(dividend, Operand::Zero());
|
| - __ bne(÷nd_not_zero);
|
| - __ cmpwi(divisor, Operand::Zero());
|
| + __ Cmp32(dividend, Operand::Zero());
|
| + __ bne(÷nd_not_zero, Label::kNear);
|
| + __ Cmp32(divisor, Operand::Zero());
|
| DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| __ bind(÷nd_not_zero);
|
| }
|
|
|
| // Check for (kMinInt / -1).
|
| - if (can_overflow) {
|
| + if (hdiv->CheckFlag(HValue::kCanOverflow)) {
|
| + Label no_overflow_possible;
|
| + __ Cmp32(dividend, Operand(kMinInt));
|
| + __ bne(&no_overflow_possible, Label::kNear);
|
| + __ Cmp32(divisor, Operand(-1));
|
| if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
|
| - DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
|
| } else {
|
| - // When truncating, we want kMinInt / -1 = kMinInt.
|
| - if (CpuFeatures::IsSupported(ISELECT)) {
|
| - __ isel(overflow, result, dividend, result, cr0);
|
| - } else {
|
| - Label no_overflow_possible;
|
| - __ bnooverflow(&no_overflow_possible, cr0);
|
| - __ mr(result, dividend);
|
| - __ bind(&no_overflow_possible);
|
| - }
|
| + __ bne(&no_overflow_possible, Label::kNear);
|
| + __ LoadRR(result, dividend);
|
| }
|
| + __ bind(&no_overflow_possible);
|
| }
|
|
|
| + __ LoadRR(r0, dividend);
|
| + __ srda(r0, Operand(32));
|
| + __ dr(r0, divisor); // R0:R1 = R1 / divisor - R0 remainder - R1 quotient
|
| +
|
| + __ lr(result, r1); // Move quotient to result register
|
| +
|
| Label done;
|
| Register scratch = scratch0();
|
| -// If both operands have the same sign then we are done.
|
| -#if V8_TARGET_ARCH_PPC64
|
| - __ xor_(scratch, dividend, divisor);
|
| - __ cmpwi(scratch, Operand::Zero());
|
| - __ bge(&done);
|
| -#else
|
| - __ xor_(scratch, dividend, divisor, SetRC);
|
| - __ bge(&done, cr0);
|
| -#endif
|
| + // If both operands have the same sign then we are done.
|
| + __ Xor(scratch, dividend, divisor);
|
| + __ ltr(scratch, scratch); // use 32 bit version LoadAndTestRR even in 64 bit
|
| + __ bge(&done, Label::kNear);
|
|
|
| // If there is no remainder then we are done.
|
| - __ mullw(scratch, divisor, result);
|
| - __ cmpw(dividend, scratch);
|
| - __ beq(&done);
|
| + __ lr(scratch, result);
|
| + __ msr(scratch, divisor);
|
| + __ Cmp32(dividend, scratch);
|
| + __ beq(&done, Label::kNear);
|
|
|
| // We performed a truncating division. Correct the result.
|
| - __ subi(result, result, Operand(1));
|
| + __ SubP(result, result, Operand(1));
|
| __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
|
| DoubleRegister addend = ToDoubleRegister(instr->addend());
|
| DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
|
| DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
|
| DoubleRegister result = ToDoubleRegister(instr->result());
|
|
|
| - __ fmadd(result, multiplier, multiplicand, addend);
|
| + // Unable to use madbr as the intermediate value is not rounded
|
| + // to proper precision
|
| + __ ldr(result, multiplier);
|
| + __ mdbr(result, multiplicand);
|
| + __ adbr(result, addend);
|
| }
|
|
|
| -
|
| void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
|
| DoubleRegister minuend = ToDoubleRegister(instr->minuend());
|
| DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
|
| DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
|
| DoubleRegister result = ToDoubleRegister(instr->result());
|
|
|
| - __ fmsub(result, multiplier, multiplicand, minuend);
|
| + // Unable to use msdbr as the intermediate value is not rounded
|
| + // to proper precision
|
| + __ ldr(result, multiplier);
|
| + __ mdbr(result, multiplicand);
|
| + __ sdbr(result, minuend);
|
| }
|
|
|
| -
|
| void LCodeGen::DoMulI(LMulI* instr) {
|
| Register scratch = scratch0();
|
| Register result = ToRegister(instr->result());
|
| @@ -1378,47 +1327,45 @@ void LCodeGen::DoMulI(LMulI* instr) {
|
| if (bailout_on_minus_zero && (constant < 0)) {
|
| // The case of a null constant will be handled separately.
|
| // If constant is negative and left is null, the result should be -0.
|
| - __ cmpi(left, Operand::Zero());
|
| + __ CmpP(left, Operand::Zero());
|
| DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| }
|
|
|
| switch (constant) {
|
| case -1:
|
| if (can_overflow) {
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| if (instr->hydrogen()->representation().IsSmi()) {
|
| #endif
|
| - __ li(r0, Operand::Zero()); // clear xer
|
| - __ mtxer(r0);
|
| - __ neg(result, left, SetOE, SetRC);
|
| - DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
|
| -#if V8_TARGET_ARCH_PPC64
|
| + __ LoadComplementRR(result, left);
|
| + DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
|
| +#if V8_TARGET_ARCH_S390X
|
| } else {
|
| - __ neg(result, left);
|
| + __ LoadComplementRR(result, left);
|
| __ TestIfInt32(result, r0);
|
| DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
|
| }
|
| #endif
|
| } else {
|
| - __ neg(result, left);
|
| + __ LoadComplementRR(result, left);
|
| }
|
| break;
|
| case 0:
|
| if (bailout_on_minus_zero) {
|
| // If left is strictly negative and the constant is null, the
|
| // result is -0. Deoptimize if required, otherwise return 0.
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| if (instr->hydrogen()->representation().IsSmi()) {
|
| #endif
|
| - __ cmpi(left, Operand::Zero());
|
| -#if V8_TARGET_ARCH_PPC64
|
| + __ Cmp32(left, Operand::Zero());
|
| +#if V8_TARGET_ARCH_S390X
|
| } else {
|
| - __ cmpwi(left, Operand::Zero());
|
| + __ Cmp32(left, Operand::Zero());
|
| }
|
| #endif
|
| DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| }
|
| - __ li(result, Operand::Zero());
|
| + __ LoadImmP(result, Operand::Zero());
|
| break;
|
| case 1:
|
| __ Move(result, left);
|
| @@ -1432,25 +1379,25 @@ void LCodeGen::DoMulI(LMulI* instr) {
|
|
|
| if (base::bits::IsPowerOfTwo32(constant_abs)) {
|
| int32_t shift = WhichPowerOf2(constant_abs);
|
| - __ ShiftLeftImm(result, left, Operand(shift));
|
| + __ ShiftLeftP(result, left, Operand(shift));
|
| // Correct the sign of the result if the constant is negative.
|
| - if (constant < 0) __ neg(result, result);
|
| + if (constant < 0) __ LoadComplementRR(result, result);
|
| } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
|
| int32_t shift = WhichPowerOf2(constant_abs - 1);
|
| - __ ShiftLeftImm(scratch, left, Operand(shift));
|
| - __ add(result, scratch, left);
|
| + __ ShiftLeftP(scratch, left, Operand(shift));
|
| + __ AddP(result, scratch, left);
|
| // Correct the sign of the result if the constant is negative.
|
| - if (constant < 0) __ neg(result, result);
|
| + if (constant < 0) __ LoadComplementRR(result, result);
|
| } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
|
| int32_t shift = WhichPowerOf2(constant_abs + 1);
|
| - __ ShiftLeftImm(scratch, left, Operand(shift));
|
| - __ sub(result, scratch, left);
|
| + __ ShiftLeftP(scratch, left, Operand(shift));
|
| + __ SubP(result, scratch, left);
|
| // Correct the sign of the result if the constant is negative.
|
| - if (constant < 0) __ neg(result, result);
|
| + if (constant < 0) __ LoadComplementRR(result, result);
|
| } else {
|
| // Generate standard code.
|
| - __ mov(ip, Operand(constant));
|
| - __ Mul(result, left, ip);
|
| + __ Move(result, left);
|
| + __ MulP(result, Operand(constant));
|
| }
|
| }
|
|
|
| @@ -1459,14 +1406,15 @@ void LCodeGen::DoMulI(LMulI* instr) {
|
| Register right = ToRegister(right_op);
|
|
|
| if (can_overflow) {
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| // result = left * right.
|
| if (instr->hydrogen()->representation().IsSmi()) {
|
| __ SmiUntag(result, left);
|
| __ SmiUntag(scratch, right);
|
| - __ Mul(result, result, scratch);
|
| + __ msgr(result, scratch);
|
| } else {
|
| - __ Mul(result, left, right);
|
| + __ LoadRR(result, left);
|
| + __ msgr(result, right);
|
| }
|
| __ TestIfInt32(result, r0);
|
| DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
|
| @@ -1474,16 +1422,18 @@ void LCodeGen::DoMulI(LMulI* instr) {
|
| __ SmiTag(result);
|
| }
|
| #else
|
| - // scratch:result = left * right.
|
| + // r0:scratch = scratch * right
|
| if (instr->hydrogen()->representation().IsSmi()) {
|
| - __ SmiUntag(result, left);
|
| - __ mulhw(scratch, result, right);
|
| - __ mullw(result, result, right);
|
| + __ SmiUntag(scratch, left);
|
| + __ mr_z(r0, right);
|
| + __ LoadRR(result, scratch);
|
| } else {
|
| - __ mulhw(scratch, left, right);
|
| - __ mullw(result, left, right);
|
| + // r0:scratch = scratch * right
|
| + __ LoadRR(scratch, left);
|
| + __ mr_z(r0, right);
|
| + __ LoadRR(result, scratch);
|
| }
|
| - __ TestIfInt32(scratch, result, r0);
|
| + __ TestIfInt32(r0, result, scratch);
|
| DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
|
| #endif
|
| } else {
|
| @@ -1497,81 +1447,86 @@ void LCodeGen::DoMulI(LMulI* instr) {
|
|
|
| if (bailout_on_minus_zero) {
|
| Label done;
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| if (instr->hydrogen()->representation().IsSmi()) {
|
| #endif
|
| - __ xor_(r0, left, right, SetRC);
|
| - __ bge(&done, cr0);
|
| -#if V8_TARGET_ARCH_PPC64
|
| + __ XorP(r0, left, right);
|
| + __ LoadAndTestRR(r0, r0);
|
| + __ bge(&done, Label::kNear);
|
| +#if V8_TARGET_ARCH_S390X
|
| } else {
|
| - __ xor_(r0, left, right);
|
| - __ cmpwi(r0, Operand::Zero());
|
| - __ bge(&done);
|
| + __ XorP(r0, left, right);
|
| + __ Cmp32(r0, Operand::Zero());
|
| + __ bge(&done, Label::kNear);
|
| }
|
| #endif
|
| // Bail out if the result is minus zero.
|
| - __ cmpi(result, Operand::Zero());
|
| + __ CmpP(result, Operand::Zero());
|
| DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| __ bind(&done);
|
| }
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoBitI(LBitI* instr) {
|
| LOperand* left_op = instr->left();
|
| LOperand* right_op = instr->right();
|
| DCHECK(left_op->IsRegister());
|
| Register left = ToRegister(left_op);
|
| Register result = ToRegister(instr->result());
|
| - Operand right(no_reg);
|
|
|
| - if (right_op->IsStackSlot()) {
|
| - right = Operand(EmitLoadRegister(right_op, ip));
|
| - } else {
|
| - DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
|
| - right = ToOperand(right_op);
|
| -
|
| - if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
|
| - switch (instr->op()) {
|
| - case Token::BIT_AND:
|
| - __ andi(result, left, right);
|
| - break;
|
| - case Token::BIT_OR:
|
| - __ ori(result, left, right);
|
| - break;
|
| - case Token::BIT_XOR:
|
| - __ xori(result, left, right);
|
| - break;
|
| - default:
|
| - UNREACHABLE();
|
| - break;
|
| - }
|
| - return;
|
| + if (right_op->IsConstantOperand()) {
|
| + switch (instr->op()) {
|
| + case Token::BIT_AND:
|
| + __ AndP(result, left, Operand(ToOperand(right_op)));
|
| + break;
|
| + case Token::BIT_OR:
|
| + __ OrP(result, left, Operand(ToOperand(right_op)));
|
| + break;
|
| + case Token::BIT_XOR:
|
| + __ XorP(result, left, Operand(ToOperand(right_op)));
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + break;
|
| }
|
| - }
|
| + } else if (right_op->IsStackSlot()) {
|
| + // Reg-Mem instruction clobbers, so copy src to dst first.
|
| + if (!left.is(result)) __ LoadRR(result, left);
|
| + switch (instr->op()) {
|
| + case Token::BIT_AND:
|
| + __ AndP(result, ToMemOperand(right_op));
|
| + break;
|
| + case Token::BIT_OR:
|
| + __ OrP(result, ToMemOperand(right_op));
|
| + break;
|
| + case Token::BIT_XOR:
|
| + __ XorP(result, ToMemOperand(right_op));
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + break;
|
| + }
|
| + } else {
|
| + DCHECK(right_op->IsRegister());
|
|
|
| - switch (instr->op()) {
|
| - case Token::BIT_AND:
|
| - __ And(result, left, right);
|
| - break;
|
| - case Token::BIT_OR:
|
| - __ Or(result, left, right);
|
| - break;
|
| - case Token::BIT_XOR:
|
| - if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
|
| - __ notx(result, left);
|
| - } else {
|
| - __ Xor(result, left, right);
|
| - }
|
| - break;
|
| - default:
|
| - UNREACHABLE();
|
| - break;
|
| + switch (instr->op()) {
|
| + case Token::BIT_AND:
|
| + __ AndP(result, left, ToRegister(right_op));
|
| + break;
|
| + case Token::BIT_OR:
|
| + __ OrP(result, left, ToRegister(right_op));
|
| + break;
|
| + case Token::BIT_XOR:
|
| + __ XorP(result, left, ToRegister(right_op));
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + break;
|
| + }
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoShiftI(LShiftI* instr) {
|
| // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
|
| // result may alias either of them.
|
| @@ -1581,31 +1536,40 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
|
| Register scratch = scratch0();
|
| if (right_op->IsRegister()) {
|
| // Mask the right_op operand.
|
| - __ andi(scratch, ToRegister(right_op), Operand(0x1F));
|
| + __ AndP(scratch, ToRegister(right_op), Operand(0x1F));
|
| switch (instr->op()) {
|
| case Token::ROR:
|
| // rotate_right(a, b) == rotate_left(a, 32 - b)
|
| - __ subfic(scratch, scratch, Operand(32));
|
| - __ rotlw(result, left, scratch);
|
| + __ LoadComplementRR(scratch, scratch);
|
| + __ rll(result, left, scratch, Operand(32));
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ lgfr(result, result);
|
| +#endif
|
| break;
|
| case Token::SAR:
|
| - __ sraw(result, left, scratch);
|
| + __ ShiftRightArith(result, left, scratch);
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ lgfr(result, result);
|
| +#endif
|
| break;
|
| case Token::SHR:
|
| + __ ShiftRight(result, left, scratch);
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ lgfr(result, result);
|
| +#endif
|
| if (instr->can_deopt()) {
|
| - __ srw(result, left, scratch, SetRC);
|
| -#if V8_TARGET_ARCH_PPC64
|
| - __ extsw(result, result, SetRC);
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ ltgfr(result, result /*, SetRC*/);
|
| +#else
|
| + __ ltr(result, result); // Set the <,==,> condition
|
| #endif
|
| DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
|
| - } else {
|
| - __ srw(result, left, scratch);
|
| }
|
| break;
|
| case Token::SHL:
|
| - __ slw(result, left, scratch);
|
| -#if V8_TARGET_ARCH_PPC64
|
| - __ extsw(result, result);
|
| + __ ShiftLeft(result, left, scratch);
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ lgfr(result, result);
|
| #endif
|
| break;
|
| default:
|
| @@ -1619,24 +1583,33 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
|
| switch (instr->op()) {
|
| case Token::ROR:
|
| if (shift_count != 0) {
|
| - __ rotrwi(result, left, shift_count);
|
| + __ rll(result, left, Operand(32 - shift_count));
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ lgfr(result, result);
|
| +#endif
|
| } else {
|
| __ Move(result, left);
|
| }
|
| break;
|
| case Token::SAR:
|
| if (shift_count != 0) {
|
| - __ srawi(result, left, shift_count);
|
| + __ ShiftRightArith(result, left, Operand(shift_count));
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ lgfr(result, result);
|
| +#endif
|
| } else {
|
| __ Move(result, left);
|
| }
|
| break;
|
| case Token::SHR:
|
| if (shift_count != 0) {
|
| - __ srwi(result, left, Operand(shift_count));
|
| + __ ShiftRight(result, left, Operand(shift_count));
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ lgfr(result, result);
|
| +#endif
|
| } else {
|
| if (instr->can_deopt()) {
|
| - __ cmpwi(left, Operand::Zero());
|
| + __ Cmp32(left, Operand::Zero());
|
| DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
|
| }
|
| __ Move(result, left);
|
| @@ -1644,14 +1617,17 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
|
| break;
|
| case Token::SHL:
|
| if (shift_count != 0) {
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| if (instr->hydrogen_value()->representation().IsSmi()) {
|
| - __ sldi(result, left, Operand(shift_count));
|
| + __ ShiftLeftP(result, left, Operand(shift_count));
|
| #else
|
| if (instr->hydrogen_value()->representation().IsSmi() &&
|
| instr->can_deopt()) {
|
| if (shift_count != 1) {
|
| - __ slwi(result, left, Operand(shift_count - 1));
|
| + __ ShiftLeft(result, left, Operand(shift_count - 1));
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ lgfr(result, result);
|
| +#endif
|
| __ SmiTagCheckOverflow(result, result, scratch);
|
| } else {
|
| __ SmiTagCheckOverflow(result, left, scratch);
|
| @@ -1659,9 +1635,9 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
|
| DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
|
| #endif
|
| } else {
|
| - __ slwi(result, left, Operand(shift_count));
|
| -#if V8_TARGET_ARCH_PPC64
|
| - __ extsw(result, result);
|
| + __ ShiftLeft(result, left, Operand(shift_count));
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ lgfr(result, result);
|
| #endif
|
| }
|
| } else {
|
| @@ -1675,41 +1651,66 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoSubI(LSubI* instr) {
|
| + LOperand* left = instr->left();
|
| LOperand* right = instr->right();
|
| - Register left = ToRegister(instr->left());
|
| - Register result = ToRegister(instr->result());
|
| - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
|
| -#if V8_TARGET_ARCH_PPC64
|
| - const bool isInteger = !instr->hydrogen()->representation().IsSmi();
|
| + LOperand* result = instr->result();
|
| +
|
| + bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
|
| + instr->hydrogen()->representation().IsExternal());
|
| +
|
| +#if V8_TARGET_ARCH_S390X
|
| + // The overflow detection needs to be tested on the lower 32-bits.
|
| + // As a result, on 64-bit, we need to force 32-bit arithmetic operations
|
| + // to set the CC overflow bit properly. The result is then sign-extended.
|
| + bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
|
| #else
|
| - const bool isInteger = false;
|
| -#endif
|
| - if (!can_overflow || isInteger) {
|
| - if (right->IsConstantOperand()) {
|
| - __ Add(result, left, -(ToOperand(right).immediate()), r0);
|
| - } else {
|
| - __ sub(result, left, EmitLoadRegister(right, ip));
|
| - }
|
| -#if V8_TARGET_ARCH_PPC64
|
| - if (can_overflow) {
|
| - __ TestIfInt32(result, r0);
|
| - DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
|
| - }
|
| + bool checkOverflow = true;
|
| #endif
|
| +
|
| + if (right->IsConstantOperand()) {
|
| + if (!isInteger || !checkOverflow)
|
| + __ SubP(ToRegister(result), ToRegister(left), ToOperand(right));
|
| + else
|
| + __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
|
| + } else if (right->IsRegister()) {
|
| + if (!isInteger)
|
| + __ SubP(ToRegister(result), ToRegister(left), ToRegister(right));
|
| + else if (!checkOverflow)
|
| + __ SubP_ExtendSrc(ToRegister(result), ToRegister(left),
|
| + ToRegister(right));
|
| + else
|
| + __ Sub32(ToRegister(result), ToRegister(left), ToRegister(right));
|
| } else {
|
| - if (right->IsConstantOperand()) {
|
| - __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
|
| - scratch0(), r0);
|
| + if (!left->Equals(instr->result()))
|
| + __ LoadRR(ToRegister(result), ToRegister(left));
|
| +
|
| + MemOperand mem = ToMemOperand(right);
|
| + if (!isInteger) {
|
| + __ SubP(ToRegister(result), mem);
|
| } else {
|
| - __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
|
| - scratch0(), r0);
|
| +#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
|
| + // We want to read the 32-bits directly from memory
|
| + MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
|
| +#else
|
| + MemOperand Upper32Mem = ToMemOperand(right);
|
| +#endif
|
| + if (checkOverflow) {
|
| + __ Sub32(ToRegister(result), Upper32Mem);
|
| + } else {
|
| + __ SubP_ExtendSrc(ToRegister(result), Upper32Mem);
|
| + }
|
| }
|
| - DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
|
| }
|
| -}
|
|
|
| +#if V8_TARGET_ARCH_S390X
|
| + if (isInteger && checkOverflow)
|
| + __ lgfr(ToRegister(result), ToRegister(result));
|
| +#endif
|
| + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
|
| + DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
|
| + }
|
| +}
|
|
|
| void LCodeGen::DoRSubI(LRSubI* instr) {
|
| LOperand* left = instr->left();
|
| @@ -1719,60 +1720,50 @@ void LCodeGen::DoRSubI(LRSubI* instr) {
|
| DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
|
| right->IsConstantOperand());
|
|
|
| +#if V8_TARGET_ARCH_S390X
|
| + // The overflow detection needs to be tested on the lower 32-bits.
|
| + // As a result, on 64-bit, we need to force 32-bit arithmetic operations
|
| + // to set the CC overflow bit properly. The result is then sign-extended.
|
| + bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
|
| +#else
|
| + bool checkOverflow = true;
|
| +#endif
|
| +
|
| Operand right_operand = ToOperand(right);
|
| - if (is_int16(right_operand.immediate())) {
|
| - __ subfic(ToRegister(result), ToRegister(left), right_operand);
|
| + __ mov(r0, right_operand);
|
| +
|
| + if (!checkOverflow) {
|
| + __ SubP_ExtendSrc(ToRegister(result), r0, ToRegister(left));
|
| } else {
|
| - __ mov(r0, right_operand);
|
| - __ sub(ToRegister(result), r0, ToRegister(left));
|
| + __ Sub32(ToRegister(result), r0, ToRegister(left));
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoConstantI(LConstantI* instr) {
|
| __ mov(ToRegister(instr->result()), Operand(instr->value()));
|
| }
|
|
|
| -
|
| void LCodeGen::DoConstantS(LConstantS* instr) {
|
| __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
|
| }
|
|
|
| -
|
| void LCodeGen::DoConstantD(LConstantD* instr) {
|
| DCHECK(instr->result()->IsDoubleRegister());
|
| DoubleRegister result = ToDoubleRegister(instr->result());
|
| -#if V8_HOST_ARCH_IA32
|
| - // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
|
| - // builds.
|
| uint64_t bits = instr->bits();
|
| - if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
|
| - V8_UINT64_C(0x7FF0000000000000)) {
|
| - uint32_t lo = static_cast<uint32_t>(bits);
|
| - uint32_t hi = static_cast<uint32_t>(bits >> 32);
|
| - __ mov(ip, Operand(lo));
|
| - __ mov(scratch0(), Operand(hi));
|
| - __ MovInt64ToDouble(result, scratch0(), ip);
|
| - return;
|
| - }
|
| -#endif
|
| - double v = instr->value();
|
| - __ LoadDoubleLiteral(result, v, scratch0());
|
| + __ LoadDoubleLiteral(result, bits, scratch0());
|
| }
|
|
|
| -
|
| void LCodeGen::DoConstantE(LConstantE* instr) {
|
| __ mov(ToRegister(instr->result()), Operand(instr->value()));
|
| }
|
|
|
| -
|
| void LCodeGen::DoConstantT(LConstantT* instr) {
|
| Handle<Object> object = instr->value(isolate());
|
| AllowDeferredHandleDereference smi_check;
|
| __ Move(ToRegister(instr->result()), object);
|
| }
|
|
|
| -
|
| MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
|
| String::Encoding encoding) {
|
| if (index->IsConstantOperand()) {
|
| @@ -1786,17 +1777,17 @@ MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
|
| Register scratch = scratch0();
|
| DCHECK(!scratch.is(string));
|
| DCHECK(!scratch.is(ToRegister(index)));
|
| + // TODO(joransiu) : Fold Add into FieldMemOperand
|
| if (encoding == String::ONE_BYTE_ENCODING) {
|
| - __ add(scratch, string, ToRegister(index));
|
| + __ AddP(scratch, string, ToRegister(index));
|
| } else {
|
| STATIC_ASSERT(kUC16Size == 2);
|
| - __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
|
| - __ add(scratch, string, scratch);
|
| + __ ShiftLeftP(scratch, ToRegister(index), Operand(1));
|
| + __ AddP(scratch, string, scratch);
|
| }
|
| return FieldMemOperand(scratch, SeqString::kHeaderSize);
|
| }
|
|
|
| -
|
| void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
|
| String::Encoding encoding = instr->hydrogen()->encoding();
|
| Register string = ToRegister(instr->string());
|
| @@ -1805,13 +1796,13 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
|
| if (FLAG_debug_code) {
|
| Register scratch = scratch0();
|
| __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
|
| - __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
|
| + __ llc(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
|
|
|
| - __ andi(scratch, scratch,
|
| + __ AndP(scratch, scratch,
|
| Operand(kStringRepresentationMask | kStringEncodingMask));
|
| static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
|
| static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
|
| - __ cmpi(scratch,
|
| + __ CmpP(scratch,
|
| Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
|
| : two_byte_seq_type));
|
| __ Check(eq, kUnexpectedStringType);
|
| @@ -1819,13 +1810,12 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
|
|
|
| MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
|
| if (encoding == String::ONE_BYTE_ENCODING) {
|
| - __ lbz(result, operand);
|
| + __ llc(result, operand);
|
| } else {
|
| - __ lhz(result, operand);
|
| + __ llh(result, operand);
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
|
| String::Encoding encoding = instr->hydrogen()->encoding();
|
| Register string = ToRegister(instr->string());
|
| @@ -1844,49 +1834,71 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
|
|
|
| MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
|
| if (encoding == String::ONE_BYTE_ENCODING) {
|
| - __ stb(value, operand);
|
| + __ stc(value, operand);
|
| } else {
|
| __ sth(value, operand);
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoAddI(LAddI* instr) {
|
| + LOperand* left = instr->left();
|
| LOperand* right = instr->right();
|
| - Register left = ToRegister(instr->left());
|
| - Register result = ToRegister(instr->result());
|
| - bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
|
| -#if V8_TARGET_ARCH_PPC64
|
| - const bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
|
| - instr->hydrogen()->representation().IsExternal());
|
| + LOperand* result = instr->result();
|
| + bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
|
| + instr->hydrogen()->representation().IsExternal());
|
| +#if V8_TARGET_ARCH_S390X
|
| + // The overflow detection needs to be tested on the lower 32-bits.
|
| + // As a result, on 64-bit, we need to force 32-bit arithmetic operations
|
| + // to set the CC overflow bit properly. The result is then sign-extended.
|
| + bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
|
| #else
|
| - const bool isInteger = false;
|
| + bool checkOverflow = true;
|
| #endif
|
|
|
| - if (!can_overflow || isInteger) {
|
| - if (right->IsConstantOperand()) {
|
| - __ Add(result, left, ToOperand(right).immediate(), r0);
|
| - } else {
|
| - __ add(result, left, EmitLoadRegister(right, ip));
|
| - }
|
| -#if V8_TARGET_ARCH_PPC64
|
| - if (can_overflow) {
|
| - __ TestIfInt32(result, r0);
|
| - DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
|
| - }
|
| -#endif
|
| + if (right->IsConstantOperand()) {
|
| + if (!isInteger || !checkOverflow)
|
| + __ AddP(ToRegister(result), ToRegister(left), ToOperand(right));
|
| + else
|
| + __ Add32(ToRegister(result), ToRegister(left), ToOperand(right));
|
| + } else if (right->IsRegister()) {
|
| + if (!isInteger)
|
| + __ AddP(ToRegister(result), ToRegister(left), ToRegister(right));
|
| + else if (!checkOverflow)
|
| + __ AddP_ExtendSrc(ToRegister(result), ToRegister(left),
|
| + ToRegister(right));
|
| + else
|
| + __ Add32(ToRegister(result), ToRegister(left), ToRegister(right));
|
| } else {
|
| - if (right->IsConstantOperand()) {
|
| - __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
|
| - scratch0(), r0);
|
| + if (!left->Equals(instr->result()))
|
| + __ LoadRR(ToRegister(result), ToRegister(left));
|
| +
|
| + MemOperand mem = ToMemOperand(right);
|
| + if (!isInteger) {
|
| + __ AddP(ToRegister(result), mem);
|
| } else {
|
| - __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
|
| - scratch0(), r0);
|
| +#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
|
| + // We want to read the 32-bits directly from memory
|
| + MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
|
| +#else
|
| + MemOperand Upper32Mem = ToMemOperand(right);
|
| +#endif
|
| + if (checkOverflow) {
|
| + __ Add32(ToRegister(result), Upper32Mem);
|
| + } else {
|
| + __ AddP_ExtendSrc(ToRegister(result), Upper32Mem);
|
| + }
|
| }
|
| - DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
|
| }
|
| -}
|
|
|
| +#if V8_TARGET_ARCH_S390X
|
| + if (isInteger && checkOverflow)
|
| + __ lgfr(ToRegister(result), ToRegister(result));
|
| +#endif
|
| + // Deoptimize on overflow
|
| + if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
|
| + DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
|
| + }
|
| +}
|
|
|
| void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
|
| LOperand* left = instr->left();
|
| @@ -1898,93 +1910,93 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
|
| Register right_reg = EmitLoadRegister(right, ip);
|
| Register result_reg = ToRegister(instr->result());
|
| Label return_left, done;
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| if (instr->hydrogen_value()->representation().IsSmi()) {
|
| #endif
|
| - __ cmp(left_reg, right_reg);
|
| -#if V8_TARGET_ARCH_PPC64
|
| + __ CmpP(left_reg, right_reg);
|
| +#if V8_TARGET_ARCH_S390X
|
| } else {
|
| - __ cmpw(left_reg, right_reg);
|
| + __ Cmp32(left_reg, right_reg);
|
| }
|
| #endif
|
| - if (CpuFeatures::IsSupported(ISELECT)) {
|
| - __ isel(cond, result_reg, left_reg, right_reg);
|
| - } else {
|
| - __ b(cond, &return_left);
|
| - __ Move(result_reg, right_reg);
|
| - __ b(&done);
|
| - __ bind(&return_left);
|
| - __ Move(result_reg, left_reg);
|
| - __ bind(&done);
|
| - }
|
| + __ b(cond, &return_left, Label::kNear);
|
| + __ Move(result_reg, right_reg);
|
| + __ b(&done, Label::kNear);
|
| + __ bind(&return_left);
|
| + __ Move(result_reg, left_reg);
|
| + __ bind(&done);
|
| } else {
|
| DCHECK(instr->hydrogen()->representation().IsDouble());
|
| DoubleRegister left_reg = ToDoubleRegister(left);
|
| DoubleRegister right_reg = ToDoubleRegister(right);
|
| DoubleRegister result_reg = ToDoubleRegister(instr->result());
|
| Label check_nan_left, check_zero, return_left, return_right, done;
|
| - __ fcmpu(left_reg, right_reg);
|
| - __ bunordered(&check_nan_left);
|
| + __ cdbr(left_reg, right_reg);
|
| + __ bunordered(&check_nan_left, Label::kNear);
|
| __ beq(&check_zero);
|
| - __ b(cond, &return_left);
|
| - __ b(&return_right);
|
| + __ b(cond, &return_left, Label::kNear);
|
| + __ b(&return_right, Label::kNear);
|
|
|
| __ bind(&check_zero);
|
| - __ fcmpu(left_reg, kDoubleRegZero);
|
| - __ bne(&return_left); // left == right != 0.
|
| + __ lzdr(kDoubleRegZero);
|
| + __ cdbr(left_reg, kDoubleRegZero);
|
| + __ bne(&return_left, Label::kNear); // left == right != 0.
|
|
|
| // At this point, both left and right are either 0 or -0.
|
| + // N.B. The following works because +0 + -0 == +0
|
| if (operation == HMathMinMax::kMathMin) {
|
| - // Min: The algorithm is: -((-L) + (-R)), which in case of L and R being
|
| - // different registers is most efficiently expressed as -((-L) - R).
|
| - __ fneg(left_reg, left_reg);
|
| + // For min we want logical-or of sign bit: -(-L + -R)
|
| + __ lcdbr(left_reg, left_reg);
|
| + __ ldr(result_reg, left_reg);
|
| if (left_reg.is(right_reg)) {
|
| - __ fadd(result_reg, left_reg, right_reg);
|
| + __ adbr(result_reg, right_reg);
|
| } else {
|
| - __ fsub(result_reg, left_reg, right_reg);
|
| + __ sdbr(result_reg, right_reg);
|
| }
|
| - __ fneg(result_reg, result_reg);
|
| + __ lcdbr(result_reg, result_reg);
|
| } else {
|
| - // Max: The following works because +0 + -0 == +0
|
| - __ fadd(result_reg, left_reg, right_reg);
|
| + // For max we want logical-and of sign bit: (L + R)
|
| + __ ldr(result_reg, left_reg);
|
| + __ adbr(result_reg, right_reg);
|
| }
|
| - __ b(&done);
|
| + __ b(&done, Label::kNear);
|
|
|
| __ bind(&check_nan_left);
|
| - __ fcmpu(left_reg, left_reg);
|
| - __ bunordered(&return_left); // left == NaN.
|
| + __ cdbr(left_reg, left_reg);
|
| + __ bunordered(&return_left, Label::kNear); // left == NaN.
|
|
|
| __ bind(&return_right);
|
| if (!right_reg.is(result_reg)) {
|
| - __ fmr(result_reg, right_reg);
|
| + __ ldr(result_reg, right_reg);
|
| }
|
| - __ b(&done);
|
| + __ b(&done, Label::kNear);
|
|
|
| __ bind(&return_left);
|
| if (!left_reg.is(result_reg)) {
|
| - __ fmr(result_reg, left_reg);
|
| + __ ldr(result_reg, left_reg);
|
| }
|
| __ bind(&done);
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
|
| DoubleRegister left = ToDoubleRegister(instr->left());
|
| DoubleRegister right = ToDoubleRegister(instr->right());
|
| DoubleRegister result = ToDoubleRegister(instr->result());
|
| + // All operations except MOD are computed in-place.
|
| + DCHECK(instr->op() == Token::MOD || left.is(result));
|
| switch (instr->op()) {
|
| case Token::ADD:
|
| - __ fadd(result, left, right);
|
| + __ adbr(result, right);
|
| break;
|
| case Token::SUB:
|
| - __ fsub(result, left, right);
|
| + __ sdbr(result, right);
|
| break;
|
| case Token::MUL:
|
| - __ fmul(result, left, right);
|
| + __ mdbr(result, right);
|
| break;
|
| case Token::DIV:
|
| - __ fdiv(result, left, right);
|
| + __ ddbr(result, right);
|
| break;
|
| case Token::MOD: {
|
| __ PrepareCallCFunction(0, 2, scratch0());
|
| @@ -2001,20 +2013,18 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| - DCHECK(ToRegister(instr->left()).is(r4));
|
| - DCHECK(ToRegister(instr->right()).is(r3));
|
| - DCHECK(ToRegister(instr->result()).is(r3));
|
| + DCHECK(ToRegister(instr->left()).is(r3));
|
| + DCHECK(ToRegister(instr->right()).is(r2));
|
| + DCHECK(ToRegister(instr->result()).is(r2));
|
|
|
| Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
|
| CallCode(code, RelocInfo::CODE_TARGET, instr);
|
| }
|
|
|
| -
|
| template <class InstrType>
|
| -void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
|
| +void LCodeGen::EmitBranch(InstrType instr, Condition cond) {
|
| int left_block = instr->TrueDestination(chunk_);
|
| int right_block = instr->FalseDestination(chunk_);
|
|
|
| @@ -2023,57 +2033,52 @@ void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
|
| if (right_block == left_block || cond == al) {
|
| EmitGoto(left_block);
|
| } else if (left_block == next_block) {
|
| - __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
|
| + __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block));
|
| } else if (right_block == next_block) {
|
| - __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
|
| + __ b(cond, chunk_->GetAssemblyLabel(left_block));
|
| } else {
|
| - __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
|
| + __ b(cond, chunk_->GetAssemblyLabel(left_block));
|
| __ b(chunk_->GetAssemblyLabel(right_block));
|
| }
|
| }
|
|
|
| -
|
| template <class InstrType>
|
| -void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) {
|
| +void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond) {
|
| int true_block = instr->TrueDestination(chunk_);
|
| - __ b(cond, chunk_->GetAssemblyLabel(true_block), cr);
|
| + __ b(cond, chunk_->GetAssemblyLabel(true_block));
|
| }
|
|
|
| -
|
| template <class InstrType>
|
| -void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
|
| +void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond) {
|
| int false_block = instr->FalseDestination(chunk_);
|
| - __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
|
| + __ b(cond, chunk_->GetAssemblyLabel(false_block));
|
| }
|
|
|
| -
|
| void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
|
|
|
| -
|
| void LCodeGen::DoBranch(LBranch* instr) {
|
| Representation r = instr->hydrogen()->value()->representation();
|
| DoubleRegister dbl_scratch = double_scratch0();
|
| - const uint crZOrNaNBits = (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
|
| - 1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
|
|
|
| if (r.IsInteger32()) {
|
| DCHECK(!info()->IsStub());
|
| Register reg = ToRegister(instr->value());
|
| - __ cmpwi(reg, Operand::Zero());
|
| + __ Cmp32(reg, Operand::Zero());
|
| EmitBranch(instr, ne);
|
| } else if (r.IsSmi()) {
|
| DCHECK(!info()->IsStub());
|
| Register reg = ToRegister(instr->value());
|
| - __ cmpi(reg, Operand::Zero());
|
| + __ CmpP(reg, Operand::Zero());
|
| EmitBranch(instr, ne);
|
| } else if (r.IsDouble()) {
|
| DCHECK(!info()->IsStub());
|
| DoubleRegister reg = ToDoubleRegister(instr->value());
|
| + __ lzdr(kDoubleRegZero);
|
| + __ cdbr(reg, kDoubleRegZero);
|
| // Test the double value. Zero and NaN are false.
|
| - __ fcmpu(reg, kDoubleRegZero, cr7);
|
| - __ mfcr(r0);
|
| - __ andi(r0, r0, Operand(crZOrNaNBits));
|
| - EmitBranch(instr, eq, cr0);
|
| + Condition lt_gt = static_cast<Condition>(lt | gt);
|
| +
|
| + EmitBranch(instr, lt_gt);
|
| } else {
|
| DCHECK(r.IsTagged());
|
| Register reg = ToRegister(instr->value());
|
| @@ -2084,56 +2089,55 @@ void LCodeGen::DoBranch(LBranch* instr) {
|
| EmitBranch(instr, eq);
|
| } else if (type.IsSmi()) {
|
| DCHECK(!info()->IsStub());
|
| - __ cmpi(reg, Operand::Zero());
|
| + __ CmpP(reg, Operand::Zero());
|
| EmitBranch(instr, ne);
|
| } else if (type.IsJSArray()) {
|
| DCHECK(!info()->IsStub());
|
| EmitBranch(instr, al);
|
| } else if (type.IsHeapNumber()) {
|
| DCHECK(!info()->IsStub());
|
| - __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
|
| + __ ld(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
|
| // Test the double value. Zero and NaN are false.
|
| - __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
|
| - __ mfcr(r0);
|
| - __ andi(r0, r0, Operand(crZOrNaNBits));
|
| - EmitBranch(instr, eq, cr0);
|
| + __ lzdr(kDoubleRegZero);
|
| + __ cdbr(dbl_scratch, kDoubleRegZero);
|
| + Condition lt_gt = static_cast<Condition>(lt | gt);
|
| + EmitBranch(instr, lt_gt);
|
| } else if (type.IsString()) {
|
| DCHECK(!info()->IsStub());
|
| __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
|
| - __ cmpi(ip, Operand::Zero());
|
| + __ CmpP(ip, Operand::Zero());
|
| EmitBranch(instr, ne);
|
| } else {
|
| - ToBooleanICStub::Types expected =
|
| - instr->hydrogen()->expected_input_types();
|
| + ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
|
| // Avoid deopts in the case where we've never executed this path before.
|
| - if (expected.IsEmpty()) expected = ToBooleanICStub::Types::Generic();
|
| + if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
|
|
|
| - if (expected.Contains(ToBooleanICStub::UNDEFINED)) {
|
| + if (expected.Contains(ToBooleanStub::UNDEFINED)) {
|
| // undefined -> false.
|
| __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
|
| __ beq(instr->FalseLabel(chunk_));
|
| }
|
| - if (expected.Contains(ToBooleanICStub::BOOLEAN)) {
|
| + if (expected.Contains(ToBooleanStub::BOOLEAN)) {
|
| // Boolean -> its value.
|
| __ CompareRoot(reg, Heap::kTrueValueRootIndex);
|
| __ beq(instr->TrueLabel(chunk_));
|
| __ CompareRoot(reg, Heap::kFalseValueRootIndex);
|
| __ beq(instr->FalseLabel(chunk_));
|
| }
|
| - if (expected.Contains(ToBooleanICStub::NULL_TYPE)) {
|
| + if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
|
| // 'null' -> false.
|
| __ CompareRoot(reg, Heap::kNullValueRootIndex);
|
| __ beq(instr->FalseLabel(chunk_));
|
| }
|
|
|
| - if (expected.Contains(ToBooleanICStub::SMI)) {
|
| + if (expected.Contains(ToBooleanStub::SMI)) {
|
| // Smis: 0 -> false, all other -> true.
|
| - __ cmpi(reg, Operand::Zero());
|
| + __ CmpP(reg, Operand::Zero());
|
| __ beq(instr->FalseLabel(chunk_));
|
| __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
|
| } else if (expected.NeedsMap()) {
|
| // If we need a map later and have a Smi -> deopt.
|
| - __ TestIfSmi(reg, r0);
|
| + __ TestIfSmi(reg);
|
| DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
|
| }
|
|
|
| @@ -2143,54 +2147,54 @@ void LCodeGen::DoBranch(LBranch* instr) {
|
|
|
| if (expected.CanBeUndetectable()) {
|
| // Undetectable -> false.
|
| - __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
|
| - __ TestBit(ip, Map::kIsUndetectable, r0);
|
| - __ bne(instr->FalseLabel(chunk_), cr0);
|
| + __ tm(FieldMemOperand(map, Map::kBitFieldOffset),
|
| + Operand(1 << Map::kIsUndetectable));
|
| + __ bne(instr->FalseLabel(chunk_));
|
| }
|
| }
|
|
|
| - if (expected.Contains(ToBooleanICStub::SPEC_OBJECT)) {
|
| + if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
|
| // spec object -> true.
|
| __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
|
| __ bge(instr->TrueLabel(chunk_));
|
| }
|
|
|
| - if (expected.Contains(ToBooleanICStub::STRING)) {
|
| + if (expected.Contains(ToBooleanStub::STRING)) {
|
| // String value -> false iff empty.
|
| Label not_string;
|
| __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
|
| - __ bge(&not_string);
|
| + __ bge(&not_string, Label::kNear);
|
| __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
|
| - __ cmpi(ip, Operand::Zero());
|
| + __ CmpP(ip, Operand::Zero());
|
| __ bne(instr->TrueLabel(chunk_));
|
| __ b(instr->FalseLabel(chunk_));
|
| __ bind(&not_string);
|
| }
|
|
|
| - if (expected.Contains(ToBooleanICStub::SYMBOL)) {
|
| + if (expected.Contains(ToBooleanStub::SYMBOL)) {
|
| // Symbol value -> true.
|
| __ CompareInstanceType(map, ip, SYMBOL_TYPE);
|
| __ beq(instr->TrueLabel(chunk_));
|
| }
|
|
|
| - if (expected.Contains(ToBooleanICStub::SIMD_VALUE)) {
|
| + if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
|
| // SIMD value -> true.
|
| Label not_simd;
|
| __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
|
| __ beq(instr->TrueLabel(chunk_));
|
| }
|
|
|
| - if (expected.Contains(ToBooleanICStub::HEAP_NUMBER)) {
|
| + if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
|
| // heap number -> false iff +0, -0, or NaN.
|
| Label not_heap_number;
|
| __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
|
| - __ bne(&not_heap_number);
|
| - __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
|
| - // Test the double value. Zero and NaN are false.
|
| - __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
|
| - __ mfcr(r0);
|
| - __ andi(r0, r0, Operand(crZOrNaNBits));
|
| - __ bne(instr->FalseLabel(chunk_), cr0);
|
| + __ bne(&not_heap_number, Label::kNear);
|
| + __ LoadDouble(dbl_scratch,
|
| + FieldMemOperand(reg, HeapNumber::kValueOffset));
|
| + __ lzdr(kDoubleRegZero);
|
| + __ cdbr(dbl_scratch, kDoubleRegZero);
|
| + __ bunordered(instr->FalseLabel(chunk_)); // NaN -> false.
|
| + __ beq(instr->FalseLabel(chunk_)); // +0, -0 -> false.
|
| __ b(instr->TrueLabel(chunk_));
|
| __ bind(&not_heap_number);
|
| }
|
| @@ -2204,17 +2208,14 @@ void LCodeGen::DoBranch(LBranch* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::EmitGoto(int block) {
|
| if (!IsNextEmittedBlock(block)) {
|
| __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
|
|
|
| -
|
| Condition LCodeGen::TokenToCondition(Token::Value op) {
|
| Condition cond = kNoCondition;
|
| switch (op) {
|
| @@ -2246,7 +2247,6 @@ Condition LCodeGen::TokenToCondition(Token::Value op) {
|
| return cond;
|
| }
|
|
|
| -
|
| void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
|
| LOperand* left = instr->left();
|
| LOperand* right = instr->right();
|
| @@ -2267,7 +2267,7 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
|
| if (instr->is_double()) {
|
| // Compare left and right operands as doubles and load the
|
| // resulting flags into the normal status register.
|
| - __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
|
| + __ cdbr(ToDoubleRegister(left), ToDoubleRegister(right));
|
| // If a NaN is involved, i.e. the result is unordered,
|
| // jump to false block label.
|
| __ bunordered(instr->FalseLabel(chunk_));
|
| @@ -2276,45 +2276,45 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
|
| int32_t value = ToInteger32(LConstantOperand::cast(right));
|
| if (instr->hydrogen_value()->representation().IsSmi()) {
|
| if (is_unsigned) {
|
| - __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
|
| + __ CmpLogicalSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
|
| } else {
|
| __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
|
| }
|
| } else {
|
| if (is_unsigned) {
|
| - __ Cmplwi(ToRegister(left), Operand(value), r0);
|
| + __ CmpLogical32(ToRegister(left), ToOperand(right));
|
| } else {
|
| - __ Cmpwi(ToRegister(left), Operand(value), r0);
|
| + __ Cmp32(ToRegister(left), ToOperand(right));
|
| }
|
| }
|
| } else if (left->IsConstantOperand()) {
|
| int32_t value = ToInteger32(LConstantOperand::cast(left));
|
| if (instr->hydrogen_value()->representation().IsSmi()) {
|
| if (is_unsigned) {
|
| - __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
|
| + __ CmpLogicalSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
|
| } else {
|
| __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
|
| }
|
| } else {
|
| if (is_unsigned) {
|
| - __ Cmplwi(ToRegister(right), Operand(value), r0);
|
| + __ CmpLogical32(ToRegister(right), ToOperand(left));
|
| } else {
|
| - __ Cmpwi(ToRegister(right), Operand(value), r0);
|
| + __ Cmp32(ToRegister(right), ToOperand(left));
|
| }
|
| }
|
| // We commuted the operands, so commute the condition.
|
| cond = CommuteCondition(cond);
|
| } else if (instr->hydrogen_value()->representation().IsSmi()) {
|
| if (is_unsigned) {
|
| - __ cmpl(ToRegister(left), ToRegister(right));
|
| + __ CmpLogicalP(ToRegister(left), ToRegister(right));
|
| } else {
|
| - __ cmp(ToRegister(left), ToRegister(right));
|
| + __ CmpP(ToRegister(left), ToRegister(right));
|
| }
|
| } else {
|
| if (is_unsigned) {
|
| - __ cmplw(ToRegister(left), ToRegister(right));
|
| + __ CmpLogical32(ToRegister(left), ToRegister(right));
|
| } else {
|
| - __ cmpw(ToRegister(left), ToRegister(right));
|
| + __ Cmp32(ToRegister(left), ToRegister(right));
|
| }
|
| }
|
| }
|
| @@ -2322,36 +2322,34 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
|
| Register left = ToRegister(instr->left());
|
| Register right = ToRegister(instr->right());
|
|
|
| - __ cmp(left, right);
|
| + __ CmpP(left, right);
|
| EmitBranch(instr, eq);
|
| }
|
|
|
| -
|
| void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
|
| if (instr->hydrogen()->representation().IsTagged()) {
|
| Register input_reg = ToRegister(instr->object());
|
| - __ mov(ip, Operand(factory()->the_hole_value()));
|
| - __ cmp(input_reg, ip);
|
| + __ CmpP(input_reg, Operand(factory()->the_hole_value()));
|
| EmitBranch(instr, eq);
|
| return;
|
| }
|
|
|
| DoubleRegister input_reg = ToDoubleRegister(instr->object());
|
| - __ fcmpu(input_reg, input_reg);
|
| + __ cdbr(input_reg, input_reg);
|
| EmitFalseBranch(instr, ordered);
|
|
|
| Register scratch = scratch0();
|
| - __ MovDoubleHighToInt(scratch, input_reg);
|
| - __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
|
| + // Convert to GPR and examine the upper 32 bits
|
| + __ lgdr(scratch, input_reg);
|
| + __ srlg(scratch, scratch, Operand(32));
|
| + __ Cmp32(scratch, Operand(kHoleNanUpper32));
|
| EmitBranch(instr, eq);
|
| }
|
|
|
| -
|
| Condition LCodeGen::EmitIsString(Register input, Register temp1,
|
| Label* is_not_string,
|
| SmiCheck check_needed = INLINE_SMI_CHECK) {
|
| @@ -2363,7 +2361,6 @@ Condition LCodeGen::EmitIsString(Register input, Register temp1,
|
| return lt;
|
| }
|
|
|
| -
|
| void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
|
| Register reg = ToRegister(instr->value());
|
| Register temp1 = ToRegister(instr->temp());
|
| @@ -2377,14 +2374,12 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
|
| EmitBranch(instr, true_cond);
|
| }
|
|
|
| -
|
| void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
|
| Register input_reg = EmitLoadRegister(instr->value(), ip);
|
| - __ TestIfSmi(input_reg, r0);
|
| - EmitBranch(instr, eq, cr0);
|
| + __ TestIfSmi(input_reg);
|
| + EmitBranch(instr, eq);
|
| }
|
|
|
| -
|
| void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
|
| Register input = ToRegister(instr->value());
|
| Register temp = ToRegister(instr->temp());
|
| @@ -2393,12 +2388,11 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
|
| __ JumpIfSmi(input, instr->FalseLabel(chunk_));
|
| }
|
| __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
|
| - __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
|
| - __ TestBit(temp, Map::kIsUndetectable, r0);
|
| - EmitBranch(instr, ne, cr0);
|
| + __ tm(FieldMemOperand(temp, Map::kBitFieldOffset),
|
| + Operand(1 << Map::kIsUndetectable));
|
| + EmitBranch(instr, ne);
|
| }
|
|
|
| -
|
| static Condition ComputeCompareCondition(Token::Value op) {
|
| switch (op) {
|
| case Token::EQ_STRICT:
|
| @@ -2418,18 +2412,17 @@ static Condition ComputeCompareCondition(Token::Value op) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| - DCHECK(ToRegister(instr->left()).is(r4));
|
| - DCHECK(ToRegister(instr->right()).is(r3));
|
| + DCHECK(ToRegister(instr->left()).is(r3));
|
| + DCHECK(ToRegister(instr->right()).is(r2));
|
|
|
| - Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
|
| + Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
|
| CallCode(code, RelocInfo::CODE_TARGET, instr);
|
| - __ CompareRoot(r3, Heap::kTrueValueRootIndex);
|
| - EmitBranch(instr, eq);
|
| -}
|
| + __ CmpP(r2, Operand::Zero());
|
|
|
| + EmitBranch(instr, ComputeCompareCondition(instr->op()));
|
| +}
|
|
|
| static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
|
| InstanceType from = instr->from();
|
| @@ -2439,7 +2432,6 @@ static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
|
| return from;
|
| }
|
|
|
| -
|
| static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
|
| InstanceType from = instr->from();
|
| InstanceType to = instr->to();
|
| @@ -2450,7 +2442,6 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
|
| return eq;
|
| }
|
|
|
| -
|
| void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
|
| Register scratch = scratch0();
|
| Register input = ToRegister(instr->value());
|
| @@ -2463,30 +2454,27 @@ void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
|
| EmitBranch(instr, BranchCondition(instr->hydrogen()));
|
| }
|
|
|
| -
|
| void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
|
| Register input = ToRegister(instr->value());
|
| Register result = ToRegister(instr->result());
|
|
|
| __ AssertString(input);
|
|
|
| - __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
|
| + __ LoadlW(result, FieldMemOperand(input, String::kHashFieldOffset));
|
| __ IndexFromHash(result, result);
|
| }
|
|
|
| -
|
| void LCodeGen::DoHasCachedArrayIndexAndBranch(
|
| LHasCachedArrayIndexAndBranch* instr) {
|
| Register input = ToRegister(instr->value());
|
| Register scratch = scratch0();
|
|
|
| - __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset));
|
| + __ LoadlW(scratch, FieldMemOperand(input, String::kHashFieldOffset));
|
| __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
|
| - __ and_(r0, scratch, r0, SetRC);
|
| - EmitBranch(instr, eq, cr0);
|
| + __ AndP(r0, scratch);
|
| + EmitBranch(instr, eq);
|
| }
|
|
|
| -
|
| // Branches to a label or falls through with the answer in flags. Trashes
|
| // the temp registers, but not the input.
|
| void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
|
| @@ -2510,7 +2498,7 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
|
| __ GetMapConstructor(temp, temp, temp2, instance_type);
|
|
|
| // Objects with a non-function constructor have class 'Object'.
|
| - __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
|
| + __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE));
|
| if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
|
| __ bne(is_true);
|
| } else {
|
| @@ -2528,11 +2516,10 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
|
| // classes and it doesn't have to because you can't access it with natives
|
| // syntax. Since both sides are internalized it is sufficient to use an
|
| // identity comparison.
|
| - __ Cmpi(temp, Operand(class_name), r0);
|
| + __ CmpP(temp, Operand(class_name));
|
| // End with the answer in flags.
|
| }
|
|
|
| -
|
| void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
|
| Register input = ToRegister(instr->value());
|
| Register temp = scratch0();
|
| @@ -2545,27 +2532,24 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
|
| EmitBranch(instr, eq);
|
| }
|
|
|
| -
|
| void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
|
| Register reg = ToRegister(instr->value());
|
| Register temp = ToRegister(instr->temp());
|
|
|
| - __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
|
| - __ Cmpi(temp, Operand(instr->map()), r0);
|
| + __ mov(temp, Operand(instr->map()));
|
| + __ CmpP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
|
| EmitBranch(instr, eq);
|
| }
|
|
|
| -
|
| void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
|
| DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
|
| - DCHECK(ToRegister(instr->result()).is(r3));
|
| + DCHECK(ToRegister(instr->result()).is(r2));
|
| InstanceOfStub stub(isolate());
|
| CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| }
|
|
|
| -
|
| void LCodeGen::DoHasInPrototypeChainAndBranch(
|
| LHasInPrototypeChainAndBranch* instr) {
|
| Register const object = ToRegister(instr->object());
|
| @@ -2578,18 +2562,17 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
|
| // is not a smi, since all other non-spec objects have {null} prototypes and
|
| // will be ruled out below.
|
| if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
|
| - __ TestIfSmi(object, r0);
|
| - EmitFalseBranch(instr, eq, cr0);
|
| + __ TestIfSmi(object);
|
| + EmitFalseBranch(instr, eq);
|
| }
|
| -
|
| // Loop through the {object}s prototype chain looking for the {prototype}.
|
| __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
|
| Label loop;
|
| __ bind(&loop);
|
|
|
| // Deoptimize if the object needs to be access checked.
|
| - __ lbz(object_instance_type,
|
| - FieldMemOperand(object_map, Map::kBitFieldOffset));
|
| + __ LoadlB(object_instance_type,
|
| + FieldMemOperand(object_map, Map::kBitFieldOffset));
|
| __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
|
| DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
|
| // Deoptimize for proxies.
|
| @@ -2597,7 +2580,7 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
|
| DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
|
| __ LoadP(object_prototype,
|
| FieldMemOperand(object_map, Map::kPrototypeOffset));
|
| - __ cmp(object_prototype, prototype);
|
| + __ CmpP(object_prototype, prototype);
|
| EmitTrueBranch(instr, eq);
|
| __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
|
| EmitFalseBranch(instr, eq);
|
| @@ -2606,7 +2589,6 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
|
| __ b(&loop);
|
| }
|
|
|
| -
|
| void LCodeGen::DoCmpT(LCmpT* instr) {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| Token::Value op = instr->op();
|
| @@ -2614,36 +2596,29 @@ void LCodeGen::DoCmpT(LCmpT* instr) {
|
| Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
|
| CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| // This instruction also signals no smi code inlined
|
| - __ cmpi(r3, Operand::Zero());
|
| + __ CmpP(r2, Operand::Zero());
|
|
|
| Condition condition = ComputeCompareCondition(op);
|
| - if (CpuFeatures::IsSupported(ISELECT)) {
|
| - __ LoadRoot(r4, Heap::kTrueValueRootIndex);
|
| - __ LoadRoot(r5, Heap::kFalseValueRootIndex);
|
| - __ isel(condition, ToRegister(instr->result()), r4, r5);
|
| - } else {
|
| - Label true_value, done;
|
| + Label true_value, done;
|
|
|
| - __ b(condition, &true_value);
|
| + __ b(condition, &true_value, Label::kNear);
|
|
|
| - __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
|
| - __ b(&done);
|
| + __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
|
| + __ b(&done, Label::kNear);
|
|
|
| - __ bind(&true_value);
|
| - __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
|
| + __ bind(&true_value);
|
| + __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
|
|
|
| - __ bind(&done);
|
| - }
|
| + __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::DoReturn(LReturn* instr) {
|
| if (FLAG_trace && info()->IsOptimizing()) {
|
| // Push the return value on the stack as the parameter.
|
| - // Runtime::TraceExit returns its parameter in r3. We're leaving the code
|
| + // Runtime::TraceExit returns its parameter in r2. We're leaving the code
|
| // managed by the register allocator and tearing down the frame, it's
|
| // safe to write to the context register.
|
| - __ push(r3);
|
| + __ push(r2);
|
| __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| __ CallRuntime(Runtime::kTraceExit);
|
| }
|
| @@ -2656,7 +2631,11 @@ void LCodeGen::DoReturn(LReturn* instr) {
|
| if (NeedsEagerFrame()) {
|
| masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
|
| } else if (sp_delta != 0) {
|
| - __ addi(sp, sp, Operand(sp_delta));
|
| + // TODO(joransiu): Clean this up into Macro Assembler
|
| + if (sp_delta >= 0 && sp_delta < 4096)
|
| + __ la(sp, MemOperand(sp, sp_delta));
|
| + else
|
| + __ lay(sp, MemOperand(sp, sp_delta));
|
| }
|
| } else {
|
| DCHECK(info()->IsStub()); // Functions would need to drop one more value.
|
| @@ -2666,19 +2645,18 @@ void LCodeGen::DoReturn(LReturn* instr) {
|
| masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
|
| }
|
| __ SmiToPtrArrayOffset(r0, reg);
|
| - __ add(sp, sp, r0);
|
| + __ AddP(sp, sp, r0);
|
| }
|
|
|
| - __ blr();
|
| + __ Ret();
|
| }
|
|
|
| -
|
| template <class T>
|
| void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
|
| Register vector_register = ToRegister(instr->temp_vector());
|
| Register slot_register = LoadDescriptor::SlotRegister();
|
| DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
|
| - DCHECK(slot_register.is(r3));
|
| + DCHECK(slot_register.is(r2));
|
|
|
| AllowDeferredHandleDereference vector_structure_check;
|
| Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
|
| @@ -2689,7 +2667,6 @@ void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
|
| __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
|
| }
|
|
|
| -
|
| template <class T>
|
| void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
|
| Register vector_register = ToRegister(instr->temp_vector());
|
| @@ -2703,12 +2680,11 @@ void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
|
| __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
|
| }
|
|
|
| -
|
| void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| DCHECK(ToRegister(instr->global_object())
|
| .is(LoadDescriptor::ReceiverRegister()));
|
| - DCHECK(ToRegister(instr->result()).is(r3));
|
| + DCHECK(ToRegister(instr->result()).is(r2));
|
|
|
| __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
|
| EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
|
| @@ -2718,34 +2694,23 @@ void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
|
| CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| }
|
|
|
| -
|
| void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
|
| Register context = ToRegister(instr->context());
|
| Register result = ToRegister(instr->result());
|
| __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
|
| if (instr->hydrogen()->RequiresHoleCheck()) {
|
| - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
|
| + __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
|
| if (instr->hydrogen()->DeoptimizesOnHole()) {
|
| - __ cmp(result, ip);
|
| DeoptimizeIf(eq, instr, Deoptimizer::kHole);
|
| } else {
|
| - if (CpuFeatures::IsSupported(ISELECT)) {
|
| - Register scratch = scratch0();
|
| - __ mov(scratch, Operand(factory()->undefined_value()));
|
| - __ cmp(result, ip);
|
| - __ isel(eq, result, scratch, result);
|
| - } else {
|
| - Label skip;
|
| - __ cmp(result, ip);
|
| - __ bne(&skip);
|
| - __ mov(result, Operand(factory()->undefined_value()));
|
| - __ bind(&skip);
|
| - }
|
| + Label skip;
|
| + __ bne(&skip, Label::kNear);
|
| + __ mov(result, Operand(factory()->undefined_value()));
|
| + __ bind(&skip);
|
| }
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
|
| Register context = ToRegister(instr->context());
|
| Register value = ToRegister(instr->value());
|
| @@ -2756,8 +2721,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
|
|
|
| if (instr->hydrogen()->RequiresHoleCheck()) {
|
| __ LoadP(scratch, target);
|
| - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
|
| - __ cmp(scratch, ip);
|
| + __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
|
| if (instr->hydrogen()->DeoptimizesOnHole()) {
|
| DeoptimizeIf(eq, instr, Deoptimizer::kHole);
|
| } else {
|
| @@ -2765,7 +2729,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
|
| }
|
| }
|
|
|
| - __ StoreP(value, target, r0);
|
| + __ StoreP(value, target);
|
| if (instr->hydrogen()->NeedsWriteBarrier()) {
|
| SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
|
| ? OMIT_SMI_CHECK
|
| @@ -2778,7 +2742,6 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
|
| __ bind(&skip_assignment);
|
| }
|
|
|
| -
|
| void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
|
| HObjectAccess access = instr->hydrogen()->access();
|
| int offset = access.offset();
|
| @@ -2794,7 +2757,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
|
| if (instr->hydrogen()->representation().IsDouble()) {
|
| DCHECK(access.IsInobject());
|
| DoubleRegister result = ToDoubleRegister(instr->result());
|
| - __ lfd(result, FieldMemOperand(object, offset));
|
| + __ ld(result, FieldMemOperand(object, offset));
|
| return;
|
| }
|
|
|
| @@ -2806,7 +2769,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
|
|
|
| Representation representation = access.representation();
|
|
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| // 64-bit Smi optimization
|
| if (representation.IsSmi() &&
|
| instr->hydrogen()->representation().IsInteger32()) {
|
| @@ -2820,13 +2783,12 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
|
| r0);
|
| }
|
|
|
| -
|
| void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
|
| - DCHECK(ToRegister(instr->result()).is(r3));
|
| + DCHECK(ToRegister(instr->result()).is(r2));
|
|
|
| - // Name is always in r5.
|
| + // Name is always in r4.
|
| __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
|
| EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
|
| Handle<Code> ic = CodeFactory::LoadICInOptimizedCode(
|
| @@ -2836,7 +2798,6 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
|
| CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| }
|
|
|
| -
|
| void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
|
| Register scratch = scratch0();
|
| Register function = ToRegister(instr->function());
|
| @@ -2847,36 +2808,26 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
|
| FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
|
|
|
| // Check that the function has a prototype or an initial map.
|
| - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
|
| - __ cmp(result, ip);
|
| + __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
|
| DeoptimizeIf(eq, instr, Deoptimizer::kHole);
|
|
|
| // If the function does not have an initial map, we're done.
|
| - if (CpuFeatures::IsSupported(ISELECT)) {
|
| - // Get the prototype from the initial map (optimistic).
|
| - __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
|
| - __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
|
| - __ isel(eq, result, ip, result);
|
| - } else {
|
| - Label done;
|
| - __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
|
| - __ bne(&done);
|
| + Label done;
|
| + __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
|
| + __ bne(&done, Label::kNear);
|
|
|
| - // Get the prototype from the initial map.
|
| - __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
|
| + // Get the prototype from the initial map.
|
| + __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
|
|
|
| - // All done.
|
| - __ bind(&done);
|
| - }
|
| + // All done.
|
| + __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
|
| Register result = ToRegister(instr->result());
|
| __ LoadRoot(result, instr->index());
|
| }
|
|
|
| -
|
| void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
|
| Register arguments = ToRegister(instr->arguments());
|
| Register result = ToRegister(instr->result());
|
| @@ -2887,36 +2838,36 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
|
| if (instr->index()->IsConstantOperand()) {
|
| int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
|
| int index = (const_length - const_index) + 1;
|
| - __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
|
| + __ LoadP(result, MemOperand(arguments, index * kPointerSize));
|
| } else {
|
| Register index = ToRegister(instr->index());
|
| - __ subfic(result, index, Operand(const_length + 1));
|
| - __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
|
| - __ LoadPX(result, MemOperand(arguments, result));
|
| + __ SubP(result, index, Operand(const_length + 1));
|
| + __ LoadComplementRR(result, result);
|
| + __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
|
| + __ LoadP(result, MemOperand(arguments, result));
|
| }
|
| } else if (instr->index()->IsConstantOperand()) {
|
| Register length = ToRegister(instr->length());
|
| int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
|
| int loc = const_index - 1;
|
| if (loc != 0) {
|
| - __ subi(result, length, Operand(loc));
|
| - __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
|
| - __ LoadPX(result, MemOperand(arguments, result));
|
| + __ SubP(result, length, Operand(loc));
|
| + __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
|
| + __ LoadP(result, MemOperand(arguments, result));
|
| } else {
|
| - __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
|
| - __ LoadPX(result, MemOperand(arguments, result));
|
| + __ ShiftLeftP(result, length, Operand(kPointerSizeLog2));
|
| + __ LoadP(result, MemOperand(arguments, result));
|
| }
|
| } else {
|
| Register length = ToRegister(instr->length());
|
| Register index = ToRegister(instr->index());
|
| - __ sub(result, length, index);
|
| - __ addi(result, result, Operand(1));
|
| - __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
|
| - __ LoadPX(result, MemOperand(arguments, result));
|
| + __ SubP(result, length, index);
|
| + __ AddP(result, result, Operand(1));
|
| + __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
|
| + __ LoadP(result, MemOperand(arguments, result));
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
|
| Register external_pointer = ToRegister(instr->elements());
|
| Register key = no_reg;
|
| @@ -2934,20 +2885,33 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
|
| int element_size_shift = ElementsKindToShiftSize(elements_kind);
|
| bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
|
| int base_offset = instr->base_offset();
|
| + bool use_scratch = false;
|
|
|
| if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
|
| DoubleRegister result = ToDoubleRegister(instr->result());
|
| if (key_is_constant) {
|
| - __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
|
| - r0);
|
| + base_offset += constant_key << element_size_shift;
|
| + if (!is_int20(base_offset)) {
|
| + __ mov(scratch0(), Operand(base_offset));
|
| + base_offset = 0;
|
| + use_scratch = true;
|
| + }
|
| } else {
|
| - __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
|
| - __ add(scratch0(), external_pointer, r0);
|
| + __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi);
|
| + use_scratch = true;
|
| }
|
| if (elements_kind == FLOAT32_ELEMENTS) {
|
| - __ lfs(result, MemOperand(scratch0(), base_offset));
|
| + if (!use_scratch) {
|
| + __ ldeb(result, MemOperand(external_pointer, base_offset));
|
| + } else {
|
| + __ ldeb(result, MemOperand(scratch0(), external_pointer, base_offset));
|
| + }
|
| } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
|
| - __ lfd(result, MemOperand(scratch0(), base_offset));
|
| + if (!use_scratch) {
|
| + __ ld(result, MemOperand(external_pointer, base_offset));
|
| + } else {
|
| + __ ld(result, MemOperand(scratch0(), external_pointer, base_offset));
|
| + }
|
| }
|
| } else {
|
| Register result = ToRegister(instr->result());
|
| @@ -2956,51 +2920,25 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
|
| constant_key, element_size_shift, base_offset);
|
| switch (elements_kind) {
|
| case INT8_ELEMENTS:
|
| - if (key_is_constant) {
|
| - __ LoadByte(result, mem_operand, r0);
|
| - } else {
|
| - __ lbzx(result, mem_operand);
|
| - }
|
| - __ extsb(result, result);
|
| + __ LoadB(result, mem_operand);
|
| break;
|
| case UINT8_ELEMENTS:
|
| case UINT8_CLAMPED_ELEMENTS:
|
| - if (key_is_constant) {
|
| - __ LoadByte(result, mem_operand, r0);
|
| - } else {
|
| - __ lbzx(result, mem_operand);
|
| - }
|
| + __ LoadlB(result, mem_operand);
|
| break;
|
| case INT16_ELEMENTS:
|
| - if (key_is_constant) {
|
| - __ LoadHalfWordArith(result, mem_operand, r0);
|
| - } else {
|
| - __ lhax(result, mem_operand);
|
| - }
|
| + __ LoadHalfWordP(result, mem_operand);
|
| break;
|
| case UINT16_ELEMENTS:
|
| - if (key_is_constant) {
|
| - __ LoadHalfWord(result, mem_operand, r0);
|
| - } else {
|
| - __ lhzx(result, mem_operand);
|
| - }
|
| + __ LoadLogicalHalfWordP(result, mem_operand);
|
| break;
|
| case INT32_ELEMENTS:
|
| - if (key_is_constant) {
|
| - __ LoadWordArith(result, mem_operand, r0);
|
| - } else {
|
| - __ lwax(result, mem_operand);
|
| - }
|
| + __ LoadW(result, mem_operand, r0);
|
| break;
|
| case UINT32_ELEMENTS:
|
| - if (key_is_constant) {
|
| - __ LoadWord(result, mem_operand, r0);
|
| - } else {
|
| - __ lwzx(result, mem_operand);
|
| - }
|
| + __ LoadlW(result, mem_operand, r0);
|
| if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
|
| - __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
|
| - __ cmplw(result, r0);
|
| + __ CmpLogical32(result, Operand(0x80000000));
|
| DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
|
| }
|
| break;
|
| @@ -3024,7 +2962,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
|
| Register elements = ToRegister(instr->elements());
|
| bool key_is_constant = instr->key()->IsConstantOperand();
|
| @@ -3044,45 +2981,55 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
|
| key = ToRegister(instr->key());
|
| }
|
|
|
| - int base_offset = instr->base_offset() + constant_key * kDoubleSize;
|
| + bool use_scratch = false;
|
| + intptr_t base_offset = instr->base_offset() + constant_key * kDoubleSize;
|
| if (!key_is_constant) {
|
| - __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
|
| - __ add(scratch, elements, r0);
|
| - elements = scratch;
|
| + use_scratch = true;
|
| + __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
|
| }
|
| - if (!is_int16(base_offset)) {
|
| - __ Add(scratch, elements, base_offset, r0);
|
| +
|
| + // Memory references support up to 20-bits signed displacement in RXY form
|
| + // Include Register::kExponentOffset in check, so we are guaranteed not to
|
| + // overflow displacement later.
|
| + if (!is_int20(base_offset + Register::kExponentOffset)) {
|
| + use_scratch = true;
|
| + if (key_is_constant) {
|
| + __ mov(scratch, Operand(base_offset));
|
| + } else {
|
| + __ AddP(scratch, Operand(base_offset));
|
| + }
|
| base_offset = 0;
|
| - elements = scratch;
|
| }
|
| - __ lfd(result, MemOperand(elements, base_offset));
|
| +
|
| + if (!use_scratch) {
|
| + __ ld(result, MemOperand(elements, base_offset));
|
| + } else {
|
| + __ ld(result, MemOperand(scratch, elements, base_offset));
|
| + }
|
|
|
| if (instr->hydrogen()->RequiresHoleCheck()) {
|
| - if (is_int16(base_offset + Register::kExponentOffset)) {
|
| - __ lwz(scratch,
|
| - MemOperand(elements, base_offset + Register::kExponentOffset));
|
| + if (!use_scratch) {
|
| + __ LoadlW(r0,
|
| + MemOperand(elements, base_offset + Register::kExponentOffset));
|
| } else {
|
| - __ addi(scratch, elements, Operand(base_offset));
|
| - __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
|
| + __ LoadlW(r0, MemOperand(scratch, elements,
|
| + base_offset + Register::kExponentOffset));
|
| }
|
| - __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
|
| + __ Cmp32(r0, Operand(kHoleNanUpper32));
|
| DeoptimizeIf(eq, instr, Deoptimizer::kHole);
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
|
| HLoadKeyed* hinstr = instr->hydrogen();
|
| Register elements = ToRegister(instr->elements());
|
| Register result = ToRegister(instr->result());
|
| Register scratch = scratch0();
|
| - Register store_base = scratch;
|
| int offset = instr->base_offset();
|
|
|
| if (instr->key()->IsConstantOperand()) {
|
| LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
|
| offset += ToInteger32(const_operand) * kPointerSize;
|
| - store_base = elements;
|
| } else {
|
| Register key = ToRegister(instr->key());
|
| // Even though the HLoadKeyed instruction forces the input
|
| @@ -3090,17 +3037,16 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
|
| // during bound check elimination with the index argument to the bounds
|
| // check, which can be tagged, so that case must be handled here, too.
|
| if (hinstr->key()->representation().IsSmi()) {
|
| - __ SmiToPtrArrayOffset(r0, key);
|
| + __ SmiToPtrArrayOffset(scratch, key);
|
| } else {
|
| - __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
|
| + __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
|
| }
|
| - __ add(scratch, elements, r0);
|
| }
|
|
|
| bool requires_hole_check = hinstr->RequiresHoleCheck();
|
| Representation representation = hinstr->representation();
|
|
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| // 64-bit Smi optimization
|
| if (representation.IsInteger32() &&
|
| hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
|
| @@ -3110,24 +3056,28 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
|
| }
|
| #endif
|
|
|
| - __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
|
| - r0);
|
| + if (instr->key()->IsConstantOperand()) {
|
| + __ LoadRepresentation(result, MemOperand(elements, offset), representation,
|
| + r1);
|
| + } else {
|
| + __ LoadRepresentation(result, MemOperand(scratch, elements, offset),
|
| + representation, r1);
|
| + }
|
|
|
| // Check for the hole value.
|
| if (requires_hole_check) {
|
| if (IsFastSmiElementsKind(hinstr->elements_kind())) {
|
| - __ TestIfSmi(result, r0);
|
| + __ TestIfSmi(result);
|
| DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
|
| } else {
|
| - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
|
| - __ cmp(result, scratch);
|
| + __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
|
| DeoptimizeIf(eq, instr, Deoptimizer::kHole);
|
| }
|
| } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
|
| DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
|
| Label done;
|
| __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
|
| - __ cmp(result, scratch);
|
| + __ CmpP(result, scratch);
|
| __ bne(&done);
|
| if (info()->IsStub()) {
|
| // A stub can safely convert the hole to undefined only if the array
|
| @@ -3143,7 +3093,6 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
|
| if (instr->is_fixed_typed_array()) {
|
| DoLoadKeyedExternalArray(instr);
|
| @@ -3154,7 +3103,6 @@ void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
|
| }
|
| }
|
|
|
| -
|
| MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
|
| bool key_is_constant, bool key_is_smi,
|
| int constant_key,
|
| @@ -3163,29 +3111,32 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
|
| Register scratch = scratch0();
|
|
|
| if (key_is_constant) {
|
| - return MemOperand(base, (constant_key << element_size_shift) + base_offset);
|
| + int offset = (base_offset + (constant_key << element_size_shift));
|
| + if (!is_int20(offset)) {
|
| + __ mov(scratch, Operand(offset));
|
| + return MemOperand(base, scratch);
|
| + } else {
|
| + return MemOperand(base,
|
| + (constant_key << element_size_shift) + base_offset);
|
| + }
|
| }
|
|
|
| bool needs_shift =
|
| (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
|
|
|
| - if (!(base_offset || needs_shift)) {
|
| - return MemOperand(base, key);
|
| - }
|
| -
|
| if (needs_shift) {
|
| __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
|
| - key = scratch;
|
| + } else {
|
| + scratch = key;
|
| }
|
|
|
| - if (base_offset) {
|
| - __ Add(scratch, key, base_offset, r0);
|
| + if (!is_int20(base_offset)) {
|
| + __ AddP(scratch, Operand(base_offset));
|
| + base_offset = 0;
|
| }
|
| -
|
| - return MemOperand(base, scratch);
|
| + return MemOperand(scratch, base, base_offset);
|
| }
|
|
|
| -
|
| void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
|
| @@ -3201,15 +3152,15 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
|
| CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| }
|
|
|
| -
|
| void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
|
| Register scratch = scratch0();
|
| Register result = ToRegister(instr->result());
|
|
|
| if (instr->hydrogen()->from_inlined()) {
|
| - __ subi(result, sp, Operand(2 * kPointerSize));
|
| + __ lay(result, MemOperand(sp, -2 * kPointerSize));
|
| } else {
|
| // Check if the calling frame is an arguments adaptor frame.
|
| + Label done, adapted;
|
| __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| __ LoadP(result,
|
| MemOperand(scratch, StandardFrameConstants::kContextOffset));
|
| @@ -3217,22 +3168,16 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
|
|
|
| // Result is the frame pointer for the frame if not adapted and for the real
|
| // frame below the adaptor frame if adapted.
|
| - if (CpuFeatures::IsSupported(ISELECT)) {
|
| - __ isel(eq, result, scratch, fp);
|
| - } else {
|
| - Label done, adapted;
|
| - __ beq(&adapted);
|
| - __ mr(result, fp);
|
| - __ b(&done);
|
| + __ beq(&adapted, Label::kNear);
|
| + __ LoadRR(result, fp);
|
| + __ b(&done, Label::kNear);
|
|
|
| - __ bind(&adapted);
|
| - __ mr(result, scratch);
|
| - __ bind(&done);
|
| - }
|
| + __ bind(&adapted);
|
| + __ LoadRR(result, scratch);
|
| + __ bind(&done);
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
|
| Register elem = ToRegister(instr->elements());
|
| Register result = ToRegister(instr->result());
|
| @@ -3240,9 +3185,9 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
|
| Label done;
|
|
|
| // If no arguments adaptor frame the number of arguments is fixed.
|
| - __ cmp(fp, elem);
|
| + __ CmpP(fp, elem);
|
| __ mov(result, Operand(scope()->num_parameters()));
|
| - __ beq(&done);
|
| + __ beq(&done, Label::kNear);
|
|
|
| // Arguments adaptor frame present. Get argument length from there.
|
| __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
|
| @@ -3254,7 +3199,6 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
|
| __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
|
| Register receiver = ToRegister(instr->receiver());
|
| Register function = ToRegister(instr->function());
|
| @@ -3271,28 +3215,26 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
|
| // functions or builtins.
|
| __ LoadP(scratch,
|
| FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
|
| - __ lwz(scratch,
|
| - FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
|
| - __ andi(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
|
| + __ LoadlW(scratch, FieldMemOperand(
|
| + scratch, SharedFunctionInfo::kCompilerHintsOffset));
|
| + __ AndP(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
|
| (1 << SharedFunctionInfo::kNativeBit)));
|
| - __ bne(&result_in_receiver, cr0);
|
| + __ bne(&result_in_receiver, Label::kNear);
|
| }
|
|
|
| // Normal function. Replace undefined or null with global receiver.
|
| - __ LoadRoot(scratch, Heap::kNullValueRootIndex);
|
| - __ cmp(receiver, scratch);
|
| - __ beq(&global_object);
|
| - __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
|
| - __ cmp(receiver, scratch);
|
| - __ beq(&global_object);
|
| + __ CompareRoot(receiver, Heap::kNullValueRootIndex);
|
| + __ beq(&global_object, Label::kNear);
|
| + __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
|
| + __ beq(&global_object, Label::kNear);
|
|
|
| // Deoptimize if the receiver is not a JS object.
|
| - __ TestIfSmi(receiver, r0);
|
| + __ TestIfSmi(receiver);
|
| DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
|
| __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
|
| DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
|
|
|
| - __ b(&result_in_receiver);
|
| + __ b(&result_in_receiver, Label::kNear);
|
| __ bind(&global_object);
|
| __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
|
| __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
|
| @@ -3302,63 +3244,59 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
|
| __ bind(&result_in_receiver);
|
| } else {
|
| Label result_ok;
|
| - __ b(&result_ok);
|
| + __ b(&result_ok, Label::kNear);
|
| __ bind(&result_in_receiver);
|
| - __ mr(result, receiver);
|
| + __ LoadRR(result, receiver);
|
| __ bind(&result_ok);
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
|
| Register receiver = ToRegister(instr->receiver());
|
| Register function = ToRegister(instr->function());
|
| Register length = ToRegister(instr->length());
|
| Register elements = ToRegister(instr->elements());
|
| Register scratch = scratch0();
|
| - DCHECK(receiver.is(r3)); // Used for parameter count.
|
| - DCHECK(function.is(r4)); // Required by InvokeFunction.
|
| - DCHECK(ToRegister(instr->result()).is(r3));
|
| + DCHECK(receiver.is(r2)); // Used for parameter count.
|
| + DCHECK(function.is(r3)); // Required by InvokeFunction.
|
| + DCHECK(ToRegister(instr->result()).is(r2));
|
|
|
| // Copy the arguments to this function possibly from the
|
| // adaptor frame below it.
|
| const uint32_t kArgumentsLimit = 1 * KB;
|
| - __ cmpli(length, Operand(kArgumentsLimit));
|
| + __ CmpLogicalP(length, Operand(kArgumentsLimit));
|
| DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
|
|
|
| // Push the receiver and use the register to keep the original
|
| // number of arguments.
|
| __ push(receiver);
|
| - __ mr(receiver, length);
|
| + __ LoadRR(receiver, length);
|
| // The arguments are at a one pointer size offset from elements.
|
| - __ addi(elements, elements, Operand(1 * kPointerSize));
|
| + __ AddP(elements, Operand(1 * kPointerSize));
|
|
|
| // Loop through the arguments pushing them onto the execution
|
| // stack.
|
| Label invoke, loop;
|
| // length is a small non-negative integer, due to the test above.
|
| - __ cmpi(length, Operand::Zero());
|
| - __ beq(&invoke);
|
| - __ mtctr(length);
|
| + __ CmpP(length, Operand::Zero());
|
| + __ beq(&invoke, Label::kNear);
|
| __ bind(&loop);
|
| - __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
|
| - __ LoadPX(scratch, MemOperand(elements, r0));
|
| + __ ShiftLeftP(r1, length, Operand(kPointerSizeLog2));
|
| + __ LoadP(scratch, MemOperand(elements, r1));
|
| __ push(scratch);
|
| - __ addi(length, length, Operand(-1));
|
| - __ bdnz(&loop);
|
| + __ BranchOnCount(length, &loop);
|
|
|
| __ bind(&invoke);
|
| DCHECK(instr->HasPointerMap());
|
| LPointerMap* pointers = instr->pointer_map();
|
| SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
|
| - // The number of arguments is stored in receiver which is r3, as expected
|
| + // The number of arguments is stored in receiver which is r2, as expected
|
| // by InvokeFunction.
|
| ParameterCount actual(receiver);
|
| __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
|
| safepoint_generator);
|
| }
|
|
|
| -
|
| void LCodeGen::DoPushArgument(LPushArgument* instr) {
|
| LOperand* argument = instr->value();
|
| if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
|
| @@ -3369,16 +3307,13 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
|
|
|
| -
|
| void LCodeGen::DoThisFunction(LThisFunction* instr) {
|
| Register result = ToRegister(instr->result());
|
| __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
|
| }
|
|
|
| -
|
| void LCodeGen::DoContext(LContext* instr) {
|
| // If there is a non-return use, the context must be moved to a register.
|
| Register result = ToRegister(instr->result());
|
| @@ -3390,7 +3325,6 @@ void LCodeGen::DoContext(LContext* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| __ Move(scratch0(), instr->hydrogen()->pairs());
|
| @@ -3400,7 +3334,6 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
|
| CallRuntime(Runtime::kDeclareGlobals, instr);
|
| }
|
|
|
| -
|
| void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
|
| int formal_parameter_count, int arity,
|
| LInstruction* instr) {
|
| @@ -3409,7 +3342,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
|
| bool can_invoke_directly =
|
| dont_adapt_arguments || formal_parameter_count == arity;
|
|
|
| - Register function_reg = r4;
|
| + Register function_reg = r3;
|
|
|
| LPointerMap* pointers = instr->pointer_map();
|
|
|
| @@ -3418,8 +3351,8 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
|
| __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
|
|
|
| // Always initialize new target and number of actual arguments.
|
| - __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
|
| - __ mov(r3, Operand(arity));
|
| + __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
|
| + __ mov(r2, Operand(arity));
|
|
|
| bool is_self_call = function.is_identical_to(info()->closure());
|
|
|
| @@ -3441,7 +3374,6 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
|
| DCHECK(instr->context() != NULL);
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| @@ -3451,17 +3383,16 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
|
|
|
| // Deoptimize if not a heap number.
|
| __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
|
| - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
|
| - __ cmp(scratch, ip);
|
| + __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
|
| DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
|
|
|
| Label done;
|
| Register exponent = scratch0();
|
| scratch = no_reg;
|
| - __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
|
| + __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
|
| // Check the sign of the argument. If the argument is positive, just
|
| // return it.
|
| - __ cmpwi(exponent, Operand::Zero());
|
| + __ Cmp32(exponent, Operand::Zero());
|
| // Move the input to the result if necessary.
|
| __ Move(result, input);
|
| __ bge(&done);
|
| @@ -3473,10 +3404,10 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
|
|
|
| // Registers were saved at the safepoint, so we can use
|
| // many scratch registers.
|
| - Register tmp1 = input.is(r4) ? r3 : r4;
|
| - Register tmp2 = input.is(r5) ? r3 : r5;
|
| - Register tmp3 = input.is(r6) ? r3 : r6;
|
| - Register tmp4 = input.is(r7) ? r3 : r7;
|
| + Register tmp1 = input.is(r3) ? r2 : r3;
|
| + Register tmp2 = input.is(r4) ? r2 : r4;
|
| + Register tmp3 = input.is(r5) ? r2 : r5;
|
| + Register tmp4 = input.is(r6) ? r2 : r6;
|
|
|
| // exponent: floating point exponent value.
|
|
|
| @@ -3491,19 +3422,20 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
|
| CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
|
| instr->context());
|
| // Set the pointer to the new heap number in tmp.
|
| - if (!tmp1.is(r3)) __ mr(tmp1, r3);
|
| + if (!tmp1.is(r2)) __ LoadRR(tmp1, r2);
|
| // Restore input_reg after call to runtime.
|
| __ LoadFromSafepointRegisterSlot(input, input);
|
| - __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
|
| + __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
|
|
|
| __ bind(&allocated);
|
| // exponent: floating point exponent value.
|
| // tmp1: allocated heap number.
|
| - STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
|
| - __ clrlwi(exponent, exponent, Operand(1)); // clear sign bit
|
| - __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
|
| - __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
|
| - __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
|
| +
|
| + // Clear the sign bit.
|
| + __ nilf(exponent, Operand(~HeapNumber::kSignMask));
|
| + __ StoreW(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
|
| + __ LoadlW(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
|
| + __ StoreW(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
|
|
|
| __ StoreToSafepointRegisterSlot(tmp1, result);
|
| }
|
| @@ -3511,43 +3443,37 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
|
| __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::EmitMathAbs(LMathAbs* instr) {
|
| Register input = ToRegister(instr->value());
|
| Register result = ToRegister(instr->result());
|
| Label done;
|
| - __ cmpi(input, Operand::Zero());
|
| + __ CmpP(input, Operand::Zero());
|
| __ Move(result, input);
|
| - __ bge(&done);
|
| - __ li(r0, Operand::Zero()); // clear xer
|
| - __ mtxer(r0);
|
| - __ neg(result, result, SetOE, SetRC);
|
| + __ bge(&done, Label::kNear);
|
| + __ LoadComplementRR(result, result);
|
| // Deoptimize on overflow.
|
| DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
|
| __ bind(&done);
|
| }
|
|
|
| -
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
|
| Register input = ToRegister(instr->value());
|
| Register result = ToRegister(instr->result());
|
| Label done;
|
| - __ cmpwi(input, Operand::Zero());
|
| + __ Cmp32(input, Operand::Zero());
|
| __ Move(result, input);
|
| - __ bge(&done);
|
| + __ bge(&done, Label::kNear);
|
|
|
| // Deoptimize on overflow.
|
| - __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
|
| - __ cmpw(input, r0);
|
| + __ Cmp32(input, Operand(0x80000000));
|
| DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
|
|
|
| - __ neg(result, result);
|
| + __ LoadComplementRR(result, result);
|
| __ bind(&done);
|
| }
|
| #endif
|
|
|
| -
|
| void LCodeGen::DoMathAbs(LMathAbs* instr) {
|
| // Class for deferred case.
|
| class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
|
| @@ -3567,8 +3493,8 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
|
| if (r.IsDouble()) {
|
| DoubleRegister input = ToDoubleRegister(instr->value());
|
| DoubleRegister result = ToDoubleRegister(instr->result());
|
| - __ fabs(result, input);
|
| -#if V8_TARGET_ARCH_PPC64
|
| + __ lpdbr(result, input);
|
| +#if V8_TARGET_ARCH_S390X
|
| } else if (r.IsInteger32()) {
|
| EmitInteger32MathAbs(instr);
|
| } else if (r.IsSmi()) {
|
| @@ -3589,7 +3515,6 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoMathFloor(LMathFloor* instr) {
|
| DoubleRegister input = ToDoubleRegister(instr->value());
|
| Register result = ToRegister(instr->result());
|
| @@ -3604,15 +3529,14 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
|
| __ bind(&exact);
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| // Test for -0.
|
| - __ cmpi(result, Operand::Zero());
|
| - __ bne(&done);
|
| - __ cmpwi(input_high, Operand::Zero());
|
| + __ CmpP(result, Operand::Zero());
|
| + __ bne(&done, Label::kNear);
|
| + __ Cmp32(input_high, Operand::Zero());
|
| DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| }
|
| __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::DoMathRound(LMathRound* instr) {
|
| DoubleRegister input = ToDoubleRegister(instr->value());
|
| Register result = ToRegister(instr->result());
|
| @@ -3624,37 +3548,32 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
|
| Label convert, done;
|
|
|
| __ LoadDoubleLiteral(dot_five, 0.5, r0);
|
| - __ fabs(double_scratch1, input);
|
| - __ fcmpu(double_scratch1, dot_five);
|
| + __ lpdbr(double_scratch1, input);
|
| + __ cdbr(double_scratch1, dot_five);
|
| DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
|
| // If input is in [-0.5, -0], the result is -0.
|
| // If input is in [+0, +0.5[, the result is +0.
|
| // If the input is +0.5, the result is 1.
|
| - __ bgt(&convert); // Out of [-0.5, +0.5].
|
| + __ bgt(&convert, Label::kNear); // Out of [-0.5, +0.5].
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| // [-0.5, -0] (negative) yields minus zero.
|
| __ TestDoubleSign(input, scratch1);
|
| DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| }
|
| - __ fcmpu(input, dot_five);
|
| - if (CpuFeatures::IsSupported(ISELECT)) {
|
| - __ li(result, Operand(1));
|
| - __ isel(lt, result, r0, result);
|
| - __ b(&done);
|
| - } else {
|
| - Label return_zero;
|
| - __ bne(&return_zero);
|
| - __ li(result, Operand(1)); // +0.5.
|
| - __ b(&done);
|
| - // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
|
| - // flag kBailoutOnMinusZero.
|
| - __ bind(&return_zero);
|
| - __ li(result, Operand::Zero());
|
| - __ b(&done);
|
| - }
|
| + Label return_zero;
|
| + __ cdbr(input, dot_five);
|
| + __ bne(&return_zero, Label::kNear);
|
| + __ LoadImmP(result, Operand(1)); // +0.5.
|
| + __ b(&done, Label::kNear);
|
| + // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
|
| + // flag kBailoutOnMinusZero.
|
| + __ bind(&return_zero);
|
| + __ LoadImmP(result, Operand::Zero());
|
| + __ b(&done, Label::kNear);
|
|
|
| __ bind(&convert);
|
| - __ fadd(input_plus_dot_five, input, dot_five);
|
| + __ ldr(input_plus_dot_five, input);
|
| + __ adbr(input_plus_dot_five, dot_five);
|
| // Reuse dot_five (double_scratch0) as we no longer need this value.
|
| __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
|
| double_scratch0(), &done, &done);
|
| @@ -3662,21 +3581,22 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
|
| __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::DoMathFround(LMathFround* instr) {
|
| DoubleRegister input_reg = ToDoubleRegister(instr->value());
|
| DoubleRegister output_reg = ToDoubleRegister(instr->result());
|
| - __ frsp(output_reg, input_reg);
|
| -}
|
|
|
| + // Round double to float
|
| + __ ledbr(output_reg, input_reg);
|
| + // Extend from float to double
|
| + __ ldebr(output_reg, output_reg);
|
| +}
|
|
|
| void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
|
| DoubleRegister input = ToDoubleRegister(instr->value());
|
| DoubleRegister result = ToDoubleRegister(instr->result());
|
| - __ fsqrt(result, input);
|
| + __ sqdbr(result, input);
|
| }
|
|
|
| -
|
| void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
|
| DoubleRegister input = ToDoubleRegister(instr->value());
|
| DoubleRegister result = ToDoubleRegister(instr->result());
|
| @@ -3688,23 +3608,24 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
|
| Label skip, done;
|
|
|
| __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
|
| - __ fcmpu(input, temp);
|
| - __ bne(&skip);
|
| - __ fneg(result, temp);
|
| - __ b(&done);
|
| + __ cdbr(input, temp);
|
| + __ bne(&skip, Label::kNear);
|
| + __ lcdbr(result, temp);
|
| + __ b(&done, Label::kNear);
|
|
|
| // Add +0 to convert -0 to +0.
|
| __ bind(&skip);
|
| - __ fadd(result, input, kDoubleRegZero);
|
| - __ fsqrt(result, result);
|
| + __ ldr(result, input);
|
| + __ lzdr(kDoubleRegZero);
|
| + __ adbr(result, kDoubleRegZero);
|
| + __ sqdbr(result, result);
|
| __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::DoPower(LPower* instr) {
|
| Representation exponent_type = instr->hydrogen()->right()->representation();
|
| -// Having marked this as a call, we can use any registers.
|
| -// Just make sure that the input/output registers are the expected ones.
|
| + // Having marked this as a call, we can use any registers.
|
| + // Just make sure that the input/output registers are the expected ones.
|
| Register tagged_exponent = MathPowTaggedDescriptor::exponent();
|
| DCHECK(!instr->right()->IsDoubleRegister() ||
|
| ToDoubleRegister(instr->right()).is(d2));
|
| @@ -3719,10 +3640,8 @@ void LCodeGen::DoPower(LPower* instr) {
|
| } else if (exponent_type.IsTagged()) {
|
| Label no_deopt;
|
| __ JumpIfSmi(tagged_exponent, &no_deopt);
|
| - DCHECK(!r10.is(tagged_exponent));
|
| - __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
|
| - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
|
| - __ cmp(r10, ip);
|
| + __ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
|
| + __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
|
| DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
|
| __ bind(&no_deopt);
|
| MathPowStub stub(isolate(), MathPowStub::TAGGED);
|
| @@ -3737,7 +3656,6 @@ void LCodeGen::DoPower(LPower* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoMathExp(LMathExp* instr) {
|
| DoubleRegister input = ToDoubleRegister(instr->value());
|
| DoubleRegister result = ToDoubleRegister(instr->result());
|
| @@ -3750,7 +3668,6 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
|
| double_scratch2, temp1, temp2, scratch0());
|
| }
|
|
|
| -
|
| void LCodeGen::DoMathLog(LMathLog* instr) {
|
| __ PrepareCallCFunction(0, 1, scratch0());
|
| __ MovToFloatParameter(ToDoubleRegister(instr->value()));
|
| @@ -3759,17 +3676,22 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
|
| __ MovFromFloatResult(ToDoubleRegister(instr->result()));
|
| }
|
|
|
| -
|
| void LCodeGen::DoMathClz32(LMathClz32* instr) {
|
| Register input = ToRegister(instr->value());
|
| Register result = ToRegister(instr->result());
|
| - __ cntlzw_(result, input);
|
| + Label done;
|
| + __ llgfr(result, input);
|
| + __ flogr(r0, result);
|
| + __ LoadRR(result, r0);
|
| + __ CmpP(r0, Operand::Zero());
|
| + __ beq(&done, Label::kNear);
|
| + __ SubP(result, Operand(32));
|
| + __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| - DCHECK(ToRegister(instr->function()).is(r4));
|
| + DCHECK(ToRegister(instr->function()).is(r3));
|
| DCHECK(instr->HasPointerMap());
|
|
|
| Handle<JSFunction> known_function = instr->hydrogen()->known_function();
|
| @@ -3777,7 +3699,7 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
|
| LPointerMap* pointers = instr->pointer_map();
|
| SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
|
| ParameterCount count(instr->arity());
|
| - __ InvokeFunction(r4, no_reg, count, CALL_FUNCTION, generator);
|
| + __ InvokeFunction(r3, no_reg, count, CALL_FUNCTION, generator);
|
| } else {
|
| CallKnownFunction(known_function,
|
| instr->hydrogen()->formal_parameter_count(),
|
| @@ -3785,9 +3707,8 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
|
| - DCHECK(ToRegister(instr->result()).is(r3));
|
| + DCHECK(ToRegister(instr->result()).is(r2));
|
|
|
| if (instr->hydrogen()->IsTailCall()) {
|
| if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
|
| @@ -3799,7 +3720,7 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
|
| } else {
|
| DCHECK(instr->target()->IsRegister());
|
| Register target = ToRegister(instr->target());
|
| - __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| + __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| __ JumpToJSEntry(ip);
|
| }
|
| } else {
|
| @@ -3815,27 +3736,26 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
|
| DCHECK(instr->target()->IsRegister());
|
| Register target = ToRegister(instr->target());
|
| generator.BeforeCall(__ CallSize(target));
|
| - __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| + __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| __ CallJSEntry(ip);
|
| }
|
| generator.AfterCall();
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| - DCHECK(ToRegister(instr->constructor()).is(r4));
|
| - DCHECK(ToRegister(instr->result()).is(r3));
|
| + DCHECK(ToRegister(instr->constructor()).is(r3));
|
| + DCHECK(ToRegister(instr->result()).is(r2));
|
|
|
| - __ mov(r3, Operand(instr->arity()));
|
| + __ mov(r2, Operand(instr->arity()));
|
| if (instr->arity() == 1) {
|
| // We only need the allocation site for the case we have a length argument.
|
| // The case may bail out to the runtime, which will determine the correct
|
| // elements kind with the site.
|
| - __ Move(r5, instr->hydrogen()->site());
|
| + __ Move(r4, instr->hydrogen()->site());
|
| } else {
|
| - __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
|
| + __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
|
| }
|
| ElementsKind kind = instr->hydrogen()->elements_kind();
|
| AllocationSiteOverrideMode override_mode =
|
| @@ -3852,15 +3772,15 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
|
| Label packed_case;
|
| // We might need a change here
|
| // look at the first argument
|
| - __ LoadP(r8, MemOperand(sp, 0));
|
| - __ cmpi(r8, Operand::Zero());
|
| - __ beq(&packed_case);
|
| + __ LoadP(r7, MemOperand(sp, 0));
|
| + __ CmpP(r7, Operand::Zero());
|
| + __ beq(&packed_case, Label::kNear);
|
|
|
| ElementsKind holey_kind = GetHoleyElementsKind(kind);
|
| ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
|
| override_mode);
|
| CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| - __ b(&done);
|
| + __ b(&done, Label::kNear);
|
| __ bind(&packed_case);
|
| }
|
|
|
| @@ -3873,35 +3793,31 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
|
| CallRuntime(instr->function(), instr->arity(), instr);
|
| }
|
|
|
| -
|
| void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
|
| Register function = ToRegister(instr->function());
|
| Register code_object = ToRegister(instr->code_object());
|
| - __ addi(code_object, code_object,
|
| - Operand(Code::kHeaderSize - kHeapObjectTag));
|
| + __ lay(code_object,
|
| + MemOperand(code_object, Code::kHeaderSize - kHeapObjectTag));
|
| __ StoreP(code_object,
|
| FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
|
| }
|
|
|
| -
|
| void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
|
| Register result = ToRegister(instr->result());
|
| Register base = ToRegister(instr->base_object());
|
| if (instr->offset()->IsConstantOperand()) {
|
| LConstantOperand* offset = LConstantOperand::cast(instr->offset());
|
| - __ Add(result, base, ToInteger32(offset), r0);
|
| + __ lay(result, MemOperand(base, ToInteger32(offset)));
|
| } else {
|
| Register offset = ToRegister(instr->offset());
|
| - __ add(result, base, offset);
|
| + __ lay(result, MemOperand(base, offset));
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
| HStoreNamedField* hinstr = instr->hydrogen();
|
| Representation representation = instr->representation();
|
| @@ -3920,7 +3836,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
|
|
| __ AssertNotSmi(object);
|
|
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
|
| IsInteger32(LConstantOperand::cast(instr->value())));
|
| #else
|
| @@ -3932,7 +3848,8 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
| DCHECK(!hinstr->has_transition());
|
| DCHECK(!hinstr->NeedsWriteBarrier());
|
| DoubleRegister value = ToDoubleRegister(instr->value());
|
| - __ stfd(value, FieldMemOperand(object, offset));
|
| + DCHECK(offset >= 0);
|
| + __ std(value, FieldMemOperand(object, offset));
|
| return;
|
| }
|
|
|
| @@ -3953,11 +3870,11 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
| Register record_dest = object;
|
| Register record_value = no_reg;
|
| Register record_scratch = scratch;
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| if (FLAG_unbox_double_fields && representation.IsDouble()) {
|
| DCHECK(access.IsInobject());
|
| DoubleRegister value = ToDoubleRegister(instr->value());
|
| - __ stfd(value, FieldMemOperand(object, offset));
|
| + __ std(value, FieldMemOperand(object, offset));
|
| if (hinstr->NeedsWriteBarrier()) {
|
| record_value = ToRegister(instr->value());
|
| }
|
| @@ -3985,7 +3902,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
| record_value = value;
|
| record_scratch = object;
|
| }
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| }
|
| #endif
|
|
|
| @@ -3997,7 +3914,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
|
| @@ -4010,11 +3926,11 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
|
| __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
|
| Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
|
| isolate(), instr->language_mode(),
|
| - instr->hydrogen()->initialization_state()).code();
|
| + instr->hydrogen()->initialization_state())
|
| + .code();
|
| CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| }
|
|
|
| -
|
| void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
|
| Representation representation = instr->hydrogen()->length()->representation();
|
| DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
|
| @@ -4025,31 +3941,31 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
|
| int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
|
| Register index = ToRegister(instr->index());
|
| if (representation.IsSmi()) {
|
| - __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
|
| + __ CmpLogicalP(index, Operand(Smi::FromInt(length)));
|
| } else {
|
| - __ Cmplwi(index, Operand(length), r0);
|
| + __ CmpLogical32(index, Operand(length));
|
| }
|
| cc = CommuteCondition(cc);
|
| } else if (instr->index()->IsConstantOperand()) {
|
| int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
|
| Register length = ToRegister(instr->length());
|
| if (representation.IsSmi()) {
|
| - __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
|
| + __ CmpLogicalP(length, Operand(Smi::FromInt(index)));
|
| } else {
|
| - __ Cmplwi(length, Operand(index), r0);
|
| + __ CmpLogical32(length, Operand(index));
|
| }
|
| } else {
|
| Register index = ToRegister(instr->index());
|
| Register length = ToRegister(instr->length());
|
| if (representation.IsSmi()) {
|
| - __ cmpl(length, index);
|
| + __ CmpLogicalP(length, index);
|
| } else {
|
| - __ cmplw(length, index);
|
| + __ CmpLogical32(length, index);
|
| }
|
| }
|
| if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
|
| Label done;
|
| - __ b(NegateCondition(cc), &done);
|
| + __ b(NegateCondition(cc), &done, Label::kNear);
|
| __ stop("eliminated bounds check failed");
|
| __ bind(&done);
|
| } else {
|
| @@ -4057,7 +3973,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
|
| Register external_pointer = ToRegister(instr->elements());
|
| Register key = no_reg;
|
| @@ -4081,20 +3996,26 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
|
| DoubleRegister value(ToDoubleRegister(instr->value()));
|
| if (key_is_constant) {
|
| if (constant_key != 0) {
|
| - __ Add(address, external_pointer, constant_key << element_size_shift,
|
| - r0);
|
| + base_offset += constant_key << element_size_shift;
|
| + if (!is_int20(base_offset)) {
|
| + __ mov(address, Operand(base_offset));
|
| + __ AddP(address, external_pointer);
|
| + } else {
|
| + __ AddP(address, external_pointer, Operand(base_offset));
|
| + }
|
| + base_offset = 0;
|
| } else {
|
| address = external_pointer;
|
| }
|
| } else {
|
| - __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
|
| - __ add(address, external_pointer, r0);
|
| + __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi);
|
| + __ AddP(address, external_pointer);
|
| }
|
| if (elements_kind == FLOAT32_ELEMENTS) {
|
| - __ frsp(double_scratch0(), value);
|
| - __ stfs(double_scratch0(), MemOperand(address, base_offset));
|
| + __ ledbr(double_scratch0(), value);
|
| + __ StoreFloat32(double_scratch0(), MemOperand(address, base_offset));
|
| } else { // Storing doubles, not floats.
|
| - __ stfd(value, MemOperand(address, base_offset));
|
| + __ StoreDouble(value, MemOperand(address, base_offset));
|
| }
|
| } else {
|
| Register value(ToRegister(instr->value()));
|
| @@ -4108,7 +4029,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
|
| if (key_is_constant) {
|
| __ StoreByte(value, mem_operand, r0);
|
| } else {
|
| - __ stbx(value, mem_operand);
|
| + __ StoreByte(value, mem_operand);
|
| }
|
| break;
|
| case INT16_ELEMENTS:
|
| @@ -4116,15 +4037,15 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
|
| if (key_is_constant) {
|
| __ StoreHalfWord(value, mem_operand, r0);
|
| } else {
|
| - __ sthx(value, mem_operand);
|
| + __ StoreHalfWord(value, mem_operand);
|
| }
|
| break;
|
| case INT32_ELEMENTS:
|
| case UINT32_ELEMENTS:
|
| if (key_is_constant) {
|
| - __ StoreWord(value, mem_operand, r0);
|
| + __ StoreW(value, mem_operand, r0);
|
| } else {
|
| - __ stwx(value, mem_operand);
|
| + __ StoreW(value, mem_operand);
|
| }
|
| break;
|
| case FLOAT32_ELEMENTS:
|
| @@ -4147,7 +4068,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
|
| DoubleRegister value = ToDoubleRegister(instr->value());
|
| Register elements = ToRegister(instr->elements());
|
| @@ -4170,34 +4090,48 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
|
| int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
|
| bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
|
| int base_offset = instr->base_offset() + constant_key * kDoubleSize;
|
| - if (!key_is_constant) {
|
| + bool use_scratch = false;
|
| + intptr_t address_offset = base_offset;
|
| +
|
| + if (key_is_constant) {
|
| + // Memory references support up to 20-bits signed displacement in RXY form
|
| + if (!is_int20((address_offset))) {
|
| + __ mov(scratch, Operand(address_offset));
|
| + address_offset = 0;
|
| + use_scratch = true;
|
| + }
|
| + } else {
|
| + use_scratch = true;
|
| __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
|
| - __ add(scratch, elements, scratch);
|
| - elements = scratch;
|
| - }
|
| - if (!is_int16(base_offset)) {
|
| - __ Add(scratch, elements, base_offset, r0);
|
| - base_offset = 0;
|
| - elements = scratch;
|
| + // Memory references support up to 20-bits signed displacement in RXY form
|
| + if (!is_int20((address_offset))) {
|
| + __ AddP(scratch, Operand(address_offset));
|
| + address_offset = 0;
|
| + }
|
| }
|
|
|
| if (instr->NeedsCanonicalization()) {
|
| // Turn potential sNaN value into qNaN.
|
| __ CanonicalizeNaN(double_scratch, value);
|
| - __ stfd(double_scratch, MemOperand(elements, base_offset));
|
| + DCHECK(address_offset >= 0);
|
| + if (use_scratch)
|
| + __ std(double_scratch, MemOperand(scratch, elements, address_offset));
|
| + else
|
| + __ std(double_scratch, MemOperand(elements, address_offset));
|
| } else {
|
| - __ stfd(value, MemOperand(elements, base_offset));
|
| + if (use_scratch)
|
| + __ std(value, MemOperand(scratch, elements, address_offset));
|
| + else
|
| + __ std(value, MemOperand(elements, address_offset));
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
|
| HStoreKeyed* hinstr = instr->hydrogen();
|
| Register value = ToRegister(instr->value());
|
| Register elements = ToRegister(instr->elements());
|
| Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
|
| Register scratch = scratch0();
|
| - Register store_base = scratch;
|
| int offset = instr->base_offset();
|
|
|
| // Do the store.
|
| @@ -4205,7 +4139,6 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
|
| DCHECK(!hinstr->NeedsWriteBarrier());
|
| LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
|
| offset += ToInteger32(const_operand) * kPointerSize;
|
| - store_base = elements;
|
| } else {
|
| // Even though the HLoadKeyed instruction forces the input
|
| // representation for the key to be an integer, the input gets replaced
|
| @@ -4214,14 +4147,13 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
|
| if (hinstr->key()->representation().IsSmi()) {
|
| __ SmiToPtrArrayOffset(scratch, key);
|
| } else {
|
| - __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
|
| + __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
|
| }
|
| - __ add(scratch, elements, scratch);
|
| }
|
|
|
| Representation representation = hinstr->value()->representation();
|
|
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| // 64-bit Smi optimization
|
| if (representation.IsInteger32()) {
|
| DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
|
| @@ -4231,22 +4163,30 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
|
| }
|
| #endif
|
|
|
| - __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
|
| - r0);
|
| + if (instr->key()->IsConstantOperand()) {
|
| + __ StoreRepresentation(value, MemOperand(elements, offset), representation,
|
| + scratch);
|
| + } else {
|
| + __ StoreRepresentation(value, MemOperand(scratch, elements, offset),
|
| + representation, r0);
|
| + }
|
|
|
| if (hinstr->NeedsWriteBarrier()) {
|
| SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
|
| ? OMIT_SMI_CHECK
|
| : INLINE_SMI_CHECK;
|
| // Compute address of modified element and store it into key register.
|
| - __ Add(key, store_base, offset, r0);
|
| + if (instr->key()->IsConstantOperand()) {
|
| + __ lay(key, MemOperand(elements, offset));
|
| + } else {
|
| + __ lay(key, MemOperand(scratch, elements, offset));
|
| + }
|
| __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
|
| EMIT_REMEMBERED_SET, check_needed,
|
| hinstr->PointersToHereCheckForValue());
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
|
| // By cases: external, fast double
|
| if (instr->is_fixed_typed_array()) {
|
| @@ -4258,7 +4198,6 @@ void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
|
| @@ -4271,11 +4210,11 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
|
|
|
| Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
|
| isolate(), instr->language_mode(),
|
| - instr->hydrogen()->initialization_state()).code();
|
| + instr->hydrogen()->initialization_state())
|
| + .code();
|
| CallCode(ic, RelocInfo::CODE_TARGET, instr);
|
| }
|
|
|
| -
|
| void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
|
| class DeferredMaybeGrowElements final : public LDeferredCode {
|
| public:
|
| @@ -4288,7 +4227,7 @@ void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
|
| LMaybeGrowElements* instr_;
|
| };
|
|
|
| - Register result = r3;
|
| + Register result = r2;
|
| DeferredMaybeGrowElements* deferred =
|
| new (zone()) DeferredMaybeGrowElements(this, instr);
|
| LOperand* key = instr->key();
|
| @@ -4310,15 +4249,15 @@ void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
|
| }
|
| } else if (key->IsConstantOperand()) {
|
| int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
|
| - __ Cmpwi(ToRegister(current_capacity), Operand(constant_key), r0);
|
| + __ Cmp32(ToRegister(current_capacity), Operand(constant_key));
|
| __ ble(deferred->entry());
|
| } else if (current_capacity->IsConstantOperand()) {
|
| int32_t constant_capacity =
|
| ToInteger32(LConstantOperand::cast(current_capacity));
|
| - __ Cmpwi(ToRegister(key), Operand(constant_capacity), r0);
|
| + __ Cmp32(ToRegister(key), Operand(constant_capacity));
|
| __ bge(deferred->entry());
|
| } else {
|
| - __ cmpw(ToRegister(key), ToRegister(current_capacity));
|
| + __ Cmp32(ToRegister(key), ToRegister(current_capacity));
|
| __ bge(deferred->entry());
|
| }
|
|
|
| @@ -4331,13 +4270,12 @@ void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
|
| __ bind(deferred->exit());
|
| }
|
|
|
| -
|
| void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
|
| // TODO(3095996): Get rid of this. For now, we need to make the
|
| // result register contain a valid pointer because it is already
|
| // contained in the register pointer map.
|
| - Register result = r3;
|
| - __ li(result, Operand::Zero());
|
| + Register result = r2;
|
| + __ LoadImmP(result, Operand::Zero());
|
|
|
| // We have to call a stub.
|
| {
|
| @@ -4350,9 +4288,9 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
|
|
|
| LOperand* key = instr->key();
|
| if (key->IsConstantOperand()) {
|
| - __ LoadSmiLiteral(r6, ToSmi(LConstantOperand::cast(key)));
|
| + __ LoadSmiLiteral(r5, ToSmi(LConstantOperand::cast(key)));
|
| } else {
|
| - __ SmiTag(r6, ToRegister(key));
|
| + __ SmiTag(r5, ToRegister(key));
|
| }
|
|
|
| GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
|
| @@ -4364,11 +4302,10 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
|
| }
|
|
|
| // Deopt on smi, which means the elements array changed to dictionary mode.
|
| - __ TestIfSmi(result, r0);
|
| + __ TestIfSmi(result);
|
| DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
|
| }
|
|
|
| -
|
| void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
|
| Register object_reg = ToRegister(instr->object());
|
| Register scratch = scratch0();
|
| @@ -4380,22 +4317,21 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
|
|
|
| Label not_applicable;
|
| __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
|
| - __ Cmpi(scratch, Operand(from_map), r0);
|
| + __ CmpP(scratch, Operand(from_map));
|
| __ bne(¬_applicable);
|
|
|
| if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
|
| Register new_map_reg = ToRegister(instr->new_map_temp());
|
| __ mov(new_map_reg, Operand(to_map));
|
| - __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
|
| - r0);
|
| + __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
|
| // Write barrier.
|
| __ RecordWriteForMap(object_reg, new_map_reg, scratch,
|
| GetLinkRegisterState(), kDontSaveFPRegs);
|
| } else {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| - DCHECK(object_reg.is(r3));
|
| + DCHECK(object_reg.is(r2));
|
| PushSafepointRegistersScope scope(this);
|
| - __ Move(r4, to_map);
|
| + __ Move(r3, to_map);
|
| bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
|
| TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
|
| __ CallStub(&stub);
|
| @@ -4405,7 +4341,6 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
|
| __ bind(¬_applicable);
|
| }
|
|
|
| -
|
| void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
|
| Register object = ToRegister(instr->object());
|
| Register temp = ToRegister(instr->temp());
|
| @@ -4415,17 +4350,15 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
|
| __ bind(&no_memento_found);
|
| }
|
|
|
| -
|
| void LCodeGen::DoStringAdd(LStringAdd* instr) {
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| - DCHECK(ToRegister(instr->left()).is(r4));
|
| - DCHECK(ToRegister(instr->right()).is(r3));
|
| + DCHECK(ToRegister(instr->left()).is(r3));
|
| + DCHECK(ToRegister(instr->right()).is(r2));
|
| StringAddStub stub(isolate(), instr->hydrogen()->flags(),
|
| instr->hydrogen()->pretenure_flag());
|
| CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
|
| }
|
|
|
| -
|
| void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
|
| class DeferredStringCharCodeAt final : public LDeferredCode {
|
| public:
|
| @@ -4447,7 +4380,6 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
|
| __ bind(deferred->exit());
|
| }
|
|
|
| -
|
| void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
|
| Register string = ToRegister(instr->string());
|
| Register result = ToRegister(instr->result());
|
| @@ -4456,7 +4388,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
|
| // TODO(3095996): Get rid of this. For now, we need to make the
|
| // result register contain a valid pointer because it is already
|
| // contained in the register pointer map.
|
| - __ li(result, Operand::Zero());
|
| + __ LoadImmP(result, Operand::Zero());
|
|
|
| PushSafepointRegistersScope scope(this);
|
| __ push(string);
|
| @@ -4473,12 +4405,11 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
|
| }
|
| CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
|
| instr->context());
|
| - __ AssertSmi(r3);
|
| - __ SmiUntag(r3);
|
| - __ StoreToSafepointRegisterSlot(r3, result);
|
| + __ AssertSmi(r2);
|
| + __ SmiUntag(r2);
|
| + __ StoreToSafepointRegisterSlot(r2, result);
|
| }
|
|
|
| -
|
| void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
|
| class DeferredStringCharFromCode final : public LDeferredCode {
|
| public:
|
| @@ -4501,19 +4432,17 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
|
| Register result = ToRegister(instr->result());
|
| DCHECK(!char_code.is(result));
|
|
|
| - __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
|
| + __ CmpLogicalP(char_code, Operand(String::kMaxOneByteCharCode));
|
| __ bgt(deferred->entry());
|
| __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
|
| - __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
|
| - __ add(result, result, r0);
|
| + __ ShiftLeftP(r0, char_code, Operand(kPointerSizeLog2));
|
| + __ AddP(result, r0);
|
| __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
|
| - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
|
| - __ cmp(result, ip);
|
| + __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
|
| __ beq(deferred->entry());
|
| __ bind(deferred->exit());
|
| }
|
|
|
| -
|
| void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
|
| Register char_code = ToRegister(instr->char_code());
|
| Register result = ToRegister(instr->result());
|
| @@ -4521,17 +4450,16 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
|
| // TODO(3095996): Get rid of this. For now, we need to make the
|
| // result register contain a valid pointer because it is already
|
| // contained in the register pointer map.
|
| - __ li(result, Operand::Zero());
|
| + __ LoadImmP(result, Operand::Zero());
|
|
|
| PushSafepointRegistersScope scope(this);
|
| __ SmiTag(char_code);
|
| __ push(char_code);
|
| CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
|
| instr->context());
|
| - __ StoreToSafepointRegisterSlot(r3, result);
|
| + __ StoreToSafepointRegisterSlot(r2, result);
|
| }
|
|
|
| -
|
| void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
|
| LOperand* input = instr->value();
|
| DCHECK(input->IsRegister() || input->IsStackSlot());
|
| @@ -4546,14 +4474,12 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
|
| LOperand* input = instr->value();
|
| LOperand* output = instr->result();
|
| __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
|
| }
|
|
|
| -
|
| void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
|
| class DeferredNumberTagI final : public LDeferredCode {
|
| public:
|
| @@ -4573,16 +4499,16 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
|
| Register dst = ToRegister(instr->result());
|
|
|
| DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| __ SmiTag(dst, src);
|
| #else
|
| - __ SmiTagCheckOverflow(dst, src, r0);
|
| - __ BranchOnOverflow(deferred->entry());
|
| + // Add src to itself to detect SMI overflow.
|
| + __ Add32(dst, src, src);
|
| + __ b(overflow, deferred->entry());
|
| #endif
|
| __ bind(deferred->exit());
|
| }
|
|
|
| -
|
| void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
|
| class DeferredNumberTagU final : public LDeferredCode {
|
| public:
|
| @@ -4602,13 +4528,12 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
|
| Register result = ToRegister(instr->result());
|
|
|
| DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
|
| - __ Cmpli(input, Operand(Smi::kMaxValue), r0);
|
| + __ CmpLogicalP(input, Operand(Smi::kMaxValue));
|
| __ bgt(deferred->entry());
|
| __ SmiTag(result, input);
|
| __ bind(deferred->exit());
|
| }
|
|
|
| -
|
| void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
|
| LOperand* temp1, LOperand* temp2,
|
| IntegerSignedness signedness) {
|
| @@ -4626,7 +4551,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
|
| // the value in there. If that fails, call the runtime system.
|
| if (dst.is(src)) {
|
| __ SmiUntag(src, dst);
|
| - __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
|
| + __ xilf(src, Operand(HeapNumber::kSignMask));
|
| }
|
| __ ConvertIntToDouble(src, dbl_scratch);
|
| } else {
|
| @@ -4645,7 +4570,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
|
| // TODO(3095996): Put a valid pointer value in the stack slot where the
|
| // result register is stored, as this register is in the pointer map, but
|
| // contains an integer value.
|
| - __ li(dst, Operand::Zero());
|
| + __ LoadImmP(dst, Operand::Zero());
|
|
|
| // Preserve the value of all registers.
|
| PushSafepointRegistersScope scope(this);
|
| @@ -4659,16 +4584,15 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
|
| __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
|
| RecordSafepointWithRegisters(instr->pointer_map(), 0,
|
| Safepoint::kNoLazyDeopt);
|
| - __ StoreToSafepointRegisterSlot(r3, dst);
|
| + __ StoreToSafepointRegisterSlot(r2, dst);
|
| }
|
|
|
| // Done. Put the value in dbl_scratch into the value of the allocated heap
|
| // number.
|
| __ bind(&done);
|
| - __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
|
| + __ StoreDouble(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
|
| }
|
|
|
| -
|
| void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
|
| class DeferredNumberTagD final : public LDeferredCode {
|
| public:
|
| @@ -4695,16 +4619,15 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
|
| __ b(deferred->entry());
|
| }
|
| __ bind(deferred->exit());
|
| - __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
|
| + __ StoreDouble(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
|
| }
|
|
|
| -
|
| void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
|
| // TODO(3095996): Get rid of this. For now, we need to make the
|
| // result register contain a valid pointer because it is already
|
| // contained in the register pointer map.
|
| Register reg = ToRegister(instr->result());
|
| - __ li(reg, Operand::Zero());
|
| + __ LoadImmP(reg, Operand::Zero());
|
|
|
| PushSafepointRegistersScope scope(this);
|
| // NumberTagI and NumberTagD use the context from the frame, rather than
|
| @@ -4716,10 +4639,9 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
|
| __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
|
| RecordSafepointWithRegisters(instr->pointer_map(), 0,
|
| Safepoint::kNoLazyDeopt);
|
| - __ StoreToSafepointRegisterSlot(r3, reg);
|
| + __ StoreToSafepointRegisterSlot(r2, reg);
|
| }
|
|
|
| -
|
| void LCodeGen::DoSmiTag(LSmiTag* instr) {
|
| HChange* hchange = instr->hydrogen();
|
| Register input = ToRegister(instr->value());
|
| @@ -4729,7 +4651,7 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
|
| __ TestUnsignedSmiCandidate(input, r0);
|
| DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
|
| }
|
| -#if !V8_TARGET_ARCH_PPC64
|
| +#if !V8_TARGET_ARCH_S390X
|
| if (hchange->CheckFlag(HValue::kCanOverflow) &&
|
| !hchange->value()->CheckFlag(HValue::kUint32)) {
|
| __ SmiTagCheckOverflow(output, input, r0);
|
| @@ -4737,27 +4659,23 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
|
| } else {
|
| #endif
|
| __ SmiTag(output, input);
|
| -#if !V8_TARGET_ARCH_PPC64
|
| +#if !V8_TARGET_ARCH_S390X
|
| }
|
| #endif
|
| }
|
|
|
| -
|
| void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
|
| - Register scratch = scratch0();
|
| Register input = ToRegister(instr->value());
|
| Register result = ToRegister(instr->result());
|
| if (instr->needs_check()) {
|
| - // If the input is a HeapObject, value of scratch won't be zero.
|
| - __ andi(scratch, input, Operand(kHeapObjectTag));
|
| - __ SmiUntag(result, input);
|
| + __ tmll(input, Operand(kHeapObjectTag));
|
| DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
|
| + __ SmiUntag(result, input);
|
| } else {
|
| __ SmiUntag(result, input);
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
|
| DoubleRegister result_reg,
|
| NumberUntagDMode mode) {
|
| @@ -4776,29 +4694,28 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
|
|
|
| // Heap number map check.
|
| __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
|
| - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
|
| - __ cmp(scratch, ip);
|
| + __ CmpP(scratch, RootMemOperand(Heap::kHeapNumberMapRootIndex));
|
| +
|
| if (can_convert_undefined_to_nan) {
|
| - __ bne(&convert);
|
| + __ bne(&convert, Label::kNear);
|
| } else {
|
| DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
|
| }
|
| // load heap number
|
| - __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
|
| + __ ld(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
|
| if (deoptimize_on_minus_zero) {
|
| __ TestDoubleIsMinusZero(result_reg, scratch, ip);
|
| DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| }
|
| - __ b(&done);
|
| + __ b(&done, Label::kNear);
|
| if (can_convert_undefined_to_nan) {
|
| __ bind(&convert);
|
| // Convert undefined (and hole) to NaN.
|
| - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
|
| - __ cmp(input_reg, ip);
|
| + __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
|
| DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
|
| __ LoadRoot(scratch, Heap::kNanValueRootIndex);
|
| - __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
|
| - __ b(&done);
|
| + __ ld(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
|
| + __ b(&done, Label::kNear);
|
| }
|
| } else {
|
| __ SmiUntag(scratch, input_reg);
|
| @@ -4811,7 +4728,6 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
|
| __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
|
| Register input_reg = ToRegister(instr->value());
|
| Register scratch1 = scratch0();
|
| @@ -4826,55 +4742,52 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
|
|
|
| // Heap number map check.
|
| __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
|
| - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
|
| - __ cmp(scratch1, ip);
|
| + __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
|
|
|
| if (instr->truncating()) {
|
| // Performs a truncating conversion of a floating point number as used by
|
| // the JS bitwise operations.
|
| Label no_heap_number, check_bools, check_false;
|
| - __ bne(&no_heap_number);
|
| - __ mr(scratch2, input_reg);
|
| + __ bne(&no_heap_number, Label::kNear);
|
| + __ LoadRR(scratch2, input_reg);
|
| __ TruncateHeapNumberToI(input_reg, scratch2);
|
| - __ b(&done);
|
| + __ b(&done, Label::kNear);
|
|
|
| // Check for Oddballs. Undefined/False is converted to zero and True to one
|
| // for truncating conversions.
|
| __ bind(&no_heap_number);
|
| - __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
|
| - __ cmp(input_reg, ip);
|
| + __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
|
| __ bne(&check_bools);
|
| - __ li(input_reg, Operand::Zero());
|
| - __ b(&done);
|
| + __ LoadImmP(input_reg, Operand::Zero());
|
| + __ b(&done, Label::kNear);
|
|
|
| __ bind(&check_bools);
|
| - __ LoadRoot(ip, Heap::kTrueValueRootIndex);
|
| - __ cmp(input_reg, ip);
|
| - __ bne(&check_false);
|
| - __ li(input_reg, Operand(1));
|
| - __ b(&done);
|
| + __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
|
| + __ bne(&check_false, Label::kNear);
|
| + __ LoadImmP(input_reg, Operand(1));
|
| + __ b(&done, Label::kNear);
|
|
|
| __ bind(&check_false);
|
| - __ LoadRoot(ip, Heap::kFalseValueRootIndex);
|
| - __ cmp(input_reg, ip);
|
| + __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
|
| DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
|
| - __ li(input_reg, Operand::Zero());
|
| + __ LoadImmP(input_reg, Operand::Zero());
|
| } else {
|
| + // Deoptimize if we don't have a heap number.
|
| DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
|
|
|
| - __ lfd(double_scratch2,
|
| - FieldMemOperand(input_reg, HeapNumber::kValueOffset));
|
| + __ ld(double_scratch2,
|
| + FieldMemOperand(input_reg, HeapNumber::kValueOffset));
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| // preserve heap number pointer in scratch2 for minus zero check below
|
| - __ mr(scratch2, input_reg);
|
| + __ LoadRR(scratch2, input_reg);
|
| }
|
| __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
|
| double_scratch);
|
| DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
|
|
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - __ cmpi(input_reg, Operand::Zero());
|
| - __ bne(&done);
|
| + __ CmpP(input_reg, Operand::Zero());
|
| + __ bne(&done, Label::kNear);
|
| __ TestHeapNumberSign(scratch2, scratch1);
|
| DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| }
|
| @@ -4882,7 +4795,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
|
| __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
|
| class DeferredTaggedToI final : public LDeferredCode {
|
| public:
|
| @@ -4914,7 +4826,6 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
|
| LOperand* input = instr->value();
|
| DCHECK(input->IsRegister());
|
| @@ -4932,7 +4843,6 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
|
| EmitNumberUntagD(instr, input_reg, result_reg, mode);
|
| }
|
|
|
| -
|
| void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
|
| Register result_reg = ToRegister(instr->result());
|
| Register scratch1 = scratch0();
|
| @@ -4948,8 +4858,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
|
| DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| Label done;
|
| - __ cmpi(result_reg, Operand::Zero());
|
| - __ bne(&done);
|
| + __ CmpP(result_reg, Operand::Zero());
|
| + __ bne(&done, Label::kNear);
|
| __ TestDoubleSign(double_input, scratch1);
|
| DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| __ bind(&done);
|
| @@ -4957,7 +4867,6 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
|
| Register result_reg = ToRegister(instr->result());
|
| Register scratch1 = scratch0();
|
| @@ -4973,14 +4882,14 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
|
| DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| Label done;
|
| - __ cmpi(result_reg, Operand::Zero());
|
| - __ bne(&done);
|
| + __ CmpP(result_reg, Operand::Zero());
|
| + __ bne(&done, Label::kNear);
|
| __ TestDoubleSign(double_input, scratch1);
|
| DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| __ bind(&done);
|
| }
|
| }
|
| -#if V8_TARGET_ARCH_PPC64
|
| +#if V8_TARGET_ARCH_S390X
|
| __ SmiTag(result_reg);
|
| #else
|
| __ SmiTagCheckOverflow(result_reg, r0);
|
| @@ -4988,48 +4897,44 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
|
| #endif
|
| }
|
|
|
| -
|
| void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
|
| LOperand* input = instr->value();
|
| - __ TestIfSmi(ToRegister(input), r0);
|
| + __ TestIfSmi(ToRegister(input));
|
| DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
|
| }
|
|
|
| -
|
| void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
|
| if (!instr->hydrogen()->value()->type().IsHeapObject()) {
|
| LOperand* input = instr->value();
|
| - __ TestIfSmi(ToRegister(input), r0);
|
| + __ TestIfSmi(ToRegister(input));
|
| DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoCheckArrayBufferNotNeutered(
|
| LCheckArrayBufferNotNeutered* instr) {
|
| Register view = ToRegister(instr->view());
|
| Register scratch = scratch0();
|
|
|
| __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
|
| - __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
|
| - __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
|
| + __ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
|
| + __ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
|
| DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
|
| }
|
|
|
| -
|
| void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
|
| Register input = ToRegister(instr->value());
|
| Register scratch = scratch0();
|
|
|
| __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
|
| - __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
|
|
|
| if (instr->hydrogen()->is_interval_check()) {
|
| InstanceType first;
|
| InstanceType last;
|
| instr->hydrogen()->GetCheckInterval(&first, &last);
|
|
|
| - __ cmpli(scratch, Operand(first));
|
| + __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
|
| + Operand(first));
|
|
|
| // If there is only one type in the interval check for equality.
|
| if (first == last) {
|
| @@ -5038,7 +4943,8 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
|
| DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
|
| // Omit check for the last type.
|
| if (last != LAST_TYPE) {
|
| - __ cmpli(scratch, Operand(last));
|
| + __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
|
| + Operand(last));
|
| DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
|
| }
|
| }
|
| @@ -5047,20 +4953,20 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
|
| uint8_t tag;
|
| instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
|
|
|
| + __ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
|
| +
|
| if (base::bits::IsPowerOfTwo32(mask)) {
|
| DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
|
| - __ andi(r0, scratch, Operand(mask));
|
| - DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
|
| - cr0);
|
| + __ AndP(scratch, Operand(mask));
|
| + DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
|
| } else {
|
| - __ andi(scratch, scratch, Operand(mask));
|
| - __ cmpi(scratch, Operand(tag));
|
| + __ AndP(scratch, Operand(mask));
|
| + __ CmpP(scratch, Operand(tag));
|
| DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
|
| }
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoCheckValue(LCheckValue* instr) {
|
| Register reg = ToRegister(instr->value());
|
| Handle<HeapObject> object = instr->hydrogen()->object().handle();
|
| @@ -5069,31 +4975,28 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
|
| Register reg = ToRegister(instr->value());
|
| Handle<Cell> cell = isolate()->factory()->NewCell(object);
|
| __ mov(ip, Operand(cell));
|
| - __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
|
| - __ cmp(reg, ip);
|
| + __ CmpP(reg, FieldMemOperand(ip, Cell::kValueOffset));
|
| } else {
|
| - __ Cmpi(reg, Operand(object), r0);
|
| + __ CmpP(reg, Operand(object));
|
| }
|
| DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
|
| }
|
|
|
| -
|
| void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
|
| Register temp = ToRegister(instr->temp());
|
| {
|
| PushSafepointRegistersScope scope(this);
|
| __ push(object);
|
| - __ li(cp, Operand::Zero());
|
| + __ LoadImmP(cp, Operand::Zero());
|
| __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
|
| RecordSafepointWithRegisters(instr->pointer_map(), 1,
|
| Safepoint::kNoLazyDeopt);
|
| - __ StoreToSafepointRegisterSlot(r3, temp);
|
| + __ StoreToSafepointRegisterSlot(r2, temp);
|
| }
|
| - __ TestIfSmi(temp, r0);
|
| + __ TestIfSmi(temp);
|
| DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
|
| }
|
|
|
| -
|
| void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
|
| class DeferredCheckMaps final : public LDeferredCode {
|
| public:
|
| @@ -5121,14 +5024,13 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
|
| return;
|
| }
|
|
|
| - Register object = ToRegister(instr->value());
|
| - Register map_reg = ToRegister(instr->temp());
|
| -
|
| - __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
|
| + LOperand* input = instr->value();
|
| + DCHECK(input->IsRegister());
|
| + Register reg = ToRegister(input);
|
|
|
| DeferredCheckMaps* deferred = NULL;
|
| if (instr->hydrogen()->HasMigrationTarget()) {
|
| - deferred = new (zone()) DeferredCheckMaps(this, instr, object);
|
| + deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
|
| __ bind(deferred->check_maps());
|
| }
|
|
|
| @@ -5136,12 +5038,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
|
| Label success;
|
| for (int i = 0; i < maps->size() - 1; i++) {
|
| Handle<Map> map = maps->at(i).handle();
|
| - __ CompareMap(map_reg, map, &success);
|
| + __ CompareMap(reg, map, &success);
|
| __ beq(&success);
|
| }
|
|
|
| Handle<Map> map = maps->at(maps->size() - 1).handle();
|
| - __ CompareMap(map_reg, map, &success);
|
| + __ CompareMap(reg, map, &success);
|
| if (instr->hydrogen()->HasMigrationTarget()) {
|
| __ bne(deferred->entry());
|
| } else {
|
| @@ -5151,21 +5053,18 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
|
| __ bind(&success);
|
| }
|
|
|
| -
|
| void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
|
| DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
|
| Register result_reg = ToRegister(instr->result());
|
| __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
|
| }
|
|
|
| -
|
| void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
|
| Register unclamped_reg = ToRegister(instr->unclamped());
|
| Register result_reg = ToRegister(instr->result());
|
| __ ClampUint8(result_reg, unclamped_reg);
|
| }
|
|
|
| -
|
| void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
|
| Register scratch = scratch0();
|
| Register input_reg = ToRegister(instr->unclamped());
|
| @@ -5178,21 +5077,21 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
|
|
|
| // Check for heap number
|
| __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
|
| - __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
|
| - __ beq(&heap_number);
|
| + __ CmpP(scratch, Operand(factory()->heap_number_map()));
|
| + __ beq(&heap_number, Label::kNear);
|
|
|
| // Check for undefined. Undefined is converted to zero for clamping
|
| // conversions.
|
| - __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
|
| + __ CmpP(input_reg, Operand(factory()->undefined_value()));
|
| DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
|
| - __ li(result_reg, Operand::Zero());
|
| - __ b(&done);
|
| + __ LoadImmP(result_reg, Operand::Zero());
|
| + __ b(&done, Label::kNear);
|
|
|
| // Heap number
|
| __ bind(&heap_number);
|
| - __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
|
| + __ ld(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
|
| __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
|
| - __ b(&done);
|
| + __ b(&done, Label::kNear);
|
|
|
| // smi
|
| __ bind(&is_smi);
|
| @@ -5201,30 +5100,34 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
|
| __ bind(&done);
|
| }
|
|
|
| -
|
| void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
|
| DoubleRegister value_reg = ToDoubleRegister(instr->value());
|
| Register result_reg = ToRegister(instr->result());
|
| -
|
| + // TODO(joransiu): Use non-memory version.
|
| + __ stdy(value_reg, MemOperand(sp, -kDoubleSize));
|
| if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
|
| - __ MovDoubleHighToInt(result_reg, value_reg);
|
| + __ LoadlW(result_reg,
|
| + MemOperand(sp, -kDoubleSize + Register::kExponentOffset));
|
| } else {
|
| - __ MovDoubleLowToInt(result_reg, value_reg);
|
| + __ LoadlW(result_reg,
|
| + MemOperand(sp, -kDoubleSize + Register::kMantissaOffset));
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
|
| Register hi_reg = ToRegister(instr->hi());
|
| Register lo_reg = ToRegister(instr->lo());
|
| DoubleRegister result_reg = ToDoubleRegister(instr->result());
|
| -#if V8_TARGET_ARCH_PPC64
|
| - __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
|
| -#else
|
| - __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
|
| -#endif
|
| -}
|
| + // TODO(joransiu): Construct with ldgr
|
| + Register scratch = scratch0();
|
| +
|
| + // Combine hi_reg:lo_reg into a single 64-bit register.
|
| + __ sllg(scratch, hi_reg, Operand(32));
|
| + __ lr(scratch, lo_reg);
|
|
|
| + // Bitwise convert from GPR to FPR
|
| + __ ldgr(result_reg, scratch);
|
| +}
|
|
|
| void LCodeGen::DoAllocate(LAllocate* instr) {
|
| class DeferredAllocate final : public LDeferredCode {
|
| @@ -5268,21 +5171,28 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
|
| if (instr->hydrogen()->MustPrefillWithFiller()) {
|
| if (instr->size()->IsConstantOperand()) {
|
| int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
|
| - __ LoadIntLiteral(scratch, size - kHeapObjectTag);
|
| + __ LoadIntLiteral(scratch, size);
|
| } else {
|
| - __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
|
| + scratch = ToRegister(instr->size());
|
| }
|
| - __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
|
| + __ lay(scratch, MemOperand(scratch, -kPointerSize));
|
| Label loop;
|
| + __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
|
| __ bind(&loop);
|
| - __ subi(scratch, scratch, Operand(kPointerSize));
|
| - __ StorePX(scratch2, MemOperand(result, scratch));
|
| - __ cmpi(scratch, Operand::Zero());
|
| + __ StoreP(scratch2, MemOperand(scratch, result, -kHeapObjectTag));
|
| +#if V8_TARGET_ARCH_S390X
|
| + __ lay(scratch, MemOperand(scratch, -kPointerSize));
|
| +#else
|
| + // TODO(joransiu): Improve the following sequence.
|
| + // Need to use AHI instead of LAY as top nibble is not set with LAY, causing
|
| + // incorrect result with the signed compare
|
| + __ AddP(scratch, Operand(-kPointerSize));
|
| +#endif
|
| + __ CmpP(scratch, Operand::Zero());
|
| __ bge(&loop);
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
|
| Register result = ToRegister(instr->result());
|
|
|
| @@ -5299,11 +5209,11 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
|
| __ push(size);
|
| } else {
|
| int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
|
| -#if !V8_TARGET_ARCH_PPC64
|
| +#if !V8_TARGET_ARCH_S390X
|
| if (size >= 0 && size <= Smi::kMaxValue) {
|
| #endif
|
| __ Push(Smi::FromInt(size));
|
| -#if !V8_TARGET_ARCH_PPC64
|
| +#if !V8_TARGET_ARCH_S390X
|
| } else {
|
| // We should never get here at runtime => abort
|
| __ stop("invalid allocation size");
|
| @@ -5324,24 +5234,22 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
|
|
|
| CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
|
| instr->context());
|
| - __ StoreToSafepointRegisterSlot(r3, result);
|
| + __ StoreToSafepointRegisterSlot(r2, result);
|
| }
|
|
|
| -
|
| void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
|
| - DCHECK(ToRegister(instr->value()).is(r3));
|
| - __ push(r3);
|
| + DCHECK(ToRegister(instr->value()).is(r2));
|
| + __ push(r2);
|
| CallRuntime(Runtime::kToFastProperties, 1, instr);
|
| }
|
|
|
| -
|
| void LCodeGen::DoTypeof(LTypeof* instr) {
|
| - DCHECK(ToRegister(instr->value()).is(r6));
|
| - DCHECK(ToRegister(instr->result()).is(r3));
|
| + DCHECK(ToRegister(instr->value()).is(r5));
|
| + DCHECK(ToRegister(instr->result()).is(r2));
|
| Label end, do_call;
|
| Register value_register = ToRegister(instr->value());
|
| __ JumpIfNotSmi(value_register, &do_call);
|
| - __ mov(r3, Operand(isolate()->factory()->number_string()));
|
| + __ mov(r2, Operand(isolate()->factory()->number_string()));
|
| __ b(&end);
|
| __ bind(&do_call);
|
| TypeofStub stub(isolate());
|
| @@ -5349,7 +5257,6 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
|
| __ bind(&end);
|
| }
|
|
|
| -
|
| void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
|
| Register input = ToRegister(instr->value());
|
|
|
| @@ -5361,7 +5268,6 @@ void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
|
| }
|
| }
|
|
|
| -
|
| Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
|
| Register input, Handle<String> type_name) {
|
| Condition final_branch_condition = kNoCondition;
|
| @@ -5395,18 +5301,18 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
|
| __ JumpIfSmi(input, false_label);
|
| // Check for undetectable objects => true.
|
| __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
|
| - __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
|
| + __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
|
| __ ExtractBit(r0, scratch, Map::kIsUndetectable);
|
| - __ cmpi(r0, Operand::Zero());
|
| + __ CmpP(r0, Operand::Zero());
|
| final_branch_condition = ne;
|
|
|
| } else if (String::Equals(type_name, factory->function_string())) {
|
| __ JumpIfSmi(input, false_label);
|
| __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
|
| - __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
|
| - __ andi(scratch, scratch,
|
| + __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
|
| + __ AndP(scratch, scratch,
|
| Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
|
| - __ cmpi(scratch, Operand(1 << Map::kIsCallable));
|
| + __ CmpP(scratch, Operand(1 << Map::kIsCallable));
|
| final_branch_condition = eq;
|
|
|
| } else if (String::Equals(type_name, factory->object_string())) {
|
| @@ -5417,10 +5323,10 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
|
| __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
|
| __ blt(false_label);
|
| // Check for callable or undetectable objects => false.
|
| - __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
|
| - __ andi(r0, scratch,
|
| + __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
|
| + __ AndP(r0, scratch,
|
| Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
|
| - __ cmpi(r0, Operand::Zero());
|
| + __ CmpP(r0, Operand::Zero());
|
| final_branch_condition = eq;
|
|
|
| // clang-format off
|
| @@ -5441,7 +5347,6 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
|
| return final_branch_condition;
|
| }
|
|
|
| -
|
| void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
|
| if (info()->ShouldEnsureSpaceForLazyDeopt()) {
|
| // Ensure that we have enough space after the previous lazy-bailout
|
| @@ -5449,17 +5354,16 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
|
| int current_pc = masm()->pc_offset();
|
| if (current_pc < last_lazy_deopt_pc_ + space_needed) {
|
| int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
|
| - DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
|
| + DCHECK_EQ(0, padding_size % 2);
|
| while (padding_size > 0) {
|
| __ nop();
|
| - padding_size -= Assembler::kInstrSize;
|
| + padding_size -= 2;
|
| }
|
| }
|
| }
|
| last_lazy_deopt_pc_ = masm()->pc_offset();
|
| }
|
|
|
| -
|
| void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
|
| last_lazy_deopt_pc_ = masm()->pc_offset();
|
| DCHECK(instr->HasEnvironment());
|
| @@ -5468,7 +5372,6 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
|
| safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
|
| }
|
|
|
| -
|
| void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
|
| Deoptimizer::BailoutType type = instr->hydrogen()->type();
|
| // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
|
| @@ -5482,17 +5385,14 @@ void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
|
| DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
|
| }
|
|
|
| -
|
// Pseudo-instruction: emits no code.
void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}
|
|
|
| -
|
// Pseudo-instruction marking a use of a value: emits no code.
void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}
|
|
|
| -
|
| void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
|
| PushSafepointRegistersScope scope(this);
|
| LoadContextFromDeferred(instr->context());
|
| @@ -5504,7 +5404,6 @@ void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
|
| safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
|
| }
|
|
|
| -
|
| void LCodeGen::DoStackCheck(LStackCheck* instr) {
|
| class DeferredStackCheck final : public LDeferredCode {
|
| public:
|
| @@ -5524,9 +5423,8 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
|
| if (instr->hydrogen()->is_function_entry()) {
|
| // Perform stack overflow check.
|
| Label done;
|
| - __ LoadRoot(ip, Heap::kStackLimitRootIndex);
|
| - __ cmpl(sp, ip);
|
| - __ bge(&done);
|
| + __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
|
| + __ bge(&done, Label::kNear);
|
| DCHECK(instr->context()->IsRegister());
|
| DCHECK(ToRegister(instr->context()).is(cp));
|
| CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
|
| @@ -5537,8 +5435,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
|
| // Perform stack overflow check if this goto needs it before jumping.
|
| DeferredStackCheck* deferred_stack_check =
|
| new (zone()) DeferredStackCheck(this, instr);
|
| - __ LoadRoot(ip, Heap::kStackLimitRootIndex);
|
| - __ cmpl(sp, ip);
|
| + __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
|
| __ blt(deferred_stack_check->entry());
|
| EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
|
| __ bind(instr->done_label());
|
| @@ -5550,7 +5447,6 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
|
| }
|
| }
|
|
|
| -
|
| void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
|
| // This is a pseudo-instruction that ensures that the environment here is
|
| // properly registered for deoptimization and records the assembler's PC
|
| @@ -5565,65 +5461,60 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
|
| GenerateOsrPrologue();
|
| }
|
|
|
| -
|
// Prepares the enumerable-properties descriptor for a for-in loop.
// Fast path: if the enum cache is usable, load the object's map (the
// cache lives off the map) into r2. Slow path: call
// Runtime::kForInEnumerate on the object.
// NOTE(review): input/output appear to be in r2 by fixed-register
// convention — confirm against the corresponding lithium-s390 definition.
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Label use_cache, call_runtime;
  // Branches to call_runtime when the enum cache cannot be used.
  __ CheckEnumCache(&call_runtime);

  __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r2);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}
|
|
|
| -
|
// Loads the enum cache array for a for-in loop from the given map.
// If the map's EnumLength is zero there are no enumerable own properties,
// so the empty fixed array is returned; otherwise the cache is read out of
// the instance descriptors. Deoptimizes if the cache slot is empty (null).
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
  __ bne(&load_cache, Label::kNear);
  // EnumLength == 0: no properties to enumerate.
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ b(&done, Label::kNear);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  // A zero (null) cache entry means there is no cache for this map.
  __ CmpP(result, Operand::Zero());
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);

  __ bind(&done);
}
|
|
|
| -
|
// Deoptimizes unless the object's current map equals the expected map.
// Used by for-in to detect that the object was modified during iteration.
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ CmpP(map, scratch0());
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}
|
|
|
| -
|
// Deferred (slow) path of LLoadFieldByIndex: loads a mutable heap-number
// field via the Runtime::kLoadMutableDouble runtime function.
// object/index are pushed as the two runtime arguments; the runtime result
// (in r2 per s390 linkage) is stored back into |result|'s safepoint slot.
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result, Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  // cp is zeroed before the call — presumably signalling "no JS context"
  // to the runtime entry; confirm against CallRuntimeSaveDoubles.
  __ LoadImmP(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(instr->pointer_map(), 2,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r2, result);
}
|
|
|
| -
|
| void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
|
| class DeferredLoadMutableDouble final : public LDeferredCode {
|
| public:
|
| @@ -5658,36 +5549,34 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
|
| Label out_of_object, done;
|
|
|
| __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
|
| - __ bne(deferred->entry(), cr0);
|
| - __ ShiftRightArithImm(index, index, 1);
|
| + __ bne(deferred->entry());
|
| + __ ShiftRightArithP(index, index, Operand(1));
|
|
|
| - __ cmpi(index, Operand::Zero());
|
| - __ blt(&out_of_object);
|
| + __ CmpP(index, Operand::Zero());
|
| + __ blt(&out_of_object, Label::kNear);
|
|
|
| __ SmiToPtrArrayOffset(r0, index);
|
| - __ add(scratch, object, r0);
|
| + __ AddP(scratch, object, r0);
|
| __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
|
|
|
| - __ b(&done);
|
| + __ b(&done, Label::kNear);
|
|
|
| __ bind(&out_of_object);
|
| __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
|
| // Index is equal to negated out of object property index plus 1.
|
| __ SmiToPtrArrayOffset(r0, index);
|
| - __ sub(scratch, result, r0);
|
| + __ SubP(scratch, result, r0);
|
| __ LoadP(result,
|
| FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
|
| __ bind(deferred->exit());
|
| __ bind(&done);
|
| }
|
|
|
| -
|
// Stores the given context into the current stack frame's context slot.
void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
|
|
|
| -
|
| #undef __
|
| } // namespace internal
|
| } // namespace v8
|
|
|