Index: src/ppc/lithium-codegen-ppc.cc |
diff --git a/src/arm/lithium-codegen-arm.cc b/src/ppc/lithium-codegen-ppc.cc |
similarity index 62% |
copy from src/arm/lithium-codegen-arm.cc |
copy to src/ppc/lithium-codegen-ppc.cc |
index d096087b4283f16bf93adba20324692376ca549c..84204515fa4e226456b5c57fc033450f3d1a0b76 100644 |
--- a/src/arm/lithium-codegen-arm.cc |
+++ b/src/ppc/lithium-codegen-ppc.cc |
@@ -1,27 +1,28 @@ |
// Copyright 2012 the V8 project authors. All rights reserved. |
+// |
+// Copyright IBM Corp. 2012, 2013. All rights reserved. |
+// |
// Use of this source code is governed by a BSD-style license that can be |
// found in the LICENSE file. |
#include "src/v8.h" |
-#include "src/arm/lithium-codegen-arm.h" |
-#include "src/arm/lithium-gap-resolver-arm.h" |
#include "src/code-stubs.h" |
#include "src/hydrogen-osr.h" |
#include "src/stub-cache.h" |
+#include "src/ppc/lithium-codegen-ppc.h" |
+#include "src/ppc/lithium-gap-resolver-ppc.h" |
+ |
namespace v8 { |
namespace internal { |
class SafepointGenerator V8_FINAL : public CallWrapper { |
public: |
- SafepointGenerator(LCodeGen* codegen, |
- LPointerMap* pointers, |
+ SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers, |
Safepoint::DeoptMode mode) |
- : codegen_(codegen), |
- pointers_(pointers), |
- deopt_mode_(mode) { } |
+ : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {} |
virtual ~SafepointGenerator() {} |
virtual void BeforeCall(int call_size) const V8_OVERRIDE {} |
@@ -49,11 +50,8 @@ bool LCodeGen::GenerateCode() { |
// the frame (that is done in GeneratePrologue). |
FrameScope frame_scope(masm_, StackFrame::NONE); |
- return GeneratePrologue() && |
- GenerateBody() && |
- GenerateDeferredCode() && |
- GenerateDeoptJumpTable() && |
- GenerateSafepointTable(); |
+ return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() && |
+ GenerateDeoptJumpTable() && GenerateSafepointTable(); |
} |
@@ -74,7 +72,7 @@ void LCodeGen::SaveCallerDoubles() { |
BitVector* doubles = chunk()->allocated_double_registers(); |
BitVector::Iterator save_iterator(doubles); |
while (!save_iterator.Done()) { |
- __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), |
+ __ stfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()), |
MemOperand(sp, count * kDoubleSize)); |
save_iterator.Advance(); |
count++; |
@@ -90,8 +88,8 @@ void LCodeGen::RestoreCallerDoubles() { |
BitVector::Iterator save_iterator(doubles); |
int count = 0; |
while (!save_iterator.Done()) { |
- __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()), |
- MemOperand(sp, count * kDoubleSize)); |
+ __ lfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()), |
+ MemOperand(sp, count * kDoubleSize)); |
save_iterator.Advance(); |
count++; |
} |
@@ -111,28 +109,26 @@ bool LCodeGen::GeneratePrologue() { |
} |
#endif |
- // r1: Callee's JS function. |
+ // r4: Callee's JS function. |
// cp: Callee's context. |
- // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool) |
// fp: Caller's frame pointer. |
// lr: Caller's pc. |
// Sloppy mode functions and builtins need to replace the receiver with the |
// global proxy when called as functions (without an explicit receiver |
// object). |
- if (info_->this_has_uses() && |
- info_->strict_mode() == SLOPPY && |
+ if (info_->this_has_uses() && info_->strict_mode() == SLOPPY && |
!info_->is_native()) { |
Label ok; |
int receiver_offset = info_->scope()->num_parameters() * kPointerSize; |
- __ ldr(r2, MemOperand(sp, receiver_offset)); |
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); |
- __ b(ne, &ok); |
+ __ LoadP(r5, MemOperand(sp, receiver_offset)); |
+ __ CompareRoot(r5, Heap::kUndefinedValueRootIndex); |
+ __ bne(&ok); |
- __ ldr(r2, GlobalObjectOperand()); |
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset)); |
+ __ LoadP(r5, GlobalObjectOperand()); |
+ __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset)); |
- __ str(r2, MemOperand(sp, receiver_offset)); |
+ __ StoreP(r5, MemOperand(sp, receiver_offset)); |
__ bind(&ok); |
} |
@@ -152,22 +148,18 @@ bool LCodeGen::GeneratePrologue() { |
// Reserve space for the stack slots needed by the code. |
int slots = GetStackSlotCount(); |
if (slots > 0) { |
+ __ subi(sp, sp, Operand(slots * kPointerSize)); |
if (FLAG_debug_code) { |
- __ sub(sp, sp, Operand(slots * kPointerSize)); |
- __ push(r0); |
- __ push(r1); |
- __ add(r0, sp, Operand(slots * kPointerSize)); |
- __ mov(r1, Operand(kSlotsZapValue)); |
+ __ Push(r3, r4); |
+ __ li(r0, Operand(slots)); |
+ __ mtctr(r0); |
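+ // r3 and r4 were just pushed, so the slot area starts two pointers |
+ // above sp; the CTR-driven loop below zaps exactly `slots` words. |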
+ __ addi(r3, sp, Operand((slots + 2) * kPointerSize)); |
+ __ mov(r4, Operand(kSlotsZapValue)); |
Label loop; |
__ bind(&loop); |
- __ sub(r0, r0, Operand(kPointerSize)); |
- __ str(r1, MemOperand(r0, 2 * kPointerSize)); |
- __ cmp(r0, sp); |
- __ b(ne, &loop); |
- __ pop(r1); |
- __ pop(r0); |
- } else { |
- __ sub(sp, sp, Operand(slots * kPointerSize)); |
+ __ StorePU(r4, MemOperand(r3, -kPointerSize)); |
+ __ bdnz(&loop); |
+ __ Pop(r3, r4); |
} |
} |
@@ -180,45 +172,40 @@ bool LCodeGen::GeneratePrologue() { |
if (heap_slots > 0) { |
Comment(";;; Allocate local context"); |
bool need_write_barrier = true; |
- // Argument to NewContext is the function, which is in r1. |
+ // Argument to NewContext is the function, which is in r4. |
if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
FastNewContextStub stub(isolate(), heap_slots); |
__ CallStub(&stub); |
// Result of FastNewContextStub is always in new space. |
need_write_barrier = false; |
} else { |
- __ push(r1); |
+ __ push(r4); |
__ CallRuntime(Runtime::kNewFunctionContext, 1); |
} |
RecordSafepoint(Safepoint::kNoLazyDeopt); |
- // Context is returned in both r0 and cp. It replaces the context |
+ // Context is returned in both r3 and cp. It replaces the context |
// passed to us. It's saved in the stack and kept live in cp. |
- __ mov(cp, r0); |
- __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ mr(cp, r3); |
+ __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
// Copy any necessary parameters into the context. |
int num_parameters = scope()->num_parameters(); |
for (int i = 0; i < num_parameters; i++) { |
Variable* var = scope()->parameter(i); |
if (var->IsContextSlot()) { |
int parameter_offset = StandardFrameConstants::kCallerSPOffset + |
- (num_parameters - 1 - i) * kPointerSize; |
+ (num_parameters - 1 - i) * kPointerSize; |
// Load parameter from stack. |
- __ ldr(r0, MemOperand(fp, parameter_offset)); |
+ __ LoadP(r3, MemOperand(fp, parameter_offset)); |
// Store it in the context. |
MemOperand target = ContextOperand(cp, var->index()); |
- __ str(r0, target); |
- // Update the write barrier. This clobbers r3 and r0. |
+ __ StoreP(r3, target, r0); |
+ // Update the write barrier. This clobbers r6 and r3. |
if (need_write_barrier) { |
- __ RecordWriteContextSlot( |
- cp, |
- target.offset(), |
- r0, |
- r3, |
- GetLinkRegisterState(), |
- kSaveFPRegs); |
+ __ RecordWriteContextSlot(cp, target.offset(), r3, r6, |
+ GetLinkRegisterState(), kSaveFPRegs); |
} else if (FLAG_debug_code) { |
Label done; |
- __ JumpIfInNewSpace(cp, r0, &done); |
+ __ JumpIfInNewSpace(cp, r3, &done); |
__ Abort(kExpectedNewSpaceObject); |
__ bind(&done); |
} |
@@ -248,7 +235,7 @@ void LCodeGen::GenerateOsrPrologue() { |
// optimized frame. |
int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots(); |
DCHECK(slots >= 0); |
- __ sub(sp, sp, Operand(slots * kPointerSize)); |
+ __ subi(sp, sp, Operand(slots * kPointerSize)); |
} |
@@ -273,11 +260,11 @@ bool LCodeGen::GenerateDeferredCode() { |
RecordAndWritePosition( |
chunk()->graph()->SourcePositionToScriptPosition(value->position())); |
- Comment(";;; <@%d,#%d> " |
- "-------------------- Deferred %s --------------------", |
- code->instruction_index(), |
- code->instr()->hydrogen_value()->id(), |
- code->instr()->Mnemonic()); |
+ Comment( |
+ ";;; <@%d,#%d> " |
+ "-------------------- Deferred %s --------------------", |
+ code->instruction_index(), code->instr()->hydrogen_value()->id(), |
+ code->instr()->Mnemonic()); |
__ bind(code->entry()); |
if (NeedsDeferredFrame()) { |
Comment(";;; Build frame"); |
@@ -285,9 +272,9 @@ bool LCodeGen::GenerateDeferredCode() { |
DCHECK(info()->IsStub()); |
frame_is_built_ = true; |
__ PushFixedFrame(); |
- __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB))); |
+ __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB)); |
__ push(scratch0()); |
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
+ __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
Comment(";;; Deferred code"); |
} |
code->Generate(); |
@@ -298,31 +285,15 @@ bool LCodeGen::GenerateDeferredCode() { |
__ PopFixedFrame(); |
frame_is_built_ = false; |
} |
- __ jmp(code->exit()); |
+ __ b(code->exit()); |
} |
} |
- // Force constant pool emission at the end of the deferred code to make |
- // sure that no constant pools are emitted after. |
- masm()->CheckConstPool(true, false); |
- |
return !is_aborted(); |
} |
bool LCodeGen::GenerateDeoptJumpTable() { |
- // Check that the jump table is accessible from everywhere in the function |
- // code, i.e. that offsets to the table can be encoded in the 24bit signed |
- // immediate of a branch instruction. |
- // To simplify we consider the code size from the first instruction to the |
- // end of the jump table. We also don't consider the pc load delta. |
- // Each entry in the jump table generates one instruction and inlines one |
- // 32bit data after it. |
- if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) + |
- deopt_jump_table_.length() * 7)) { |
- Abort(kGeneratedCodeIsTooLarge); |
- } |
- |
if (deopt_jump_table_.length() > 0) { |
Label needs_frame, call_deopt_entry; |
@@ -333,6 +304,7 @@ bool LCodeGen::GenerateDeoptJumpTable() { |
int length = deopt_jump_table_.length(); |
for (int i = 0; i < length; i++) { |
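+ // Keep each table entry contiguous: an interleaved trampoline pool |
+ // would split the entry's code sequence. |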
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); |
__ bind(&deopt_jump_table_[i].label); |
Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type; |
@@ -359,27 +331,23 @@ bool LCodeGen::GenerateDeoptJumpTable() { |
// have a function pointer to install in the stack frame that we're |
// building, install a special marker there instead. |
DCHECK(info()->IsStub()); |
- __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB))); |
- __ push(ip); |
- __ add(fp, sp, |
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
+ __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::STUB)); |
+ __ push(r0); |
+ __ addi(fp, sp, |
+ Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
__ bind(&call_deopt_entry); |
// Add the base address to the offset previously loaded in |
// entry_offset. |
- __ add(entry_offset, entry_offset, |
- Operand(ExternalReference::ForDeoptEntry(base))); |
- __ blx(entry_offset); |
+ __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base))); |
+ __ add(ip, entry_offset, ip); |
+ __ Call(ip); |
} |
- |
- masm()->CheckConstPool(false, false); |
} else { |
// The last entry can fall through into `call_deopt_entry`, avoiding a |
// branch. |
bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound(); |
if (need_branch) __ b(&call_deopt_entry); |
- |
- masm()->CheckConstPool(false, !need_branch); |
} |
} |
@@ -393,16 +361,12 @@ bool LCodeGen::GenerateDeoptJumpTable() { |
} |
// Add the base address to the offset previously loaded in entry_offset. |
- __ add(entry_offset, entry_offset, |
- Operand(ExternalReference::ForDeoptEntry(base))); |
- __ blx(entry_offset); |
+ __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base))); |
+ __ add(ip, entry_offset, ip); |
+ __ Call(ip); |
} |
} |
- // Force constant pool emission at the end of the deopt jump table to make |
- // sure that no constant pools are emitted after. |
- masm()->CheckConstPool(true, false); |
- |
// The deoptimization jump table is the last part of the instruction |
// sequence. Mark the generated code as done unless we bailed out. |
if (!is_aborted()) status_ = DONE; |
@@ -422,8 +386,8 @@ Register LCodeGen::ToRegister(int index) const { |
} |
-DwVfpRegister LCodeGen::ToDoubleRegister(int index) const { |
- return DwVfpRegister::FromAllocationIndex(index); |
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const { |
+ return DoubleRegister::FromAllocationIndex(index); |
} |
@@ -443,7 +407,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { |
Representation r = chunk_->LookupLiteralRepresentation(const_op); |
if (r.IsInteger32()) { |
DCHECK(literal->IsNumber()); |
- __ mov(scratch, Operand(static_cast<int32_t>(literal->Number()))); |
+ __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number())); |
} else if (r.IsDouble()) { |
Abort(kEmitLoadRegisterUnsupportedDoubleImmediate); |
} else { |
@@ -452,7 +416,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { |
} |
return scratch; |
} else if (op->IsStackSlot()) { |
- __ ldr(scratch, ToMemOperand(op)); |
+ __ LoadP(scratch, ToMemOperand(op)); |
return scratch; |
} |
UNREACHABLE(); |
@@ -460,42 +424,22 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) { |
} |
-DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
- DCHECK(op->IsDoubleRegister()); |
- return ToDoubleRegister(op->index()); |
+void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op, |
+ Register dst) { |
+ DCHECK(IsInteger32(const_op)); |
+ HConstant* constant = chunk_->LookupConstant(const_op); |
+ int32_t value = constant->Integer32Value(); |
+ if (IsSmi(const_op)) { |
+ __ LoadSmiLiteral(dst, Smi::FromInt(value)); |
+ } else { |
+ __ LoadIntLiteral(dst, value); |
+ } |
} |
-DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op, |
- SwVfpRegister flt_scratch, |
- DwVfpRegister dbl_scratch) { |
- if (op->IsDoubleRegister()) { |
- return ToDoubleRegister(op->index()); |
- } else if (op->IsConstantOperand()) { |
- LConstantOperand* const_op = LConstantOperand::cast(op); |
- HConstant* constant = chunk_->LookupConstant(const_op); |
- Handle<Object> literal = constant->handle(isolate()); |
- Representation r = chunk_->LookupLiteralRepresentation(const_op); |
- if (r.IsInteger32()) { |
- DCHECK(literal->IsNumber()); |
- __ mov(ip, Operand(static_cast<int32_t>(literal->Number()))); |
- __ vmov(flt_scratch, ip); |
- __ vcvt_f64_s32(dbl_scratch, flt_scratch); |
- return dbl_scratch; |
- } else if (r.IsDouble()) { |
- Abort(kUnsupportedDoubleImmediate); |
- } else if (r.IsTagged()) { |
- Abort(kUnsupportedTaggedImmediate); |
- } |
- } else if (op->IsStackSlot()) { |
- // TODO(regis): Why is vldr not taking a MemOperand? |
- // __ vldr(dbl_scratch, ToMemOperand(op)); |
- MemOperand mem_op = ToMemOperand(op); |
- __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset()); |
- return dbl_scratch; |
- } |
- UNREACHABLE(); |
- return dbl_scratch; |
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const { |
+ DCHECK(op->IsDoubleRegister()); |
+ return ToDoubleRegister(op->index()); |
} |
@@ -521,13 +465,13 @@ int32_t LCodeGen::ToInteger32(LConstantOperand* op) const { |
} |
-int32_t LCodeGen::ToRepresentation(LConstantOperand* op, |
- const Representation& r) const { |
+intptr_t LCodeGen::ToRepresentation(LConstantOperand* op, |
+ const Representation& r) const { |
HConstant* constant = chunk_->LookupConstant(op); |
int32_t value = constant->Integer32Value(); |
if (r.IsInteger32()) return value; |
DCHECK(r.IsSmiOrTagged()); |
- return reinterpret_cast<int32_t>(Smi::FromInt(value)); |
+ return reinterpret_cast<intptr_t>(Smi::FromInt(value)); |
} |
@@ -599,8 +543,8 @@ MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const { |
} else { |
// Retrieve parameter without eager stack-frame relative to the |
// stack-pointer. |
- return MemOperand( |
- sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); |
+ return MemOperand(sp, |
+ ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize); |
} |
} |
@@ -615,11 +559,12 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, |
int height = translation_size - environment->parameter_count(); |
WriteTranslation(environment->outer(), translation); |
- bool has_closure_id = !info()->closure().is_null() && |
+ bool has_closure_id = |
+ !info()->closure().is_null() && |
!info()->closure().is_identical_to(environment->closure()); |
int closure_id = has_closure_id |
- ? DefineDeoptimizationLiteral(environment->closure()) |
- : Translation::kSelfLiteralId; |
+ ? DefineDeoptimizationLiteral(environment->closure()) |
+ : Translation::kSelfLiteralId; |
switch (environment->frame_type()) { |
case JS_FUNCTION: |
@@ -650,22 +595,16 @@ void LCodeGen::WriteTranslation(LEnvironment* environment, |
int dematerialized_index = 0; |
for (int i = 0; i < translation_size; ++i) { |
LOperand* value = environment->values()->at(i); |
- AddToTranslation(environment, |
- translation, |
- value, |
- environment->HasTaggedValueAt(i), |
- environment->HasUint32ValueAt(i), |
- &object_index, |
- &dematerialized_index); |
+ AddToTranslation( |
+ environment, translation, value, environment->HasTaggedValueAt(i), |
+ environment->HasUint32ValueAt(i), &object_index, &dematerialized_index); |
} |
} |
void LCodeGen::AddToTranslation(LEnvironment* environment, |
- Translation* translation, |
- LOperand* op, |
- bool is_tagged, |
- bool is_uint32, |
+ Translation* translation, LOperand* op, |
+ bool is_tagged, bool is_uint32, |
int* object_index_pointer, |
int* dematerialized_index_pointer) { |
if (op == LEnvironment::materialization_marker()) { |
@@ -686,13 +625,10 @@ void LCodeGen::AddToTranslation(LEnvironment* environment, |
*dematerialized_index_pointer += object_length; |
for (int i = 0; i < object_length; ++i) { |
LOperand* value = environment->values()->at(env_offset + i); |
- AddToTranslation(environment, |
- translation, |
- value, |
+ AddToTranslation(environment, translation, value, |
environment->HasTaggedValueAt(env_offset + i), |
environment->HasUint32ValueAt(env_offset + i), |
- object_index_pointer, |
- dematerialized_index_pointer); |
+ object_index_pointer, dematerialized_index_pointer); |
} |
return; |
} |
@@ -729,49 +665,29 @@ void LCodeGen::AddToTranslation(LEnvironment* environment, |
} |
-int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) { |
- int size = masm()->CallSize(code, mode); |
- if (code->kind() == Code::BINARY_OP_IC || |
- code->kind() == Code::COMPARE_IC) { |
- size += Assembler::kInstrSize; // extra nop() added in CallCodeGeneric. |
- } |
- return size; |
-} |
- |
- |
-void LCodeGen::CallCode(Handle<Code> code, |
- RelocInfo::Mode mode, |
- LInstruction* instr, |
- TargetAddressStorageMode storage_mode) { |
- CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode); |
+void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode, |
+ LInstruction* instr) { |
+ CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT); |
} |
-void LCodeGen::CallCodeGeneric(Handle<Code> code, |
- RelocInfo::Mode mode, |
+void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode, |
LInstruction* instr, |
- SafepointMode safepoint_mode, |
- TargetAddressStorageMode storage_mode) { |
+ SafepointMode safepoint_mode) { |
DCHECK(instr != NULL); |
- // Block literal pool emission to ensure nop indicating no inlined smi code |
- // is in the correct position. |
- Assembler::BlockConstPoolScope block_const_pool(masm()); |
- __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode); |
+ __ Call(code, mode); |
RecordSafepointWithLazyDeopt(instr, safepoint_mode); |
// Signal that we don't inline smi code before these stubs in the |
// optimizing code generator. |
- if (code->kind() == Code::BINARY_OP_IC || |
- code->kind() == Code::COMPARE_IC) { |
+ if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) { |
__ nop(); |
} |
} |
-void LCodeGen::CallRuntime(const Runtime::Function* function, |
- int num_arguments, |
- LInstruction* instr, |
- SaveFPRegsMode save_doubles) { |
+void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments, |
+ LInstruction* instr, SaveFPRegsMode save_doubles) { |
DCHECK(instr != NULL); |
__ CallRuntime(function, num_arguments, save_doubles); |
@@ -784,7 +700,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) { |
if (context->IsRegister()) { |
__ Move(cp, ToRegister(context)); |
} else if (context->IsStackSlot()) { |
- __ ldr(cp, ToMemOperand(context)); |
+ __ LoadP(cp, ToMemOperand(context)); |
} else if (context->IsConstantOperand()) { |
HConstant* constant = |
chunk_->LookupConstant(LConstantOperand::cast(context)); |
@@ -795,14 +711,12 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) { |
} |
-void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, |
- int argc, |
- LInstruction* instr, |
- LOperand* context) { |
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc, |
+ LInstruction* instr, LOperand* context) { |
LoadContextFromDeferred(context); |
__ CallRuntimeSaveDoubles(id); |
- RecordSafepointWithRegisters( |
- instr->pointer_map(), argc, Safepoint::kNoLazyDeopt); |
+ RecordSafepointWithRegisters(instr->pointer_map(), argc, |
+ Safepoint::kNoLazyDeopt); |
} |
@@ -835,17 +749,16 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment, |
WriteTranslation(environment, &translation); |
int deoptimization_index = deoptimizations_.length(); |
int pc_offset = masm()->pc_offset(); |
- environment->Register(deoptimization_index, |
- translation.index(), |
+ environment->Register(deoptimization_index, translation.index(), |
(mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
deoptimizations_.Add(environment, zone()); |
} |
} |
-void LCodeGen::DeoptimizeIf(Condition condition, |
- LEnvironment* environment, |
- Deoptimizer::BailoutType bailout_type) { |
+void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment, |
+ Deoptimizer::BailoutType bailout_type, |
+ CRegister cr) { |
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
DCHECK(environment->HasBeenRegistered()); |
int id = environment->deoptimization_index(); |
@@ -858,49 +771,35 @@ void LCodeGen::DeoptimizeIf(Condition condition, |
} |
if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) { |
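+ // Use cr6 for the counter test so the condition the caller passed |
+ // in cr survives (see the DCHECK below). |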
+ CRegister alt_cr = cr6; |
Register scratch = scratch0(); |
ExternalReference count = ExternalReference::stress_deopt_count(isolate()); |
- |
- // Store the condition on the stack if necessary |
- if (condition != al) { |
- __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition)); |
- __ mov(scratch, Operand(1), LeaveCC, condition); |
- __ push(scratch); |
- } |
- |
- __ push(r1); |
+ Label no_deopt; |
+ DCHECK(!alt_cr.is(cr)); |
+ __ Push(r4, scratch); |
__ mov(scratch, Operand(count)); |
- __ ldr(r1, MemOperand(scratch)); |
- __ sub(r1, r1, Operand(1), SetCC); |
- __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq); |
- __ str(r1, MemOperand(scratch)); |
- __ pop(r1); |
- |
- if (condition != al) { |
- // Clean up the stack before the deoptimizer call |
- __ pop(scratch); |
- } |
+ __ lwz(r4, MemOperand(scratch)); |
+ __ subi(r4, r4, Operand(1)); |
+ __ cmpi(r4, Operand::Zero(), alt_cr); |
+ __ bne(&no_deopt, alt_cr); |
+ __ li(r4, Operand(FLAG_deopt_every_n_times)); |
+ __ stw(r4, MemOperand(scratch)); |
+ __ Pop(r4, scratch); |
- __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq); |
- |
- // 'Restore' the condition in a slightly hacky way. (It would be better |
- // to use 'msr' and 'mrs' instructions here, but they are not supported by |
- // our ARM simulator). |
- if (condition != al) { |
- condition = ne; |
- __ cmp(scratch, Operand::Zero()); |
- } |
+ __ Call(entry, RelocInfo::RUNTIME_ENTRY); |
+ __ bind(&no_deopt); |
+ __ stw(r4, MemOperand(scratch)); |
+ __ Pop(r4, scratch); |
} |
if (info()->ShouldTrapOnDeopt()) { |
- __ stop("trap_on_deopt", condition); |
+ __ stop("trap_on_deopt", cond, kDefaultStopCode, cr); |
} |
DCHECK(info()->IsStub() || frame_is_built_); |
// Go through jump table if we need to handle condition, build frame, or |
// restore caller doubles. |
- if (condition == al && frame_is_built_ && |
- !info()->saves_caller_doubles()) { |
+ if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) { |
__ Call(entry, RelocInfo::RUNTIME_ENTRY); |
} else { |
// We often have several deopts to the same entry, reuse the last |
@@ -909,22 +808,20 @@ void LCodeGen::DeoptimizeIf(Condition condition, |
(deopt_jump_table_.last().address != entry) || |
(deopt_jump_table_.last().bailout_type != bailout_type) || |
(deopt_jump_table_.last().needs_frame != !frame_is_built_)) { |
- Deoptimizer::JumpTableEntry table_entry(entry, |
- bailout_type, |
+ Deoptimizer::JumpTableEntry table_entry(entry, bailout_type, |
!frame_is_built_); |
deopt_jump_table_.Add(table_entry, zone()); |
} |
- __ b(condition, &deopt_jump_table_.last().label); |
+ __ b(cond, &deopt_jump_table_.last().label, cr); |
} |
} |
-void LCodeGen::DeoptimizeIf(Condition condition, |
- LEnvironment* environment) { |
- Deoptimizer::BailoutType bailout_type = info()->IsStub() |
- ? Deoptimizer::LAZY |
- : Deoptimizer::EAGER; |
- DeoptimizeIf(condition, environment, bailout_type); |
+void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment, |
+ CRegister cr) { |
+ Deoptimizer::BailoutType bailout_type = |
+ info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER; |
+ DeoptimizeIf(cond, environment, bailout_type, cr); |
} |
@@ -949,7 +846,8 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
Handle<FixedArray> literals = |
factory()->NewFixedArray(deoptimization_literals_.length(), TENURED); |
- { AllowDeferredHandleDereference copy_handles; |
+ { |
+ AllowDeferredHandleDereference copy_handles; |
for (int i = 0; i < deoptimization_literals_.length(); i++) { |
literals->set(i, *deoptimization_literals_[i]); |
} |
@@ -988,9 +886,7 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { |
const ZoneList<Handle<JSFunction> >* inlined_closures = |
chunk()->inlined_closures(); |
- for (int i = 0, length = inlined_closures->length(); |
- i < length; |
- i++) { |
+ for (int i = 0, length = inlined_closures->length(); i < length; i++) { |
DefineDeoptimizationLiteral(inlined_closures->at(i)); |
} |
@@ -998,28 +894,25 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() { |
} |
-void LCodeGen::RecordSafepointWithLazyDeopt( |
- LInstruction* instr, SafepointMode safepoint_mode) { |
+void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr, |
+ SafepointMode safepoint_mode) { |
if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) { |
RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt); |
} else { |
DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
- RecordSafepointWithRegisters( |
- instr->pointer_map(), 0, Safepoint::kLazyDeopt); |
+ RecordSafepointWithRegisters(instr->pointer_map(), 0, |
+ Safepoint::kLazyDeopt); |
} |
} |
-void LCodeGen::RecordSafepoint( |
- LPointerMap* pointers, |
- Safepoint::Kind kind, |
- int arguments, |
- Safepoint::DeoptMode deopt_mode) { |
+void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind, |
+ int arguments, Safepoint::DeoptMode deopt_mode) { |
DCHECK(expected_safepoint_kind_ == kind); |
const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands(); |
- Safepoint safepoint = safepoints_.DefineSafepoint(masm(), |
- kind, arguments, deopt_mode); |
+ Safepoint safepoint = |
+ safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode); |
for (int i = 0; i < operands->length(); i++) { |
LOperand* pointer = operands->at(i); |
if (pointer->IsStackSlot()) { |
@@ -1028,10 +921,12 @@ void LCodeGen::RecordSafepoint( |
safepoint.DefinePointerRegister(ToRegister(pointer), zone()); |
} |
} |
- if (FLAG_enable_ool_constant_pool && (kind & Safepoint::kWithRegisters)) { |
- // Register pp always contains a pointer to the constant pool. |
- safepoint.DefinePointerRegister(pp, zone()); |
+#if V8_OOL_CONSTANT_POOL |
+ if (kind & Safepoint::kWithRegisters) { |
+ // kConstantPoolRegister always points to the constant pool. |
+ safepoint.DefinePointerRegister(kConstantPoolRegister, zone()); |
} |
+#endif |
} |
@@ -1050,8 +945,7 @@ void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) { |
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers, |
int arguments, |
Safepoint::DeoptMode deopt_mode) { |
- RecordSafepoint( |
- pointers, Safepoint::kWithRegisters, arguments, deopt_mode); |
+ RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode); |
} |
@@ -1071,24 +965,19 @@ static const char* LabelType(LLabel* label) { |
void LCodeGen::DoLabel(LLabel* label) { |
Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------", |
- current_instruction_, |
- label->hydrogen_value()->id(), |
- label->block_id(), |
- LabelType(label)); |
+ current_instruction_, label->hydrogen_value()->id(), |
+ label->block_id(), LabelType(label)); |
__ bind(label->label()); |
current_block_ = label->block_id(); |
DoGap(label); |
} |
-void LCodeGen::DoParallelMove(LParallelMove* move) { |
- resolver_.Resolve(move); |
-} |
+void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); } |
void LCodeGen::DoGap(LGap* gap) { |
- for (int i = LGap::FIRST_INNER_POSITION; |
- i <= LGap::LAST_INNER_POSITION; |
+ for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION; |
i++) { |
LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i); |
LParallelMove* move = gap->GetParallelMove(inner_pos); |
@@ -1097,9 +986,7 @@ void LCodeGen::DoGap(LGap* gap) { |
} |
-void LCodeGen::DoInstructionGap(LInstructionGap* instr) { |
- DoGap(instr); |
-} |
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); } |
void LCodeGen::DoParameter(LParameter* instr) { |
@@ -1109,7 +996,7 @@ void LCodeGen::DoParameter(LParameter* instr) { |
void LCodeGen::DoCallStub(LCallStub* instr) { |
DCHECK(ToRegister(instr->context()).is(cp)); |
- DCHECK(ToRegister(instr->result()).is(r0)); |
+ DCHECK(ToRegister(instr->result()).is(r3)); |
switch (instr->hydrogen()->major_key()) { |
case CodeStub::RegExpExec: { |
RegExpExecStub stub(isolate()); |
@@ -1149,23 +1036,33 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) { |
// indicate that positive dividends are heavily favored, so the branching |
// version performs better. |
HMod* hmod = instr->hydrogen(); |
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
+ int32_t shift = WhichPowerOf2Abs(divisor); |
Label dividend_is_not_negative, done; |
if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
- __ cmp(dividend, Operand::Zero()); |
- __ b(pl, &dividend_is_not_negative); |
- // Note that this is correct even for kMinInt operands. |
- __ rsb(dividend, dividend, Operand::Zero()); |
- __ and_(dividend, dividend, Operand(mask)); |
- __ rsb(dividend, dividend, Operand::Zero(), SetCC); |
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
- DeoptimizeIf(eq, instr->environment()); |
+ __ cmpwi(dividend, Operand::Zero()); |
+ __ bge(&dividend_is_not_negative); |
+ if (shift) { |
+ // Note that this is correct even for kMinInt operands. |
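+ // For negative x, x mod 2^shift == -((-x) & (2^shift - 1)); |
+ // ExtractBitRange applies the mask. |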
+ __ neg(dividend, dividend); |
+ __ ExtractBitRange(dividend, dividend, shift - 1, 0); |
+ __ neg(dividend, dividend, LeaveOE, SetRC); |
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
+ DeoptimizeIf(eq, instr->environment(), cr0); |
+ } |
+ } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
+ __ li(dividend, Operand::Zero()); |
+ } else { |
+ DeoptimizeIf(al, instr->environment()); |
} |
__ b(&done); |
} |
__ bind(&dividend_is_not_negative); |
- __ and_(dividend, dividend, Operand(mask)); |
+ if (shift) { |
+ __ ExtractBitRange(dividend, dividend, shift - 1, 0); |
+ } else { |
+ __ li(dividend, Operand::Zero()); |
+ } |
__ bind(&done); |
} |
@@ -1183,15 +1080,15 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) { |
__ TruncatingDiv(result, dividend, Abs(divisor)); |
__ mov(ip, Operand(Abs(divisor))); |
- __ smull(result, ip, result, ip); |
- __ sub(result, dividend, result, SetCC); |
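+ // remainder = dividend - (dividend / |divisor|) * |divisor|; SetRC |
+ // leaves "remainder == 0" in cr0 for the check below. |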
+ __ mullw(result, result, ip); |
+ __ sub(result, dividend, result, LeaveOE, SetRC); |
// Check for negative zero. |
HMod* hmod = instr->hydrogen(); |
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
Label remainder_not_zero; |
- __ b(ne, &remainder_not_zero); |
- __ cmp(dividend, Operand::Zero()); |
+ __ bne(&remainder_not_zero, cr0); |
+ __ cmpwi(dividend, Operand::Zero()); |
DeoptimizeIf(lt, instr->environment()); |
__ bind(&remainder_not_zero); |
} |
@@ -1200,109 +1097,50 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) { |
void LCodeGen::DoModI(LModI* instr) { |
HMod* hmod = instr->hydrogen(); |
- if (CpuFeatures::IsSupported(SUDIV)) { |
- CpuFeatureScope scope(masm(), SUDIV); |
- |
- Register left_reg = ToRegister(instr->left()); |
- Register right_reg = ToRegister(instr->right()); |
- Register result_reg = ToRegister(instr->result()); |
- |
- Label done; |
- // Check for x % 0, sdiv might signal an exception. We have to deopt in this |
- // case because we can't return a NaN. |
- if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
- __ cmp(right_reg, Operand::Zero()); |
- DeoptimizeIf(eq, instr->environment()); |
- } |
+ Register left_reg = ToRegister(instr->left()); |
+ Register right_reg = ToRegister(instr->right()); |
+ Register result_reg = ToRegister(instr->result()); |
+ Register scratch = scratch0(); |
+ Label done; |
- // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we |
- // want. We have to deopt if we care about -0, because we can't return that. |
- if (hmod->CheckFlag(HValue::kCanOverflow)) { |
- Label no_overflow_possible; |
- __ cmp(left_reg, Operand(kMinInt)); |
- __ b(ne, &no_overflow_possible); |
- __ cmp(right_reg, Operand(-1)); |
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
- DeoptimizeIf(eq, instr->environment()); |
- } else { |
- __ b(ne, &no_overflow_possible); |
- __ mov(result_reg, Operand::Zero()); |
- __ jmp(&done); |
- } |
- __ bind(&no_overflow_possible); |
- } |
+ if (hmod->CheckFlag(HValue::kCanOverflow)) { |
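+ // divw with SetOE reports kMinInt % -1 through XER[OV]; clear XER |
+ // first so a stale OV/SO bit cannot cause a spurious deopt. |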
+ __ li(r0, Operand::Zero()); // clear xer |
+ __ mtxer(r0); |
+ } |
- // For 'r3 = r1 % r2' we can have the following ARM code: |
- // sdiv r3, r1, r2 |
- // mls r3, r3, r2, r1 |
+ __ divw(scratch, left_reg, right_reg, SetOE, SetRC); |
- __ sdiv(result_reg, left_reg, right_reg); |
- __ Mls(result_reg, result_reg, right_reg, left_reg); |
+ // Check for x % 0. |
+ if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
+ __ cmpwi(right_reg, Operand::Zero()); |
+ DeoptimizeIf(eq, instr->environment()); |
+ } |
- // If we care about -0, test if the dividend is <0 and the result is 0. |
+ // Check for kMinInt % -1: divw's result is undefined there, which is |
+ // not what we want. We have to deopt if we care about -0, because we |
+ // can't return that. |
+ if (hmod->CheckFlag(HValue::kCanOverflow)) { |
+ Label no_overflow_possible; |
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
- __ cmp(result_reg, Operand::Zero()); |
- __ b(ne, &done); |
- __ cmp(left_reg, Operand::Zero()); |
- DeoptimizeIf(lt, instr->environment()); |
+ DeoptimizeIf(overflow, instr->environment(), cr0); |
+ } else { |
+ __ bnooverflow(&no_overflow_possible, cr0); |
+ __ li(result_reg, Operand::Zero()); |
+ __ b(&done); |
} |
- __ bind(&done); |
- |
- } else { |
- // General case, without any SDIV support. |
- Register left_reg = ToRegister(instr->left()); |
- Register right_reg = ToRegister(instr->right()); |
- Register result_reg = ToRegister(instr->result()); |
- Register scratch = scratch0(); |
- DCHECK(!scratch.is(left_reg)); |
- DCHECK(!scratch.is(right_reg)); |
- DCHECK(!scratch.is(result_reg)); |
- DwVfpRegister dividend = ToDoubleRegister(instr->temp()); |
- DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); |
- DCHECK(!divisor.is(dividend)); |
- LowDwVfpRegister quotient = double_scratch0(); |
- DCHECK(!quotient.is(dividend)); |
- DCHECK(!quotient.is(divisor)); |
+ __ bind(&no_overflow_possible); |
+ } |
- Label done; |
- // Check for x % 0, we have to deopt in this case because we can't return a |
- // NaN. |
- if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
- __ cmp(right_reg, Operand::Zero()); |
- DeoptimizeIf(eq, instr->environment()); |
- } |
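+ // PPC has no integer remainder instruction: compute |
+ // left - (left / right) * right, with SetRC setting cr0 from the |
+ // result for the -0 check below. |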
+ __ mullw(scratch, right_reg, scratch); |
+ __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC); |
- __ Move(result_reg, left_reg); |
- // Load the arguments in VFP registers. The divisor value is preloaded |
- // before. Be careful that 'right_reg' is only live on entry. |
- // TODO(svenpanne) The last comments seems to be wrong nowadays. |
- __ vmov(double_scratch0().low(), left_reg); |
- __ vcvt_f64_s32(dividend, double_scratch0().low()); |
- __ vmov(double_scratch0().low(), right_reg); |
- __ vcvt_f64_s32(divisor, double_scratch0().low()); |
- |
- // We do not care about the sign of the divisor. Note that we still handle |
- // the kMinInt % -1 case correctly, though. |
- __ vabs(divisor, divisor); |
- // Compute the quotient and round it to a 32bit integer. |
- __ vdiv(quotient, dividend, divisor); |
- __ vcvt_s32_f64(quotient.low(), quotient); |
- __ vcvt_f64_s32(quotient, quotient.low()); |
- |
- // Compute the remainder in result. |
- __ vmul(double_scratch0(), divisor, quotient); |
- __ vcvt_s32_f64(double_scratch0().low(), double_scratch0()); |
- __ vmov(scratch, double_scratch0().low()); |
- __ sub(result_reg, left_reg, scratch, SetCC); |
- |
- // If we care about -0, test if the dividend is <0 and the result is 0. |
- if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
- __ b(ne, &done); |
- __ cmp(left_reg, Operand::Zero()); |
- DeoptimizeIf(mi, instr->environment()); |
- } |
- __ bind(&done); |
+ // If we care about -0, test if the dividend is <0 and the result is 0. |
+ if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
+ __ bne(&done, cr0); |
+ __ cmpwi(left_reg, Operand::Zero()); |
+ DeoptimizeIf(lt, instr->environment()); |
} |
+ |
+ __ bind(&done); |
} |
@@ -1316,37 +1154,41 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
// Check for (0 / -x) that will produce negative zero. |
HDiv* hdiv = instr->hydrogen(); |
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
- __ cmp(dividend, Operand::Zero()); |
+ __ cmpwi(dividend, Operand::Zero()); |
DeoptimizeIf(eq, instr->environment()); |
} |
// Check for (kMinInt / -1). |
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
- __ cmp(dividend, Operand(kMinInt)); |
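+ // lis materializes 0x80000000 (kMinInt) in a single instruction. |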
+ __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); |
+ __ cmpw(dividend, r0); |
DeoptimizeIf(eq, instr->environment()); |
} |
+ |
+ int32_t shift = WhichPowerOf2Abs(divisor); |
+ |
// Deoptimize if remainder will not be 0. |
- if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
- divisor != 1 && divisor != -1) { |
- int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
- __ tst(dividend, Operand(mask)); |
- DeoptimizeIf(ne, instr->environment()); |
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) { |
+ __ TestBitRange(dividend, shift - 1, 0, r0); |
+ DeoptimizeIf(ne, instr->environment(), cr0); |
} |
if (divisor == -1) { // Nice shortcut, not needed for correctness. |
- __ rsb(result, dividend, Operand(0)); |
+ __ neg(result, dividend); |
return; |
} |
- int32_t shift = WhichPowerOf2Abs(divisor); |
if (shift == 0) { |
- __ mov(result, dividend); |
- } else if (shift == 1) { |
- __ add(result, dividend, Operand(dividend, LSR, 31)); |
+ __ mr(result, dividend); |
} else { |
- __ mov(result, Operand(dividend, ASR, 31)); |
- __ add(result, dividend, Operand(result, LSR, 32 - shift)); |
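+ // Round toward zero: derive a bias of 2^shift - 1 from the sign bit |
+ // and add it to negative dividends before the arithmetic shift. |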
+ if (shift == 1) { |
+ __ srwi(result, dividend, Operand(31)); |
+ } else { |
+ __ srawi(result, dividend, 31); |
+ __ srwi(result, result, Operand(32 - shift)); |
+ } |
+ __ add(result, dividend, result); |
+ __ srawi(result, result, shift); |
} |
- if (shift > 0) __ mov(result, Operand(result, ASR, shift)); |
- if (divisor < 0) __ rsb(result, result, Operand(0)); |
+ if (divisor < 0) __ neg(result, result); |
} |
@@ -1364,17 +1206,18 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
// Check for (0 / -x) that will produce negative zero. |
HDiv* hdiv = instr->hydrogen(); |
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
- __ cmp(dividend, Operand::Zero()); |
+ __ cmpwi(dividend, Operand::Zero()); |
DeoptimizeIf(eq, instr->environment()); |
} |
__ TruncatingDiv(result, dividend, Abs(divisor)); |
- if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
+ if (divisor < 0) __ neg(result, result); |
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
+ Register scratch = scratch0(); |
__ mov(ip, Operand(divisor)); |
- __ smull(scratch0(), ip, result, ip); |
- __ sub(scratch0(), scratch0(), dividend, SetCC); |
+ __ mullw(scratch, result, ip); |
+ __ cmpw(scratch, dividend); |
DeoptimizeIf(ne, instr->environment()); |
} |
} |
@@ -1383,130 +1226,121 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
void LCodeGen::DoDivI(LDivI* instr) { |
HBinaryOperation* hdiv = instr->hydrogen(); |
- Register dividend = ToRegister(instr->dividend()); |
- Register divisor = ToRegister(instr->divisor()); |
+ const Register dividend = ToRegister(instr->dividend()); |
+ const Register divisor = ToRegister(instr->divisor()); |
Register result = ToRegister(instr->result()); |
+ DCHECK(!dividend.is(result)); |
+ DCHECK(!divisor.is(result)); |
+ |
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
+ __ li(r0, Operand::Zero()); // clear xer |
+ __ mtxer(r0); |
+ } |
+ |
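+ // divw never traps: x / 0 and kMinInt / -1 simply yield an undefined |
+ // result and set XER[OV], so divide first and deopt afterwards. |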
+ __ divw(result, dividend, divisor, SetOE, SetRC); |
+ |
// Check for x / 0. |
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
- __ cmp(divisor, Operand::Zero()); |
+ __ cmpwi(divisor, Operand::Zero()); |
DeoptimizeIf(eq, instr->environment()); |
} |
// Check for (0 / -x) that will produce negative zero. |
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
- Label positive; |
- if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { |
- // Do the test only if it hadn't be done above. |
- __ cmp(divisor, Operand::Zero()); |
- } |
- __ b(pl, &positive); |
- __ cmp(dividend, Operand::Zero()); |
- DeoptimizeIf(eq, instr->environment()); |
- __ bind(&positive); |
+ Label dividend_not_zero; |
+ __ cmpwi(dividend, Operand::Zero()); |
+ __ bne(&dividend_not_zero); |
+ __ cmpwi(divisor, Operand::Zero()); |
+ DeoptimizeIf(lt, instr->environment()); |
+ __ bind(&dividend_not_zero); |
} |
// Check for (kMinInt / -1). |
- if (hdiv->CheckFlag(HValue::kCanOverflow) && |
- (!CpuFeatures::IsSupported(SUDIV) || |
- !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { |
- // We don't need to check for overflow when truncating with sdiv |
- // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. |
- __ cmp(dividend, Operand(kMinInt)); |
- __ cmp(divisor, Operand(-1), eq); |
- DeoptimizeIf(eq, instr->environment()); |
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
+ Label no_overflow_possible; |
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
+ DeoptimizeIf(overflow, instr->environment(), cr0); |
+ } else { |
+ // When truncating, we want kMinInt / -1 = kMinInt. |
+ __ bnooverflow(&no_overflow_possible, cr0); |
+ __ mr(result, dividend); |
+ } |
+ __ bind(&no_overflow_possible); |
} |
- if (CpuFeatures::IsSupported(SUDIV)) { |
- CpuFeatureScope scope(masm(), SUDIV); |
- __ sdiv(result, dividend, divisor); |
- } else { |
- DoubleRegister vleft = ToDoubleRegister(instr->temp()); |
- DoubleRegister vright = double_scratch0(); |
- __ vmov(double_scratch0().low(), dividend); |
- __ vcvt_f64_s32(vleft, double_scratch0().low()); |
- __ vmov(double_scratch0().low(), divisor); |
- __ vcvt_f64_s32(vright, double_scratch0().low()); |
- __ vdiv(vleft, vleft, vright); // vleft now contains the result. |
- __ vcvt_s32_f64(double_scratch0().low(), vleft); |
- __ vmov(result, double_scratch0().low()); |
- } |
- |
- if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
- // Compute remainder and deopt if it's not zero. |
- Register remainder = scratch0(); |
- __ Mls(remainder, result, divisor, dividend); |
- __ cmp(remainder, Operand::Zero()); |
+ if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
+ // Deoptimize if remainder is not 0. |
+ Register scratch = scratch0(); |
+ __ mullw(scratch, divisor, result); |
+ __ cmpw(dividend, scratch); |
DeoptimizeIf(ne, instr->environment()); |
} |
} |
-void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { |
- DwVfpRegister addend = ToDoubleRegister(instr->addend()); |
- DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); |
- DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
- |
- // This is computed in-place. |
- DCHECK(addend.is(ToDoubleRegister(instr->result()))); |
- |
- __ vmla(addend, multiplier, multiplicand); |
-} |
- |
- |
-void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) { |
- DwVfpRegister minuend = ToDoubleRegister(instr->minuend()); |
- DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); |
- DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
- |
- // This is computed in-place. |
- DCHECK(minuend.is(ToDoubleRegister(instr->result()))); |
- |
- __ vmls(minuend, multiplier, multiplicand); |
-} |
- |
- |
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) { |
+ HBinaryOperation* hdiv = instr->hydrogen(); |
Register dividend = ToRegister(instr->dividend()); |
Register result = ToRegister(instr->result()); |
int32_t divisor = instr->divisor(); |
- // If the divisor is 1, return the dividend. |
- if (divisor == 1) { |
- __ Move(result, dividend); |
- return; |
- } |
- |
// If the divisor is positive, things are easy: There can be no deopts and we |
// can simply do an arithmetic right shift. |
int32_t shift = WhichPowerOf2Abs(divisor); |
- if (divisor > 1) { |
- __ mov(result, Operand(dividend, ASR, shift)); |
+ if (divisor > 0) { |
+ if (shift || !result.is(dividend)) { |
+ __ srawi(result, dividend, shift); |
+ } |
return; |
} |
// If the divisor is negative, we have to negate and handle edge cases. |
- __ rsb(result, dividend, Operand::Zero(), SetCC); |
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
+ OEBit oe = LeaveOE; |
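+ // On PPC64 a 32-bit value cannot overflow a 64-bit neg, so kMinInt is |
+ // caught with an explicit compare; on 32-bit, neg records the overflow |
+ // in XER instead. |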
+#if V8_TARGET_ARCH_PPC64 |
+ if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) { |
+ __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); |
+ __ cmpw(dividend, r0); |
DeoptimizeIf(eq, instr->environment()); |
} |
+#else |
+ if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) { |
+ __ li(r0, Operand::Zero()); // clear xer |
+ __ mtxer(r0); |
+ oe = SetOE; |
+ } |
+#endif |
- // Dividing by -1 is basically negation, unless we overflow. |
- if (divisor == -1) { |
- if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
- DeoptimizeIf(vs, instr->environment()); |
+ __ neg(result, dividend, oe, SetRC); |
+ if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
+ DeoptimizeIf(eq, instr->environment(), cr0); |
+ } |
+ |
+// If the negation could not overflow, simply shifting is OK. |
+#if !V8_TARGET_ARCH_PPC64 |
+ if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
+#endif |
+ if (shift) { |
+ __ ShiftRightArithImm(result, result, shift); |
} |
return; |
+#if !V8_TARGET_ARCH_PPC64 |
} |
- // If the negation could not overflow, simply shifting is OK. |
- if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
- __ mov(result, Operand(result, ASR, shift)); |
+ // Dividing by -1 is basically negation, unless we overflow. |
+ if (divisor == -1) { |
+ DeoptimizeIf(overflow, instr->environment(), cr0); |
return; |
} |
- __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); |
- __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); |
+ Label overflow, done; |
+ __ boverflow(&overflow, cr0); |
+ __ srawi(result, result, shift); |
+ __ b(&done); |
+ __ bind(&overflow); |
+ __ mov(result, Operand(kMinInt / divisor)); |
+ __ bind(&done); |
+#endif |
} |
@@ -1524,7 +1358,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
// Check for (0 / -x) that will produce negative zero. |
HMathFloorOfDiv* hdiv = instr->hydrogen(); |
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
- __ cmp(dividend, Operand::Zero()); |
+ __ cmpwi(dividend, Operand::Zero()); |
DeoptimizeIf(eq, instr->environment()); |
} |
@@ -1533,7 +1367,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
(divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
__ TruncatingDiv(result, dividend, Abs(divisor)); |
- if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
+ if (divisor < 0) __ neg(result, result); |
return; |
} |
@@ -1542,16 +1376,16 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
Register temp = ToRegister(instr->temp()); |
DCHECK(!temp.is(dividend) && !temp.is(result)); |
Label needs_adjustment, done; |
- __ cmp(dividend, Operand::Zero()); |
+ __ cmpwi(dividend, Operand::Zero()); |
__ b(divisor > 0 ? lt : gt, &needs_adjustment); |
__ TruncatingDiv(result, dividend, Abs(divisor)); |
- if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
- __ jmp(&done); |
+ if (divisor < 0) __ neg(result, result); |
+ __ b(&done); |
__ bind(&needs_adjustment); |
- __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1)); |
+ __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1)); |
__ TruncatingDiv(result, temp, Abs(divisor)); |
- if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
- __ sub(result, result, Operand(1)); |
+ if (divisor < 0) __ neg(result, result); |
+ __ subi(result, result, Operand(1)); |
__ bind(&done); |
} |
@@ -1559,75 +1393,102 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. |
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { |
HBinaryOperation* hdiv = instr->hydrogen(); |
- Register left = ToRegister(instr->dividend()); |
- Register right = ToRegister(instr->divisor()); |
+ const Register dividend = ToRegister(instr->dividend()); |
+ const Register divisor = ToRegister(instr->divisor()); |
Register result = ToRegister(instr->result()); |
+ DCHECK(!dividend.is(result)); |
+ DCHECK(!divisor.is(result)); |
+ |
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
+ __ li(r0, Operand::Zero()); // clear xer |
+ __ mtxer(r0); |
+ } |
+ |
+ __ divw(result, dividend, divisor, SetOE, SetRC); |
+ |
// Check for x / 0. |
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
- __ cmp(right, Operand::Zero()); |
+ __ cmpwi(divisor, Operand::Zero()); |
DeoptimizeIf(eq, instr->environment()); |
} |
// Check for (0 / -x) that will produce negative zero. |
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
- Label positive; |
- if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { |
- // Do the test only if it hadn't be done above. |
- __ cmp(right, Operand::Zero()); |
- } |
- __ b(pl, &positive); |
- __ cmp(left, Operand::Zero()); |
- DeoptimizeIf(eq, instr->environment()); |
- __ bind(&positive); |
+ Label dividend_not_zero; |
+ __ cmpwi(dividend, Operand::Zero()); |
+ __ bne(&dividend_not_zero); |
+ __ cmpwi(divisor, Operand::Zero()); |
+ DeoptimizeIf(lt, instr->environment()); |
+ __ bind(&dividend_not_zero); |
} |
// Check for (kMinInt / -1). |
- if (hdiv->CheckFlag(HValue::kCanOverflow) && |
- (!CpuFeatures::IsSupported(SUDIV) || |
- !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { |
- // We don't need to check for overflow when truncating with sdiv |
- // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. |
- __ cmp(left, Operand(kMinInt)); |
- __ cmp(right, Operand(-1), eq); |
- DeoptimizeIf(eq, instr->environment()); |
- } |
- |
- if (CpuFeatures::IsSupported(SUDIV)) { |
- CpuFeatureScope scope(masm(), SUDIV); |
- __ sdiv(result, left, right); |
- } else { |
- DoubleRegister vleft = ToDoubleRegister(instr->temp()); |
- DoubleRegister vright = double_scratch0(); |
- __ vmov(double_scratch0().low(), left); |
- __ vcvt_f64_s32(vleft, double_scratch0().low()); |
- __ vmov(double_scratch0().low(), right); |
- __ vcvt_f64_s32(vright, double_scratch0().low()); |
- __ vdiv(vleft, vleft, vright); // vleft now contains the result. |
- __ vcvt_s32_f64(double_scratch0().low(), vleft); |
- __ vmov(result, double_scratch0().low()); |
+ if (hdiv->CheckFlag(HValue::kCanOverflow)) { |
+ Label no_overflow_possible; |
+ if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
+ DeoptimizeIf(overflow, instr->environment(), cr0); |
+ } else { |
+ // When truncating, we want kMinInt / -1 = kMinInt. |
+ __ bnooverflow(&no_overflow_possible, cr0); |
+ __ mr(result, dividend); |
+ } |
+ __ bind(&no_overflow_possible); |
} |
Label done; |
- Register remainder = scratch0(); |
- __ Mls(remainder, result, right, left); |
- __ cmp(remainder, Operand::Zero()); |
- __ b(eq, &done); |
- __ eor(remainder, remainder, Operand(right)); |
- __ add(result, result, Operand(remainder, ASR, 31)); |
+ Register scratch = scratch0(); |
+// If both operands have the same sign then we are done. |
+#if V8_TARGET_ARCH_PPC64 |
+ __ xor_(scratch, dividend, divisor); |
+ __ cmpwi(scratch, Operand::Zero()); |
+ __ bge(&done); |
+#else |
+ __ xor_(scratch, dividend, divisor, SetRC); |
+ __ bge(&done, cr0); |
+#endif |
+ |
+ // If there is no remainder then we are done. |
+ __ mullw(scratch, divisor, result); |
+ __ cmpw(dividend, scratch); |
+ __ beq(&done); |
+ |
+ // We performed a truncating division. Correct the result. |
+ __ subi(result, result, Operand(1)); |
__ bind(&done); |
} |
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { |
+ DoubleRegister addend = ToDoubleRegister(instr->addend()); |
+ DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); |
+ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
+ DoubleRegister result = ToDoubleRegister(instr->result()); |
+ |
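+ // fmadd fuses the multiply and add, rounding only once. |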
+ __ fmadd(result, multiplier, multiplicand, addend); |
+} |
+ |
+ |
+void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) { |
+ DoubleRegister minuend = ToDoubleRegister(instr->minuend()); |
+ DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); |
+ DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
+ DoubleRegister result = ToDoubleRegister(instr->result()); |
+ |
+ __ fmsub(result, multiplier, multiplicand, minuend); |
+} |
+ |
+ |
void LCodeGen::DoMulI(LMulI* instr) { |
+ Register scratch = scratch0(); |
Register result = ToRegister(instr->result()); |
// Note that result may alias left. |
Register left = ToRegister(instr->left()); |
LOperand* right_op = instr->right(); |
bool bailout_on_minus_zero = |
- instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
- bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
+ instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
+ bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
if (right_op->IsConstantOperand()) { |
int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
@@ -1635,27 +1496,47 @@ void LCodeGen::DoMulI(LMulI* instr) { |
if (bailout_on_minus_zero && (constant < 0)) { |
// The case of a null constant will be handled separately. |
// If constant is negative and left is null, the result should be -0. |
- __ cmp(left, Operand::Zero()); |
+ __ cmpi(left, Operand::Zero()); |
DeoptimizeIf(eq, instr->environment()); |
} |
switch (constant) { |
case -1: |
- if (overflow) { |
- __ rsb(result, left, Operand::Zero(), SetCC); |
- DeoptimizeIf(vs, instr->environment()); |
+ if (can_overflow) { |
+#if V8_TARGET_ARCH_PPC64 |
+ if (instr->hydrogen()->representation().IsSmi()) { |
+#endif |
+ __ li(r0, Operand::Zero()); // clear xer |
+ __ mtxer(r0); |
+ __ neg(result, left, SetOE, SetRC); |
+ DeoptimizeIf(overflow, instr->environment(), cr0); |
+#if V8_TARGET_ARCH_PPC64 |
+ } else { |
+ __ neg(result, left); |
+ __ TestIfInt32(result, scratch, r0); |
+ DeoptimizeIf(ne, instr->environment()); |
+ } |
+#endif |
} else { |
- __ rsb(result, left, Operand::Zero()); |
+ __ neg(result, left); |
} |
break; |
case 0: |
if (bailout_on_minus_zero) { |
- // If left is strictly negative and the constant is null, the |
- // result is -0. Deoptimize if required, otherwise return 0. |
- __ cmp(left, Operand::Zero()); |
- DeoptimizeIf(mi, instr->environment()); |
+// If left is strictly negative and the constant is null, the |
+// result is -0. Deoptimize if required, otherwise return 0. |
+#if V8_TARGET_ARCH_PPC64 |
+ if (instr->hydrogen()->representation().IsSmi()) { |
+#endif |
+ __ cmpi(left, Operand::Zero()); |
+#if V8_TARGET_ARCH_PPC64 |
+ } else { |
+ __ cmpwi(left, Operand::Zero()); |
+ } |
+#endif |
+ DeoptimizeIf(lt, instr->environment()); |
} |
- __ mov(result, Operand::Zero()); |
+ __ li(result, Operand::Zero()); |
break; |
case 1: |
__ Move(result, left); |
@@ -1669,23 +1550,25 @@ void LCodeGen::DoMulI(LMulI* instr) { |
if (IsPowerOf2(constant_abs)) { |
int32_t shift = WhichPowerOf2(constant_abs); |
- __ mov(result, Operand(left, LSL, shift)); |
- // Correct the sign of the result is the constant is negative. |
- if (constant < 0) __ rsb(result, result, Operand::Zero()); |
+ __ ShiftLeftImm(result, left, Operand(shift)); |
+ // Correct the sign of the result if the constant is negative. |
+ if (constant < 0) __ neg(result, result); |
} else if (IsPowerOf2(constant_abs - 1)) { |
int32_t shift = WhichPowerOf2(constant_abs - 1); |
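+          // left * (2^shift + 1) == (left << shift) + left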
- __ add(result, left, Operand(left, LSL, shift)); |
- // Correct the sign of the result is the constant is negative. |
- if (constant < 0) __ rsb(result, result, Operand::Zero()); |
+ __ ShiftLeftImm(scratch, left, Operand(shift)); |
+ __ add(result, scratch, left); |
+ // Correct the sign of the result if the constant is negative. |
+ if (constant < 0) __ neg(result, result); |
} else if (IsPowerOf2(constant_abs + 1)) { |
int32_t shift = WhichPowerOf2(constant_abs + 1); |
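+          // left * (2^shift - 1) == (left << shift) - left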
- __ rsb(result, left, Operand(left, LSL, shift)); |
- // Correct the sign of the result is the constant is negative. |
- if (constant < 0) __ rsb(result, result, Operand::Zero()); |
+ __ ShiftLeftImm(scratch, left, Operand(shift)); |
+ __ sub(result, scratch, left); |
+ // Correct the sign of the result if the constant is negative. |
+ if (constant < 0) __ neg(result, result); |
} else { |
// Generate standard code. |
__ mov(ip, Operand(constant)); |
- __ mul(result, left, ip); |
+ __ Mul(result, left, ip); |
} |
} |
@@ -1693,32 +1576,59 @@ void LCodeGen::DoMulI(LMulI* instr) { |
DCHECK(right_op->IsRegister()); |
Register right = ToRegister(right_op); |
- if (overflow) { |
- Register scratch = scratch0(); |
+ if (can_overflow) { |
+#if V8_TARGET_ARCH_PPC64 |
+ // result = left * right. |
+ if (instr->hydrogen()->representation().IsSmi()) { |
+ __ SmiUntag(result, left); |
+ __ SmiUntag(scratch, right); |
+ __ Mul(result, result, scratch); |
+ } else { |
+ __ Mul(result, left, right); |
+ } |
+ __ TestIfInt32(result, scratch, r0); |
+ DeoptimizeIf(ne, instr->environment()); |
+ if (instr->hydrogen()->representation().IsSmi()) { |
+ __ SmiTag(result); |
+ } |
+#else |
// scratch:result = left * right. |
if (instr->hydrogen()->representation().IsSmi()) { |
__ SmiUntag(result, left); |
- __ smull(result, scratch, result, right); |
+ __ mulhw(scratch, result, right); |
+ __ mullw(result, result, right); |
} else { |
- __ smull(result, scratch, left, right); |
+ __ mulhw(scratch, left, right); |
+ __ mullw(result, left, right); |
} |
- __ cmp(scratch, Operand(result, ASR, 31)); |
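+    // Overflow occurred iff the high word (scratch) is not the sign
+    // extension of the low word (result).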
+ __ TestIfInt32(scratch, result, r0); |
DeoptimizeIf(ne, instr->environment()); |
+#endif |
} else { |
if (instr->hydrogen()->representation().IsSmi()) { |
__ SmiUntag(result, left); |
- __ mul(result, result, right); |
+ __ Mul(result, result, right); |
} else { |
- __ mul(result, left, right); |
+ __ Mul(result, left, right); |
} |
} |
if (bailout_on_minus_zero) { |
Label done; |
- __ teq(left, Operand(right)); |
- __ b(pl, &done); |
+#if V8_TARGET_ARCH_PPC64 |
+ if (instr->hydrogen()->representation().IsSmi()) { |
+#endif |
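+    // If left and right have the same sign, a zero result is +0, not -0,
+    // so no bailout is needed.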
+ __ xor_(r0, left, right, SetRC); |
+ __ bge(&done, cr0); |
+#if V8_TARGET_ARCH_PPC64 |
+ } else { |
+ __ xor_(r0, left, right); |
+ __ cmpwi(r0, Operand::Zero()); |
+ __ bge(&done); |
+ } |
+#endif |
// Bail out if the result is minus zero. |
- __ cmp(result, Operand::Zero()); |
+ __ cmpi(result, Operand::Zero()); |
DeoptimizeIf(eq, instr->environment()); |
__ bind(&done); |
} |
@@ -1739,20 +1649,38 @@ void LCodeGen::DoBitI(LBitI* instr) { |
} else { |
DCHECK(right_op->IsRegister() || right_op->IsConstantOperand()); |
right = ToOperand(right_op); |
+ |
+ if (right_op->IsConstantOperand() && is_uint16(right.immediate())) { |
+ switch (instr->op()) { |
+ case Token::BIT_AND: |
+ __ andi(result, left, right); |
+ break; |
+ case Token::BIT_OR: |
+ __ ori(result, left, right); |
+ break; |
+ case Token::BIT_XOR: |
+ __ xori(result, left, right); |
+ break; |
+ default: |
+ UNREACHABLE(); |
+ break; |
+ } |
+ return; |
+ } |
} |
switch (instr->op()) { |
case Token::BIT_AND: |
- __ and_(result, left, right); |
+ __ And(result, left, right); |
break; |
case Token::BIT_OR: |
- __ orr(result, left, right); |
+ __ Or(result, left, right); |
break; |
case Token::BIT_XOR: |
if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) { |
- __ mvn(result, Operand(left)); |
+ __ notx(result, left); |
} else { |
- __ eor(result, left, right); |
+ __ Xor(result, left, right); |
} |
break; |
default: |
@@ -1771,24 +1699,32 @@ void LCodeGen::DoShiftI(LShiftI* instr) { |
Register scratch = scratch0(); |
if (right_op->IsRegister()) { |
// Mask the right_op operand. |
- __ and_(scratch, ToRegister(right_op), Operand(0x1F)); |
+ __ andi(scratch, ToRegister(right_op), Operand(0x1F)); |
switch (instr->op()) { |
case Token::ROR: |
- __ mov(result, Operand(left, ROR, scratch)); |
+ // rotate_right(a, b) == rotate_left(a, 32 - b) |
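+        // subfic computes scratch = 32 - scratch.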
+ __ subfic(scratch, scratch, Operand(32)); |
+ __ rotlw(result, left, scratch); |
break; |
case Token::SAR: |
- __ mov(result, Operand(left, ASR, scratch)); |
+ __ sraw(result, left, scratch); |
break; |
case Token::SHR: |
if (instr->can_deopt()) { |
- __ mov(result, Operand(left, LSR, scratch), SetCC); |
- DeoptimizeIf(mi, instr->environment()); |
+ __ srw(result, left, scratch, SetRC); |
+#if V8_TARGET_ARCH_PPC64 |
+ __ extsw(result, result, SetRC); |
+#endif |
+ DeoptimizeIf(lt, instr->environment(), cr0); |
} else { |
- __ mov(result, Operand(left, LSR, scratch)); |
+ __ srw(result, left, scratch); |
} |
break; |
case Token::SHL: |
- __ mov(result, Operand(left, LSL, scratch)); |
+ __ slw(result, left, scratch); |
+#if V8_TARGET_ARCH_PPC64 |
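+      // Keep the canonical sign-extended form of the 32-bit result.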
+ __ extsw(result, result); |
+#endif |
break; |
default: |
UNREACHABLE(); |
@@ -1800,43 +1736,51 @@ void LCodeGen::DoShiftI(LShiftI* instr) { |
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F); |
switch (instr->op()) { |
case Token::ROR: |
- if (shift_count != 0) { |
- __ mov(result, Operand(left, ROR, shift_count)); |
+ if (shift_count != 0) { |
+ __ rotrwi(result, left, shift_count); |
} else { |
__ Move(result, left); |
} |
break; |
case Token::SAR: |
if (shift_count != 0) { |
- __ mov(result, Operand(left, ASR, shift_count)); |
+ __ srawi(result, left, shift_count); |
} else { |
__ Move(result, left); |
} |
break; |
case Token::SHR: |
if (shift_count != 0) { |
- __ mov(result, Operand(left, LSR, shift_count)); |
+ __ srwi(result, left, Operand(shift_count)); |
} else { |
if (instr->can_deopt()) { |
- __ tst(left, Operand(0x80000000)); |
- DeoptimizeIf(ne, instr->environment()); |
+ __ cmpwi(left, Operand::Zero()); |
+ DeoptimizeIf(lt, instr->environment()); |
} |
__ Move(result, left); |
} |
break; |
case Token::SHL: |
if (shift_count != 0) { |
+#if V8_TARGET_ARCH_PPC64 |
+ if (instr->hydrogen_value()->representation().IsSmi()) { |
+ __ sldi(result, left, Operand(shift_count)); |
+#else |
if (instr->hydrogen_value()->representation().IsSmi() && |
instr->can_deopt()) { |
if (shift_count != 1) { |
- __ mov(result, Operand(left, LSL, shift_count - 1)); |
- __ SmiTag(result, result, SetCC); |
+ __ slwi(result, left, Operand(shift_count - 1)); |
+ __ SmiTagCheckOverflow(result, result, scratch); |
} else { |
- __ SmiTag(result, left, SetCC); |
+ __ SmiTagCheckOverflow(result, left, scratch); |
} |
- DeoptimizeIf(vs, instr->environment()); |
+ DeoptimizeIf(lt, instr->environment(), cr0); |
+#endif |
} else { |
- __ mov(result, Operand(left, LSL, shift_count)); |
+ __ slwi(result, left, Operand(shift_count)); |
+#if V8_TARGET_ARCH_PPC64 |
+ __ extsw(result, result); |
+#endif |
} |
} else { |
__ Move(result, left); |
@@ -1851,23 +1795,35 @@ void LCodeGen::DoShiftI(LShiftI* instr) { |
void LCodeGen::DoSubI(LSubI* instr) { |
- LOperand* left = instr->left(); |
LOperand* right = instr->right(); |
- LOperand* result = instr->result(); |
+ Register left = ToRegister(instr->left()); |
+ Register result = ToRegister(instr->result()); |
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
- SBit set_cond = can_overflow ? SetCC : LeaveCC; |
- |
- if (right->IsStackSlot()) { |
- Register right_reg = EmitLoadRegister(right, ip); |
- __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
+ if (!can_overflow && right->IsConstantOperand()) { |
+ Operand right_operand = ToOperand(right); |
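+    // Subtract the immediate by adding its negation.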
+ __ Add(result, left, -right_operand.immediate(), r0); |
} else { |
- DCHECK(right->IsRegister() || right->IsConstantOperand()); |
- __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
+ Register right_reg = EmitLoadRegister(right, ip); |
+ |
+ if (!can_overflow) { |
+ __ sub(result, left, right_reg); |
+ } else { |
+ __ SubAndCheckForOverflow(result, left, right_reg, scratch0(), r0); |
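+      // SubAndCheckForOverflow signals overflow by leaving a negative value
+      // in scratch0().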
+// Deoptimize on overflow
+#if V8_TARGET_ARCH_PPC64 |
+ if (!instr->hydrogen()->representation().IsSmi()) { |
+ __ extsw(scratch0(), scratch0(), SetRC); |
+ } |
+#endif |
+ DeoptimizeIf(lt, instr->environment(), cr0); |
+ } |
} |
- if (can_overflow) { |
- DeoptimizeIf(vs, instr->environment()); |
+#if V8_TARGET_ARCH_PPC64 |
+ if (!instr->hydrogen()->representation().IsSmi()) { |
+ __ extsw(result, result); |
} |
+#endif |
} |
@@ -1875,19 +1831,16 @@ void LCodeGen::DoRSubI(LRSubI* instr) { |
LOperand* left = instr->left(); |
LOperand* right = instr->right(); |
LOperand* result = instr->result(); |
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
- SBit set_cond = can_overflow ? SetCC : LeaveCC; |
- if (right->IsStackSlot()) { |
- Register right_reg = EmitLoadRegister(right, ip); |
- __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
- } else { |
- DCHECK(right->IsRegister() || right->IsConstantOperand()); |
- __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
- } |
+ DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) && |
+ right->IsConstantOperand()); |
- if (can_overflow) { |
- DeoptimizeIf(vs, instr->environment()); |
+ Operand right_operand = ToOperand(right); |
+ if (is_int16(right_operand.immediate())) { |
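+    // subfic computes immediate - register in a single instruction.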
+ __ subfic(ToRegister(result), ToRegister(left), right_operand); |
+ } else { |
+ __ mov(r0, right_operand); |
+ __ sub(ToRegister(result), r0, ToRegister(left)); |
} |
} |
@@ -1898,15 +1851,17 @@ void LCodeGen::DoConstantI(LConstantI* instr) { |
void LCodeGen::DoConstantS(LConstantS* instr) { |
- __ mov(ToRegister(instr->result()), Operand(instr->value())); |
+ __ LoadSmiLiteral(ToRegister(instr->result()), instr->value()); |
} |
+// TODO(penguin): put the constant in the constant pool instead
+// of storing the double to the stack.
void LCodeGen::DoConstantD(LConstantD* instr) { |
DCHECK(instr->result()->IsDoubleRegister()); |
- DwVfpRegister result = ToDoubleRegister(instr->result()); |
+ DoubleRegister result = ToDoubleRegister(instr->result()); |
double v = instr->value(); |
- __ Vmov(result, v, scratch0()); |
+ __ LoadDoubleLiteral(result, v, scratch0()); |
} |
@@ -1936,40 +1891,40 @@ void LCodeGen::DoDateField(LDateField* instr) { |
Smi* index = instr->index(); |
Label runtime, done; |
DCHECK(object.is(result)); |
- DCHECK(object.is(r0)); |
+ DCHECK(object.is(r3)); |
DCHECK(!scratch.is(scratch0())); |
DCHECK(!scratch.is(object)); |
- __ SmiTst(object); |
- DeoptimizeIf(eq, instr->environment()); |
+ __ TestIfSmi(object, r0); |
+ DeoptimizeIf(eq, instr->environment(), cr0); |
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); |
DeoptimizeIf(ne, instr->environment()); |
if (index->value() == 0) { |
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); |
+ __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset)); |
} else { |
if (index->value() < JSDate::kFirstUncachedField) { |
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
__ mov(scratch, Operand(stamp)); |
- __ ldr(scratch, MemOperand(scratch)); |
- __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); |
+ __ LoadP(scratch, MemOperand(scratch)); |
+ __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); |
__ cmp(scratch, scratch0()); |
- __ b(ne, &runtime); |
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset + |
- kPointerSize * index->value())); |
- __ jmp(&done); |
+ __ bne(&runtime); |
+ __ LoadP(result, |
+ FieldMemOperand(object, JSDate::kValueOffset + |
+ kPointerSize * index->value())); |
+ __ b(&done); |
} |
__ bind(&runtime); |
__ PrepareCallCFunction(2, scratch); |
- __ mov(r1, Operand(index)); |
+ __ LoadSmiLiteral(r4, index); |
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); |
__ bind(&done); |
} |
} |
-MemOperand LCodeGen::BuildSeqStringOperand(Register string, |
- LOperand* index, |
+MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index, |
String::Encoding encoding) { |
if (index->IsConstantOperand()) { |
int offset = ToInteger32(LConstantOperand::cast(index)); |
@@ -1983,10 +1938,11 @@ MemOperand LCodeGen::BuildSeqStringOperand(Register string, |
DCHECK(!scratch.is(string)); |
DCHECK(!scratch.is(ToRegister(index))); |
if (encoding == String::ONE_BYTE_ENCODING) { |
- __ add(scratch, string, Operand(ToRegister(index))); |
+ __ add(scratch, string, ToRegister(index)); |
} else { |
STATIC_ASSERT(kUC16Size == 2); |
- __ add(scratch, string, Operand(ToRegister(index), LSL, 1)); |
+ __ ShiftLeftImm(scratch, ToRegister(index), Operand(1)); |
+ __ add(scratch, string, scratch); |
} |
return FieldMemOperand(scratch, SeqString::kHeaderSize); |
} |
@@ -1999,23 +1955,24 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) { |
if (FLAG_debug_code) { |
Register scratch = scratch0(); |
- __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); |
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
+ __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); |
+ __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
- __ and_(scratch, scratch, |
+ __ andi(scratch, scratch, |
Operand(kStringRepresentationMask | kStringEncodingMask)); |
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; |
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; |
- __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING |
- ? one_byte_seq_type : two_byte_seq_type)); |
+ __ cmpi(scratch, |
+ Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type |
+ : two_byte_seq_type)); |
__ Check(eq, kUnexpectedStringType); |
} |
MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); |
if (encoding == String::ONE_BYTE_ENCODING) { |
- __ ldrb(result, operand); |
+ __ lbz(result, operand); |
} else { |
- __ ldrh(result, operand); |
+ __ lhz(result, operand); |
} |
} |
@@ -2031,37 +1988,55 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) { |
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; |
int encoding_mask = |
instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING |
- ? one_byte_seq_type : two_byte_seq_type; |
+ ? one_byte_seq_type |
+ : two_byte_seq_type; |
__ EmitSeqStringSetCharCheck(string, index, value, encoding_mask); |
} |
MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding); |
if (encoding == String::ONE_BYTE_ENCODING) { |
- __ strb(value, operand); |
+ __ stb(value, operand); |
} else { |
- __ strh(value, operand); |
+ __ sth(value, operand); |
} |
} |
void LCodeGen::DoAddI(LAddI* instr) { |
- LOperand* left = instr->left(); |
LOperand* right = instr->right(); |
- LOperand* result = instr->result(); |
+ Register left = ToRegister(instr->left()); |
+ Register result = ToRegister(instr->result()); |
bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
- SBit set_cond = can_overflow ? SetCC : LeaveCC; |
+#if V8_TARGET_ARCH_PPC64 |
+ bool isInteger = !(instr->hydrogen()->representation().IsSmi() || |
+ instr->hydrogen()->representation().IsExternal()); |
+#endif |
- if (right->IsStackSlot()) { |
- Register right_reg = EmitLoadRegister(right, ip); |
- __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
+ if (!can_overflow && right->IsConstantOperand()) { |
+ Operand right_operand = ToOperand(right); |
+ __ Add(result, left, right_operand.immediate(), r0); |
} else { |
- DCHECK(right->IsRegister() || right->IsConstantOperand()); |
- __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
+ Register right_reg = EmitLoadRegister(right, ip); |
+ |
+ if (!can_overflow) { |
+ __ add(result, left, right_reg); |
+ } else { // can_overflow. |
+ __ AddAndCheckForOverflow(result, left, right_reg, scratch0(), r0); |
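+      // AddAndCheckForOverflow signals overflow by leaving a negative value
+      // in scratch0().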
+#if V8_TARGET_ARCH_PPC64 |
+ if (isInteger) { |
+ __ extsw(scratch0(), scratch0(), SetRC); |
+ } |
+#endif |
+      // Deoptimize on overflow
+ DeoptimizeIf(lt, instr->environment(), cr0); |
+ } |
} |
- if (can_overflow) { |
- DeoptimizeIf(vs, instr->environment()); |
+#if V8_TARGET_ARCH_PPC64 |
+ if (isInteger) { |
+ __ extsw(result, result); |
} |
+#endif |
} |
@@ -2069,92 +2044,97 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
LOperand* left = instr->left(); |
LOperand* right = instr->right(); |
HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
+ Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge; |
if (instr->hydrogen()->representation().IsSmiOrInteger32()) { |
- Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; |
Register left_reg = ToRegister(left); |
- Operand right_op = (right->IsRegister() || right->IsConstantOperand()) |
- ? ToOperand(right) |
- : Operand(EmitLoadRegister(right, ip)); |
+ Register right_reg = EmitLoadRegister(right, ip); |
Register result_reg = ToRegister(instr->result()); |
- __ cmp(left_reg, right_op); |
- __ Move(result_reg, left_reg, condition); |
- __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition)); |
- } else { |
- DCHECK(instr->hydrogen()->representation().IsDouble()); |
- DwVfpRegister left_reg = ToDoubleRegister(left); |
- DwVfpRegister right_reg = ToDoubleRegister(right); |
- DwVfpRegister result_reg = ToDoubleRegister(instr->result()); |
- Label result_is_nan, return_left, return_right, check_zero, done; |
- __ VFPCompareAndSetFlags(left_reg, right_reg); |
- if (operation == HMathMinMax::kMathMin) { |
- __ b(mi, &return_left); |
- __ b(gt, &return_right); |
- } else { |
- __ b(mi, &return_right); |
- __ b(gt, &return_left); |
- } |
- __ b(vs, &result_is_nan); |
- // Left equals right => check for -0. |
- __ VFPCompareAndSetFlags(left_reg, 0.0); |
- if (left_reg.is(result_reg) || right_reg.is(result_reg)) { |
- __ b(ne, &done); // left == right != 0. |
+ Label return_left, done; |
+#if V8_TARGET_ARCH_PPC64 |
+ if (instr->hydrogen_value()->representation().IsSmi()) { |
+#endif |
+ __ cmp(left_reg, right_reg); |
+#if V8_TARGET_ARCH_PPC64 |
} else { |
- __ b(ne, &return_left); // left == right != 0. |
+ __ cmpw(left_reg, right_reg); |
} |
+#endif |
+ __ b(cond, &return_left); |
+ __ Move(result_reg, right_reg); |
+ __ b(&done); |
+ __ bind(&return_left); |
+ __ Move(result_reg, left_reg); |
+ __ bind(&done); |
+ } else { |
+ DCHECK(instr->hydrogen()->representation().IsDouble()); |
+ DoubleRegister left_reg = ToDoubleRegister(left); |
+ DoubleRegister right_reg = ToDoubleRegister(right); |
+ DoubleRegister result_reg = ToDoubleRegister(instr->result()); |
+ Label check_nan_left, check_zero, return_left, return_right, done; |
+ __ fcmpu(left_reg, right_reg); |
+ __ bunordered(&check_nan_left); |
+ __ beq(&check_zero); |
+ __ b(cond, &return_left); |
+ __ b(&return_right); |
+ |
+ __ bind(&check_zero); |
+ __ fcmpu(left_reg, kDoubleRegZero); |
+ __ bne(&return_left); // left == right != 0. |
+ |
// At this point, both left and right are either 0 or -0. |
+ // N.B. The following works because +0 + -0 == +0 |
if (operation == HMathMinMax::kMathMin) { |
- // We could use a single 'vorr' instruction here if we had NEON support. |
- __ vneg(left_reg, left_reg); |
- __ vsub(result_reg, left_reg, right_reg); |
- __ vneg(result_reg, result_reg); |
+ // For min we want logical-or of sign bit: -(-L + -R) |
+ __ fneg(left_reg, left_reg); |
+ __ fsub(result_reg, left_reg, right_reg); |
+ __ fneg(result_reg, result_reg); |
} else { |
- // Since we operate on +0 and/or -0, vadd and vand have the same effect; |
- // the decision for vadd is easy because vand is a NEON instruction. |
- __ vadd(result_reg, left_reg, right_reg); |
+ // For max we want logical-and of sign bit: (L + R) |
+ __ fadd(result_reg, left_reg, right_reg); |
} |
__ b(&done); |
- __ bind(&result_is_nan); |
- __ vadd(result_reg, left_reg, right_reg); |
- __ b(&done); |
+ __ bind(&check_nan_left); |
+ __ fcmpu(left_reg, left_reg); |
+ __ bunordered(&return_left); // left == NaN. |
__ bind(&return_right); |
- __ Move(result_reg, right_reg); |
- if (!left_reg.is(result_reg)) { |
- __ b(&done); |
+ if (!right_reg.is(result_reg)) { |
+ __ fmr(result_reg, right_reg); |
} |
+ __ b(&done); |
__ bind(&return_left); |
- __ Move(result_reg, left_reg); |
- |
+ if (!left_reg.is(result_reg)) { |
+ __ fmr(result_reg, left_reg); |
+ } |
__ bind(&done); |
} |
} |
void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
- DwVfpRegister left = ToDoubleRegister(instr->left()); |
- DwVfpRegister right = ToDoubleRegister(instr->right()); |
- DwVfpRegister result = ToDoubleRegister(instr->result()); |
+ DoubleRegister left = ToDoubleRegister(instr->left()); |
+ DoubleRegister right = ToDoubleRegister(instr->right()); |
+ DoubleRegister result = ToDoubleRegister(instr->result()); |
switch (instr->op()) { |
case Token::ADD: |
- __ vadd(result, left, right); |
+ __ fadd(result, left, right); |
break; |
case Token::SUB: |
- __ vsub(result, left, right); |
+ __ fsub(result, left, right); |
break; |
case Token::MUL: |
- __ vmul(result, left, right); |
+ __ fmul(result, left, right); |
break; |
case Token::DIV: |
- __ vdiv(result, left, right); |
+ __ fdiv(result, left, right); |
break; |
case Token::MOD: { |
__ PrepareCallCFunction(0, 2, scratch0()); |
__ MovToFloatParameters(left, right); |
- __ CallCFunction( |
- ExternalReference::mod_two_doubles_operation(isolate()), |
- 0, 2); |
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), |
+ 0, 2); |
// Move the result in the double result register. |
__ MovFromFloatResult(result); |
break; |
@@ -2168,64 +2148,69 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) { |
void LCodeGen::DoArithmeticT(LArithmeticT* instr) { |
DCHECK(ToRegister(instr->context()).is(cp)); |
- DCHECK(ToRegister(instr->left()).is(r1)); |
- DCHECK(ToRegister(instr->right()).is(r0)); |
- DCHECK(ToRegister(instr->result()).is(r0)); |
+ DCHECK(ToRegister(instr->left()).is(r4)); |
+ DCHECK(ToRegister(instr->right()).is(r3)); |
+ DCHECK(ToRegister(instr->result()).is(r3)); |
BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE); |
- // Block literal pool emission to ensure nop indicating no inlined smi code |
- // is in the correct position. |
- Assembler::BlockConstPoolScope block_const_pool(masm()); |
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
} |
-template<class InstrType> |
-void LCodeGen::EmitBranch(InstrType instr, Condition condition) { |
+template <class InstrType> |
+void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) { |
int left_block = instr->TrueDestination(chunk_); |
int right_block = instr->FalseDestination(chunk_); |
int next_block = GetNextEmittedBlock(); |
- if (right_block == left_block || condition == al) { |
+ if (right_block == left_block || cond == al) { |
EmitGoto(left_block); |
} else if (left_block == next_block) { |
- __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block)); |
+ __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr); |
} else if (right_block == next_block) { |
- __ b(condition, chunk_->GetAssemblyLabel(left_block)); |
+ __ b(cond, chunk_->GetAssemblyLabel(left_block), cr); |
} else { |
- __ b(condition, chunk_->GetAssemblyLabel(left_block)); |
+ __ b(cond, chunk_->GetAssemblyLabel(left_block), cr); |
__ b(chunk_->GetAssemblyLabel(right_block)); |
} |
} |
-template<class InstrType> |
-void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) { |
+template <class InstrType> |
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) { |
int false_block = instr->FalseDestination(chunk_); |
- __ b(condition, chunk_->GetAssemblyLabel(false_block)); |
+ __ b(cond, chunk_->GetAssemblyLabel(false_block), cr); |
} |
-void LCodeGen::DoDebugBreak(LDebugBreak* instr) { |
- __ stop("LBreak"); |
-} |
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); } |
void LCodeGen::DoBranch(LBranch* instr) { |
Representation r = instr->hydrogen()->value()->representation(); |
- if (r.IsInteger32() || r.IsSmi()) { |
+ DoubleRegister dbl_scratch = double_scratch0(); |
+  const uint32_t crZOrNaNBits =
+      (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
+       1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
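+  // After an fcmpu against zero sets cr7, these bits flag "equal to zero"
+  // (EQ) or "unordered, i.e. NaN" (FU); both values are false.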
+ |
+ if (r.IsInteger32()) { |
+ DCHECK(!info()->IsStub()); |
+ Register reg = ToRegister(instr->value()); |
+ __ cmpwi(reg, Operand::Zero()); |
+ EmitBranch(instr, ne); |
+ } else if (r.IsSmi()) { |
DCHECK(!info()->IsStub()); |
Register reg = ToRegister(instr->value()); |
- __ cmp(reg, Operand::Zero()); |
+ __ cmpi(reg, Operand::Zero()); |
EmitBranch(instr, ne); |
} else if (r.IsDouble()) { |
DCHECK(!info()->IsStub()); |
- DwVfpRegister reg = ToDoubleRegister(instr->value()); |
+ DoubleRegister reg = ToDoubleRegister(instr->value()); |
// Test the double value. Zero and NaN are false. |
- __ VFPCompareAndSetFlags(reg, 0.0); |
- __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN -> false) |
- EmitBranch(instr, ne); |
+ __ fcmpu(reg, kDoubleRegZero, cr7); |
+ __ mfcr(r0); |
+ __ andi(r0, r0, Operand(crZOrNaNBits)); |
+ EmitBranch(instr, eq, cr0); |
} else { |
DCHECK(r.IsTagged()); |
Register reg = ToRegister(instr->value()); |
@@ -2236,23 +2221,23 @@ void LCodeGen::DoBranch(LBranch* instr) { |
EmitBranch(instr, eq); |
} else if (type.IsSmi()) { |
DCHECK(!info()->IsStub()); |
- __ cmp(reg, Operand::Zero()); |
+ __ cmpi(reg, Operand::Zero()); |
EmitBranch(instr, ne); |
} else if (type.IsJSArray()) { |
DCHECK(!info()->IsStub()); |
EmitBranch(instr, al); |
} else if (type.IsHeapNumber()) { |
DCHECK(!info()->IsStub()); |
- DwVfpRegister dbl_scratch = double_scratch0(); |
- __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); |
+ __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); |
// Test the double value. Zero and NaN are false. |
- __ VFPCompareAndSetFlags(dbl_scratch, 0.0); |
- __ cmp(r0, r0, vs); // If NaN, set the Z flag. (NaN) |
- EmitBranch(instr, ne); |
+ __ fcmpu(dbl_scratch, kDoubleRegZero, cr7); |
+ __ mfcr(r0); |
+ __ andi(r0, r0, Operand(crZOrNaNBits)); |
+ EmitBranch(instr, eq, cr0); |
} else if (type.IsString()) { |
DCHECK(!info()->IsStub()); |
- __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); |
- __ cmp(ip, Operand::Zero()); |
+ __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset)); |
+ __ cmpi(ip, Operand::Zero()); |
EmitBranch(instr, ne); |
} else { |
ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types(); |
@@ -2262,58 +2247,58 @@ void LCodeGen::DoBranch(LBranch* instr) { |
if (expected.Contains(ToBooleanStub::UNDEFINED)) { |
// undefined -> false. |
__ CompareRoot(reg, Heap::kUndefinedValueRootIndex); |
- __ b(eq, instr->FalseLabel(chunk_)); |
+ __ beq(instr->FalseLabel(chunk_)); |
} |
if (expected.Contains(ToBooleanStub::BOOLEAN)) { |
// Boolean -> its value. |
__ CompareRoot(reg, Heap::kTrueValueRootIndex); |
- __ b(eq, instr->TrueLabel(chunk_)); |
+ __ beq(instr->TrueLabel(chunk_)); |
__ CompareRoot(reg, Heap::kFalseValueRootIndex); |
- __ b(eq, instr->FalseLabel(chunk_)); |
+ __ beq(instr->FalseLabel(chunk_)); |
} |
if (expected.Contains(ToBooleanStub::NULL_TYPE)) { |
// 'null' -> false. |
__ CompareRoot(reg, Heap::kNullValueRootIndex); |
- __ b(eq, instr->FalseLabel(chunk_)); |
+ __ beq(instr->FalseLabel(chunk_)); |
} |
if (expected.Contains(ToBooleanStub::SMI)) { |
// Smis: 0 -> false, all other -> true. |
- __ cmp(reg, Operand::Zero()); |
- __ b(eq, instr->FalseLabel(chunk_)); |
+ __ cmpi(reg, Operand::Zero()); |
+ __ beq(instr->FalseLabel(chunk_)); |
__ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
} else if (expected.NeedsMap()) { |
// If we need a map later and have a Smi -> deopt. |
- __ SmiTst(reg); |
- DeoptimizeIf(eq, instr->environment()); |
+ __ TestIfSmi(reg, r0); |
+ DeoptimizeIf(eq, instr->environment(), cr0); |
} |
const Register map = scratch0(); |
if (expected.NeedsMap()) { |
- __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); |
+ __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset)); |
if (expected.CanBeUndetectable()) { |
// Undetectable -> false. |
- __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); |
- __ tst(ip, Operand(1 << Map::kIsUndetectable)); |
- __ b(ne, instr->FalseLabel(chunk_)); |
+ __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset)); |
+ __ TestBit(ip, Map::kIsUndetectable, r0); |
+ __ bne(instr->FalseLabel(chunk_), cr0); |
} |
} |
if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) { |
// spec object -> true. |
__ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); |
- __ b(ge, instr->TrueLabel(chunk_)); |
+ __ bge(instr->TrueLabel(chunk_)); |
} |
if (expected.Contains(ToBooleanStub::STRING)) { |
// String value -> false iff empty. |
Label not_string; |
__ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); |
- __ b(ge, ¬_string); |
- __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset)); |
- __ cmp(ip, Operand::Zero()); |
- __ b(ne, instr->TrueLabel(chunk_)); |
+ __ bge(¬_string); |
+ __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset)); |
+ __ cmpi(ip, Operand::Zero()); |
+ __ bne(instr->TrueLabel(chunk_)); |
__ b(instr->FalseLabel(chunk_)); |
__ bind(¬_string); |
} |
@@ -2321,19 +2306,20 @@ void LCodeGen::DoBranch(LBranch* instr) { |
if (expected.Contains(ToBooleanStub::SYMBOL)) { |
// Symbol value -> true. |
__ CompareInstanceType(map, ip, SYMBOL_TYPE); |
- __ b(eq, instr->TrueLabel(chunk_)); |
+ __ beq(instr->TrueLabel(chunk_)); |
} |
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) { |
// heap number -> false iff +0, -0, or NaN. |
- DwVfpRegister dbl_scratch = double_scratch0(); |
Label not_heap_number; |
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex); |
- __ b(ne, ¬_heap_number); |
- __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); |
- __ VFPCompareAndSetFlags(dbl_scratch, 0.0); |
- __ cmp(r0, r0, vs); // NaN -> false. |
- __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false. |
+ __ bne(¬_heap_number); |
+ __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset)); |
+ // Test the double value. Zero and NaN are false. |
+ __ fcmpu(dbl_scratch, kDoubleRegZero, cr7); |
+ __ mfcr(r0); |
+ __ andi(r0, r0, Operand(crZOrNaNBits)); |
+ __ bne(instr->FalseLabel(chunk_), cr0); |
__ b(instr->TrueLabel(chunk_)); |
__ bind(¬_heap_number); |
} |
@@ -2350,17 +2336,15 @@ void LCodeGen::DoBranch(LBranch* instr) { |
void LCodeGen::EmitGoto(int block) { |
if (!IsNextEmittedBlock(block)) { |
- __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); |
+ __ b(chunk_->GetAssemblyLabel(LookupDestination(block))); |
} |
} |
-void LCodeGen::DoGoto(LGoto* instr) { |
- EmitGoto(instr->block_id()); |
-} |
+void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); } |
-Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { |
+Condition LCodeGen::TokenToCondition(Token::Value op) { |
Condition cond = kNoCondition; |
switch (op) { |
case Token::EQ: |
@@ -2372,16 +2356,16 @@ Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) { |
cond = ne; |
break; |
case Token::LT: |
- cond = is_unsigned ? lo : lt; |
+ cond = lt; |
break; |
case Token::GT: |
- cond = is_unsigned ? hi : gt; |
+ cond = gt; |
break; |
case Token::LTE: |
- cond = is_unsigned ? ls : le; |
+ cond = le; |
break; |
case Token::GTE: |
- cond = is_unsigned ? hs : ge; |
+ cond = ge; |
break; |
case Token::IN: |
case Token::INSTANCEOF: |
@@ -2398,42 +2382,69 @@ void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) { |
bool is_unsigned = |
instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) || |
instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32); |
- Condition cond = TokenToCondition(instr->op(), is_unsigned); |
+ Condition cond = TokenToCondition(instr->op()); |
if (left->IsConstantOperand() && right->IsConstantOperand()) { |
// We can statically evaluate the comparison. |
double left_val = ToDouble(LConstantOperand::cast(left)); |
double right_val = ToDouble(LConstantOperand::cast(right)); |
- int next_block = EvalComparison(instr->op(), left_val, right_val) ? |
- instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_); |
+ int next_block = EvalComparison(instr->op(), left_val, right_val) |
+ ? instr->TrueDestination(chunk_) |
+ : instr->FalseDestination(chunk_); |
EmitGoto(next_block); |
} else { |
if (instr->is_double()) { |
// Compare left and right operands as doubles and load the |
// resulting flags into the normal status register. |
- __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right)); |
- // If a NaN is involved, i.e. the result is unordered (V set), |
+ __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right)); |
+ // If a NaN is involved, i.e. the result is unordered, |
// jump to false block label. |
- __ b(vs, instr->FalseLabel(chunk_)); |
+ __ bunordered(instr->FalseLabel(chunk_)); |
} else { |
if (right->IsConstantOperand()) { |
int32_t value = ToInteger32(LConstantOperand::cast(right)); |
if (instr->hydrogen_value()->representation().IsSmi()) { |
- __ cmp(ToRegister(left), Operand(Smi::FromInt(value))); |
+ if (is_unsigned) { |
+ __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0); |
+ } else { |
+ __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0); |
+ } |
} else { |
- __ cmp(ToRegister(left), Operand(value)); |
+ if (is_unsigned) { |
+ __ Cmplwi(ToRegister(left), Operand(value), r0); |
+ } else { |
+ __ Cmpwi(ToRegister(left), Operand(value), r0); |
+ } |
} |
} else if (left->IsConstantOperand()) { |
int32_t value = ToInteger32(LConstantOperand::cast(left)); |
if (instr->hydrogen_value()->representation().IsSmi()) { |
- __ cmp(ToRegister(right), Operand(Smi::FromInt(value))); |
+ if (is_unsigned) { |
+ __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0); |
+ } else { |
+ __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0); |
+ } |
} else { |
- __ cmp(ToRegister(right), Operand(value)); |
+ if (is_unsigned) { |
+ __ Cmplwi(ToRegister(right), Operand(value), r0); |
+ } else { |
+ __ Cmpwi(ToRegister(right), Operand(value), r0); |
+ } |
} |
// We commuted the operands, so commute the condition. |
cond = CommuteCondition(cond); |
+ } else if (instr->hydrogen_value()->representation().IsSmi()) { |
+ if (is_unsigned) { |
+ __ cmpl(ToRegister(left), ToRegister(right)); |
+ } else { |
+ __ cmp(ToRegister(left), ToRegister(right)); |
+ } |
} else { |
- __ cmp(ToRegister(left), ToRegister(right)); |
+ if (is_unsigned) { |
+ __ cmplw(ToRegister(left), ToRegister(right)); |
+ } else { |
+ __ cmpw(ToRegister(left), ToRegister(right)); |
+ } |
} |
} |
EmitBranch(instr, cond); |
@@ -2445,7 +2456,7 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) { |
Register left = ToRegister(instr->left()); |
Register right = ToRegister(instr->right()); |
- __ cmp(left, Operand(right)); |
+ __ cmp(left, right); |
EmitBranch(instr, eq); |
} |
@@ -2459,13 +2470,13 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { |
return; |
} |
- DwVfpRegister input_reg = ToDoubleRegister(instr->object()); |
- __ VFPCompareAndSetFlags(input_reg, input_reg); |
- EmitFalseBranch(instr, vc); |
+ DoubleRegister input_reg = ToDoubleRegister(instr->object()); |
+ __ fcmpu(input_reg, input_reg); |
+ EmitFalseBranch(instr, ordered); |
Register scratch = scratch0(); |
- __ VmovHigh(scratch, input_reg); |
- __ cmp(scratch, Operand(kHoleNanUpper32)); |
+ __ MovDoubleHighToInt(scratch, input_reg); |
+ __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); |
EmitBranch(instr, eq); |
} |
@@ -2476,50 +2487,61 @@ void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) { |
Register scratch = ToRegister(instr->temp()); |
if (rep.IsDouble()) { |
- DwVfpRegister value = ToDoubleRegister(instr->value()); |
- __ VFPCompareAndSetFlags(value, 0.0); |
+ DoubleRegister value = ToDoubleRegister(instr->value()); |
+ __ fcmpu(value, kDoubleRegZero); |
EmitFalseBranch(instr, ne); |
- __ VmovHigh(scratch, value); |
- __ cmp(scratch, Operand(0x80000000)); |
+#if V8_TARGET_ARCH_PPC64 |
+ __ MovDoubleToInt64(scratch, value); |
+#else |
+ __ MovDoubleHighToInt(scratch, value); |
+#endif |
+ __ cmpi(scratch, Operand::Zero()); |
+ EmitBranch(instr, lt); |
} else { |
Register value = ToRegister(instr->value()); |
- __ CheckMap(value, |
- scratch, |
- Heap::kHeapNumberMapRootIndex, |
- instr->FalseLabel(chunk()), |
- DO_SMI_CHECK); |
- __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset)); |
- __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset)); |
- __ cmp(scratch, Operand(0x80000000)); |
- __ cmp(ip, Operand(0x00000000), eq); |
+ __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex, |
+ instr->FalseLabel(chunk()), DO_SMI_CHECK); |
+#if V8_TARGET_ARCH_PPC64 |
+ __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset)); |
+ __ li(ip, Operand(1)); |
+ __ rotrdi(ip, ip, 1); // ip = 0x80000000_00000000 |
+ __ cmp(scratch, ip); |
+#else |
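+  // -0 is the only double whose high word is 0x80000000 and low word is 0.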
+ __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset)); |
+ __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset)); |
+ Label skip; |
+ __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); |
+ __ cmp(scratch, r0); |
+ __ bne(&skip); |
+ __ cmpi(ip, Operand::Zero()); |
+ __ bind(&skip); |
+#endif |
+ EmitBranch(instr, eq); |
} |
- EmitBranch(instr, eq); |
} |
-Condition LCodeGen::EmitIsObject(Register input, |
- Register temp1, |
- Label* is_not_object, |
- Label* is_object) { |
+Condition LCodeGen::EmitIsObject(Register input, Register temp1, |
+ Label* is_not_object, Label* is_object) { |
Register temp2 = scratch0(); |
__ JumpIfSmi(input, is_not_object); |
__ LoadRoot(temp2, Heap::kNullValueRootIndex); |
__ cmp(input, temp2); |
- __ b(eq, is_object); |
+ __ beq(is_object); |
// Load map. |
- __ ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); |
+ __ LoadP(temp1, FieldMemOperand(input, HeapObject::kMapOffset)); |
// Undetectable objects behave like undefined. |
- __ ldrb(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset)); |
- __ tst(temp2, Operand(1 << Map::kIsUndetectable)); |
- __ b(ne, is_not_object); |
+ __ lbz(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset)); |
+ __ TestBit(temp2, Map::kIsUndetectable, r0); |
+ __ bne(is_not_object, cr0); |
// Load instance type and check that it is in object type range. |
- __ ldrb(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset)); |
- __ cmp(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
- __ b(lt, is_not_object); |
- __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
+ __ lbz(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset)); |
+ __ cmpi(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
+ __ blt(is_not_object); |
+ __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
return le; |
} |
@@ -2528,16 +2550,14 @@ void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) { |
Register reg = ToRegister(instr->value()); |
Register temp1 = ToRegister(instr->temp()); |
- Condition true_cond = |
- EmitIsObject(reg, temp1, |
- instr->FalseLabel(chunk_), instr->TrueLabel(chunk_)); |
+ Condition true_cond = EmitIsObject(reg, temp1, instr->FalseLabel(chunk_), |
+ instr->TrueLabel(chunk_)); |
EmitBranch(instr, true_cond); |
} |
-Condition LCodeGen::EmitIsString(Register input, |
- Register temp1, |
+Condition LCodeGen::EmitIsString(Register input, Register temp1, |
Label* is_not_string, |
SmiCheck check_needed = INLINE_SMI_CHECK) { |
if (check_needed == INLINE_SMI_CHECK) { |
@@ -2553,9 +2573,9 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { |
Register reg = ToRegister(instr->value()); |
Register temp1 = ToRegister(instr->temp()); |
- SmiCheck check_needed = |
- instr->hydrogen()->value()->type().IsHeapObject() |
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
+ SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() |
+ ? OMIT_SMI_CHECK |
+ : INLINE_SMI_CHECK; |
Condition true_cond = |
EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed); |
@@ -2565,8 +2585,8 @@ void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) { |
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) { |
Register input_reg = EmitLoadRegister(instr->value(), ip); |
- __ SmiTst(input_reg); |
- EmitBranch(instr, eq); |
+ __ TestIfSmi(input_reg, r0); |
+ EmitBranch(instr, eq, cr0); |
} |
@@ -2577,10 +2597,10 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) { |
if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
__ JumpIfSmi(input, instr->FalseLabel(chunk_)); |
} |
- __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); |
- __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); |
- __ tst(temp, Operand(1 << Map::kIsUndetectable)); |
- EmitBranch(instr, ne); |
+ __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset)); |
+ __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset)); |
+ __ TestBit(temp, Map::kIsUndetectable, r0); |
+ EmitBranch(instr, ne, cr0); |
} |
@@ -2610,8 +2630,8 @@ void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) { |
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); |
CallCode(ic, RelocInfo::CODE_TARGET, instr); |
- // This instruction also signals no smi code inlined. |
- __ cmp(r0, Operand::Zero()); |
+  // This instruction also signals no smi code inlined.
+ __ cmpi(r3, Operand::Zero()); |
Condition condition = ComputeCompareCondition(op); |
@@ -2632,8 +2652,8 @@ static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) { |
InstanceType from = instr->from(); |
InstanceType to = instr->to(); |
if (from == to) return eq; |
- if (to == LAST_TYPE) return hs; |
- if (from == FIRST_TYPE) return ls; |
+ if (to == LAST_TYPE) return ge; |
+ if (from == FIRST_TYPE) return le; |
UNREACHABLE(); |
return eq; |
} |
@@ -2658,7 +2678,7 @@ void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) { |
__ AssertString(input); |
- __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset)); |
+ __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset)); |
__ IndexFromHash(result, result); |
} |
@@ -2668,21 +2688,18 @@ void LCodeGen::DoHasCachedArrayIndexAndBranch( |
Register input = ToRegister(instr->value()); |
Register scratch = scratch0(); |
- __ ldr(scratch, |
- FieldMemOperand(input, String::kHashFieldOffset)); |
- __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask)); |
- EmitBranch(instr, eq); |
+ __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset)); |
+ __ mov(r0, Operand(String::kContainsCachedArrayIndexMask)); |
+ __ and_(r0, scratch, r0, SetRC); |
+ EmitBranch(instr, eq, cr0); |
} |
// Branches to a label or falls through with the answer in flags. Trashes |
// the temp registers, but not the input. |
-void LCodeGen::EmitClassOfTest(Label* is_true, |
- Label* is_false, |
- Handle<String>class_name, |
- Register input, |
- Register temp, |
- Register temp2) { |
+void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false, |
+ Handle<String> class_name, Register input, |
+ Register temp, Register temp2) { |
DCHECK(!input.is(temp)); |
DCHECK(!input.is(temp2)); |
DCHECK(!temp.is(temp2)); |
@@ -2699,45 +2716,45 @@ void LCodeGen::EmitClassOfTest(Label* is_true, |
LAST_SPEC_OBJECT_TYPE - 1); |
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE); |
__ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE); |
- __ b(lt, is_false); |
- __ b(eq, is_true); |
- __ cmp(temp2, Operand(LAST_SPEC_OBJECT_TYPE)); |
- __ b(eq, is_true); |
+ __ blt(is_false); |
+ __ beq(is_true); |
+ __ cmpi(temp2, Operand(LAST_SPEC_OBJECT_TYPE)); |
+ __ beq(is_true); |
} else { |
// Faster code path to avoid two compares: subtract lower bound from the |
// actual type and do a signed compare with the width of the type range. |
- __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset)); |
- __ ldrb(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset)); |
- __ sub(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
- __ cmp(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - |
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
- __ b(gt, is_false); |
+ __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset)); |
+ __ lbz(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset)); |
+ __ subi(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
+ __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE - |
+ FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
+ __ bgt(is_false); |
} |
// Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range. |
// Check if the constructor in the map is a function. |
- __ ldr(temp, FieldMemOperand(temp, Map::kConstructorOffset)); |
+ __ LoadP(temp, FieldMemOperand(temp, Map::kConstructorOffset)); |
// Objects with a non-function constructor have class 'Object'. |
__ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE); |
if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) { |
- __ b(ne, is_true); |
+ __ bne(is_true); |
} else { |
- __ b(ne, is_false); |
+ __ bne(is_false); |
} |
// temp now contains the constructor function. Grab the |
// instance class name from there. |
- __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); |
- __ ldr(temp, FieldMemOperand(temp, |
- SharedFunctionInfo::kInstanceClassNameOffset)); |
+ __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset)); |
+ __ LoadP(temp, |
+ FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset)); |
// The class name we are testing against is internalized since it's a literal. |
// The name in the constructor is internalized because of the way the context |
// is booted. This routine isn't expected to work for random API-created |
// classes and it doesn't have to because you can't access it with natives |
// syntax. Since both sides are internalized it is sufficient to use an |
// identity comparison. |
- __ cmp(temp, Operand(class_name)); |
+ __ Cmpi(temp, Operand(class_name), r0); |
// End with the answer in flags. |
} |
@@ -2749,7 +2766,7 @@ void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) { |
Handle<String> class_name = instr->hydrogen()->class_name(); |
EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), |
- class_name, input, temp, temp2); |
+ class_name, input, temp, temp2); |
EmitBranch(instr, eq); |
} |
@@ -2759,23 +2776,29 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) { |
Register reg = ToRegister(instr->value()); |
Register temp = ToRegister(instr->temp()); |
- __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); |
- __ cmp(temp, Operand(instr->map())); |
+ __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset)); |
+ __ Cmpi(temp, Operand(instr->map()), r0); |
EmitBranch(instr, eq); |
} |
void LCodeGen::DoInstanceOf(LInstanceOf* instr) { |
DCHECK(ToRegister(instr->context()).is(cp)); |
- DCHECK(ToRegister(instr->left()).is(r0)); // Object is in r0. |
- DCHECK(ToRegister(instr->right()).is(r1)); // Function is in r1. |
+ DCHECK(ToRegister(instr->left()).is(r3)); // Object is in r3. |
+ DCHECK(ToRegister(instr->right()).is(r4)); // Function is in r4. |
InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters); |
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
- __ cmp(r0, Operand::Zero()); |
- __ mov(r0, Operand(factory()->false_value()), LeaveCC, ne); |
- __ mov(r0, Operand(factory()->true_value()), LeaveCC, eq); |
+ Label equal, done; |
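+  // The stub leaves 0 in r3 when the object is an instance; materialize the
+  // boolean result with explicit branches.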
+ __ cmpi(r3, Operand::Zero()); |
+ __ beq(&equal); |
+ __ mov(r3, Operand(factory()->false_value())); |
+ __ b(&done); |
+ |
+ __ bind(&equal); |
+ __ mov(r3, Operand(factory()->true_value())); |
+ __ bind(&done); |
} |
@@ -2784,23 +2807,20 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
public: |
DeferredInstanceOfKnownGlobal(LCodeGen* codegen, |
LInstanceOfKnownGlobal* instr) |
- : LDeferredCode(codegen), instr_(instr) { } |
+ : LDeferredCode(codegen), instr_(instr) {} |
virtual void Generate() V8_OVERRIDE { |
- codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_, |
- &load_bool_); |
+ codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_); |
} |
virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
Label* map_check() { return &map_check_; } |
- Label* load_bool() { return &load_bool_; } |
private: |
LInstanceOfKnownGlobal* instr_; |
Label map_check_; |
- Label load_bool_; |
}; |
DeferredInstanceOfKnownGlobal* deferred; |
- deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr); |
+ deferred = new (zone()) DeferredInstanceOfKnownGlobal(this, instr); |
Label done, false_result; |
Register object = ToRegister(instr->value()); |
@@ -2815,21 +2835,20 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
// instanceof stub. |
Label cache_miss; |
Register map = temp; |
- __ ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
+ __ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
{ |
// Block constant pool emission to ensure the positions of instructions are |
// as expected by the patcher. See InstanceofStub::Generate(). |
- Assembler::BlockConstPoolScope block_const_pool(masm()); |
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); |
__ bind(deferred->map_check()); // Label for calculating code patching. |
// We use Factory::the_hole_value() on purpose instead of loading from the |
// root array to force relocation to be able to later patch with |
// the cached map. |
Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value()); |
__ mov(ip, Operand(Handle<Object>(cell))); |
- __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset)); |
- __ cmp(map, Operand(ip)); |
- __ b(ne, &cache_miss); |
- __ bind(deferred->load_bool()); // Label for calculating code patching. |
+ __ LoadP(ip, FieldMemOperand(ip, PropertyCell::kValueOffset)); |
+ __ cmp(map, ip); |
+ __ bne(&cache_miss); |
// We use Factory::the_hole_value() on purpose instead of loading from the |
// root array to force relocation to be able to later patch |
// with true or false. |
@@ -2842,12 +2861,12 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
__ bind(&cache_miss); |
// Null is not instance of anything. |
__ LoadRoot(ip, Heap::kNullValueRootIndex); |
- __ cmp(object, Operand(ip)); |
- __ b(eq, &false_result); |
+ __ cmp(object, ip); |
+ __ beq(&false_result); |
// String values is not instance of anything. |
Condition is_string = masm_->IsObjectStringType(object, temp); |
- __ b(is_string, &false_result); |
+ __ b(is_string, &false_result, cr0); |
// Go to the deferred code. |
__ b(deferred->entry()); |
@@ -2863,11 +2882,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, |
- Label* map_check, |
- Label* bool_load) { |
+ Label* map_check) { |
InstanceofStub::Flags flags = InstanceofStub::kNoFlags; |
- flags = static_cast<InstanceofStub::Flags>( |
- flags | InstanceofStub::kArgsInRegisters); |
+ flags = static_cast<InstanceofStub::Flags>(flags | |
+ InstanceofStub::kArgsInRegisters); |
flags = static_cast<InstanceofStub::Flags>( |
flags | InstanceofStub::kCallSiteInlineCheck); |
flags = static_cast<InstanceofStub::Flags>( |
@@ -2878,45 +2896,22 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr, |
LoadContextFromDeferred(instr->context()); |
__ Move(InstanceofStub::right(), instr->function()); |
- |
- int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET); |
- int additional_delta = (call_size / Assembler::kInstrSize) + 4; |
- // Make sure that code size is predicable, since we use specific constants |
- // offsets in the code to find embedded values.. |
- PredictableCodeSizeScope predictable( |
- masm_, (additional_delta + 1) * Assembler::kInstrSize); |
- // Make sure we don't emit any additional entries in the constant pool before |
- // the call to ensure that the CallCodeSize() calculated the correct number of |
- // instructions for the constant pool load. |
+  // Include the instructions below in the delta: the mov of r8 plus the call
+  // sequence, which is itself a mov plus 2 instructions.
+ static const int kAdditionalDelta = (2 * Assembler::kMovInstructions) + 2; |
+ int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta; |
{ |
- ConstantPoolUnavailableScope constant_pool_unavailable(masm_); |
- int map_check_delta = |
- masm_->InstructionsGeneratedSince(map_check) + additional_delta; |
- int bool_load_delta = |
- masm_->InstructionsGeneratedSince(bool_load) + additional_delta; |
- Label before_push_delta; |
- __ bind(&before_push_delta); |
- __ BlockConstPoolFor(additional_delta); |
- // r5 is used to communicate the offset to the location of the map check. |
- __ mov(r5, Operand(map_check_delta * kPointerSize)); |
- // r6 is used to communicate the offset to the location of the bool load. |
- __ mov(r6, Operand(bool_load_delta * kPointerSize)); |
- // The mov above can generate one or two instructions. The delta was |
- // computed for two instructions, so we need to pad here in case of one |
- // instruction. |
- while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) { |
- __ nop(); |
- } |
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); |
+ // r8 is used to communicate the offset to the location of the map check. |
+ __ mov(r8, Operand(delta * Instruction::kInstrSize)); |
} |
- CallCodeGeneric(stub.GetCode(), |
- RelocInfo::CODE_TARGET, |
- instr, |
+ CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr, |
RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS); |
+ DCHECK(delta == masm_->InstructionsGeneratedSince(map_check)); |
LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment(); |
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index()); |
- // Put the result value (r0) into the result register slot and |
+ // Put the result value (r3) into the result register slot and |
// restore all registers. |
- __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result())); |
+ __ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result())); |
} |
@@ -2926,27 +2921,32 @@ void LCodeGen::DoCmpT(LCmpT* instr) { |
Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op); |
CallCode(ic, RelocInfo::CODE_TARGET, instr); |
- // This instruction also signals no smi code inlined. |
- __ cmp(r0, Operand::Zero()); |
+  // This instruction also signals no smi code inlined.
+ __ cmpi(r3, Operand::Zero()); |
Condition condition = ComputeCompareCondition(op); |
- __ LoadRoot(ToRegister(instr->result()), |
- Heap::kTrueValueRootIndex, |
- condition); |
- __ LoadRoot(ToRegister(instr->result()), |
- Heap::kFalseValueRootIndex, |
- NegateCondition(condition)); |
+ Label true_value, done; |
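+  // Load the true/false root via explicit branches (no conditional LoadRoot).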
+ |
+ __ b(condition, &true_value); |
+ |
+ __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex); |
+ __ b(&done); |
+ |
+ __ bind(&true_value); |
+ __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex); |
+ |
+ __ bind(&done); |
} |
void LCodeGen::DoReturn(LReturn* instr) { |
if (FLAG_trace && info()->IsOptimizing()) { |
// Push the return value on the stack as the parameter. |
- // Runtime::TraceExit returns its parameter in r0. We're leaving the code |
+ // Runtime::TraceExit returns its parameter in r3. We're leaving the code |
// managed by the register allocator and tearing down the frame, it's |
// safe to write to the context register. |
- __ push(r0); |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ push(r3); |
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
__ CallRuntime(Runtime::kTraceExit, 1); |
} |
if (info()->saves_caller_doubles()) { |
@@ -2960,16 +2960,16 @@ void LCodeGen::DoReturn(LReturn* instr) { |
int parameter_count = ToInteger32(instr->constant_parameter_count()); |
int32_t sp_delta = (parameter_count + 1) * kPointerSize; |
if (sp_delta != 0) { |
- __ add(sp, sp, Operand(sp_delta)); |
+ __ addi(sp, sp, Operand(sp_delta)); |
} |
} else { |
Register reg = ToRegister(instr->parameter_count()); |
// The argument count parameter is a smi |
- __ SmiUntag(reg); |
- __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2)); |
+ __ SmiToPtrArrayOffset(r0, reg); |
+ __ add(sp, sp, r0); |
} |
- __ Jump(lr); |
+ __ blr(); |
if (no_frame_start != -1) { |
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); |
@@ -2980,7 +2980,7 @@ void LCodeGen::DoReturn(LReturn* instr) { |
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
Register result = ToRegister(instr->result()); |
__ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); |
- __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); |
+ __ LoadP(result, FieldMemOperand(ip, Cell::kValueOffset)); |
if (instr->hydrogen()->RequiresHoleCheck()) { |
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
__ cmp(result, ip); |
@@ -2992,7 +2992,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) { |
DCHECK(ToRegister(instr->context()).is(cp)); |
DCHECK(ToRegister(instr->global_object()).is(LoadIC::ReceiverRegister())); |
- DCHECK(ToRegister(instr->result()).is(r0)); |
+ DCHECK(ToRegister(instr->result()).is(r3)); |
__ mov(LoadIC::NameRegister(), Operand(instr->name())); |
if (FLAG_vector_ics) { |
@@ -3024,13 +3024,13 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { |
if (instr->hydrogen()->RequiresHoleCheck()) { |
// We use a temp to check the payload (CompareRoot might clobber ip). |
Register payload = ToRegister(instr->temp()); |
- __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); |
+ __ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset)); |
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex); |
DeoptimizeIf(eq, instr->environment()); |
} |
// Store the value. |
- __ str(value, FieldMemOperand(cell, Cell::kValueOffset)); |
+ __ StoreP(value, FieldMemOperand(cell, Cell::kValueOffset), r0); |
// Cells are always rescanned, so no write barrier here. |
} |
@@ -3038,14 +3038,17 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) { |
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
Register context = ToRegister(instr->context()); |
Register result = ToRegister(instr->result()); |
- __ ldr(result, ContextOperand(context, instr->slot_index())); |
+ __ LoadP(result, ContextOperand(context, instr->slot_index())); |
if (instr->hydrogen()->RequiresHoleCheck()) { |
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
__ cmp(result, ip); |
if (instr->hydrogen()->DeoptimizesOnHole()) { |
DeoptimizeIf(eq, instr->environment()); |
} else { |
- __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); |
+ Label skip; |
+ __ bne(&skip); |
+ __ mov(result, Operand(factory()->undefined_value())); |
+ __ bind(&skip); |
} |
} |
} |
@@ -3060,29 +3063,24 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
Label skip_assignment; |
if (instr->hydrogen()->RequiresHoleCheck()) { |
- __ ldr(scratch, target); |
+ __ LoadP(scratch, target); |
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
__ cmp(scratch, ip); |
if (instr->hydrogen()->DeoptimizesOnHole()) { |
DeoptimizeIf(eq, instr->environment()); |
} else { |
- __ b(ne, &skip_assignment); |
+ __ bne(&skip_assignment); |
} |
} |
- __ str(value, target); |
+ __ StoreP(value, target, r0); |
if (instr->hydrogen()->NeedsWriteBarrier()) { |
- SmiCheck check_needed = |
- instr->hydrogen()->value()->type().IsHeapObject() |
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
- __ RecordWriteContextSlot(context, |
- target.offset(), |
- value, |
- scratch, |
- GetLinkRegisterState(), |
- kSaveFPRegs, |
- EMIT_REMEMBERED_SET, |
- check_needed); |
+ SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject() |
+ ? OMIT_SMI_CHECK |
+ : INLINE_SMI_CHECK; |
+ __ RecordWriteContextSlot(context, target.offset(), value, scratch, |
+ GetLinkRegisterState(), kSaveFPRegs, |
+ EMIT_REMEMBERED_SET, check_needed); |
} |
__ bind(&skip_assignment); |
@@ -3097,32 +3095,48 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) { |
if (access.IsExternalMemory()) { |
Register result = ToRegister(instr->result()); |
MemOperand operand = MemOperand(object, offset); |
- __ Load(result, operand, access.representation()); |
+ __ LoadRepresentation(result, operand, access.representation(), r0); |
return; |
} |
if (instr->hydrogen()->representation().IsDouble()) { |
- DwVfpRegister result = ToDoubleRegister(instr->result()); |
- __ vldr(result, FieldMemOperand(object, offset)); |
+ DoubleRegister result = ToDoubleRegister(instr->result()); |
+ __ lfd(result, FieldMemOperand(object, offset)); |
return; |
} |
Register result = ToRegister(instr->result()); |
if (!access.IsInobject()) { |
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
+ __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
object = result; |
} |
- MemOperand operand = FieldMemOperand(object, offset); |
- __ Load(result, operand, access.representation()); |
+ |
+ Representation representation = access.representation(); |
+ |
+#if V8_TARGET_ARCH_PPC64 |
+ // 64-bit Smi optimization |
+ if (representation.IsSmi() && |
+ instr->hydrogen()->representation().IsInteger32()) { |
+ // Read int value directly from upper half of the smi. |
+ STATIC_ASSERT(kSmiTag == 0); |
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
+#if V8_TARGET_LITTLE_ENDIAN |
+ offset += kPointerSize / 2; |
+#endif |
+ representation = Representation::Integer32(); |
+ } |
+#endif |
+ |
+ __ LoadRepresentation(result, FieldMemOperand(object, offset), representation, |
+ r0); |
} |
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { |
DCHECK(ToRegister(instr->context()).is(cp)); |
DCHECK(ToRegister(instr->object()).is(LoadIC::ReceiverRegister())); |
- DCHECK(ToRegister(instr->result()).is(r0)); |
+ DCHECK(ToRegister(instr->result()).is(r3)); |
- // Name is always in r2. |
__ mov(LoadIC::NameRegister(), Operand(instr->name())); |
if (FLAG_vector_ics) { |
Register vector = ToRegister(instr->temp_vector()); |
@@ -3134,7 +3148,7 @@ void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) { |
Operand(Smi::FromInt(instr->hydrogen()->slot()))); |
} |
Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL); |
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); |
+ CallCode(ic, RelocInfo::CODE_TARGET, instr); |
} |
@@ -3144,8 +3158,8 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { |
Register result = ToRegister(instr->result()); |
// Get the prototype or initial map from the function. |
- __ ldr(result, |
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
+ __ LoadP(result, |
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
// Check that the function has a prototype or an initial map. |
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
@@ -3155,10 +3169,10 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) { |
// If the function does not have an initial map, we're done. |
Label done; |
__ CompareObjectType(result, scratch, scratch, MAP_TYPE); |
- __ b(ne, &done); |
+ __ bne(&done); |
// Get the prototype from the initial map. |
- __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
+ __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
// All done. |
__ bind(&done); |
@@ -3181,28 +3195,32 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) { |
if (instr->index()->IsConstantOperand()) { |
int const_index = ToInteger32(LConstantOperand::cast(instr->index())); |
int index = (const_length - const_index) + 1; |
- __ ldr(result, MemOperand(arguments, index * kPointerSize)); |
+ __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0); |
} else { |
Register index = ToRegister(instr->index()); |
- __ rsb(result, index, Operand(const_length + 1)); |
- __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); |
+ __ subfic(result, index, Operand(const_length + 1)); |
+ __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2)); |
+ __ LoadPX(result, MemOperand(arguments, result)); |
} |
} else if (instr->index()->IsConstantOperand()) { |
- Register length = ToRegister(instr->length()); |
- int const_index = ToInteger32(LConstantOperand::cast(instr->index())); |
- int loc = const_index - 1; |
- if (loc != 0) { |
- __ sub(result, length, Operand(loc)); |
- __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); |
- } else { |
- __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2)); |
- } |
+ Register length = ToRegister(instr->length()); |
+ int const_index = ToInteger32(LConstantOperand::cast(instr->index())); |
+ int loc = const_index - 1; |
+ if (loc != 0) { |
+ __ subi(result, length, Operand(loc)); |
+ __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2)); |
+ __ LoadPX(result, MemOperand(arguments, result)); |
} else { |
+ __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2)); |
+ __ LoadPX(result, MemOperand(arguments, result)); |
+ } |
+ } else { |
Register length = ToRegister(instr->length()); |
Register index = ToRegister(instr->index()); |
__ sub(result, length, index); |
- __ add(result, result, Operand(1)); |
- __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2)); |
+ __ addi(result, result, Operand(1)); |
+ __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2)); |
+ __ LoadPX(result, MemOperand(arguments, result)); |
} |
} |
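
The index arithmetic in DoAccessArgumentsAt is easier to see in scalar form. A sketch of the slot computation, assuming (as the code above implies) that arguments live above the `arguments` pointer with the highest index closest to it; `argument_slot_offset` is a hypothetical helper:

    #include <cstddef>

    // Byte offset of argument `index`, matching the
    // (length - index + 1) << kPointerSizeLog2 computed above with
    // subfic/subi + ShiftLeftImm + LoadPX.
    std::size_t argument_slot_offset(int length, int index,
                                     std::size_t pointer_size) {
      return static_cast<std::size_t>(length - index + 1) * pointer_size;
    }
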
@@ -3222,61 +3240,91 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) { |
key = ToRegister(instr->key()); |
} |
int element_size_shift = ElementsKindToShiftSize(elements_kind); |
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
- ? (element_size_shift - kSmiTagSize) : element_size_shift; |
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); |
int base_offset = instr->base_offset(); |
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || |
elements_kind == FLOAT32_ELEMENTS || |
elements_kind == EXTERNAL_FLOAT64_ELEMENTS || |
elements_kind == FLOAT64_ELEMENTS) { |
- int base_offset = instr->base_offset(); |
- DwVfpRegister result = ToDoubleRegister(instr->result()); |
- Operand operand = key_is_constant |
- ? Operand(constant_key << element_size_shift) |
- : Operand(key, LSL, shift_size); |
- __ add(scratch0(), external_pointer, operand); |
+ DoubleRegister result = ToDoubleRegister(instr->result()); |
+ if (key_is_constant) { |
+ __ Add(scratch0(), external_pointer, constant_key << element_size_shift, |
+ r0); |
+ } else { |
+ __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi); |
+ __ add(scratch0(), external_pointer, r0); |
+ } |
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || |
elements_kind == FLOAT32_ELEMENTS) { |
- __ vldr(double_scratch0().low(), scratch0(), base_offset); |
- __ vcvt_f64_f32(result, double_scratch0().low()); |
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS |
- __ vldr(result, scratch0(), base_offset); |
+ __ lfs(result, MemOperand(scratch0(), base_offset)); |
+ } else { // i.e. float64 elements (EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS) |
+ __ lfd(result, MemOperand(scratch0(), base_offset)); |
} |
} else { |
Register result = ToRegister(instr->result()); |
- MemOperand mem_operand = PrepareKeyedOperand( |
- key, external_pointer, key_is_constant, constant_key, |
- element_size_shift, shift_size, base_offset); |
+ MemOperand mem_operand = |
+ PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi, |
+ constant_key, element_size_shift, base_offset); |
switch (elements_kind) { |
case EXTERNAL_INT8_ELEMENTS: |
case INT8_ELEMENTS: |
- __ ldrsb(result, mem_operand); |
+ if (key_is_constant) { |
+ __ LoadByte(result, mem_operand, r0); |
+ } else { |
+ __ lbzx(result, mem_operand); |
+ } |
+ __ extsb(result, result); |
break; |
case EXTERNAL_UINT8_CLAMPED_ELEMENTS: |
case EXTERNAL_UINT8_ELEMENTS: |
case UINT8_ELEMENTS: |
case UINT8_CLAMPED_ELEMENTS: |
- __ ldrb(result, mem_operand); |
+ if (key_is_constant) { |
+ __ LoadByte(result, mem_operand, r0); |
+ } else { |
+ __ lbzx(result, mem_operand); |
+ } |
break; |
case EXTERNAL_INT16_ELEMENTS: |
case INT16_ELEMENTS: |
- __ ldrsh(result, mem_operand); |
+ if (key_is_constant) { |
+ __ LoadHalfWord(result, mem_operand, r0); |
+ } else { |
+ __ lhzx(result, mem_operand); |
+ } |
+ __ extsh(result, result); |
break; |
case EXTERNAL_UINT16_ELEMENTS: |
case UINT16_ELEMENTS: |
- __ ldrh(result, mem_operand); |
+ if (key_is_constant) { |
+ __ LoadHalfWord(result, mem_operand, r0); |
+ } else { |
+ __ lhzx(result, mem_operand); |
+ } |
break; |
case EXTERNAL_INT32_ELEMENTS: |
case INT32_ELEMENTS: |
- __ ldr(result, mem_operand); |
+ if (key_is_constant) { |
+ __ LoadWord(result, mem_operand, r0); |
+ } else { |
+ __ lwzx(result, mem_operand); |
+ } |
+#if V8_TARGET_ARCH_PPC64 |
+ __ extsw(result, result); |
+#endif |
break; |
case EXTERNAL_UINT32_ELEMENTS: |
case UINT32_ELEMENTS: |
- __ ldr(result, mem_operand); |
+ if (key_is_constant) { |
+ __ LoadWord(result, mem_operand, r0); |
+ } else { |
+ __ lwzx(result, mem_operand); |
+ } |
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
- __ cmp(result, Operand(0x80000000)); |
- DeoptimizeIf(cs, instr->environment()); |
+ __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); |
+ __ cmplw(result, r0); |
+ DeoptimizeIf(ge, instr->environment()); |
} |
break; |
case FLOAT32_ELEMENTS: |
@@ -3302,39 +3350,50 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) { |
Register elements = ToRegister(instr->elements()); |
bool key_is_constant = instr->key()->IsConstantOperand(); |
Register key = no_reg; |
- DwVfpRegister result = ToDoubleRegister(instr->result()); |
+ DoubleRegister result = ToDoubleRegister(instr->result()); |
Register scratch = scratch0(); |
int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); |
- |
- int base_offset = instr->base_offset(); |
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); |
+ int constant_key = 0; |
if (key_is_constant) { |
- int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
if (constant_key & 0xF0000000) { |
Abort(kArrayIndexConstantValueTooBig); |
} |
- base_offset += constant_key * kDoubleSize; |
+ } else { |
+ key = ToRegister(instr->key()); |
} |
- __ add(scratch, elements, Operand(base_offset)); |
+ int base_offset = instr->base_offset() + constant_key * kDoubleSize; |
if (!key_is_constant) { |
- key = ToRegister(instr->key()); |
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
- ? (element_size_shift - kSmiTagSize) : element_size_shift; |
- __ add(scratch, scratch, Operand(key, LSL, shift_size)); |
+ __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi); |
+ __ add(scratch, elements, r0); |
+ elements = scratch; |
} |
- |
- __ vldr(result, scratch, 0); |
+ if (!is_int16(base_offset)) { |
+ __ Add(scratch, elements, base_offset, r0); |
+ base_offset = 0; |
+ elements = scratch; |
+ } |
+ __ lfd(result, MemOperand(elements, base_offset)); |
if (instr->hydrogen()->RequiresHoleCheck()) { |
- __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); |
- __ cmp(scratch, Operand(kHoleNanUpper32)); |
+ if (is_int16(base_offset + Register::kExponentOffset)) { |
+ __ lwz(scratch, |
+ MemOperand(elements, base_offset + Register::kExponentOffset)); |
+ } else { |
+ __ addi(scratch, elements, Operand(base_offset)); |
+ __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset)); |
+ } |
+ __ Cmpi(scratch, Operand(kHoleNanUpper32), r0); |
DeoptimizeIf(eq, instr->environment()); |
} |
} |
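
The hole check above exploits FAST_DOUBLE_ELEMENTS marking holes with a NaN whose upper (sign/exponent) word is a reserved pattern, so a single 32-bit load at Register::kExponentOffset compared against kHoleNanUpper32 suffices. A sketch of the same test on a host double; `hole_upper32` stands in for V8's kHoleNanUpper32 constant:

    #include <cstdint>
    #include <cstring>

    // Only the high word of the double is compared, exactly as the
    // lwz + Cmpi pair above does.
    bool is_hole_nan(double d, uint32_t hole_upper32) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return static_cast<uint32_t>(bits >> 32) == hole_upper32;
    }
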
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
+ HLoadKeyed* hinstr = instr->hydrogen(); |
Register elements = ToRegister(instr->elements()); |
Register result = ToRegister(instr->result()); |
Register scratch = scratch0(); |
@@ -3351,19 +3410,39 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
// representation for the key to be an integer, the input gets replaced |
// during bound check elimination with the index argument to the bounds |
// check, which can be tagged, so that case must be handled here, too. |
- if (instr->hydrogen()->key()->representation().IsSmi()) { |
- __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); |
+ if (hinstr->key()->representation().IsSmi()) { |
+ __ SmiToPtrArrayOffset(r0, key); |
} else { |
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); |
+ __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2)); |
} |
+ __ add(scratch, elements, r0); |
+ } |
+ |
+ bool requires_hole_check = hinstr->RequiresHoleCheck(); |
+ Representation representation = hinstr->representation(); |
+ |
+#if V8_TARGET_ARCH_PPC64 |
+ // 64-bit Smi optimization |
+ if (representation.IsInteger32() && |
+ hinstr->elements_kind() == FAST_SMI_ELEMENTS) { |
+ DCHECK(!requires_hole_check); |
+ // Read int value directly from upper half of the smi. |
+ STATIC_ASSERT(kSmiTag == 0); |
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
+#if V8_TARGET_LITTLE_ENDIAN |
+ offset += kPointerSize / 2; |
+#endif |
} |
- __ ldr(result, MemOperand(store_base, offset)); |
+#endif |
+ |
+ __ LoadRepresentation(result, MemOperand(store_base, offset), representation, |
+ r0); |
// Check for the hole value. |
- if (instr->hydrogen()->RequiresHoleCheck()) { |
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
- __ SmiTst(result); |
- DeoptimizeIf(ne, instr->environment()); |
+ if (requires_hole_check) { |
+ if (IsFastSmiElementsKind(hinstr->elements_kind())) { |
+ __ TestIfSmi(result, r0); |
+ DeoptimizeIf(ne, instr->environment(), cr0); |
} else { |
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
__ cmp(result, scratch); |
@@ -3384,34 +3463,34 @@ void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
} |
-MemOperand LCodeGen::PrepareKeyedOperand(Register key, |
- Register base, |
- bool key_is_constant, |
+MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base, |
+ bool key_is_constant, bool key_is_smi, |
int constant_key, |
- int element_size, |
- int shift_size, |
+ int element_size_shift, |
int base_offset) { |
+ Register scratch = scratch0(); |
+ |
if (key_is_constant) { |
- return MemOperand(base, (constant_key << element_size) + base_offset); |
+ return MemOperand(base, (constant_key << element_size_shift) + base_offset); |
} |
- if (base_offset == 0) { |
- if (shift_size >= 0) { |
- return MemOperand(base, key, LSL, shift_size); |
- } else { |
- DCHECK_EQ(-1, shift_size); |
- return MemOperand(base, key, LSR, 1); |
- } |
+ bool needs_shift = |
+ (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0)); |
+ |
+ if (!(base_offset || needs_shift)) { |
+ return MemOperand(base, key); |
} |
- if (shift_size >= 0) { |
- __ add(scratch0(), base, Operand(key, LSL, shift_size)); |
- return MemOperand(scratch0(), base_offset); |
- } else { |
- DCHECK_EQ(-1, shift_size); |
- __ add(scratch0(), base, Operand(key, ASR, 1)); |
- return MemOperand(scratch0(), base_offset); |
+ if (needs_shift) { |
+ __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi); |
+ key = scratch; |
+ } |
+ |
+ if (base_offset) { |
+ __ Add(scratch, key, base_offset, r0); |
} |
+ |
+ return MemOperand(base, scratch); |
} |
@@ -3431,7 +3510,7 @@ void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) { |
} |
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); |
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); |
+ CallCode(ic, RelocInfo::CODE_TARGET, instr); |
} |
@@ -3440,18 +3519,24 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) { |
Register result = ToRegister(instr->result()); |
if (instr->hydrogen()->from_inlined()) { |
- __ sub(result, sp, Operand(2 * kPointerSize)); |
+ __ subi(result, sp, Operand(2 * kPointerSize)); |
} else { |
// Check if the calling frame is an arguments adaptor frame. |
Label done, adapted; |
- __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
- __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset)); |
- __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
+ __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
+ __ LoadP(result, |
+ MemOperand(scratch, StandardFrameConstants::kContextOffset)); |
+ __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); |
// Result is the frame pointer for the frame if not adapted and for the real |
// frame below the adaptor frame if adapted. |
- __ mov(result, fp, LeaveCC, ne); |
- __ mov(result, scratch, LeaveCC, eq); |
+ __ beq(&adapted); |
+ __ mr(result, fp); |
+ __ b(&done); |
+ |
+ __ bind(&adapted); |
+ __ mr(result, scratch); |
+ __ bind(&done); |
} |
} |
@@ -3465,12 +3550,12 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) { |
// If no arguments adaptor frame the number of arguments is fixed. |
__ cmp(fp, elem); |
__ mov(result, Operand(scope()->num_parameters())); |
- __ b(eq, &done); |
+ __ beq(&done); |
// Arguments adaptor frame present. Get argument length from there. |
- __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
- __ ldr(result, |
- MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
+ __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
+ __ LoadP(result, |
+ MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
__ SmiUntag(result); |
// Argument length is in result register. |
@@ -3492,47 +3577,56 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { |
if (!instr->hydrogen()->known_function()) { |
// Do not transform the receiver to object for strict mode |
// functions. |
- __ ldr(scratch, |
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
- __ ldr(scratch, |
+ __ LoadP(scratch, |
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
+ __ lwz(scratch, |
FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); |
- int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize); |
- __ tst(scratch, Operand(mask)); |
- __ b(ne, &result_in_receiver); |
+ __ TestBit(scratch, |
+#if V8_TARGET_ARCH_PPC64 |
+ SharedFunctionInfo::kStrictModeFunction, |
+#else |
+ SharedFunctionInfo::kStrictModeFunction + kSmiTagSize, |
+#endif |
+ r0); |
+ __ bne(&result_in_receiver, cr0); |
// Do not transform the receiver to object for builtins. |
- __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); |
- __ b(ne, &result_in_receiver); |
+ __ TestBit(scratch, |
+#if V8_TARGET_ARCH_PPC64 |
+ SharedFunctionInfo::kNative, |
+#else |
+ SharedFunctionInfo::kNative + kSmiTagSize, |
+#endif |
+ r0); |
+ __ bne(&result_in_receiver, cr0); |
} |
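
The #if blocks above exist because of how this V8 vintage packs SharedFunctionInfo's compiler-hint bits: on 32-bit targets the hints word is a smi, so payload bit N sits at word bit N + kSmiTagSize, while on 64-bit the lwz reads the hint bits with no tag in the way. A sketch of the two bit tests; the helpers are hypothetical and the exact 64-bit field layout is an assumption here:

    #include <cstdint>

    // 32-bit: hints stored as a smi (payload shifted left by the 1-bit tag).
    bool hint_bit_smi32(uint32_t word, int bit) {
      return (word >> (bit + 1)) & 1u;
    }

    // 64-bit: the loaded word carries the raw hint bits, no tag adjustment.
    bool hint_bit_raw(uint32_t word, int bit) { return (word >> bit) & 1u; }
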
// Normal function. Replace undefined or null with global receiver. |
__ LoadRoot(scratch, Heap::kNullValueRootIndex); |
__ cmp(receiver, scratch); |
- __ b(eq, &global_object); |
+ __ beq(&global_object); |
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
__ cmp(receiver, scratch); |
- __ b(eq, &global_object); |
+ __ beq(&global_object); |
// Deoptimize if the receiver is not a JS object. |
- __ SmiTst(receiver); |
- DeoptimizeIf(eq, instr->environment()); |
+ __ TestIfSmi(receiver, r0); |
+ DeoptimizeIf(eq, instr->environment(), cr0); |
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); |
DeoptimizeIf(lt, instr->environment()); |
__ b(&result_in_receiver); |
__ bind(&global_object); |
- __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
- __ ldr(result, |
- ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); |
- __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); |
- |
+ __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
+ __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); |
+ __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); |
if (result.is(receiver)) { |
__ bind(&result_in_receiver); |
} else { |
Label result_ok; |
__ b(&result_ok); |
__ bind(&result_in_receiver); |
- __ mov(result, receiver); |
+ __ mr(result, receiver); |
__ bind(&result_ok); |
} |
} |
@@ -3544,41 +3638,42 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) { |
Register length = ToRegister(instr->length()); |
Register elements = ToRegister(instr->elements()); |
Register scratch = scratch0(); |
- DCHECK(receiver.is(r0)); // Used for parameter count. |
- DCHECK(function.is(r1)); // Required by InvokeFunction. |
- DCHECK(ToRegister(instr->result()).is(r0)); |
+ DCHECK(receiver.is(r3)); // Used for parameter count. |
+ DCHECK(function.is(r4)); // Required by InvokeFunction. |
+ DCHECK(ToRegister(instr->result()).is(r3)); |
// Copy the arguments to this function possibly from the |
// adaptor frame below it. |
const uint32_t kArgumentsLimit = 1 * KB; |
- __ cmp(length, Operand(kArgumentsLimit)); |
- DeoptimizeIf(hi, instr->environment()); |
+ __ cmpli(length, Operand(kArgumentsLimit)); |
+ DeoptimizeIf(gt, instr->environment()); |
// Push the receiver and use the register to keep the original |
// number of arguments. |
__ push(receiver); |
- __ mov(receiver, length); |
+ __ mr(receiver, length); |
// The arguments are at a one pointer size offset from elements. |
- __ add(elements, elements, Operand(1 * kPointerSize)); |
+ __ addi(elements, elements, Operand(1 * kPointerSize)); |
// Loop through the arguments pushing them onto the execution |
// stack. |
Label invoke, loop; |
// length is a small non-negative integer, due to the test above. |
- __ cmp(length, Operand::Zero()); |
- __ b(eq, &invoke); |
+ __ cmpi(length, Operand::Zero()); |
+ __ beq(&invoke); |
+ __ mtctr(length); |
__ bind(&loop); |
- __ ldr(scratch, MemOperand(elements, length, LSL, 2)); |
+ __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2)); |
+ __ LoadPX(scratch, MemOperand(elements, r0)); |
__ push(scratch); |
- __ sub(length, length, Operand(1), SetCC); |
- __ b(ne, &loop); |
+ __ addi(length, length, Operand(-1)); |
+ __ bdnz(&loop); |
__ bind(&invoke); |
DCHECK(instr->HasPointerMap()); |
LPointerMap* pointers = instr->pointer_map(); |
- SafepointGenerator safepoint_generator( |
- this, pointers, Safepoint::kLazyDeopt); |
- // The number of arguments is stored in receiver which is r0, as expected |
+ SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt); |
+ // The number of arguments is stored in receiver which is r3, as expected |
// by InvokeFunction. |
ParameterCount actual(receiver); |
__ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator); |
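
The argument-copy loop above is the PPC-idiomatic replacement for ARM's subs/bne: mtctr preloads the count register, and bdnz decrements it and branches while non-zero, so the explicit `length` decrement only maintains the load index. In scalar terms the loop is simply (a sketch, with `elements` taken after the one-word adjustment above):

    // CTR holds the trip count while `length` doubles as the element index.
    void push_arguments(void (*push)(void*), void** elements, int length) {
      for (int i = length; i > 0; --i) {
        push(elements[i]);  // LoadPX at elements + i * kPointerSize
      }
    }
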
@@ -3596,14 +3691,12 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) { |
} |
-void LCodeGen::DoDrop(LDrop* instr) { |
- __ Drop(instr->count()); |
-} |
+void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); } |
void LCodeGen::DoThisFunction(LThisFunction* instr) { |
Register result = ToRegister(instr->result()); |
- __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
} |
@@ -3611,7 +3704,7 @@ void LCodeGen::DoContext(LContext* instr) { |
// If there is a non-return use, the context must be moved to a register. |
Register result = ToRegister(instr->result()); |
if (info()->IsOptimizing()) { |
- __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
} else { |
// If there is no frame, the context must be in cp. |
DCHECK(result.is(cp)); |
@@ -3624,17 +3717,15 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) { |
__ push(cp); // The context is the first argument. |
__ Move(scratch0(), instr->hydrogen()->pairs()); |
__ push(scratch0()); |
- __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags()))); |
+ __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags())); |
__ push(scratch0()); |
CallRuntime(Runtime::kDeclareGlobals, 3, instr); |
} |
void LCodeGen::CallKnownFunction(Handle<JSFunction> function, |
- int formal_parameter_count, |
- int arity, |
- LInstruction* instr, |
- R1State r1_state) { |
+ int formal_parameter_count, int arity, |
+ LInstruction* instr, R4State r4_state) { |
bool dont_adapt_arguments = |
formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel; |
bool can_invoke_directly = |
@@ -3643,22 +3734,26 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function, |
LPointerMap* pointers = instr->pointer_map(); |
if (can_invoke_directly) { |
- if (r1_state == R1_UNINITIALIZED) { |
- __ Move(r1, function); |
+ if (r4_state == R4_UNINITIALIZED) { |
+ __ Move(r4, function); |
} |
// Change context. |
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); |
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); |
- // Set r0 to arguments count if adaption is not needed. Assumes that r0 |
+ // Set r3 to arguments count if adaptation is not needed. Assumes that r3 |
// is available to write to at this point. |
if (dont_adapt_arguments) { |
- __ mov(r0, Operand(arity)); |
+ __ mov(r3, Operand(arity)); |
} |
// Invoke function. |
- __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); |
- __ Call(ip); |
+ if (function.is_identical_to(info()->closure())) { |
+ __ CallSelf(); |
+ } else { |
+ __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); |
+ __ Call(ip); |
+ } |
// Set up deoptimization. |
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); |
@@ -3679,21 +3774,21 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
Register scratch = scratch0(); |
// Deoptimize if not a heap number. |
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
- __ cmp(scratch, Operand(ip)); |
+ __ cmp(scratch, ip); |
DeoptimizeIf(ne, instr->environment()); |
Label done; |
Register exponent = scratch0(); |
scratch = no_reg; |
- __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
+ __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
// Check the sign of the argument. If the argument is positive, just |
// return it. |
- __ tst(exponent, Operand(HeapNumber::kSignMask)); |
+ __ cmpwi(exponent, Operand::Zero()); |
// Move the input to the result if necessary. |
__ Move(result, input); |
- __ b(eq, &done); |
+ __ bge(&done); |
// Input is negative. Reverse its sign. |
// Preserve the value of all registers. |
@@ -3702,10 +3797,10 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
// Registers were saved at the safepoint, so we can use |
// many scratch registers. |
- Register tmp1 = input.is(r1) ? r0 : r1; |
- Register tmp2 = input.is(r2) ? r0 : r2; |
- Register tmp3 = input.is(r3) ? r0 : r3; |
- Register tmp4 = input.is(r4) ? r0 : r4; |
+ Register tmp1 = input.is(r4) ? r3 : r4; |
+ Register tmp2 = input.is(r5) ? r3 : r5; |
+ Register tmp3 = input.is(r6) ? r3 : r6; |
+ Register tmp4 = input.is(r7) ? r3 : r7; |
// exponent: floating point exponent value. |
@@ -3720,18 +3815,19 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr, |
instr->context()); |
// Set the pointer to the new heap number in tmp. |
- if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0)); |
+ if (!tmp1.is(r3)) __ mr(tmp1, r3); |
// Restore input_reg after call to runtime. |
__ LoadFromSafepointRegisterSlot(input, input); |
- __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
+ __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
__ bind(&allocated); |
// exponent: floating point exponent value. |
// tmp1: allocated heap number. |
- __ bic(exponent, exponent, Operand(HeapNumber::kSignMask)); |
- __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); |
- __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); |
- __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); |
+ STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
+ __ clrlwi(exponent, exponent, Operand(1)); // clear sign bit |
+ __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset)); |
+ __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset)); |
+ __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset)); |
__ StoreToSafepointRegisterSlot(tmp1, result); |
} |
@@ -3740,147 +3836,189 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
} |
-void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
+void LCodeGen::EmitMathAbs(LMathAbs* instr) { |
Register input = ToRegister(instr->value()); |
Register result = ToRegister(instr->result()); |
- __ cmp(input, Operand::Zero()); |
- __ Move(result, input, pl); |
- // We can make rsb conditional because the previous cmp instruction |
- // will clear the V (overflow) flag and rsb won't set this flag |
- // if input is positive. |
- __ rsb(result, input, Operand::Zero(), SetCC, mi); |
+ Label done; |
+ __ cmpi(input, Operand::Zero()); |
+ __ Move(result, input); |
+ __ bge(&done); |
+ __ li(r0, Operand::Zero()); // clear XER so OV reflects only the neg below |
+ __ mtxer(r0); |
+ __ neg(result, result, SetOE, SetRC); |
// Deoptimize on overflow. |
- DeoptimizeIf(vs, instr->environment()); |
+ DeoptimizeIf(overflow, instr->environment(), cr0); |
+ __ bind(&done); |
} |
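
EmitMathAbs deoptimizes when the negation overflows, which on two's complement happens only for the most negative value (it has no positive counterpart); neg with SetOE records that single case in XER[OV], cleared beforehand via mtxer, and the PPC64 Integer32 variant that follows makes the same check explicit by comparing against 0x80000000. The equivalent scalar predicate (sketch):

    #include <cstdint>
    #include <limits>

    // The one case the mtxer/SetOE/DeoptimizeIf(overflow) sequence catches.
    bool neg_overflows(std::intptr_t v) {
      return v == std::numeric_limits<std::intptr_t>::min();
    }
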
+#if V8_TARGET_ARCH_PPC64 |
+void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) { |
+ Register input = ToRegister(instr->value()); |
+ Register result = ToRegister(instr->result()); |
+ Label done; |
+ __ cmpwi(input, Operand::Zero()); |
+ __ Move(result, input); |
+ __ bge(&done); |
+ |
+ // Deoptimize on overflow. |
+ __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000))); |
+ __ cmpw(input, r0); |
+ DeoptimizeIf(eq, instr->environment()); |
+ |
+ __ neg(result, result); |
+ __ bind(&done); |
+} |
+#endif |
+ |
+ |
void LCodeGen::DoMathAbs(LMathAbs* instr) { |
// Class for deferred case. |
class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode { |
public: |
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) |
- : LDeferredCode(codegen), instr_(instr) { } |
+ : LDeferredCode(codegen), instr_(instr) {} |
virtual void Generate() V8_OVERRIDE { |
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_); |
} |
virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
+ |
private: |
LMathAbs* instr_; |
}; |
Representation r = instr->hydrogen()->value()->representation(); |
if (r.IsDouble()) { |
- DwVfpRegister input = ToDoubleRegister(instr->value()); |
- DwVfpRegister result = ToDoubleRegister(instr->result()); |
- __ vabs(result, input); |
+ DoubleRegister input = ToDoubleRegister(instr->value()); |
+ DoubleRegister result = ToDoubleRegister(instr->result()); |
+ __ fabs(result, input); |
+#if V8_TARGET_ARCH_PPC64 |
+ } else if (r.IsInteger32()) { |
+ EmitInteger32MathAbs(instr); |
+ } else if (r.IsSmi()) { |
+#else |
} else if (r.IsSmiOrInteger32()) { |
- EmitIntegerMathAbs(instr); |
+#endif |
+ EmitMathAbs(instr); |
} else { |
// Representation is tagged. |
DeferredMathAbsTaggedHeapNumber* deferred = |
- new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr); |
+ new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr); |
Register input = ToRegister(instr->value()); |
// Smi check. |
__ JumpIfNotSmi(input, deferred->entry()); |
// If smi, handle it directly. |
- EmitIntegerMathAbs(instr); |
+ EmitMathAbs(instr); |
__ bind(deferred->exit()); |
} |
} |
void LCodeGen::DoMathFloor(LMathFloor* instr) { |
- DwVfpRegister input = ToDoubleRegister(instr->value()); |
+ DoubleRegister input = ToDoubleRegister(instr->value()); |
Register result = ToRegister(instr->result()); |
Register input_high = scratch0(); |
+ Register scratch = ip; |
Label done, exact; |
- __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); |
+ __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done, |
+ &exact); |
DeoptimizeIf(al, instr->environment()); |
__ bind(&exact); |
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
// Test for -0. |
- __ cmp(result, Operand::Zero()); |
- __ b(ne, &done); |
- __ cmp(input_high, Operand::Zero()); |
- DeoptimizeIf(mi, instr->environment()); |
+ __ cmpi(result, Operand::Zero()); |
+ __ bne(&done); |
+ __ cmpwi(input_high, Operand::Zero()); |
+ DeoptimizeIf(lt, instr->environment()); |
} |
__ bind(&done); |
} |
void LCodeGen::DoMathRound(LMathRound* instr) { |
- DwVfpRegister input = ToDoubleRegister(instr->value()); |
+ DoubleRegister input = ToDoubleRegister(instr->value()); |
Register result = ToRegister(instr->result()); |
- DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
- DwVfpRegister input_plus_dot_five = double_scratch1; |
- Register input_high = scratch0(); |
- DwVfpRegister dot_five = double_scratch0(); |
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
+ DoubleRegister input_plus_dot_five = double_scratch1; |
+ Register scratch1 = scratch0(); |
+ Register scratch2 = ip; |
+ DoubleRegister dot_five = double_scratch0(); |
Label convert, done; |
- __ Vmov(dot_five, 0.5, scratch0()); |
- __ vabs(double_scratch1, input); |
- __ VFPCompareAndSetFlags(double_scratch1, dot_five); |
+ __ LoadDoubleLiteral(dot_five, 0.5, r0); |
+ __ fabs(double_scratch1, input); |
+ __ fcmpu(double_scratch1, dot_five); |
+ DeoptimizeIf(unordered, instr->environment()); |
// If input is in [-0.5, -0], the result is -0. |
// If input is in [+0, +0.5[, the result is +0. |
// If the input is +0.5, the result is 1. |
- __ b(hi, &convert); // Out of [-0.5, +0.5]. |
+ __ bgt(&convert); // Out of [-0.5, +0.5]. |
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
- __ VmovHigh(input_high, input); |
- __ cmp(input_high, Operand::Zero()); |
- DeoptimizeIf(mi, instr->environment()); // [-0.5, -0]. |
+#if V8_TARGET_ARCH_PPC64 |
+ __ MovDoubleToInt64(scratch1, input); |
+#else |
+ __ MovDoubleHighToInt(scratch1, input); |
+#endif |
+ __ cmpi(scratch1, Operand::Zero()); |
+ DeoptimizeIf(lt, instr->environment()); // [-0.5, -0]. |
} |
- __ VFPCompareAndSetFlags(input, dot_five); |
- __ mov(result, Operand(1), LeaveCC, eq); // +0.5. |
+ Label return_zero; |
+ __ fcmpu(input, dot_five); |
+ __ bne(&return_zero); |
+ __ li(result, Operand(1)); // +0.5. |
+ __ b(&done); |
// Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on |
// flag kBailoutOnMinusZero. |
- __ mov(result, Operand::Zero(), LeaveCC, ne); |
+ __ bind(&return_zero); |
+ __ li(result, Operand::Zero()); |
__ b(&done); |
__ bind(&convert); |
- __ vadd(input_plus_dot_five, input, dot_five); |
+ __ fadd(input_plus_dot_five, input, dot_five); |
// Reuse dot_five (double_scratch0) as we no longer need this value. |
- __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), |
- &done, &done); |
+ __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2, |
+ double_scratch0(), &done, &done); |
DeoptimizeIf(al, instr->environment()); |
__ bind(&done); |
} |
void LCodeGen::DoMathFround(LMathFround* instr) { |
- DwVfpRegister input_reg = ToDoubleRegister(instr->value()); |
- DwVfpRegister output_reg = ToDoubleRegister(instr->result()); |
- LowDwVfpRegister scratch = double_scratch0(); |
- __ vcvt_f32_f64(scratch.low(), input_reg); |
- __ vcvt_f64_f32(output_reg, scratch.low()); |
+ DoubleRegister input_reg = ToDoubleRegister(instr->value()); |
+ DoubleRegister output_reg = ToDoubleRegister(instr->result()); |
+ __ frsp(output_reg, input_reg); |
} |
void LCodeGen::DoMathSqrt(LMathSqrt* instr) { |
- DwVfpRegister input = ToDoubleRegister(instr->value()); |
- DwVfpRegister result = ToDoubleRegister(instr->result()); |
- __ vsqrt(result, input); |
+ DoubleRegister input = ToDoubleRegister(instr->value()); |
+ DoubleRegister result = ToDoubleRegister(instr->result()); |
+ __ fsqrt(result, input); |
} |
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) { |
- DwVfpRegister input = ToDoubleRegister(instr->value()); |
- DwVfpRegister result = ToDoubleRegister(instr->result()); |
- DwVfpRegister temp = double_scratch0(); |
+ DoubleRegister input = ToDoubleRegister(instr->value()); |
+ DoubleRegister result = ToDoubleRegister(instr->result()); |
+ DoubleRegister temp = double_scratch0(); |
// Note that according to ECMA-262 15.8.2.13: |
// Math.pow(-Infinity, 0.5) == Infinity |
// Math.sqrt(-Infinity) == NaN |
- Label done; |
- __ vmov(temp, -V8_INFINITY, scratch0()); |
- __ VFPCompareAndSetFlags(input, temp); |
- __ vneg(result, temp, eq); |
- __ b(&done, eq); |
+ Label skip, done; |
+ |
+ __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0()); |
+ __ fcmpu(input, temp); |
+ __ bne(&skip); |
+ __ fneg(result, temp); |
+ __ b(&done); |
// Add +0 to convert -0 to +0. |
- __ vadd(result, input, kDoubleRegZero); |
- __ vsqrt(result, result); |
+ __ bind(&skip); |
+ __ fadd(result, input, kDoubleRegZero); |
+ __ fsqrt(result, result); |
__ bind(&done); |
} |
@@ -3890,21 +4028,20 @@ void LCodeGen::DoPower(LPower* instr) { |
// Having marked this as a call, we can use any registers. |
// Just make sure that the input/output registers are the expected ones. |
DCHECK(!instr->right()->IsDoubleRegister() || |
- ToDoubleRegister(instr->right()).is(d1)); |
- DCHECK(!instr->right()->IsRegister() || |
- ToRegister(instr->right()).is(r2)); |
- DCHECK(ToDoubleRegister(instr->left()).is(d0)); |
- DCHECK(ToDoubleRegister(instr->result()).is(d2)); |
+ ToDoubleRegister(instr->right()).is(d2)); |
+ DCHECK(!instr->right()->IsRegister() || ToRegister(instr->right()).is(r5)); |
+ DCHECK(ToDoubleRegister(instr->left()).is(d1)); |
+ DCHECK(ToDoubleRegister(instr->result()).is(d3)); |
if (exponent_type.IsSmi()) { |
MathPowStub stub(isolate(), MathPowStub::TAGGED); |
__ CallStub(&stub); |
} else if (exponent_type.IsTagged()) { |
Label no_deopt; |
- __ JumpIfSmi(r2, &no_deopt); |
- __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset)); |
+ __ JumpIfSmi(r5, &no_deopt); |
+ __ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset)); |
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
- __ cmp(r6, Operand(ip)); |
+ __ cmp(r10, ip); |
DeoptimizeIf(ne, instr->environment()); |
__ bind(&no_deopt); |
MathPowStub stub(isolate(), MathPowStub::TAGGED); |
@@ -3921,24 +4058,23 @@ void LCodeGen::DoPower(LPower* instr) { |
void LCodeGen::DoMathExp(LMathExp* instr) { |
- DwVfpRegister input = ToDoubleRegister(instr->value()); |
- DwVfpRegister result = ToDoubleRegister(instr->result()); |
- DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); |
- DwVfpRegister double_scratch2 = double_scratch0(); |
+ DoubleRegister input = ToDoubleRegister(instr->value()); |
+ DoubleRegister result = ToDoubleRegister(instr->result()); |
+ DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp()); |
+ DoubleRegister double_scratch2 = double_scratch0(); |
Register temp1 = ToRegister(instr->temp1()); |
Register temp2 = ToRegister(instr->temp2()); |
- MathExpGenerator::EmitMathExp( |
- masm(), input, result, double_scratch1, double_scratch2, |
- temp1, temp2, scratch0()); |
+ MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1, |
+ double_scratch2, temp1, temp2, scratch0()); |
} |
void LCodeGen::DoMathLog(LMathLog* instr) { |
__ PrepareCallCFunction(0, 1, scratch0()); |
__ MovToFloatParameter(ToDoubleRegister(instr->value())); |
- __ CallCFunction(ExternalReference::math_log_double_function(isolate()), |
- 0, 1); |
+ __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0, |
+ 1); |
__ MovFromFloatResult(ToDoubleRegister(instr->result())); |
} |
@@ -3946,13 +4082,13 @@ void LCodeGen::DoMathLog(LMathLog* instr) { |
void LCodeGen::DoMathClz32(LMathClz32* instr) { |
Register input = ToRegister(instr->value()); |
Register result = ToRegister(instr->result()); |
- __ clz(result, input); |
+ __ cntlzw_(result, input); |
} |
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { |
DCHECK(ToRegister(instr->context()).is(cp)); |
- DCHECK(ToRegister(instr->function()).is(r1)); |
+ DCHECK(ToRegister(instr->function()).is(r4)); |
DCHECK(instr->HasPointerMap()); |
Handle<JSFunction> known_function = instr->hydrogen()->known_function(); |
@@ -3960,19 +4096,17 @@ void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) { |
LPointerMap* pointers = instr->pointer_map(); |
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); |
ParameterCount count(instr->arity()); |
- __ InvokeFunction(r1, count, CALL_FUNCTION, generator); |
+ __ InvokeFunction(r4, count, CALL_FUNCTION, generator); |
} else { |
CallKnownFunction(known_function, |
instr->hydrogen()->formal_parameter_count(), |
- instr->arity(), |
- instr, |
- R1_CONTAINS_TARGET); |
+ instr->arity(), instr, R4_CONTAINS_TARGET); |
} |
} |
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { |
- DCHECK(ToRegister(instr->result()).is(r0)); |
+ DCHECK(ToRegister(instr->result()).is(r3)); |
LPointerMap* pointers = instr->pointer_map(); |
SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt); |
@@ -3981,21 +4115,12 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { |
LConstantOperand* target = LConstantOperand::cast(instr->target()); |
Handle<Code> code = Handle<Code>::cast(ToHandle(target)); |
generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET)); |
- PlatformInterfaceDescriptor* call_descriptor = |
- instr->descriptor()->platform_specific_descriptor(); |
- __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al, |
- call_descriptor->storage_mode()); |
+ __ Call(code, RelocInfo::CODE_TARGET); |
} else { |
DCHECK(instr->target()->IsRegister()); |
Register target = ToRegister(instr->target()); |
generator.BeforeCall(__ CallSize(target)); |
- // Make sure we don't emit any additional entries in the constant pool |
- // before the call to ensure that the CallCodeSize() calculated the correct |
- // number of instructions for the constant pool load. |
- { |
- ConstantPoolUnavailableScope constant_pool_unavailable(masm_); |
- __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); |
- } |
+ __ addi(target, target, Operand(Code::kHeaderSize - kHeapObjectTag)); |
__ Call(target); |
} |
generator.AfterCall(); |
@@ -4003,18 +4128,18 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) { |
void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { |
- DCHECK(ToRegister(instr->function()).is(r1)); |
- DCHECK(ToRegister(instr->result()).is(r0)); |
+ DCHECK(ToRegister(instr->function()).is(r4)); |
+ DCHECK(ToRegister(instr->result()).is(r3)); |
if (instr->hydrogen()->pass_argument_count()) { |
- __ mov(r0, Operand(instr->arity())); |
+ __ mov(r3, Operand(instr->arity())); |
} |
// Change context. |
- __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); |
+ __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); |
// Load the code entry address |
- __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); |
+ __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); |
__ Call(ip); |
RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT); |
@@ -4023,8 +4148,8 @@ void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) { |
void LCodeGen::DoCallFunction(LCallFunction* instr) { |
DCHECK(ToRegister(instr->context()).is(cp)); |
- DCHECK(ToRegister(instr->function()).is(r1)); |
- DCHECK(ToRegister(instr->result()).is(r0)); |
+ DCHECK(ToRegister(instr->function()).is(r4)); |
+ DCHECK(ToRegister(instr->result()).is(r3)); |
int arity = instr->arity(); |
CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags()); |
@@ -4034,12 +4159,12 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) { |
void LCodeGen::DoCallNew(LCallNew* instr) { |
DCHECK(ToRegister(instr->context()).is(cp)); |
- DCHECK(ToRegister(instr->constructor()).is(r1)); |
- DCHECK(ToRegister(instr->result()).is(r0)); |
+ DCHECK(ToRegister(instr->constructor()).is(r4)); |
+ DCHECK(ToRegister(instr->result()).is(r3)); |
- __ mov(r0, Operand(instr->arity())); |
- // No cell in r2 for construct type feedback in optimized code |
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
+ __ mov(r3, Operand(instr->arity())); |
+ // No cell in r5 for construct type feedback in optimized code. |
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); |
CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS); |
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); |
} |
@@ -4047,11 +4172,11 @@ void LCodeGen::DoCallNew(LCallNew* instr) { |
void LCodeGen::DoCallNewArray(LCallNewArray* instr) { |
DCHECK(ToRegister(instr->context()).is(cp)); |
- DCHECK(ToRegister(instr->constructor()).is(r1)); |
- DCHECK(ToRegister(instr->result()).is(r0)); |
+ DCHECK(ToRegister(instr->constructor()).is(r4)); |
+ DCHECK(ToRegister(instr->result()).is(r3)); |
- __ mov(r0, Operand(instr->arity())); |
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
+ __ mov(r3, Operand(instr->arity())); |
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); |
ElementsKind kind = instr->hydrogen()->elements_kind(); |
AllocationSiteOverrideMode override_mode = |
(AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) |
@@ -4067,16 +4192,15 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) { |
Label packed_case; |
 // We might need a change here: look at the first argument. |
- __ ldr(r5, MemOperand(sp, 0)); |
- __ cmp(r5, Operand::Zero()); |
- __ b(eq, &packed_case); |
+ __ LoadP(r8, MemOperand(sp, 0)); |
+ __ cmpi(r8, Operand::Zero()); |
+ __ beq(&packed_case); |
ElementsKind holey_kind = GetHoleyElementsKind(kind); |
- ArraySingleArgumentConstructorStub stub(isolate(), |
- holey_kind, |
+ ArraySingleArgumentConstructorStub stub(isolate(), holey_kind, |
override_mode); |
CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr); |
- __ jmp(&done); |
+ __ b(&done); |
__ bind(&packed_case); |
} |
@@ -4098,9 +4222,10 @@ void LCodeGen::DoCallRuntime(LCallRuntime* instr) { |
void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) { |
Register function = ToRegister(instr->function()); |
Register code_object = ToRegister(instr->code_object()); |
- __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag)); |
- __ str(code_object, |
- FieldMemOperand(function, JSFunction::kCodeEntryOffset)); |
+ __ addi(code_object, code_object, |
+ Operand(Code::kHeaderSize - kHeapObjectTag)); |
+ __ StoreP(code_object, |
+ FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0); |
} |
@@ -4109,7 +4234,7 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { |
Register base = ToRegister(instr->base_object()); |
if (instr->offset()->IsConstantOperand()) { |
LConstantOperand* offset = LConstantOperand::cast(instr->offset()); |
- __ add(result, base, Operand(ToInteger32(offset))); |
+ __ Add(result, base, ToInteger32(offset), r0); |
} else { |
Register offset = ToRegister(instr->offset()); |
__ add(result, base, offset); |
@@ -4118,83 +4243,91 @@ void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) { |
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) { |
+ HStoreNamedField* hinstr = instr->hydrogen(); |
Representation representation = instr->representation(); |
Register object = ToRegister(instr->object()); |
Register scratch = scratch0(); |
- HObjectAccess access = instr->hydrogen()->access(); |
+ HObjectAccess access = hinstr->access(); |
int offset = access.offset(); |
if (access.IsExternalMemory()) { |
Register value = ToRegister(instr->value()); |
MemOperand operand = MemOperand(object, offset); |
- __ Store(value, operand, representation); |
+ __ StoreRepresentation(value, operand, representation, r0); |
return; |
} |
__ AssertNotSmi(object); |
- DCHECK(!representation.IsSmi() || |
- !instr->value()->IsConstantOperand() || |
+#if V8_TARGET_ARCH_PPC64 |
+ DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() || |
+ IsInteger32(LConstantOperand::cast(instr->value()))); |
+#else |
+ DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() || |
IsSmi(LConstantOperand::cast(instr->value()))); |
+#endif |
if (representation.IsDouble()) { |
DCHECK(access.IsInobject()); |
- DCHECK(!instr->hydrogen()->has_transition()); |
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); |
- DwVfpRegister value = ToDoubleRegister(instr->value()); |
- __ vstr(value, FieldMemOperand(object, offset)); |
+ DCHECK(!hinstr->has_transition()); |
+ DCHECK(!hinstr->NeedsWriteBarrier()); |
+ DoubleRegister value = ToDoubleRegister(instr->value()); |
+ __ stfd(value, FieldMemOperand(object, offset)); |
return; |
} |
- if (instr->hydrogen()->has_transition()) { |
- Handle<Map> transition = instr->hydrogen()->transition_map(); |
+ if (hinstr->has_transition()) { |
+ Handle<Map> transition = hinstr->transition_map(); |
AddDeprecationDependency(transition); |
__ mov(scratch, Operand(transition)); |
- __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
- if (instr->hydrogen()->NeedsWriteBarrierForMap()) { |
+ __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0); |
+ if (hinstr->NeedsWriteBarrierForMap()) { |
Register temp = ToRegister(instr->temp()); |
// Update the write barrier for the map field. |
- __ RecordWriteForMap(object, |
- scratch, |
- temp, |
- GetLinkRegisterState(), |
+ __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(), |
kSaveFPRegs); |
} |
} |
// Do the store. |
Register value = ToRegister(instr->value()); |
+ |
+#if V8_TARGET_ARCH_PPC64 |
+ // 64-bit Smi optimization |
+ if (representation.IsSmi() && |
+ hinstr->value()->representation().IsInteger32()) { |
+ DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); |
+ // Store int value directly to upper half of the smi. |
+ STATIC_ASSERT(kSmiTag == 0); |
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
+#if V8_TARGET_LITTLE_ENDIAN |
+ offset += kPointerSize / 2; |
+#endif |
+ representation = Representation::Integer32(); |
+ } |
+#endif |
+ |
if (access.IsInobject()) { |
MemOperand operand = FieldMemOperand(object, offset); |
- __ Store(value, operand, representation); |
- if (instr->hydrogen()->NeedsWriteBarrier()) { |
+ __ StoreRepresentation(value, operand, representation, r0); |
+ if (hinstr->NeedsWriteBarrier()) { |
// Update the write barrier for the object for in-object properties. |
- __ RecordWriteField(object, |
- offset, |
- value, |
- scratch, |
- GetLinkRegisterState(), |
- kSaveFPRegs, |
- EMIT_REMEMBERED_SET, |
- instr->hydrogen()->SmiCheckForWriteBarrier(), |
- instr->hydrogen()->PointersToHereCheckForValue()); |
+ __ RecordWriteField( |
+ object, offset, value, scratch, GetLinkRegisterState(), kSaveFPRegs, |
+ EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(), |
+ hinstr->PointersToHereCheckForValue()); |
} |
} else { |
- __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
+ __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
MemOperand operand = FieldMemOperand(scratch, offset); |
- __ Store(value, operand, representation); |
- if (instr->hydrogen()->NeedsWriteBarrier()) { |
+ __ StoreRepresentation(value, operand, representation, r0); |
+ if (hinstr->NeedsWriteBarrier()) { |
// Update the write barrier for the properties array. |
// object is used as a scratch register. |
- __ RecordWriteField(scratch, |
- offset, |
- value, |
- object, |
- GetLinkRegisterState(), |
- kSaveFPRegs, |
- EMIT_REMEMBERED_SET, |
- instr->hydrogen()->SmiCheckForWriteBarrier(), |
- instr->hydrogen()->PointersToHereCheckForValue()); |
+ __ RecordWriteField( |
+ scratch, offset, value, object, GetLinkRegisterState(), kSaveFPRegs, |
+ EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(), |
+ hinstr->PointersToHereCheckForValue()); |
} |
} |
} |
@@ -4207,21 +4340,41 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) { |
__ mov(StoreIC::NameRegister(), Operand(instr->name())); |
Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode()); |
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); |
+ CallCode(ic, RelocInfo::CODE_TARGET, instr); |
} |
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) { |
- Condition cc = instr->hydrogen()->allow_equality() ? hi : hs; |
- if (instr->index()->IsConstantOperand()) { |
- Operand index = ToOperand(instr->index()); |
- Register length = ToRegister(instr->length()); |
- __ cmp(length, index); |
+ Representation representation = instr->hydrogen()->length()->representation(); |
+ DCHECK(representation.Equals(instr->hydrogen()->index()->representation())); |
+ DCHECK(representation.IsSmiOrInteger32()); |
+ |
+ Condition cc = instr->hydrogen()->allow_equality() ? lt : le; |
+ if (instr->length()->IsConstantOperand()) { |
+ int32_t length = ToInteger32(LConstantOperand::cast(instr->length())); |
+ Register index = ToRegister(instr->index()); |
+ if (representation.IsSmi()) { |
+ __ Cmpli(index, Operand(Smi::FromInt(length)), r0); |
+ } else { |
+ __ Cmplwi(index, Operand(length), r0); |
+ } |
cc = CommuteCondition(cc); |
+ } else if (instr->index()->IsConstantOperand()) { |
+ int32_t index = ToInteger32(LConstantOperand::cast(instr->index())); |
+ Register length = ToRegister(instr->length()); |
+ if (representation.IsSmi()) { |
+ __ Cmpli(length, Operand(Smi::FromInt(index)), r0); |
+ } else { |
+ __ Cmplwi(length, Operand(index), r0); |
+ } |
} else { |
Register index = ToRegister(instr->index()); |
- Operand length = ToOperand(instr->length()); |
- __ cmp(index, length); |
+ Register length = ToRegister(instr->length()); |
+ if (representation.IsSmi()) { |
+ __ cmpl(length, index); |
+ } else { |
+ __ cmplw(length, index); |
+ } |
} |
if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
Label done; |
@@ -4249,8 +4402,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
key = ToRegister(instr->key()); |
} |
int element_size_shift = ElementsKindToShiftSize(elements_kind); |
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
- ? (element_size_shift - kSmiTagSize) : element_size_shift; |
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); |
int base_offset = instr->base_offset(); |
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || |
@@ -4258,30 +4410,30 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
elements_kind == EXTERNAL_FLOAT64_ELEMENTS || |
elements_kind == FLOAT64_ELEMENTS) { |
Register address = scratch0(); |
- DwVfpRegister value(ToDoubleRegister(instr->value())); |
+ DoubleRegister value(ToDoubleRegister(instr->value())); |
if (key_is_constant) { |
if (constant_key != 0) { |
- __ add(address, external_pointer, |
- Operand(constant_key << element_size_shift)); |
+ __ Add(address, external_pointer, constant_key << element_size_shift, |
+ r0); |
} else { |
address = external_pointer; |
} |
} else { |
- __ add(address, external_pointer, Operand(key, LSL, shift_size)); |
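+ // Scale the key by the element size; IndexToArrayOffset compensates for
+ // the smi tag when the key is tagged.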
+ __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi); |
+ __ add(address, external_pointer, r0); |
} |
if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS || |
elements_kind == FLOAT32_ELEMENTS) { |
- __ vcvt_f32_f64(double_scratch0().low(), value); |
- __ vstr(double_scratch0().low(), address, base_offset); |
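+ // Round the double to single precision (frsp) before the 32-bit store.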
+ __ frsp(double_scratch0(), value); |
+ __ stfs(double_scratch0(), MemOperand(address, base_offset)); |
} else { // Storing doubles, not floats. |
- __ vstr(value, address, base_offset); |
+ __ stfd(value, MemOperand(address, base_offset)); |
} |
} else { |
Register value(ToRegister(instr->value())); |
- MemOperand mem_operand = PrepareKeyedOperand( |
- key, external_pointer, key_is_constant, constant_key, |
- element_size_shift, shift_size, |
- base_offset); |
+ MemOperand mem_operand = |
+ PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi, |
+ constant_key, element_size_shift, base_offset); |
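+ // A constant key yields a displacement-mode operand, handled by the
+ // Store* helpers below (with r0 available as a scratch for large
+ // offsets); a register key yields an indexed operand for the x-form
+ // stores (stbx/sthx/stwx).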
switch (elements_kind) { |
case EXTERNAL_UINT8_CLAMPED_ELEMENTS: |
case EXTERNAL_INT8_ELEMENTS: |
@@ -4289,19 +4441,31 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
case UINT8_ELEMENTS: |
case UINT8_CLAMPED_ELEMENTS: |
case INT8_ELEMENTS: |
- __ strb(value, mem_operand); |
+ if (key_is_constant) { |
+ __ StoreByte(value, mem_operand, r0); |
+ } else { |
+ __ stbx(value, mem_operand); |
+ } |
break; |
case EXTERNAL_INT16_ELEMENTS: |
case EXTERNAL_UINT16_ELEMENTS: |
case INT16_ELEMENTS: |
case UINT16_ELEMENTS: |
- __ strh(value, mem_operand); |
+ if (key_is_constant) { |
+ __ StoreHalfWord(value, mem_operand, r0); |
+ } else { |
+ __ sthx(value, mem_operand); |
+ } |
break; |
case EXTERNAL_INT32_ELEMENTS: |
case EXTERNAL_UINT32_ELEMENTS: |
case INT32_ELEMENTS: |
case UINT32_ELEMENTS: |
- __ str(value, mem_operand); |
+ if (key_is_constant) { |
+ __ StoreWord(value, mem_operand, r0); |
+ } else { |
+ __ stwx(value, mem_operand); |
+ } |
break; |
case FLOAT32_ELEMENTS: |
case FLOAT64_ELEMENTS: |
@@ -4323,58 +4487,60 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { |
- DwVfpRegister value = ToDoubleRegister(instr->value()); |
+ DoubleRegister value = ToDoubleRegister(instr->value()); |
Register elements = ToRegister(instr->elements()); |
+ Register key = no_reg; |
Register scratch = scratch0(); |
- DwVfpRegister double_scratch = double_scratch0(); |
+ DoubleRegister double_scratch = double_scratch0(); |
bool key_is_constant = instr->key()->IsConstantOperand(); |
- int base_offset = instr->base_offset(); |
+ int constant_key = 0; |
// Calculate the effective address of the slot in the array to store the |
// double value. |
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); |
if (key_is_constant) { |
- int constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key())); |
if (constant_key & 0xF0000000) { |
Abort(kArrayIndexConstantValueTooBig); |
} |
- __ add(scratch, elements, |
- Operand((constant_key << element_size_shift) + base_offset)); |
} else { |
- int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
- ? (element_size_shift - kSmiTagSize) : element_size_shift; |
- __ add(scratch, elements, Operand(base_offset)); |
- __ add(scratch, scratch, |
- Operand(ToRegister(instr->key()), LSL, shift_size)); |
+ key = ToRegister(instr->key()); |
+ } |
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS); |
+ bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi(); |
+ int base_offset = instr->base_offset() + constant_key * kDoubleSize; |
+ if (!key_is_constant) { |
+ __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi); |
+ __ add(scratch, elements, scratch); |
+ elements = scratch; |
+ } |
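+ // D-form loads/stores take only a signed 16-bit displacement; fold
+ // larger offsets into the base register.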
+ if (!is_int16(base_offset)) { |
+ __ Add(scratch, elements, base_offset, r0); |
+ base_offset = 0; |
+ elements = scratch; |
} |
if (instr->NeedsCanonicalization()) { |
// Force a canonical NaN. |
- if (masm()->emit_debug_code()) { |
- __ vmrs(ip); |
- __ tst(ip, Operand(kVFPDefaultNaNModeControlBit)); |
- __ Assert(ne, kDefaultNaNModeNotSet); |
- } |
- __ VFPCanonicalizeNaN(double_scratch, value); |
- __ vstr(double_scratch, scratch, 0); |
+ __ CanonicalizeNaN(double_scratch, value); |
+ __ stfd(double_scratch, MemOperand(elements, base_offset)); |
} else { |
- __ vstr(value, scratch, 0); |
+ __ stfd(value, MemOperand(elements, base_offset)); |
} |
} |
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { |
+ HStoreKeyed* hinstr = instr->hydrogen(); |
Register value = ToRegister(instr->value()); |
Register elements = ToRegister(instr->elements()); |
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) |
- : no_reg; |
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg; |
Register scratch = scratch0(); |
Register store_base = scratch; |
int offset = instr->base_offset(); |
// Do the store. |
if (instr->key()->IsConstantOperand()) { |
- DCHECK(!instr->hydrogen()->NeedsWriteBarrier()); |
+ DCHECK(!hinstr->NeedsWriteBarrier()); |
LConstantOperand* const_operand = LConstantOperand::cast(instr->key()); |
offset += ToInteger32(const_operand) * kPointerSize; |
store_base = elements; |
@@ -4383,28 +4549,42 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) { |
// representation for the key to be an integer, the input gets replaced |
// during bound check elimination with the index argument to the bounds |
// check, which can be tagged, so that case must be handled here, too. |
- if (instr->hydrogen()->key()->representation().IsSmi()) { |
- __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key)); |
+ if (hinstr->key()->representation().IsSmi()) { |
+ __ SmiToPtrArrayOffset(scratch, key); |
} else { |
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); |
+ __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2)); |
} |
+ __ add(scratch, elements, scratch); |
} |
- __ str(value, MemOperand(store_base, offset)); |
- if (instr->hydrogen()->NeedsWriteBarrier()) { |
- SmiCheck check_needed = |
- instr->hydrogen()->value()->type().IsHeapObject() |
- ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
+ Representation representation = hinstr->value()->representation(); |
+ |
+#if V8_TARGET_ARCH_PPC64 |
+ // 64-bit Smi optimization |
+ if (representation.IsInteger32()) { |
+ DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY); |
+ DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS); |
+ // Store the int value directly to the upper half of the smi.
+ STATIC_ASSERT(kSmiTag == 0); |
+ STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32); |
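+ // Same layout trick as in DoStoreNamedField: the int32 payload lives in
+ // the upper half of the smi word, at byte offset +4 on little-endian.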
+#if V8_TARGET_LITTLE_ENDIAN |
+ offset += kPointerSize / 2; |
+#endif |
+ } |
+#endif |
+ |
+ __ StoreRepresentation(value, MemOperand(store_base, offset), representation, |
+ r0); |
+ |
+ if (hinstr->NeedsWriteBarrier()) { |
+ SmiCheck check_needed = hinstr->value()->type().IsHeapObject() |
+ ? OMIT_SMI_CHECK |
+ : INLINE_SMI_CHECK; |
// Compute address of modified element and store it into key register. |
- __ add(key, store_base, Operand(offset)); |
- __ RecordWrite(elements, |
- key, |
- value, |
- GetLinkRegisterState(), |
- kSaveFPRegs, |
- EMIT_REMEMBERED_SET, |
- check_needed, |
- instr->hydrogen()->PointersToHereCheckForValue()); |
+ __ Add(key, store_base, offset, r0); |
+ __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs, |
+ EMIT_REMEMBERED_SET, check_needed, |
+ hinstr->PointersToHereCheckForValue()); |
} |
} |
@@ -4427,10 +4607,11 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) { |
DCHECK(ToRegister(instr->key()).is(KeyedStoreIC::NameRegister())); |
DCHECK(ToRegister(instr->value()).is(KeyedStoreIC::ValueRegister())); |
- Handle<Code> ic = instr->strict_mode() == STRICT |
- ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() |
- : isolate()->builtins()->KeyedStoreIC_Initialize(); |
- CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS); |
+ Handle<Code> ic = |
+ (instr->strict_mode() == STRICT) |
+ ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict() |
+ : isolate()->builtins()->KeyedStoreIC_Initialize(); |
+ CallCode(ic, RelocInfo::CODE_TARGET, instr); |
} |
@@ -4444,30 +4625,28 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) { |
ElementsKind to_kind = instr->to_kind(); |
Label not_applicable; |
- __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset)); |
- __ cmp(scratch, Operand(from_map)); |
- __ b(ne, ¬_applicable); |
+ __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset)); |
+ __ Cmpi(scratch, Operand(from_map), r0); |
+ __ bne(¬_applicable); |
if (IsSimpleMapChangeTransition(from_kind, to_kind)) { |
Register new_map_reg = ToRegister(instr->new_map_temp()); |
__ mov(new_map_reg, Operand(to_map)); |
- __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset)); |
+ __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset), |
+ r0); |
// Write barrier. |
- __ RecordWriteForMap(object_reg, |
- new_map_reg, |
- scratch, |
- GetLinkRegisterState(), |
- kDontSaveFPRegs); |
+ __ RecordWriteForMap(object_reg, new_map_reg, scratch, |
+ GetLinkRegisterState(), kDontSaveFPRegs); |
} else { |
DCHECK(ToRegister(instr->context()).is(cp)); |
- DCHECK(object_reg.is(r0)); |
+ DCHECK(object_reg.is(r3)); |
PushSafepointRegistersScope scope(this); |
- __ Move(r1, to_map); |
+ __ Move(r4, to_map); |
bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE; |
TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array); |
__ CallStub(&stub); |
- RecordSafepointWithRegisters( |
- instr->pointer_map(), 0, Safepoint::kLazyDeopt); |
+ RecordSafepointWithRegisters(instr->pointer_map(), 0, |
+ Safepoint::kLazyDeopt); |
} |
__ bind(¬_applicable); |
} |
@@ -4485,10 +4664,9 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
void LCodeGen::DoStringAdd(LStringAdd* instr) { |
DCHECK(ToRegister(instr->context()).is(cp)); |
- DCHECK(ToRegister(instr->left()).is(r1)); |
- DCHECK(ToRegister(instr->right()).is(r0)); |
- StringAddStub stub(isolate(), |
- instr->hydrogen()->flags(), |
+ DCHECK(ToRegister(instr->left()).is(r4)); |
+ DCHECK(ToRegister(instr->right()).is(r3)); |
+ StringAddStub stub(isolate(), instr->hydrogen()->flags(), |
instr->hydrogen()->pretenure_flag()); |
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
} |
@@ -4498,23 +4676,22 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) { |
class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode { |
public: |
DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr) |
- : LDeferredCode(codegen), instr_(instr) { } |
+ : LDeferredCode(codegen), instr_(instr) {} |
virtual void Generate() V8_OVERRIDE { |
codegen()->DoDeferredStringCharCodeAt(instr_); |
} |
virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
+ |
private: |
LStringCharCodeAt* instr_; |
}; |
DeferredStringCharCodeAt* deferred = |
- new(zone()) DeferredStringCharCodeAt(this, instr); |
+ new (zone()) DeferredStringCharCodeAt(this, instr); |
- StringCharLoadGenerator::Generate(masm(), |
- ToRegister(instr->string()), |
- ToRegister(instr->index()), |
- ToRegister(instr->result()), |
- deferred->entry()); |
+ StringCharLoadGenerator::Generate( |
+ masm(), ToRegister(instr->string()), ToRegister(instr->index()), |
+ ToRegister(instr->result()), deferred->entry()); |
__ bind(deferred->exit()); |
} |
@@ -4527,7 +4704,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { |
// TODO(3095996): Get rid of this. For now, we need to make the |
// result register contain a valid pointer because it is already |
// contained in the register pointer map. |
- __ mov(result, Operand::Zero()); |
+ __ li(result, Operand::Zero()); |
PushSafepointRegistersScope scope(this); |
__ push(string); |
@@ -4535,7 +4712,7 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { |
// DoStringCharCodeAt above. |
if (instr->index()->IsConstantOperand()) { |
int const_index = ToInteger32(LConstantOperand::cast(instr->index())); |
- __ mov(scratch, Operand(Smi::FromInt(const_index))); |
+ __ LoadSmiLiteral(scratch, Smi::FromInt(const_index)); |
__ push(scratch); |
} else { |
Register index = ToRegister(instr->index()); |
@@ -4544,41 +4721,43 @@ void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) { |
} |
CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr, |
instr->context()); |
- __ AssertSmi(r0); |
- __ SmiUntag(r0); |
- __ StoreToSafepointRegisterSlot(r0, result); |
+ __ AssertSmi(r3); |
+ __ SmiUntag(r3); |
+ __ StoreToSafepointRegisterSlot(r3, result); |
} |
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) { |
- class DeferredStringCharFromCode V8_FINAL : public LDeferredCode { |
+ class DeferredStringCharFromCode : public LDeferredCode { |
public: |
DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr) |
- : LDeferredCode(codegen), instr_(instr) { } |
+ : LDeferredCode(codegen), instr_(instr) {} |
virtual void Generate() V8_OVERRIDE { |
codegen()->DoDeferredStringCharFromCode(instr_); |
} |
virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
+ |
private: |
LStringCharFromCode* instr_; |
}; |
DeferredStringCharFromCode* deferred = |
- new(zone()) DeferredStringCharFromCode(this, instr); |
+ new (zone()) DeferredStringCharFromCode(this, instr); |
DCHECK(instr->hydrogen()->value()->representation().IsInteger32()); |
Register char_code = ToRegister(instr->char_code()); |
Register result = ToRegister(instr->result()); |
DCHECK(!char_code.is(result)); |
- __ cmp(char_code, Operand(String::kMaxOneByteCharCode)); |
- __ b(hi, deferred->entry()); |
+ __ cmpli(char_code, Operand(String::kMaxOneByteCharCode)); |
+ __ bgt(deferred->entry()); |
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex); |
- __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2)); |
- __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize)); |
+ __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2)); |
+ __ add(result, result, r0); |
+ __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize)); |
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
__ cmp(result, ip); |
- __ b(eq, deferred->entry()); |
+ __ beq(deferred->entry()); |
__ bind(deferred->exit()); |
} |
@@ -4590,13 +4769,13 @@ void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) { |
// TODO(3095996): Get rid of this. For now, we need to make the |
// result register contain a valid pointer because it is already |
// contained in the register pointer map. |
- __ mov(result, Operand::Zero()); |
+ __ li(result, Operand::Zero()); |
PushSafepointRegistersScope scope(this); |
__ SmiTag(char_code); |
__ push(char_code); |
CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context()); |
- __ StoreToSafepointRegisterSlot(r0, result); |
+ __ StoreToSafepointRegisterSlot(r3, result); |
} |
@@ -4605,25 +4784,20 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) { |
DCHECK(input->IsRegister() || input->IsStackSlot()); |
LOperand* output = instr->result(); |
DCHECK(output->IsDoubleRegister()); |
- SwVfpRegister single_scratch = double_scratch0().low(); |
if (input->IsStackSlot()) { |
Register scratch = scratch0(); |
- __ ldr(scratch, ToMemOperand(input)); |
- __ vmov(single_scratch, scratch); |
+ __ LoadP(scratch, ToMemOperand(input)); |
+ __ ConvertIntToDouble(scratch, ToDoubleRegister(output)); |
} else { |
- __ vmov(single_scratch, ToRegister(input)); |
+ __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output)); |
} |
- __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch); |
} |
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) { |
LOperand* input = instr->value(); |
LOperand* output = instr->result(); |
- |
- SwVfpRegister flt_scratch = double_scratch0().low(); |
- __ vmov(flt_scratch, ToRegister(input)); |
- __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch); |
+ __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output)); |
} |
@@ -4631,15 +4805,13 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { |
class DeferredNumberTagI V8_FINAL : public LDeferredCode { |
public: |
DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr) |
- : LDeferredCode(codegen), instr_(instr) { } |
+ : LDeferredCode(codegen), instr_(instr) {} |
virtual void Generate() V8_OVERRIDE { |
- codegen()->DoDeferredNumberTagIU(instr_, |
- instr_->value(), |
- instr_->temp1(), |
- instr_->temp2(), |
- SIGNED_INT32); |
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), |
+ instr_->temp2(), SIGNED_INT32); |
} |
virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
+ |
private: |
LNumberTagI* instr_; |
}; |
@@ -4647,9 +4819,13 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) { |
Register src = ToRegister(instr->value()); |
Register dst = ToRegister(instr->result()); |
- DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr); |
- __ SmiTag(dst, src, SetCC); |
- __ b(vs, deferred->entry()); |
+ DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr); |
+#if V8_TARGET_ARCH_PPC64 |
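+ // Any int32 value fits in a 64-bit smi, so tagging cannot overflow here.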
+ __ SmiTag(dst, src); |
+#else |
+ __ SmiTagCheckOverflow(dst, src, r0); |
+ __ BranchOnOverflow(deferred->entry()); |
+#endif |
__ bind(deferred->exit()); |
} |
@@ -4658,15 +4834,13 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) { |
class DeferredNumberTagU V8_FINAL : public LDeferredCode { |
public: |
DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr) |
- : LDeferredCode(codegen), instr_(instr) { } |
+ : LDeferredCode(codegen), instr_(instr) {} |
virtual void Generate() V8_OVERRIDE { |
- codegen()->DoDeferredNumberTagIU(instr_, |
- instr_->value(), |
- instr_->temp1(), |
- instr_->temp2(), |
- UNSIGNED_INT32); |
+ codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(), |
+ instr_->temp2(), UNSIGNED_INT32); |
} |
virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
+ |
private: |
LNumberTagU* instr_; |
}; |
@@ -4674,18 +4848,16 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) { |
Register input = ToRegister(instr->value()); |
Register result = ToRegister(instr->result()); |
- DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr); |
- __ cmp(input, Operand(Smi::kMaxValue)); |
- __ b(hi, deferred->entry()); |
+ DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr); |
+ __ Cmpli(input, Operand(Smi::kMaxValue), r0); |
+ __ bgt(deferred->entry()); |
__ SmiTag(result, input); |
__ bind(deferred->exit()); |
} |
-void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, |
- LOperand* value, |
- LOperand* temp1, |
- LOperand* temp2, |
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value, |
+ LOperand* temp1, LOperand* temp2, |
IntegerSignedness signedness) { |
Label done, slow; |
Register src = ToRegister(value); |
@@ -4693,7 +4865,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, |
Register tmp1 = scratch0(); |
Register tmp2 = ToRegister(temp1); |
Register tmp3 = ToRegister(temp2); |
- LowDwVfpRegister dbl_scratch = double_scratch0(); |
+ DoubleRegister dbl_scratch = double_scratch0(); |
if (signedness == SIGNED_INT32) { |
// There was overflow, so bits 30 and 31 of the original integer |
@@ -4701,18 +4873,16 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, |
// the value in there. If that fails, call the runtime system. |
if (dst.is(src)) { |
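+ // Untag and flip the sign bit back to recover the original input
+ // (xoris XORs the immediate shifted left 16 bits, i.e. bit 31 here).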
__ SmiUntag(src, dst); |
- __ eor(src, src, Operand(0x80000000)); |
+ __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16)); |
} |
- __ vmov(dbl_scratch.low(), src); |
- __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low()); |
+ __ ConvertIntToDouble(src, dbl_scratch); |
} else { |
- __ vmov(dbl_scratch.low(), src); |
- __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low()); |
+ __ ConvertUnsignedIntToDouble(src, dbl_scratch); |
} |
if (FLAG_inline_new) { |
__ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex); |
- __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT); |
+ __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow); |
__ b(&done); |
} |
@@ -4722,7 +4892,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, |
// TODO(3095996): Put a valid pointer value in the stack slot where the |
// result register is stored, as this register is in the pointer map, but |
// contains an integer value. |
- __ mov(dst, Operand::Zero()); |
+ __ li(dst, Operand::Zero()); |
// Preserve the value of all registers. |
PushSafepointRegistersScope scope(this); |
@@ -4732,19 +4902,17 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, |
// They only call Runtime::kAllocateHeapNumber. |
// The corresponding HChange instructions are added in a phase that does |
// not have easy access to the local context. |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
- RecordSafepointWithRegisters( |
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
- __ sub(r0, r0, Operand(kHeapObjectTag)); |
- __ StoreToSafepointRegisterSlot(r0, dst); |
+ RecordSafepointWithRegisters(instr->pointer_map(), 0, |
+ Safepoint::kNoLazyDeopt); |
+ __ StoreToSafepointRegisterSlot(r3, dst); |
} |
// Done. Store the value in dbl_scratch into the value field of the
// allocated heap number.
__ bind(&done); |
- __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset); |
- __ add(dst, dst, Operand(kHeapObjectTag)); |
+ __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset)); |
} |
@@ -4752,34 +4920,31 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) { |
class DeferredNumberTagD V8_FINAL : public LDeferredCode { |
public: |
DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr) |
- : LDeferredCode(codegen), instr_(instr) { } |
+ : LDeferredCode(codegen), instr_(instr) {} |
virtual void Generate() V8_OVERRIDE { |
codegen()->DoDeferredNumberTagD(instr_); |
} |
virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
+ |
private: |
LNumberTagD* instr_; |
}; |
- DwVfpRegister input_reg = ToDoubleRegister(instr->value()); |
+ DoubleRegister input_reg = ToDoubleRegister(instr->value()); |
Register scratch = scratch0(); |
Register reg = ToRegister(instr->result()); |
Register temp1 = ToRegister(instr->temp()); |
Register temp2 = ToRegister(instr->temp2()); |
- DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr); |
+ DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr); |
if (FLAG_inline_new) { |
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex); |
- // We want the untagged address first for performance |
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(), |
- DONT_TAG_RESULT); |
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry()); |
} else { |
- __ jmp(deferred->entry()); |
+ __ b(deferred->entry()); |
} |
__ bind(deferred->exit()); |
- __ vstr(input_reg, reg, HeapNumber::kValueOffset); |
- // Now that we have finished with the object's real address tag it |
- __ add(reg, reg, Operand(kHeapObjectTag)); |
+ __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset)); |
} |
@@ -4788,7 +4953,7 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
// result register contain a valid pointer because it is already |
// contained in the register pointer map. |
Register reg = ToRegister(instr->result()); |
- __ mov(reg, Operand::Zero()); |
+ __ li(reg, Operand::Zero()); |
PushSafepointRegistersScope scope(this); |
// NumberTagI and NumberTagD use the context from the frame, rather than |
@@ -4796,12 +4961,11 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) { |
// They only call Runtime::kAllocateHeapNumber. |
// The corresponding HChange instructions are added in a phase that does |
// not have easy access to the local context. |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
__ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
- RecordSafepointWithRegisters( |
- instr->pointer_map(), 0, Safepoint::kNoLazyDeopt); |
- __ sub(r0, r0, Operand(kHeapObjectTag)); |
- __ StoreToSafepointRegisterSlot(r0, reg); |
+ RecordSafepointWithRegisters(instr->pointer_map(), 0, |
+ Safepoint::kNoLazyDeopt); |
+ __ StoreToSafepointRegisterSlot(r3, reg); |
} |
@@ -4811,75 +4975,87 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) { |
Register output = ToRegister(instr->result()); |
if (hchange->CheckFlag(HValue::kCanOverflow) && |
hchange->value()->CheckFlag(HValue::kUint32)) { |
- __ tst(input, Operand(0xc0000000)); |
- DeoptimizeIf(ne, instr->environment()); |
+ __ TestUnsignedSmiCandidate(input, r0); |
+ DeoptimizeIf(ne, instr->environment(), cr0); |
} |
+#if !V8_TARGET_ARCH_PPC64 |
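+ // Tagging can only overflow on 32-bit targets, where smis hold 31 bits;
+ // a 64-bit smi holds any int32.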
if (hchange->CheckFlag(HValue::kCanOverflow) && |
!hchange->value()->CheckFlag(HValue::kUint32)) { |
- __ SmiTag(output, input, SetCC); |
- DeoptimizeIf(vs, instr->environment()); |
+ __ SmiTagCheckOverflow(output, input, r0); |
+ DeoptimizeIf(lt, instr->environment(), cr0); |
} else { |
+#endif |
__ SmiTag(output, input); |
+#if !V8_TARGET_ARCH_PPC64 |
} |
+#endif |
} |
void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
+ Register scratch = scratch0(); |
Register input = ToRegister(instr->value()); |
Register result = ToRegister(instr->result()); |
if (instr->needs_check()) { |
STATIC_ASSERT(kHeapObjectTag == 1); |
- // If the input is a HeapObject, SmiUntag will set the carry flag. |
- __ SmiUntag(result, input, SetCC); |
- DeoptimizeIf(cs, instr->environment()); |
+ // If the input is a HeapObject, its tag bit makes scratch non-zero.
+ __ andi(scratch, input, Operand(kHeapObjectTag)); |
+ __ SmiUntag(result, input); |
+ DeoptimizeIf(ne, instr->environment(), cr0); |
} else { |
__ SmiUntag(result, input); |
} |
} |
-void LCodeGen::EmitNumberUntagD(Register input_reg, |
- DwVfpRegister result_reg, |
+void LCodeGen::EmitNumberUntagD(Register input_reg, DoubleRegister result_reg, |
bool can_convert_undefined_to_nan, |
bool deoptimize_on_minus_zero, |
- LEnvironment* env, |
- NumberUntagDMode mode) { |
+ LEnvironment* env, NumberUntagDMode mode) { |
Register scratch = scratch0(); |
- SwVfpRegister flt_scratch = double_scratch0().low(); |
DCHECK(!result_reg.is(double_scratch0())); |
+ |
Label convert, load_smi, done; |
+ |
if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
// Smi check. |
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
+ |
// Heap number map check. |
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
+ __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
- __ cmp(scratch, Operand(ip)); |
+ __ cmp(scratch, ip); |
if (can_convert_undefined_to_nan) { |
- __ b(ne, &convert); |
+ __ bne(&convert); |
} else { |
DeoptimizeIf(ne, env); |
} |
// Load the heap number value.
- __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); |
+ __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
if (deoptimize_on_minus_zero) { |
- __ VmovLow(scratch, result_reg); |
- __ cmp(scratch, Operand::Zero()); |
- __ b(ne, &done); |
- __ VmovHigh(scratch, result_reg); |
- __ cmp(scratch, Operand(HeapNumber::kSignMask)); |
+#if V8_TARGET_ARCH_PPC64 |
+ __ MovDoubleToInt64(scratch, result_reg); |
+ // Rotate left by one so that -0.0 (only the sign bit set) becomes 1,
+ // allowing a simple equality compare.
+ __ rldicl(scratch, scratch, 1, 0); |
+ __ cmpi(scratch, Operand(1)); |
+#else |
+ __ MovDoubleToInt64(scratch, ip, result_reg); |
+ __ cmpi(ip, Operand::Zero()); |
+ __ bne(&done); |
+ __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0); |
+#endif |
DeoptimizeIf(eq, env); |
} |
- __ jmp(&done); |
+ __ b(&done); |
if (can_convert_undefined_to_nan) { |
__ bind(&convert); |
// Convert undefined (and hole) to NaN. |
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
- __ cmp(input_reg, Operand(ip)); |
+ __ cmp(input_reg, ip); |
DeoptimizeIf(ne, env); |
__ LoadRoot(scratch, Heap::kNanValueRootIndex); |
- __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); |
- __ jmp(&done); |
+ __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); |
+ __ b(&done); |
} |
} else { |
__ SmiUntag(scratch, input_reg); |
@@ -4888,8 +5064,7 @@ void LCodeGen::EmitNumberUntagD(Register input_reg, |
// Smi to double register conversion |
__ bind(&load_smi); |
// scratch: untagged value of input_reg |
- __ vmov(flt_scratch, scratch); |
- __ vcvt_f64_s32(result_reg, flt_scratch); |
+ __ ConvertIntToDouble(scratch, result_reg); |
__ bind(&done); |
} |
@@ -4898,30 +5073,25 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { |
Register input_reg = ToRegister(instr->value()); |
Register scratch1 = scratch0(); |
Register scratch2 = ToRegister(instr->temp()); |
- LowDwVfpRegister double_scratch = double_scratch0(); |
- DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2()); |
+ DoubleRegister double_scratch = double_scratch0(); |
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2()); |
DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2)); |
DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1)); |
Label done; |
- // The input was optimistically untagged; revert it. |
- // The carry flag is set when we reach this deferred code as we just executed |
- // SmiUntag(heap_object, SetCC) |
- STATIC_ASSERT(kHeapObjectTag == 1); |
- __ adc(scratch2, input_reg, Operand(input_reg)); |
- |
// Heap number map check. |
- __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset)); |
+ __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
- __ cmp(scratch1, Operand(ip)); |
+ __ cmp(scratch1, ip); |
if (instr->truncating()) { |
// Performs a truncating conversion of a floating point number as used by |
// the JS bitwise operations. |
Label no_heap_number, check_bools, check_false; |
- __ b(ne, &no_heap_number); |
+ __ bne(&no_heap_number); |
+ __ mr(scratch2, input_reg); |
__ TruncateHeapNumberToI(input_reg, scratch2); |
__ b(&done); |
@@ -4929,39 +5099,45 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) { |
// for truncating conversions. |
__ bind(&no_heap_number); |
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
- __ cmp(scratch2, Operand(ip)); |
- __ b(ne, &check_bools); |
- __ mov(input_reg, Operand::Zero()); |
+ __ cmp(input_reg, ip); |
+ __ bne(&check_bools); |
+ __ li(input_reg, Operand::Zero()); |
__ b(&done); |
__ bind(&check_bools); |
__ LoadRoot(ip, Heap::kTrueValueRootIndex); |
- __ cmp(scratch2, Operand(ip)); |
- __ b(ne, &check_false); |
- __ mov(input_reg, Operand(1)); |
+ __ cmp(input_reg, ip); |
+ __ bne(&check_false); |
+ __ li(input_reg, Operand(1)); |
__ b(&done); |
__ bind(&check_false); |
__ LoadRoot(ip, Heap::kFalseValueRootIndex); |
- __ cmp(scratch2, Operand(ip)); |
+ __ cmp(input_reg, ip); |
DeoptimizeIf(ne, instr->environment()); |
- __ mov(input_reg, Operand::Zero()); |
- __ b(&done); |
+ __ li(input_reg, Operand::Zero()); |
} else { |
// Deoptimize if we don't have a heap number. |
DeoptimizeIf(ne, instr->environment()); |
- __ sub(ip, scratch2, Operand(kHeapObjectTag)); |
- __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); |
- __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); |
+ __ lfd(double_scratch2, |
+ FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
+ if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
+ // Preserve the heap number pointer in scratch2 for the minus-zero
+ // check below.
+ __ mr(scratch2, input_reg); |
+ } |
+ __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1, |
+ double_scratch); |
DeoptimizeIf(ne, instr->environment()); |
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
- __ cmp(input_reg, Operand::Zero()); |
- __ b(ne, &done); |
- __ VmovHigh(scratch1, double_scratch2); |
- __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
- DeoptimizeIf(ne, instr->environment()); |
+ __ cmpi(input_reg, Operand::Zero()); |
+ __ bne(&done); |
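+ // The result is zero; check the sign word of the original double to
+ // deoptimize on -0.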
+ __ lwz(scratch1, |
+ FieldMemOperand(scratch2, HeapNumber::kValueOffset + |
+ Register::kExponentOffset)); |
+ __ cmpwi(scratch1, Operand::Zero()); |
+ DeoptimizeIf(lt, instr->environment()); |
} |
} |
__ bind(&done); |
@@ -4972,11 +5148,12 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
class DeferredTaggedToI V8_FINAL : public LDeferredCode { |
public: |
DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
- : LDeferredCode(codegen), instr_(instr) { } |
+ : LDeferredCode(codegen), instr_(instr) {} |
virtual void Generate() V8_OVERRIDE { |
codegen()->DoDeferredTaggedToI(instr_); |
} |
virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
+ |
private: |
LTaggedToI* instr_; |
}; |
@@ -4990,14 +5167,12 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
if (instr->hydrogen()->value()->representation().IsSmi()) { |
__ SmiUntag(input_reg); |
} else { |
- DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr); |
- |
- // Optimistically untag the input. |
- // If the input is a HeapObject, SmiUntag will set the carry flag. |
- __ SmiUntag(input_reg, SetCC); |
- // Branch to deferred code if the input was tagged. |
- // The deferred code will take care of restoring the tag. |
- __ b(cs, deferred->entry()); |
+ DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr); |
+ |
+ // Branch to deferred code if the input is a HeapObject. |
+ __ JumpIfNotSmi(input_reg, deferred->entry()); |
+ |
+ __ SmiUntag(input_reg); |
__ bind(deferred->exit()); |
} |
} |
@@ -5010,39 +5185,44 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) { |
DCHECK(result->IsDoubleRegister()); |
Register input_reg = ToRegister(input); |
- DwVfpRegister result_reg = ToDoubleRegister(result); |
+ DoubleRegister result_reg = ToDoubleRegister(result); |
HValue* value = instr->hydrogen()->value(); |
NumberUntagDMode mode = value->representation().IsSmi() |
- ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; |
+ ? NUMBER_CANDIDATE_IS_SMI |
+ : NUMBER_CANDIDATE_IS_ANY_TAGGED; |
EmitNumberUntagD(input_reg, result_reg, |
instr->hydrogen()->can_convert_undefined_to_nan(), |
instr->hydrogen()->deoptimize_on_minus_zero(), |
- instr->environment(), |
- mode); |
+ instr->environment(), mode); |
} |
void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
Register result_reg = ToRegister(instr->result()); |
Register scratch1 = scratch0(); |
- DwVfpRegister double_input = ToDoubleRegister(instr->value()); |
- LowDwVfpRegister double_scratch = double_scratch0(); |
+ DoubleRegister double_input = ToDoubleRegister(instr->value()); |
+ DoubleRegister double_scratch = double_scratch0(); |
if (instr->truncating()) { |
__ TruncateDoubleToI(result_reg, double_input); |
} else { |
- __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); |
+ __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, |
+ double_scratch); |
// Deoptimize if the input wasn't an int32 (inside a double).
DeoptimizeIf(ne, instr->environment()); |
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
Label done; |
- __ cmp(result_reg, Operand::Zero()); |
- __ b(ne, &done); |
- __ VmovHigh(scratch1, double_input); |
- __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
- DeoptimizeIf(ne, instr->environment()); |
+ __ cmpi(result_reg, Operand::Zero()); |
+ __ bne(&done); |
+#if V8_TARGET_ARCH_PPC64 |
+ __ MovDoubleToInt64(scratch1, double_input); |
+#else |
+ __ MovDoubleHighToInt(scratch1, double_input); |
+#endif |
+ __ cmpi(scratch1, Operand::Zero()); |
+ DeoptimizeIf(lt, instr->environment()); |
__ bind(&done); |
} |
} |
@@ -5052,42 +5232,51 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
Register result_reg = ToRegister(instr->result()); |
Register scratch1 = scratch0(); |
- DwVfpRegister double_input = ToDoubleRegister(instr->value()); |
- LowDwVfpRegister double_scratch = double_scratch0(); |
+ DoubleRegister double_input = ToDoubleRegister(instr->value()); |
+ DoubleRegister double_scratch = double_scratch0(); |
if (instr->truncating()) { |
__ TruncateDoubleToI(result_reg, double_input); |
} else { |
- __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); |
+ __ TryDoubleToInt32Exact(result_reg, double_input, scratch1, |
+ double_scratch); |
// Deoptimize if the input wasn't an int32 (inside a double).
DeoptimizeIf(ne, instr->environment()); |
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
Label done; |
- __ cmp(result_reg, Operand::Zero()); |
- __ b(ne, &done); |
- __ VmovHigh(scratch1, double_input); |
- __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
- DeoptimizeIf(ne, instr->environment()); |
+ __ cmpi(result_reg, Operand::Zero()); |
+ __ bne(&done); |
+#if V8_TARGET_ARCH_PPC64 |
+ __ MovDoubleToInt64(scratch1, double_input); |
+#else |
+ __ MovDoubleHighToInt(scratch1, double_input); |
+#endif |
+ __ cmpi(scratch1, Operand::Zero()); |
+ DeoptimizeIf(lt, instr->environment()); |
__ bind(&done); |
} |
} |
- __ SmiTag(result_reg, SetCC); |
- DeoptimizeIf(vs, instr->environment()); |
+#if V8_TARGET_ARCH_PPC64 |
+ __ SmiTag(result_reg); |
+#else |
+ __ SmiTagCheckOverflow(result_reg, r0); |
+ DeoptimizeIf(lt, instr->environment(), cr0); |
+#endif |
} |
void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
LOperand* input = instr->value(); |
- __ SmiTst(ToRegister(input)); |
- DeoptimizeIf(ne, instr->environment()); |
+ __ TestIfSmi(ToRegister(input), r0); |
+ DeoptimizeIf(ne, instr->environment(), cr0); |
} |
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
LOperand* input = instr->value(); |
- __ SmiTst(ToRegister(input)); |
- DeoptimizeIf(eq, instr->environment()); |
+ __ TestIfSmi(ToRegister(input), r0); |
+ DeoptimizeIf(eq, instr->environment(), cr0); |
} |
} |
@@ -5096,25 +5285,25 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
Register input = ToRegister(instr->value()); |
Register scratch = scratch0(); |
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
+ __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
if (instr->hydrogen()->is_interval_check()) { |
InstanceType first; |
InstanceType last; |
instr->hydrogen()->GetCheckInterval(&first, &last); |
- __ cmp(scratch, Operand(first)); |
+ __ cmpli(scratch, Operand(first)); |
// If there is only one type in the interval check for equality. |
if (first == last) { |
DeoptimizeIf(ne, instr->environment()); |
} else { |
- DeoptimizeIf(lo, instr->environment()); |
+ DeoptimizeIf(lt, instr->environment()); |
// Omit check for the last type. |
if (last != LAST_TYPE) { |
- __ cmp(scratch, Operand(last)); |
- DeoptimizeIf(hi, instr->environment()); |
+ __ cmpli(scratch, Operand(last)); |
+ DeoptimizeIf(gt, instr->environment()); |
} |
} |
} else { |
@@ -5124,11 +5313,11 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
if (IsPowerOf2(mask)) { |
DCHECK(tag == 0 || IsPowerOf2(tag)); |
- __ tst(scratch, Operand(mask)); |
- DeoptimizeIf(tag == 0 ? ne : eq, instr->environment()); |
+ __ andi(r0, scratch, Operand(mask)); |
+ DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(), cr0); |
} else { |
- __ and_(scratch, scratch, Operand(mask)); |
- __ cmp(scratch, Operand(tag)); |
+ __ andi(scratch, scratch, Operand(mask)); |
+ __ cmpi(scratch, Operand(tag)); |
DeoptimizeIf(ne, instr->environment()); |
} |
} |
@@ -5143,10 +5332,10 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) { |
Register reg = ToRegister(instr->value()); |
Handle<Cell> cell = isolate()->factory()->NewCell(object); |
__ mov(ip, Operand(Handle<Object>(cell))); |
- __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); |
+ __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset)); |
__ cmp(reg, ip); |
} else { |
- __ cmp(reg, Operand(object)); |
+ __ Cmpi(reg, Operand(object), r0); |
} |
DeoptimizeIf(ne, instr->environment()); |
} |
@@ -5156,14 +5345,14 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
{ |
PushSafepointRegistersScope scope(this); |
__ push(object); |
- __ mov(cp, Operand::Zero()); |
+ __ li(cp, Operand::Zero()); |
__ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
- RecordSafepointWithRegisters( |
- instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
- __ StoreToSafepointRegisterSlot(r0, scratch0()); |
+ RecordSafepointWithRegisters(instr->pointer_map(), 1, |
+ Safepoint::kNoLazyDeopt); |
+ __ StoreToSafepointRegisterSlot(r3, scratch0()); |
} |
- __ tst(scratch0(), Operand(kSmiTagMask)); |
- DeoptimizeIf(eq, instr->environment()); |
+ __ TestIfSmi(scratch0(), r0); |
+ DeoptimizeIf(eq, instr->environment(), cr0); |
} |
@@ -5179,6 +5368,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
} |
Label* check_maps() { return &check_maps_; } |
virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
+ |
private: |
LCheckMaps* instr_; |
Label check_maps_; |
@@ -5199,11 +5389,11 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
DCHECK(input->IsRegister()); |
Register reg = ToRegister(input); |
- __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); |
+ __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); |
DeferredCheckMaps* deferred = NULL; |
if (instr->hydrogen()->HasMigrationTarget()) { |
- deferred = new(zone()) DeferredCheckMaps(this, instr, reg); |
+ deferred = new (zone()) DeferredCheckMaps(this, instr, reg); |
__ bind(deferred->check_maps()); |
} |
@@ -5212,13 +5402,13 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
for (int i = 0; i < maps->size() - 1; i++) { |
Handle<Map> map = maps->at(i).handle(); |
__ CompareMap(map_reg, map, &success); |
- __ b(eq, &success); |
+ __ beq(&success); |
} |
Handle<Map> map = maps->at(maps->size() - 1).handle(); |
__ CompareMap(map_reg, map, &success); |
if (instr->hydrogen()->HasMigrationTarget()) { |
- __ b(ne, deferred->entry()); |
+ __ bne(deferred->entry()); |
} else { |
DeoptimizeIf(ne, instr->environment()); |
} |
@@ -5228,7 +5418,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
- DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); |
+ DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); |
Register result_reg = ToRegister(instr->result()); |
__ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); |
} |
@@ -5245,29 +5435,29 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { |
Register scratch = scratch0(); |
Register input_reg = ToRegister(instr->unclamped()); |
Register result_reg = ToRegister(instr->result()); |
- DwVfpRegister temp_reg = ToDoubleRegister(instr->temp()); |
+ DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); |
Label is_smi, done, heap_number; |
// Both smi and heap number cases are handled. |
__ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); |
// Check for heap number |
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
- __ cmp(scratch, Operand(factory()->heap_number_map())); |
- __ b(eq, &heap_number); |
+ __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
+ __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0); |
+ __ beq(&heap_number); |
// Check for undefined. Undefined is converted to zero for clamping |
// conversions. |
- __ cmp(input_reg, Operand(factory()->undefined_value())); |
+ __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0); |
DeoptimizeIf(ne, instr->environment()); |
- __ mov(result_reg, Operand::Zero()); |
- __ jmp(&done); |
+ __ li(result_reg, Operand::Zero()); |
+ __ b(&done); |
// Heap number |
__ bind(&heap_number); |
- __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
+ __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
__ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); |
- __ jmp(&done); |
+ __ b(&done); |
// smi |
__ bind(&is_smi); |
@@ -5278,12 +5468,13 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) { |
void LCodeGen::DoDoubleBits(LDoubleBits* instr) { |
- DwVfpRegister value_reg = ToDoubleRegister(instr->value()); |
+ DoubleRegister value_reg = ToDoubleRegister(instr->value()); |
Register result_reg = ToRegister(instr->result()); |
+ |
if (instr->hydrogen()->bits() == HDoubleBits::HIGH) { |
- __ VmovHigh(result_reg, value_reg); |
+ __ MovDoubleHighToInt(result_reg, value_reg); |
} else { |
- __ VmovLow(result_reg, value_reg); |
+ __ MovDoubleLowToInt(result_reg, value_reg); |
} |
} |
@@ -5291,9 +5482,12 @@ void LCodeGen::DoDoubleBits(LDoubleBits* instr) { |
void LCodeGen::DoConstructDouble(LConstructDouble* instr) { |
Register hi_reg = ToRegister(instr->hi()); |
Register lo_reg = ToRegister(instr->lo()); |
- DwVfpRegister result_reg = ToDoubleRegister(instr->result()); |
- __ VmovHigh(result_reg, hi_reg); |
- __ VmovLow(result_reg, lo_reg); |
+ DoubleRegister result_reg = ToDoubleRegister(instr->result()); |
+#if V8_TARGET_ARCH_PPC64 |
+ __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0); |
+#else |
+ __ MovInt64ToDouble(result_reg, hi_reg, lo_reg); |
+#endif |
} |
@@ -5301,17 +5495,17 @@ void LCodeGen::DoAllocate(LAllocate* instr) { |
class DeferredAllocate V8_FINAL : public LDeferredCode { |
public: |
DeferredAllocate(LCodeGen* codegen, LAllocate* instr) |
- : LDeferredCode(codegen), instr_(instr) { } |
+ : LDeferredCode(codegen), instr_(instr) {} |
virtual void Generate() V8_OVERRIDE { |
codegen()->DoDeferredAllocate(instr_); |
} |
virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
+ |
private: |
LAllocate* instr_; |
}; |
- DeferredAllocate* deferred = |
- new(zone()) DeferredAllocate(this, instr); |
+ DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr); |
Register result = ToRegister(instr->result()); |
Register scratch = ToRegister(instr->temp1()); |
@@ -5336,7 +5530,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) { |
if (size <= Page::kMaxRegularHeapObjectSize) { |
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags); |
} else { |
- __ jmp(deferred->entry()); |
+ __ b(deferred->entry()); |
} |
} else { |
Register size = ToRegister(instr->size()); |
@@ -5349,16 +5543,17 @@ void LCodeGen::DoAllocate(LAllocate* instr) { |
STATIC_ASSERT(kHeapObjectTag == 1); |
if (instr->size()->IsConstantOperand()) { |
int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); |
- __ mov(scratch, Operand(size - kHeapObjectTag)); |
+ __ LoadIntLiteral(scratch, size - kHeapObjectTag); |
} else { |
- __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); |
+ __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag)); |
} |
__ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map())); |
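+ // Fill the allocated space with one-pointer filler maps, working from
+ // the highest word down to the base.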
Label loop; |
__ bind(&loop); |
- __ sub(scratch, scratch, Operand(kPointerSize), SetCC); |
- __ str(scratch2, MemOperand(result, scratch)); |
- __ b(ge, &loop); |
+ __ subi(scratch, scratch, Operand(kPointerSize)); |
+ __ StorePX(scratch2, MemOperand(result, scratch)); |
+ __ cmpi(scratch, Operand::Zero()); |
+ __ bge(&loop); |
} |
} |
@@ -5369,7 +5564,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) { |
// TODO(3095996): Get rid of this. For now, we need to make the |
// result register contain a valid pointer because it is already |
// contained in the register pointer map. |
- __ mov(result, Operand(Smi::FromInt(0))); |
+ __ LoadSmiLiteral(result, Smi::FromInt(0)); |
PushSafepointRegistersScope scope(this); |
if (instr->size()->IsRegister()) { |
@@ -5379,13 +5574,17 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) { |
__ push(size); |
} else { |
int32_t size = ToInteger32(LConstantOperand::cast(instr->size())); |
+#if !V8_TARGET_ARCH_PPC64 |
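+ // On 64-bit targets any int32 size fits in a smi, so no range check is
+ // needed.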
if (size >= 0 && size <= Smi::kMaxValue) { |
+#endif |
__ Push(Smi::FromInt(size)); |
+#if !V8_TARGET_ARCH_PPC64 |
} else { |
// We should never get here at runtime => abort |
__ stop("invalid allocation size"); |
return; |
} |
+#endif |
} |
int flags = AllocateDoubleAlignFlag::encode( |
@@ -5402,15 +5601,15 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) { |
} |
__ Push(Smi::FromInt(flags)); |
- CallRuntimeFromDeferred( |
- Runtime::kAllocateInTargetSpace, 2, instr, instr->context()); |
- __ StoreToSafepointRegisterSlot(r0, result); |
+ CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr, |
+ instr->context()); |
+ __ StoreToSafepointRegisterSlot(r3, result); |
} |
void LCodeGen::DoToFastProperties(LToFastProperties* instr) { |
- DCHECK(ToRegister(instr->value()).is(r0)); |
- __ push(r0); |
+ DCHECK(ToRegister(instr->value()).is(r3)); |
+ __ push(r3); |
CallRuntime(Runtime::kToFastProperties, 1, instr); |
} |
@@ -5419,43 +5618,43 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { |
DCHECK(ToRegister(instr->context()).is(cp)); |
Label materialized; |
// Registers will be used as follows: |
- // r6 = literals array. |
- // r1 = regexp literal. |
- // r0 = regexp literal clone. |
- // r2-5 are used as temporaries. |
+ // r10 = literals array. |
+ // r4 = regexp literal. |
+ // r3 = regexp literal clone. |
+ // r5-r9 are used as temporaries.
int literal_offset = |
FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index()); |
- __ Move(r6, instr->hydrogen()->literals()); |
- __ ldr(r1, FieldMemOperand(r6, literal_offset)); |
+ __ Move(r10, instr->hydrogen()->literals()); |
+ __ LoadP(r4, FieldMemOperand(r10, literal_offset)); |
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
- __ cmp(r1, ip); |
- __ b(ne, &materialized); |
+ __ cmp(r4, ip); |
+ __ bne(&materialized); |
// Create regexp literal using runtime function |
- // Result will be in r0. |
- __ mov(r5, Operand(Smi::FromInt(instr->hydrogen()->literal_index()))); |
- __ mov(r4, Operand(instr->hydrogen()->pattern())); |
- __ mov(r3, Operand(instr->hydrogen()->flags())); |
- __ Push(r6, r5, r4, r3); |
+ // Result will be in r3. |
+ __ LoadSmiLiteral(r9, Smi::FromInt(instr->hydrogen()->literal_index())); |
+ __ mov(r8, Operand(instr->hydrogen()->pattern())); |
+ __ mov(r7, Operand(instr->hydrogen()->flags())); |
+ __ Push(r10, r9, r8, r7); |
CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr); |
- __ mov(r1, r0); |
+ __ mr(r4, r3); |
__ bind(&materialized); |
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; |
Label allocated, runtime_allocate; |
- __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); |
- __ jmp(&allocated); |
+ __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT); |
+ __ b(&allocated); |
__ bind(&runtime_allocate); |
- __ mov(r0, Operand(Smi::FromInt(size))); |
- __ Push(r1, r0); |
+ __ LoadSmiLiteral(r3, Smi::FromInt(size)); |
+ __ Push(r4, r3); |
CallRuntime(Runtime::kAllocateInNewSpace, 1, instr); |
- __ pop(r1); |
+ __ pop(r4); |
__ bind(&allocated); |
// Copy the content into the newly allocated memory. |
- __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize); |
+ __ CopyFields(r3, r4, r5.bit(), size / kPointerSize); |
} |
@@ -5465,16 +5664,15 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) { |
// space for nested functions that don't need literals cloning. |
bool pretenure = instr->hydrogen()->pretenure(); |
if (!pretenure && instr->hydrogen()->has_no_literals()) { |
- FastNewClosureStub stub(isolate(), |
- instr->hydrogen()->strict_mode(), |
+ FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(), |
instr->hydrogen()->is_generator()); |
- __ mov(r2, Operand(instr->hydrogen()->shared_info())); |
+ __ mov(r5, Operand(instr->hydrogen()->shared_info())); |
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr); |
} else { |
- __ mov(r2, Operand(instr->hydrogen()->shared_info())); |
- __ mov(r1, Operand(pretenure ? factory()->true_value() |
+ __ mov(r5, Operand(instr->hydrogen()->shared_info())); |
+ __ mov(r4, Operand(pretenure ? factory()->true_value() |
: factory()->false_value())); |
- __ Push(cp, r2, r1); |
+ __ Push(cp, r5, r4); |
CallRuntime(Runtime::kNewClosure, 3, instr); |
} |
} |
@@ -5490,35 +5688,33 @@ void LCodeGen::DoTypeof(LTypeof* instr) { |
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) { |
Register input = ToRegister(instr->value()); |
- Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_), |
- instr->FalseLabel(chunk_), |
- input, |
- instr->type_literal()); |
+ Condition final_branch_condition = |
+ EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input, |
+ instr->type_literal()); |
if (final_branch_condition != kNoCondition) { |
EmitBranch(instr, final_branch_condition); |
} |
} |
-Condition LCodeGen::EmitTypeofIs(Label* true_label, |
- Label* false_label, |
- Register input, |
- Handle<String> type_name) { |
+Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label, |
+ Register input, Handle<String> type_name) { |
Condition final_branch_condition = kNoCondition; |
Register scratch = scratch0(); |
Factory* factory = isolate()->factory(); |
if (String::Equals(type_name, factory->number_string())) { |
__ JumpIfSmi(input, true_label); |
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
__ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); |
final_branch_condition = eq; |
} else if (String::Equals(type_name, factory->string_string())) { |
__ JumpIfSmi(input, false_label); |
__ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE); |
- __ b(ge, false_label); |
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); |
- __ tst(scratch, Operand(1 << Map::kIsUndetectable)); |
+ __ bge(false_label); |
+ __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); |
+ __ ExtractBit(r0, scratch, Map::kIsUndetectable); |
+ __ cmpi(r0, Operand::Zero()); |
final_branch_condition = eq; |
} else if (String::Equals(type_name, factory->symbol_string())) { |
@@ -5528,18 +5724,19 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, |
} else if (String::Equals(type_name, factory->boolean_string())) { |
__ CompareRoot(input, Heap::kTrueValueRootIndex); |
- __ b(eq, true_label); |
+ __ beq(true_label); |
__ CompareRoot(input, Heap::kFalseValueRootIndex); |
final_branch_condition = eq; |
} else if (String::Equals(type_name, factory->undefined_string())) { |
__ CompareRoot(input, Heap::kUndefinedValueRootIndex); |
- __ b(eq, true_label); |
+ __ beq(true_label); |
__ JumpIfSmi(input, false_label); |
// Check for undetectable objects => true. |
- __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); |
- __ tst(scratch, Operand(1 << Map::kIsUndetectable)); |
+ __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
+ __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset)); |
+ __ ExtractBit(r0, scratch, Map::kIsUndetectable); |
+ __ cmpi(r0, Operand::Zero()); |
final_branch_condition = ne; |
} else if (String::Equals(type_name, factory->function_string())) { |
@@ -5547,23 +5744,21 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label, |
Register type_reg = scratch; |
__ JumpIfSmi(input, false_label); |
__ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE); |
- __ b(eq, true_label); |
- __ cmp(type_reg, Operand(JS_FUNCTION_PROXY_TYPE)); |
+ __ beq(true_label); |
+ __ cmpi(type_reg, Operand(JS_FUNCTION_PROXY_TYPE)); |
final_branch_condition = eq; |
} else if (String::Equals(type_name, factory->object_string())) { |
Register map = scratch; |
__ JumpIfSmi(input, false_label); |
__ CompareRoot(input, Heap::kNullValueRootIndex); |
- __ b(eq, true_label); |
- __ CheckObjectTypeRange(input, |
- map, |
- FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, |
- LAST_NONCALLABLE_SPEC_OBJECT_TYPE, |
- false_label); |
+ __ beq(true_label); |
+ __ CheckObjectTypeRange(input, map, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, |
+ LAST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label); |
// Check for undetectable objects => false. |
- __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); |
- __ tst(scratch, Operand(1 << Map::kIsUndetectable)); |
+ __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); |
+ __ ExtractBit(r0, scratch, Map::kIsUndetectable); |
+ __ cmpi(r0, Operand::Zero()); |
final_branch_condition = eq; |
} else { |
@@ -5585,16 +5780,19 @@ void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) { |
void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) { |
DCHECK(!temp1.is(temp2)); |
// Get the frame pointer for the calling frame. |
- __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
+ __ LoadP(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
// Skip the arguments adaptor frame if it exists. |
- __ ldr(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); |
- __ cmp(temp2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset), eq); |
+ Label check_frame_marker; |
+ __ LoadP(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset)); |
+ __ CmpSmiLiteral(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); |
+ __ bne(&check_frame_marker); |
+ __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset)); |
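+  // PPC has no conditionally executed loads (ARM's "ldr ..., eq"), so
+  // skipping the adaptor frame needs an explicit label and branch.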
// Check the marker in the calling frame. |
- __ ldr(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); |
- __ cmp(temp1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); |
+ __ bind(&check_frame_marker); |
+ __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset)); |
+ __ CmpSmiLiteral(temp1, Smi::FromInt(StackFrame::CONSTRUCT), r0); |
} |
@@ -5604,8 +5802,6 @@ void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) { |
// instruction for patching the code here. |
int current_pc = masm()->pc_offset(); |
if (current_pc < last_lazy_deopt_pc_ + space_needed) { |
- // Block literal pool emission for duration of padding. |
- Assembler::BlockConstPoolScope block_const_pool(masm()); |
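+    // The padding loop below needs no constant-pool blocking on PPC;
+    // the pool scope was an ARM-only concern.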
int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc; |
DCHECK_EQ(0, padding_size % Assembler::kInstrSize); |
while (padding_size > 0) { |
@@ -5668,11 +5864,12 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { |
class DeferredStackCheck V8_FINAL : public LDeferredCode { |
public: |
DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr) |
- : LDeferredCode(codegen), instr_(instr) { } |
+ : LDeferredCode(codegen), instr_(instr) {} |
virtual void Generate() V8_OVERRIDE { |
codegen()->DoDeferredStackCheck(instr_); |
} |
virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
+ |
private: |
LStackCheck* instr_; |
}; |
@@ -5685,23 +5882,21 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) { |
// Perform stack overflow check. |
Label done; |
__ LoadRoot(ip, Heap::kStackLimitRootIndex); |
- __ cmp(sp, Operand(ip)); |
- __ b(hs, &done); |
- Handle<Code> stack_check = isolate()->builtins()->StackCheck(); |
- PredictableCodeSizeScope predictable(masm(), |
- CallCodeSize(stack_check, RelocInfo::CODE_TARGET)); |
+ __ cmpl(sp, ip); |
+ __ bge(&done); |
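+    // cmpl is an unsigned (logical) compare, so bge preserves the ARM
+    // "hs" semantics of the stack-limit check.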
DCHECK(instr->context()->IsRegister()); |
DCHECK(ToRegister(instr->context()).is(cp)); |
- CallCode(stack_check, RelocInfo::CODE_TARGET, instr); |
+ CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET, |
+ instr); |
__ bind(&done); |
} else { |
DCHECK(instr->hydrogen()->is_backwards_branch()); |
// Perform stack overflow check if this goto needs it before jumping. |
DeferredStackCheck* deferred_stack_check = |
- new(zone()) DeferredStackCheck(this, instr); |
+ new (zone()) DeferredStackCheck(this, instr); |
__ LoadRoot(ip, Heap::kStackLimitRootIndex); |
- __ cmp(sp, Operand(ip)); |
- __ b(lo, deferred_stack_check->entry()); |
+ __ cmpl(sp, ip); |
+ __ blt(deferred_stack_check->entry()); |
EnsureSpaceForLazyDeopt(Deoptimizer::patch_size()); |
__ bind(instr->done_label()); |
deferred_stack_check->SetExit(instr->done_label()); |
@@ -5730,35 +5925,35 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) { |
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
- __ cmp(r0, ip); |
+ __ cmp(r3, ip); |
DeoptimizeIf(eq, instr->environment()); |
- Register null_value = r5; |
+ Register null_value = r8; |
__ LoadRoot(null_value, Heap::kNullValueRootIndex); |
- __ cmp(r0, null_value); |
+ __ cmp(r3, null_value); |
DeoptimizeIf(eq, instr->environment()); |
- __ SmiTst(r0); |
- DeoptimizeIf(eq, instr->environment()); |
+ __ TestIfSmi(r3, r0); |
+ DeoptimizeIf(eq, instr->environment(), cr0); |
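+  // TestIfSmi sets cr0 (presumably via a record-form AND; r0 is its
+  // scratch), so the deopt test names cr0 instead of the default field.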
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
- __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); |
+ __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE); |
DeoptimizeIf(le, instr->environment()); |
Label use_cache, call_runtime; |
__ CheckEnumCache(null_value, &call_runtime); |
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); |
+ __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); |
__ b(&use_cache); |
// Get the set of properties to enumerate. |
__ bind(&call_runtime); |
- __ push(r0); |
+ __ push(r3); |
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); |
+ __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); |
__ LoadRoot(ip, Heap::kMetaMapRootIndex); |
- __ cmp(r1, ip); |
+ __ cmp(r4, ip); |
DeoptimizeIf(ne, instr->environment()); |
__ bind(&use_cache); |
} |
@@ -5769,18 +5964,16 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
Register result = ToRegister(instr->result()); |
Label load_cache, done; |
__ EnumLength(result, map); |
- __ cmp(result, Operand(Smi::FromInt(0))); |
- __ b(ne, &load_cache); |
+ __ CmpSmiLiteral(result, Smi::FromInt(0), r0); |
+ __ bne(&load_cache); |
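+  // CmpSmiLiteral goes through the r0 scratch since, on 64-bit targets,
+  // a Smi literal may not fit a 16-bit compare immediate.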
__ mov(result, Operand(isolate()->factory()->empty_fixed_array())); |
- __ jmp(&done); |
+ __ b(&done); |
__ bind(&load_cache); |
__ LoadInstanceDescriptors(map, result); |
- __ ldr(result, |
- FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
- __ ldr(result, |
- FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
- __ cmp(result, Operand::Zero()); |
+ __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
+ __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
+ __ cmpi(result, Operand::Zero()); |
DeoptimizeIf(eq, instr->environment()); |
__ bind(&done); |
@@ -5790,45 +5983,40 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
Register object = ToRegister(instr->value()); |
Register map = ToRegister(instr->map()); |
- __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
+ __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
__ cmp(map, scratch0()); |
DeoptimizeIf(ne, instr->environment()); |
} |
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
- Register result, |
- Register object, |
+ Register result, Register object, |
Register index) { |
PushSafepointRegistersScope scope(this); |
- __ Push(object); |
- __ Push(index); |
- __ mov(cp, Operand::Zero()); |
+ __ Push(object, index); |
+ __ li(cp, Operand::Zero()); |
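+  // li is the load-immediate form; the two separate pushes are also
+  // folded into a single Push(object, index).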
__ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble); |
- RecordSafepointWithRegisters( |
- instr->pointer_map(), 2, Safepoint::kNoLazyDeopt); |
- __ StoreToSafepointRegisterSlot(r0, result); |
+ RecordSafepointWithRegisters(instr->pointer_map(), 2, |
+ Safepoint::kNoLazyDeopt); |
+ __ StoreToSafepointRegisterSlot(r3, result); |
} |
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { |
class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode { |
public: |
- DeferredLoadMutableDouble(LCodeGen* codegen, |
- LLoadFieldByIndex* instr, |
- Register result, |
- Register object, |
- Register index) |
+ DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr, |
+ Register result, Register object, Register index) |
: LDeferredCode(codegen), |
instr_(instr), |
result_(result), |
object_(object), |
- index_(index) { |
- } |
+ index_(index) {} |
virtual void Generate() V8_OVERRIDE { |
codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_); |
} |
virtual LInstruction* instr() V8_OVERRIDE { return instr_; } |
+ |
private: |
LLoadFieldByIndex* instr_; |
Register result_; |
@@ -5842,30 +6030,31 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { |
Register scratch = scratch0(); |
DeferredLoadMutableDouble* deferred; |
- deferred = new(zone()) DeferredLoadMutableDouble( |
- this, instr, result, object, index); |
+ deferred = new (zone()) |
+ DeferredLoadMutableDouble(this, instr, result, object, index); |
Label out_of_object, done; |
- __ tst(index, Operand(Smi::FromInt(1))); |
- __ b(ne, deferred->entry()); |
- __ mov(index, Operand(index, ASR, 1)); |
+ __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0); |
+ __ bne(deferred->entry(), cr0); |
+ __ ShiftRightArithImm(index, index, 1); |
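+  // The low payload bit of the Smi index flags a mutable double field
+  // (handled on the deferred path); TestBitMask sets cr0, and
+  // ShiftRightArithImm replaces ARM's inline ASR operand.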
- __ cmp(index, Operand::Zero()); |
- __ b(lt, &out_of_object); |
+ __ cmpi(index, Operand::Zero()); |
+ __ blt(&out_of_object); |
- __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index)); |
- __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); |
+ __ SmiToPtrArrayOffset(r0, index); |
+ __ add(scratch, object, r0); |
+ __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize)); |
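+  // PPC addressing can't fold the Smi-to-offset scaling into the add,
+  // so SmiToPtrArrayOffset computes the byte offset into r0 first.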
__ b(&done); |
__ bind(&out_of_object); |
- __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
+ __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
// Index is equal to negated out of object property index plus 1. |
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); |
- __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index)); |
- __ ldr(result, FieldMemOperand(scratch, |
- FixedArray::kHeaderSize - kPointerSize)); |
+ __ SmiToPtrArrayOffset(r0, index); |
+ __ sub(scratch, result, r0); |
+ __ LoadP(result, |
+ FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize)); |
__ bind(deferred->exit()); |
__ bind(&done); |
} |
@@ -5873,7 +6062,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { |
void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) { |
Register context = ToRegister(instr->context()); |
- __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
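+  // StoreP mirrors LoadP: a pointer-width store in place of ARM's str.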
} |
@@ -5887,5 +6076,5 @@ void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) { |
#undef __ |
- |
-} } // namespace v8::internal |
+} |
+} // namespace v8::internal |