Chromium Code Reviews

Unified Diff: src/arm/lithium-codegen-arm.cc

Issue 11659022: Generate the TransitionElementsStub using Crankshaft (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Address review feedback (created 7 years, 11 months ago)
Index: src/arm/lithium-codegen-arm.cc
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index a18f894b00c00bba1809f3e6407b66cf17d1a3f8..9fdb22b8d50114c43dba07cab931fa8403f0616f 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -146,16 +146,23 @@ bool LCodeGen::GeneratePrologue() {
info()->set_prologue_offset(masm_->pc_offset());
if (NeedsEagerFrame()) {
- PredictableCodeSizeScope predictible_code_size_scope(
- masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
+ if (info()->IsStub()) {
+ __ stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
+ __ Push(Smi::FromInt(StackFrame::STUB));
+ // Adjust FP to point to saved FP.
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ } else {
+ PredictableCodeSizeScope predictible_code_size_scope(
+ masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
+ // The following three instructions must remain together and unmodified
+ // for code aging to work properly.
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ // Adjust FP to point to saved FP.
+ __ add(fp, sp, Operand(2 * kPointerSize));
+ }
frame_is_built_ = true;
}
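
The stub branch above builds a lighter frame: r1 (the JS function register) is not saved, and a STUB marker smi is pushed in its place so the stack walker can classify the frame. A minimal sketch of the resulting layout, assuming 32-bit ARM with kPointerSize == 4; the offsets follow from the stm/Push/add sequence, and the slot names are descriptive rather than V8 identifiers:

#include <cstdio>

int main() {
  const int kPointerSize = 4;
  struct Slot { const char* name; int fp_offset; };
  // stm(db_w, sp, cp|fp|lr) stores lr at the highest address and cp at the
  // lowest; the STUB marker is pushed last; fp = sp + 2 * kPointerSize then
  // points at the saved fp, exactly as in a normal JS frame.
  const Slot frame[] = {
    { "saved lr",            1 * kPointerSize },
    { "saved fp   <-- fp",   0 * kPointerSize },
    { "saved cp",           -1 * kPointerSize },
    { "STUB marker <-- sp", -2 * kPointerSize },
  };
  for (const Slot& s : frame)
    std::printf("%-22s at fp%+d\n", s.name, s.fp_offset);
  return 0;
}
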
@@ -163,18 +170,38 @@ bool LCodeGen::GeneratePrologue() {
int slots = GetStackSlotCount();
if (slots > 0) {
if (FLAG_debug_code) {
- __ mov(r0, Operand(slots));
- __ mov(r2, Operand(kSlotsZapValue));
+ __ sub(sp, sp, Operand(slots * kPointerSize));
+ __ push(r0);
+ __ push(r1);
+ __ add(r0, sp, Operand(slots * kPointerSize));
+ __ mov(r1, Operand(kSlotsZapValue));
Label loop;
__ bind(&loop);
- __ push(r2);
- __ sub(r0, r0, Operand(1), SetCC);
+ __ sub(r0, r0, Operand(kPointerSize));
+ __ str(r1, MemOperand(r0, 2 * kPointerSize));
+ __ cmp(r0, sp);
__ b(ne, &loop);
+ __ pop(r1);
+ __ pop(r0);
} else {
__ sub(sp, sp, Operand(slots * kPointerSize));
}
}
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ Comment(";;; Save clobbered callee double registers");
+ int count = 0;
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ while (!save_iterator.Done()) {
+ __ vstr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
+
// Possibly allocate a local context.
int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
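
The reworked debug-code path above no longer pushes the zap value once per slot; it reserves the whole frame first, saves r0 and r1 below it, and stores through r0 with a fixed 2 * kPointerSize bias that skips the two saved words. A small word-addressed simulation of the loop; the zap constant is illustrative, the real kSlotsZapValue is defined elsewhere in V8:

#include <cassert>
#include <cstdint>
#include <vector>

int main() {
  const int slots = 5;                          // GetStackSlotCount(), say
  const uint32_t kSlotsZapValue = 0xbeefdeef;   // illustrative pattern
  // Word-addressed stack model; index 0 is the lowest address, i.e. sp after
  // the two pushes. [0] = saved r1, [1] = saved r0, [2..slots+1] = the frame
  // slots to zap.
  std::vector<uint32_t> mem(slots + 2, 0);
  int sp = 0;
  int r0 = sp + slots;            // add(r0, sp, Operand(slots * kPointerSize))
  do {
    r0 -= 1;                      // sub(r0, r0, Operand(kPointerSize))
    mem[r0 + 2] = kSlotsZapValue; // str(r1, MemOperand(r0, 2 * kPointerSize))
  } while (r0 != sp);             // cmp(r0, sp); b(ne, &loop)
  for (int i = 0; i < slots; ++i) assert(mem[2 + i] == kSlotsZapValue);
  assert(mem[0] == 0 && mem[1] == 0);  // the r1/r0 save slots stay intact
  return 0;
}
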
@@ -2820,11 +2847,26 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
+ if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ ASSERT(NeedsEagerFrame());
+ BitVector* doubles = chunk()->allocated_double_registers();
+ BitVector::Iterator save_iterator(doubles);
+ int count = 0;
+ while (!save_iterator.Done()) {
+ __ vldr(DwVfpRegister::FromAllocationIndex(save_iterator.Current()),
+ MemOperand(sp, count * kDoubleSize));
+ save_iterator.Advance();
+ count++;
+ }
+ }
if (NeedsEagerFrame()) {
int32_t sp_delta = (GetParameterCount() + 1) * kPointerSize;
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
- __ add(sp, sp, Operand(sp_delta));
+ if (!info()->IsStub()) {
+ __ add(sp, sp, Operand(sp_delta));
+ }
}
__ Jump(lr);
}
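
DoReturn mirrors the prologue: both walk the same allocated-doubles bit vector in the same order, so the i-th set bit gets slot i and every vldr offset matches its vstr. The stub case also skips the sp_delta add, presumably because a stub frame has no receiver and argument slots to pop. A sketch of the offset assignment with an illustrative bit vector:

#include <cstdint>
#include <cstdio>

int main() {
  const int kDoubleSize = 8;
  // Illustrative allocation bit vector: bits for d0, d2, d3 and d5 set.
  const uint32_t allocated_doubles = 0x2D;
  // The i-th set bit gets stack slot i; vstr in GeneratePrologue() and vldr
  // in DoReturn() therefore agree on every offset.
  int count = 0;
  for (int reg = 0; reg < 16; ++reg) {
    if (allocated_doubles & (1u << reg)) {
      std::printf("d%-2d <-> MemOperand(sp, %d)\n", reg, count * kDoubleSize);
      ++count;
    }
  }
  return 0;
}
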
@@ -3587,8 +3629,14 @@ void LCodeGen::DoThisFunction(LThisFunction* instr) {
void LCodeGen::DoContext(LContext* instr) {
+ // If there is a non-return use, the context must be moved to a register.
Register result = ToRegister(instr->result());
- __ mov(result, cp);
+ for (HUseIterator it(instr->hydrogen()->uses()); !it.Done(); it.Advance()) {
+ if (!it.value()->IsReturn()) {
+ __ mov(result, cp);
+ return;
+ }
+ }
}
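
The new loop in DoContext elides the mov when the context's only use is the function's return, since the value is already live in cp. A minimal stand-in for that scan; the types are illustrative, not the Hydrogen classes:

#include <cassert>
#include <vector>

struct Use { bool is_return; };

// Mirrors the loop in DoContext: the move from cp is needed iff at least
// one use is not the return instruction.
bool NeedsContextMove(const std::vector<Use>& uses) {
  for (const Use& use : uses) {
    if (!use.is_return) return true;
  }
  return false;
}

int main() {
  assert(!NeedsContextMove({{true}}));          // only the return: elide mov
  assert(NeedsContextMove({{true}, {false}}));  // any other use: emit mov
  return 0;
}
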
@@ -4507,7 +4555,6 @@ void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
Register object_reg = ToRegister(instr->object());
- Register new_map_reg = ToRegister(instr->new_map_temp());
Register scratch = scratch0();
Handle<Map> from_map = instr->original_map();
@@ -4519,18 +4566,29 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
__ cmp(scratch, Operand(from_map));
__ b(ne, &not_applicable);
- __ mov(new_map_reg, Operand(to_map));
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ Register new_map_reg = ToRegister(instr->new_map_temp());
+ __ mov(new_map_reg, Operand(to_map));
__ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
scratch, kLRHasBeenSaved, kDontSaveFPRegs);
+ } else if (FLAG_compiled_transitions) {
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ Move(r0, object_reg);
+ __ Move(r1, to_map);
+ TransitionElementsKindStub stub(from_kind, to_kind);
+ __ CallStub(&stub);
+ RecordSafepointWithRegisters(
+ instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
} else if (IsFastSmiElementsKind(from_kind) &&
IsFastDoubleElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(r2));
+ Register new_map_reg = ToRegister(instr->new_map_temp());
ASSERT(new_map_reg.is(r3));
+ __ mov(new_map_reg, Operand(to_map));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
RelocInfo::CODE_TARGET, instr);
@@ -4538,7 +4596,9 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
IsFastObjectElementsKind(to_kind)) {
Register fixed_object_reg = ToRegister(instr->temp());
ASSERT(fixed_object_reg.is(r2));
+ Register new_map_reg = ToRegister(instr->new_map_temp());
ASSERT(new_map_reg.is(r3));
+ __ mov(new_map_reg, Operand(to_map));
__ mov(fixed_object_reg, object_reg);
CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
RelocInfo::CODE_TARGET, instr);
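
Taken together, the hunks above make new_map_temp optional: a simple map change writes the map directly, the new FLAG_compiled_transitions path moves the object to r0 and the target map to r1 and calls the Crankshaft-generated TransitionElementsKindStub under a safepoint, and only the legacy builtin paths still pin r2/r3. A simplified model of that dispatch; the predicate is a stand-in for V8's IsSimpleMapChangeTransition:

#include <cstdio>

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_ELEMENTS, FAST_DOUBLE_ELEMENTS };

// Stand-in predicate: treat only smi -> tagged as a pure map rewrite.
bool IsSimpleMapChange(ElementsKind from, ElementsKind to) {
  return from == FAST_SMI_ELEMENTS && to == FAST_ELEMENTS;
}

const char* Lowering(ElementsKind from, ElementsKind to, bool compiled) {
  if (IsSimpleMapChange(from, to)) return "store new map + write barrier";
  if (compiled) return "call TransitionElementsKindStub";
  return "call TransitionElements* builtin";  // legacy r2/r3 convention
}

int main() {
  std::printf("%s\n", Lowering(FAST_SMI_ELEMENTS, FAST_ELEMENTS, true));
  std::printf("%s\n", Lowering(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, true));
  std::printf("%s\n", Lowering(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, false));
  return 0;
}
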
@@ -4549,6 +4609,14 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
}
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+ Register object = ToRegister(instr->object());
+ Register temp = ToRegister(instr->temp());
+ __ TestJSArrayForAllocationSiteInfo(object, temp);
+ DeoptimizeIf(eq, instr->environment());
+}
+
+
void LCodeGen::DoStringAdd(LStringAdd* instr) {
__ push(ToRegister(instr->left()));
__ push(ToRegister(instr->right()));
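
The new DoTrapAllocationMemento deoptimizes when an AllocationSiteInfo object sits directly behind the JSArray in new space, since transitioning such an array in optimized code would bypass the site's elements-kind tracking. A rough word-addressed model of the check TestJSArrayForAllocationSiteInfo performs; the map constant and layout are illustrative:

#include <cassert>
#include <cstdint>
#include <vector>

// Word-addressed model of new space; a real heap uses tagged pointers.
const uint32_t kAllocationSiteInfoMap = 0xA110C;  // illustrative map value

bool TrailingAllocationSiteInfo(const std::vector<uint32_t>& new_space,
                                size_t array_index, size_t array_size_words) {
  size_t candidate = array_index + array_size_words;  // word behind the array
  if (candidate >= new_space.size()) return false;    // nothing behind it
  return new_space[candidate] == kAllocationSiteInfoMap;  // match => deopt
}

int main() {
  std::vector<uint32_t> space(16, 0);
  space[8] = kAllocationSiteInfoMap;  // site info behind a 4-word array at 4
  assert(TrailingAllocationSiteInfo(space, 4, 4));   // => DeoptimizeIf(eq, ...)
  assert(!TrailingAllocationSiteInfo(space, 0, 4));
  return 0;
}
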
@@ -4885,6 +4953,58 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Register temp1 = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
+ bool convert_hole = false;
+ HValue* change_input = instr->hydrogen()->value();
+ if (change_input->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(change_input);
+ convert_hole = load->UsesMustHandleHole();
+ }
+
+ Label no_special_nan_handling;
+ Label done;
+ if (convert_hole) {
+ if (CpuFeatures::IsSupported(VFP2)) {
+ CpuFeatures::Scope scope(VFP2);
+ DwVfpRegister input_reg = ToDoubleRegister(instr->value());
+ __ VFPCompareAndSetFlags(input_reg, input_reg);
+ __ b(vc, &no_special_nan_handling);
+ __ vmov(reg, scratch0(), input_reg);
+ __ cmp(scratch0(), Operand(kHoleNanUpper32));
+ Label canonicalize;
+ __ b(ne, &canonicalize);
+ __ Move(reg, factory()->the_hole_value());
+ __ b(&done);
+ __ bind(&canonicalize);
+ __ Vmov(input_reg,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
+ no_reg);
+ } else {
+ Label not_hole;
+ __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
+ __ b(ne, &not_hole);
+ __ Move(reg, factory()->the_hole_value());
+ __ b(&done);
+ __ bind(&not_hole);
+ __ and_(scratch, sfpd_hi, Operand(0x7ff00000));
+ __ cmp(scratch, Operand(0x7ff00000));
+ __ b(ne, &no_special_nan_handling);
+ Label special_nan_handling;
+ __ tst(sfpd_hi, Operand(0x000FFFFF));
+ __ b(ne, &special_nan_handling);
+ __ cmp(sfpd_lo, Operand(0));
+ __ b(eq, &no_special_nan_handling);
+ __ bind(&special_nan_handling);
+ double canonical_nan =
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double();
+ uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
+ __ mov(sfpd_lo,
+ Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
+ __ mov(sfpd_hi,
+ Operand(static_cast<uint32_t>(casted_nan >> 32)));
+ }
+ }
+
+ __ bind(&no_special_nan_handling);
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
@@ -4904,6 +5024,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
}
// Now that we have finished with the object's real address, tag it.
__ add(reg, reg, Operand(kHeapObjectTag));
+ __ bind(&done);
}
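
The code added to DoNumberTagD distinguishes three inputs when the value came from a holey load: the hole NaN (recognized by its kHoleNanUpper32 upper word) is turned into the_hole_value instead of a boxed number, any other NaN is rewritten to the single canonical NaN, and ordinary doubles are boxed unchanged. The same classification in portable form; the bit patterns are illustrative stand-ins for V8's constants:

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Illustrative stand-ins for V8's hole-NaN bit pattern.
const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;
const uint32_t kHoleNanLower32 = 0xFFFFFFFF;

uint64_t Bits(double d) { uint64_t b; std::memcpy(&b, &d, sizeof b); return b; }

// Mirrors the branch structure of the VFP2 path in DoNumberTagD.
const char* Classify(double d) {
  if (!std::isnan(d)) return "ordinary double: box as-is";
  if (static_cast<uint32_t>(Bits(d) >> 32) == kHoleNanUpper32)
    return "hole NaN: load the_hole_value instead of boxing";
  return "other NaN: canonicalize, then box";
}

int main() {
  double hole;
  const uint64_t hole_bits =
      (uint64_t{kHoleNanUpper32} << 32) | kHoleNanLower32;
  std::memcpy(&hole, &hole_bits, sizeof hole);
  std::printf("%s\n", Classify(hole));
  std::printf("%s\n", Classify(std::nan("")));
  std::printf("%s\n", Classify(1.5));
  return 0;
}
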
@@ -4945,7 +5066,8 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
DwVfpRegister result_reg,
bool deoptimize_on_undefined,
bool deoptimize_on_minus_zero,
- LEnvironment* env) {
+ LEnvironment* env,
+ NumberUntagDMode mode) {
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
ASSERT(!result_reg.is(double_scratch0()));
@@ -4953,43 +5075,57 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Label load_smi, heap_number, done;
- // Smi check.
- __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
- // Heap number map check.
- __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
- if (deoptimize_on_undefined) {
- DeoptimizeIf(ne, env);
- } else {
- Label heap_number;
- __ b(eq, &heap_number);
+ // Heap number map check.
+ __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+ __ cmp(scratch, Operand(ip));
+ if (deoptimize_on_undefined) {
+ DeoptimizeIf(ne, env);
+ } else {
+ Label heap_number;
+ __ b(eq, &heap_number);
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, env);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(input_reg, Operand(ip));
+ DeoptimizeIf(ne, env);
- // Convert undefined to NaN.
- __ LoadRoot(ip, Heap::kNanValueRootIndex);
- __ sub(ip, ip, Operand(kHeapObjectTag));
+ // Convert undefined to NaN.
+ __ LoadRoot(ip, Heap::kNanValueRootIndex);
+ __ sub(ip, ip, Operand(kHeapObjectTag));
+ __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+ __ jmp(&done);
+
+ __ bind(&heap_number);
+ }
+ // Heap number to double register conversion.
+ __ sub(ip, input_reg, Operand(kHeapObjectTag));
__ vldr(result_reg, ip, HeapNumber::kValueOffset);
+ if (deoptimize_on_minus_zero) {
+ __ vmov(ip, result_reg.low());
+ __ cmp(ip, Operand::Zero());
+ __ b(ne, &done);
+ __ vmov(ip, result_reg.high());
+ __ cmp(ip, Operand(HeapNumber::kSignMask));
+ DeoptimizeIf(eq, env);
+ }
__ jmp(&done);
-
- __ bind(&heap_number);
- }
- // Heap number to double register conversion.
- __ sub(ip, input_reg, Operand(kHeapObjectTag));
- __ vldr(result_reg, ip, HeapNumber::kValueOffset);
- if (deoptimize_on_minus_zero) {
- __ vmov(ip, result_reg.low());
- __ cmp(ip, Operand::Zero());
- __ b(ne, &done);
- __ vmov(ip, result_reg.high());
- __ cmp(ip, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, env);
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+ __ SmiUntag(scratch, input_reg, SetCC);
+ DeoptimizeIf(cs, env);
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+ __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+ __ Vmov(result_reg,
+ FixedDoubleArray::hole_nan_as_double(),
+ no_reg);
+ __ b(&done);
+ } else {
+ __ SmiUntag(scratch, input_reg);
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
}
- __ jmp(&done);
// Smi to double register conversion
__ bind(&load_smi);
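
The restructuring wraps the old any-tagged logic in the first branch and adds three cheaper modes for values Hydrogen already knows to be smis or holes. A compact summary of what each branch emits; the enum names follow the new mode parameter, and the descriptions paraphrase the assembly above:

#include <cstdio>

enum NumberUntagDMode {
  NUMBER_CANDIDATE_IS_ANY_TAGGED,        // smi check + heap-number path
  NUMBER_CANDIDATE_IS_SMI,               // unconditional SmiUntag, no checks
  NUMBER_CANDIDATE_IS_SMI_OR_HOLE,       // SmiUntag with SetCC, deopt on non-smi
  NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE   // SmiUntag, non-smi loads the hole NaN
};

const char* Emitted(NumberUntagDMode mode) {
  switch (mode) {
    case NUMBER_CANDIDATE_IS_ANY_TAGGED:
      return "full path: smi, heap number, optionally undefined -> NaN";
    case NUMBER_CANDIDATE_IS_SMI:
      return "untag and convert, no dynamic checks";
    case NUMBER_CANDIDATE_IS_SMI_OR_HOLE:
      return "untag; a non-smi (the hole) deoptimizes";
    case NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE:
      return "untag; a non-smi yields hole_nan_as_double()";
  }
  return "";
}

int main() {
  std::printf("%s\n", Emitted(NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE));
  return 0;
}
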
@@ -5117,10 +5253,28 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
Register input_reg = ToRegister(input);
DwVfpRegister result_reg = ToDoubleRegister(result);
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+ HValue* value = instr->hydrogen()->value();
+ if (value->type().IsSmi()) {
+ if (value->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(value);
+ if (load->UsesMustHandleHole()) {
+ if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+ mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
+ }
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
+ }
+ }
+ }
+
EmitNumberUntagD(input_reg, result_reg,
instr->hydrogen()->deoptimize_on_undefined(),
instr->hydrogen()->deoptimize_on_minus_zero(),
- instr->environment());
+ instr->environment(),
+ mode);
}
@@ -5421,6 +5575,63 @@ void LCodeGen::DoDeferredAllocateObject(LAllocateObject* instr) {
}
+void LCodeGen::DoAllocate(LAllocate* instr) {
+ class DeferredAllocate: public LDeferredCode {
+ public:
+ DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LAllocate* instr_;
+ };
+
+ DeferredAllocate* deferred =
+ new(zone()) DeferredAllocate(this, instr);
+
+ Register size = ToRegister(instr->size());
+ Register result = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->temp1());
+ Register scratch2 = ToRegister(instr->temp2());
+
+ HAllocate* original_instr = instr->hydrogen();
+ if (original_instr->size()->IsConstant()) {
+ UNREACHABLE();
+ } else {
+ // Allocate memory for the object.
+ AllocationFlags flags = TAG_OBJECT;
+ if (original_instr->MustAllocateDoubleAligned()) {
+ flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+ }
+ __ AllocateInNewSpace(size,
+ result,
+ scratch,
+ scratch2,
+ deferred->entry(),
+ flags);
+ }
+
+ __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+ Register size = ToRegister(instr->size());
+ Register result = ToRegister(instr->result());
+
+ // TODO(3095996): Get rid of this. For now, we need to make the
+ // result register contain a valid pointer because it is already
+ // contained in the register pointer map.
+ __ mov(result, Operand(Smi::FromInt(0)));
+
+ PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+ __ SmiTag(size, size);
+ __ push(size);
+ CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
Handle<FixedArray> literals(instr->environment()->closure()->literals());
ElementsKind boilerplate_elements_kind =
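
DoAllocate uses Lithium's usual fast/slow split: the inline path bump-allocates in new space and branches to deferred code when it runs out, where the size is smi-tagged, Runtime::kAllocateInNewSpace is called, and r0 is stored back to the result's safepoint slot. A self-contained sketch of that shape; the space model and helper names are illustrative:

#include <cstdio>

struct NewSpace {
  size_t top, limit;
  // Inline fast path: bump-pointer allocation, as AllocateInNewSpace emits.
  bool TryAllocate(size_t size, size_t* result) {
    if (top + size > limit) return false;  // jump to deferred->entry()
    *result = top;
    top += size;
    return true;
  }
};

// Deferred slow path: stand-in for CallRuntimeFromDeferred(kAllocateInNewSpace).
size_t RuntimeAllocate(size_t size) {
  std::printf("deferred: runtime call allocates %zu bytes\n", size);
  return 0x10000;  // illustrative address of the new object
}

int main() {
  NewSpace space = { 0x8000, 0x8000 + 64 };
  size_t obj;
  if (!space.TryAllocate(128, &obj)) {  // 128 > 64 bytes left: take slow path
    obj = RuntimeAllocate(128);         // result lands back in the result reg
  }
  std::printf("object at %#zx\n", obj);
  return 0;
}
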