| Index: src/mips/lithium-codegen-mips.cc
|
| diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
|
| index ab2dcd7c8010b5d088080d69f95cc59b3ba9ecbb..30d7efd1b0d2f89034346ada2d66a9ab13ca19be 100644
|
| --- a/src/mips/lithium-codegen-mips.cc
|
| +++ b/src/mips/lithium-codegen-mips.cc
|
| @@ -84,7 +84,7 @@ void LCodeGen::FinishCode(Handle<Code> code) {
|
| ASSERT(is_done());
|
| code->set_stack_slots(GetStackSlotCount());
|
| code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
|
| - RegisterDependentCodeForEmbeddedMaps(code);
|
| + if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
|
| PopulateDeoptimizationData(code);
|
| info()->CommitDependencies(code);
|
| }
|
| @@ -179,8 +179,7 @@ bool LCodeGen::GeneratePrologue() {
|
| if (slots > 0) {
|
| if (FLAG_debug_code) {
|
| __ Subu(sp, sp, Operand(slots * kPointerSize));
|
| - __ push(a0);
|
| - __ push(a1);
|
| + __ Push(a0, a1);
|
| __ Addu(a0, sp, Operand(slots * kPointerSize));
|
| __ li(a1, Operand(kSlotsZapValue));
|
| Label loop;
|
| @@ -188,8 +187,7 @@ bool LCodeGen::GeneratePrologue() {
|
| __ Subu(a0, a0, Operand(kPointerSize));
|
| __ sw(a1, MemOperand(a0, 2 * kPointerSize));
|
| __ Branch(&loop, ne, a0, Operand(sp));
|
| - __ pop(a1);
|
| - __ pop(a0);
|
| + __ Pop(a0, a1);
|
| } else {
|
| __ Subu(sp, sp, Operand(slots * kPointerSize));
|
| }
|
| @@ -204,17 +202,18 @@ bool LCodeGen::GeneratePrologue() {
|
| if (heap_slots > 0) {
|
| Comment(";;; Allocate local context");
|
| // Argument to NewContext is the function, which is in a1.
|
| - __ push(a1);
|
| if (heap_slots <= FastNewContextStub::kMaximumSlots) {
|
| FastNewContextStub stub(heap_slots);
|
| __ CallStub(&stub);
|
| } else {
|
| + __ push(a1);
|
| __ CallRuntime(Runtime::kNewFunctionContext, 1);
|
| }
|
| RecordSafepoint(Safepoint::kNoLazyDeopt);
|
| - // Context is returned in both v0 and cp. It replaces the context
|
| - // passed to us. It's saved in the stack and kept live in cp.
|
| - __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| + // Context is returned in v0. It replaces the context passed to us.
|
| + // It's saved in the stack and kept live in cp.
|
| + __ mov(cp, v0);
|
| + __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| // Copy any necessary parameters into the context.
|
| int num_parameters = scope()->num_parameters();
|
| for (int i = 0; i < num_parameters; i++) {
|
| @@ -268,7 +267,8 @@ bool LCodeGen::GenerateDeferredCode() {
|
|
|
| HValue* value =
|
| instructions_->at(code->instruction_index())->hydrogen_value();
|
| - RecordAndWritePosition(value->position());
|
| + RecordAndWritePosition(
|
| + chunk()->graph()->SourcePositionToScriptPosition(value->position()));
|
|
|
| Comment(";;; <@%d,#%d> "
|
| "-------------------- Deferred %s --------------------",
|
| @@ -860,6 +860,14 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
|
| translations_.CreateByteArray(isolate()->factory());
|
| data->SetTranslationByteArray(*translations);
|
| data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
|
| + data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
|
| + if (info_->IsOptimizing()) {
|
| + // Reference to shared function info does not change between phases.
|
| + AllowDeferredHandleDereference allow_handle_dereference;
|
| + data->SetSharedFunctionInfo(*info_->shared_info());
|
| + } else {
|
| + data->SetSharedFunctionInfo(Smi::FromInt(0));
|
| + }
|
|
|
| Handle<FixedArray> literals =
|
| factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
|
| @@ -1030,11 +1038,6 @@ void LCodeGen::DoCallStub(LCallStub* instr) {
|
| ASSERT(ToRegister(instr->context()).is(cp));
|
| ASSERT(ToRegister(instr->result()).is(v0));
|
| switch (instr->hydrogen()->major_key()) {
|
| - case CodeStub::RegExpConstructResult: {
|
| - RegExpConstructResultStub stub;
|
| - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| - break;
|
| - }
|
| case CodeStub::RegExpExec: {
|
| RegExpExecStub stub;
|
| CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| @@ -1065,7 +1068,7 @@ void LCodeGen::DoModI(LModI* instr) {
|
| HMod* hmod = instr->hydrogen();
|
| HValue* left = hmod->left();
|
| HValue* right = hmod->right();
|
| - if (hmod->HasPowerOf2Divisor()) {
|
| + if (hmod->RightIsPowerOf2()) {
|
| const Register left_reg = ToRegister(instr->left());
|
| const Register result_reg = ToRegister(instr->result());
|
|
|
| @@ -1655,41 +1658,6 @@ void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
|
| }
|
|
|
|
|
| -void LCodeGen::DoElementsKind(LElementsKind* instr) {
|
| - Register result = ToRegister(instr->result());
|
| - Register input = ToRegister(instr->value());
|
| -
|
| - // Load map into |result|.
|
| - __ lw(result, FieldMemOperand(input, HeapObject::kMapOffset));
|
| - // Load the map's "bit field 2" into |result|. We only need the first byte,
|
| - // but the following bit field extraction takes care of that anyway.
|
| - __ lbu(result, FieldMemOperand(result, Map::kBitField2Offset));
|
| - // Retrieve elements_kind from bit field 2.
|
| - __ Ext(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
|
| -}
|
| -
|
| -
|
| -void LCodeGen::DoValueOf(LValueOf* instr) {
|
| - Register input = ToRegister(instr->value());
|
| - Register result = ToRegister(instr->result());
|
| - Register map = ToRegister(instr->temp());
|
| - Label done;
|
| -
|
| - if (!instr->hydrogen()->value()->IsHeapObject()) {
|
| - // If the object is a smi return the object.
|
| - __ Move(result, input);
|
| - __ JumpIfSmi(input, &done);
|
| - }
|
| -
|
| - // If the object is not a value type, return the object.
|
| - __ GetObjectType(input, map, map);
|
| - __ Branch(&done, ne, map, Operand(JS_VALUE_TYPE));
|
| - __ lw(result, FieldMemOperand(input, JSValue::kValueOffset));
|
| -
|
| - __ bind(&done);
|
| -}
|
| -
|
| -
|
| void LCodeGen::DoDateField(LDateField* instr) {
|
| Register object = ToRegister(instr->date());
|
| Register result = ToRegister(instr->result());
|
| @@ -1806,17 +1774,6 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
|
| }
|
|
|
|
|
| -void LCodeGen::DoThrow(LThrow* instr) {
|
| - __ push(ToRegister(instr->value()));
|
| - ASSERT(ToRegister(instr->context()).is(cp));
|
| - CallRuntime(Runtime::kThrow, 1, instr);
|
| -
|
| - if (FLAG_debug_code) {
|
| - __ stop("Unreachable code.");
|
| - }
|
| -}
|
| -
|
| -
|
| void LCodeGen::DoAddI(LAddI* instr) {
|
| LOperand* left = instr->left();
|
| LOperand* right = instr->right();
|
| @@ -2665,10 +2622,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
|
| Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
|
| __ li(at, Operand(Handle<Object>(cell)));
|
| __ lw(at, FieldMemOperand(at, PropertyCell::kValueOffset));
|
| - __ Branch(&cache_miss, ne, map, Operand(at));
|
| + __ BranchShort(&cache_miss, ne, map, Operand(at));
|
| // We use Factory::the_hole_value() on purpose instead of loading from the
|
| // root array to force relocation to be able to later patch
|
| - // with true or false.
|
| + // with true or false. The distance from map check has to be constant.
|
| __ li(result, Operand(factory()->the_hole_value()), CONSTANT_SIZE);
|
| __ Branch(&done);
|
|
|
| @@ -2999,15 +2956,6 @@ void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
|
| }
|
|
|
|
|
| -void LCodeGen::DoLoadExternalArrayPointer(
|
| - LLoadExternalArrayPointer* instr) {
|
| - Register to_reg = ToRegister(instr->result());
|
| - Register from_reg = ToRegister(instr->object());
|
| - __ lw(to_reg, FieldMemOperand(from_reg,
|
| - ExternalArray::kExternalPointerOffset));
|
| -}
|
| -
|
| -
|
| void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
|
| Register arguments = ToRegister(instr->arguments());
|
| Register result = ToRegister(instr->result());
|
| @@ -3074,9 +3022,9 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
|
| ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
|
| : 0;
|
|
|
| - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
|
| + if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
|
| elements_kind == FLOAT32_ELEMENTS ||
|
| - elements_kind == EXTERNAL_DOUBLE_ELEMENTS ||
|
| + elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
|
| elements_kind == FLOAT64_ELEMENTS) {
|
| int base_offset =
|
| (instr->additional_index() << element_size_shift) + additional_offset;
|
| @@ -3087,7 +3035,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
|
| __ sll(scratch0(), key, shift_size);
|
| __ Addu(scratch0(), scratch0(), external_pointer);
|
| }
|
| - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
|
| + if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
|
| elements_kind == FLOAT32_ELEMENTS) {
|
| __ lwc1(result, MemOperand(scratch0(), base_offset));
|
| __ cvt_d_s(result, result);
|
| @@ -3101,29 +3049,29 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
|
| element_size_shift, shift_size,
|
| instr->additional_index(), additional_offset);
|
| switch (elements_kind) {
|
| - case EXTERNAL_BYTE_ELEMENTS:
|
| + case EXTERNAL_INT8_ELEMENTS:
|
| case INT8_ELEMENTS:
|
| __ lb(result, mem_operand);
|
| break;
|
| - case EXTERNAL_PIXEL_ELEMENTS:
|
| - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
|
| + case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
|
| + case EXTERNAL_UINT8_ELEMENTS:
|
| case UINT8_ELEMENTS:
|
| case UINT8_CLAMPED_ELEMENTS:
|
| __ lbu(result, mem_operand);
|
| break;
|
| - case EXTERNAL_SHORT_ELEMENTS:
|
| + case EXTERNAL_INT16_ELEMENTS:
|
| case INT16_ELEMENTS:
|
| __ lh(result, mem_operand);
|
| break;
|
| - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
|
| + case EXTERNAL_UINT16_ELEMENTS:
|
| case UINT16_ELEMENTS:
|
| __ lhu(result, mem_operand);
|
| break;
|
| - case EXTERNAL_INT_ELEMENTS:
|
| + case EXTERNAL_INT32_ELEMENTS:
|
| case INT32_ELEMENTS:
|
| __ lw(result, mem_operand);
|
| break;
|
| - case EXTERNAL_UNSIGNED_INT_ELEMENTS:
|
| + case EXTERNAL_UINT32_ELEMENTS:
|
| case UINT32_ELEMENTS:
|
| __ lw(result, mem_operand);
|
| if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
|
| @@ -3133,8 +3081,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
|
| break;
|
| case FLOAT32_ELEMENTS:
|
| case FLOAT64_ELEMENTS:
|
| - case EXTERNAL_FLOAT_ELEMENTS:
|
| - case EXTERNAL_DOUBLE_ELEMENTS:
|
| + case EXTERNAL_FLOAT32_ELEMENTS:
|
| + case EXTERNAL_FLOAT64_ELEMENTS:
|
| case FAST_DOUBLE_ELEMENTS:
|
| case FAST_ELEMENTS:
|
| case FAST_SMI_ELEMENTS:
|
| @@ -3363,19 +3311,21 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
|
| // passed unchanged to builtins and strict-mode functions.
|
| Label global_object, result_in_receiver;
|
|
|
| - // Do not transform the receiver to object for strict mode
|
| - // functions.
|
| - __ lw(scratch,
|
| - FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
|
| - __ lw(scratch,
|
| - FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
|
| + if (!instr->hydrogen()->known_function()) {
|
| + // Do not transform the receiver to object for strict mode
|
| + // functions.
|
| + __ lw(scratch,
|
| + FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
|
| + __ lw(scratch,
|
| + FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
|
|
|
| - // Do not transform the receiver to object for builtins.
|
| - int32_t strict_mode_function_mask =
|
| - 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
|
| - int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
|
| - __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
|
| - __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
|
| + // Do not transform the receiver to object for builtins.
|
| + int32_t strict_mode_function_mask =
|
| + 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
|
| + int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
|
| + __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
|
| + __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
|
| + }
|
|
|
| // Normal function. Replace undefined or null with global receiver.
|
| __ LoadRoot(scratch, Heap::kNullValueRootIndex);
|
| @@ -3390,14 +3340,14 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
|
| __ GetObjectType(receiver, scratch, scratch);
|
| DeoptimizeIf(lt, instr->environment(),
|
| scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| - __ Branch(&result_in_receiver);
|
|
|
| + __ Branch(&result_in_receiver);
|
| __ bind(&global_object);
|
| - __ lw(receiver, FieldMemOperand(function, JSFunction::kContextOffset));
|
| - __ lw(receiver,
|
| - ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
|
| - __ lw(receiver,
|
| - FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset));
|
| + __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
|
| + __ lw(result,
|
| + ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
|
| + __ lw(result,
|
| + FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
|
|
|
| if (result.is(receiver)) {
|
| __ bind(&result_in_receiver);
|
| @@ -3493,14 +3443,6 @@ void LCodeGen::DoContext(LContext* instr) {
|
| }
|
|
|
|
|
| -void LCodeGen::DoOuterContext(LOuterContext* instr) {
|
| - Register context = ToRegister(instr->context());
|
| - Register result = ToRegister(instr->result());
|
| - __ lw(result,
|
| - MemOperand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
|
| -}
|
| -
|
| -
|
| void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
|
| ASSERT(ToRegister(instr->context()).is(cp));
|
| __ li(scratch0(), instr->hydrogen()->pairs());
|
| @@ -3511,20 +3453,6 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
|
| }
|
|
|
|
|
| -void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
|
| - Register context = ToRegister(instr->context());
|
| - Register result = ToRegister(instr->result());
|
| - __ lw(result, ContextOperand(context, Context::GLOBAL_OBJECT_INDEX));
|
| -}
|
| -
|
| -
|
| -void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
|
| - Register global = ToRegister(instr->global_object());
|
| - Register result = ToRegister(instr->result());
|
| - __ lw(result, FieldMemOperand(global, GlobalObject::kGlobalReceiverOffset));
|
| -}
|
| -
|
| -
|
| void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
|
| int formal_parameter_count,
|
| int arity,
|
| @@ -3836,6 +3764,7 @@ void LCodeGen::DoPower(LPower* instr) {
|
| Label no_deopt;
|
| __ JumpIfSmi(a2, &no_deopt);
|
| __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
|
| + __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
|
| DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
|
| __ bind(&no_deopt);
|
| MathPowStub stub(MathPowStub::TAGGED);
|
| @@ -3874,6 +3803,13 @@ void LCodeGen::DoMathLog(LMathLog* instr) {
|
| }
|
|
|
|
|
| +void LCodeGen::DoMathClz32(LMathClz32* instr) {
|
| + Register input = ToRegister(instr->value());
|
| + Register result = ToRegister(instr->result());
|
| + __ Clz(result, input);
|
| +}
|
| +
|
| +
|
| void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
|
| ASSERT(ToRegister(instr->context()).is(cp));
|
| ASSERT(ToRegister(instr->function()).is(a1));
|
| @@ -3942,13 +3878,8 @@ void LCodeGen::DoCallFunction(LCallFunction* instr) {
|
| ASSERT(ToRegister(instr->result()).is(v0));
|
|
|
| int arity = instr->arity();
|
| - CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
|
| - if (instr->hydrogen()->IsTailCall()) {
|
| - if (NeedsEagerFrame()) __ mov(sp, fp);
|
| - __ Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
|
| - } else {
|
| - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| - }
|
| + CallFunctionStub stub(arity, instr->hydrogen()->function_flags());
|
| + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| }
|
|
|
|
|
| @@ -3972,7 +3903,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
|
| ASSERT(ToRegister(instr->result()).is(v0));
|
|
|
| __ li(a0, Operand(instr->arity()));
|
| - __ li(a2, Operand(instr->hydrogen()->property_cell()));
|
| + __ li(a2, Operand(factory()->undefined_value()));
|
| ElementsKind kind = instr->hydrogen()->elements_kind();
|
| AllocationSiteOverrideMode override_mode =
|
| (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
|
| @@ -4052,14 +3983,20 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
| }
|
|
|
| Handle<Map> transition = instr->transition();
|
| + SmiCheck check_needed =
|
| + instr->hydrogen()->value()->IsHeapObject()
|
| + ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
|
|
|
| if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
|
| Register value = ToRegister(instr->value());
|
| if (!instr->hydrogen()->value()->type().IsHeapObject()) {
|
| __ SmiTst(value, scratch);
|
| DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
|
| +
|
| + // We know that value is a smi now, so we can omit the check below.
|
| + check_needed = OMIT_SMI_CHECK;
|
| }
|
| - } else if (FLAG_track_double_fields && representation.IsDouble()) {
|
| + } else if (representation.IsDouble()) {
|
| ASSERT(transition.is_null());
|
| ASSERT(access.IsInobject());
|
| ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
|
| @@ -4087,10 +4024,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
|
|
|
| // Do the store.
|
| Register value = ToRegister(instr->value());
|
| - ASSERT(!object.is(value));
|
| - SmiCheck check_needed =
|
| - instr->hydrogen()->value()->IsHeapObject()
|
| - ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
|
| if (access.IsInobject()) {
|
| MemOperand operand = FieldMemOperand(object, offset);
|
| __ Store(value, operand, representation);
|
| @@ -4199,9 +4132,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
|
| ? FixedTypedArrayBase::kDataOffset - kHeapObjectTag
|
| : 0;
|
|
|
| - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
|
| + if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
|
| elements_kind == FLOAT32_ELEMENTS ||
|
| - elements_kind == EXTERNAL_DOUBLE_ELEMENTS ||
|
| + elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
|
| elements_kind == FLOAT64_ELEMENTS) {
|
| int base_offset =
|
| (instr->additional_index() << element_size_shift) + additional_offset;
|
| @@ -4219,7 +4152,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
|
| __ Addu(address, external_pointer, address);
|
| }
|
|
|
| - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
|
| + if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
|
| elements_kind == FLOAT32_ELEMENTS) {
|
| __ cvt_s_d(double_scratch0(), value);
|
| __ swc1(double_scratch0(), MemOperand(address, base_offset));
|
| @@ -4233,30 +4166,30 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
|
| element_size_shift, shift_size,
|
| instr->additional_index(), additional_offset);
|
| switch (elements_kind) {
|
| - case EXTERNAL_PIXEL_ELEMENTS:
|
| - case EXTERNAL_BYTE_ELEMENTS:
|
| - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
|
| + case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
|
| + case EXTERNAL_INT8_ELEMENTS:
|
| + case EXTERNAL_UINT8_ELEMENTS:
|
| case UINT8_ELEMENTS:
|
| case UINT8_CLAMPED_ELEMENTS:
|
| case INT8_ELEMENTS:
|
| __ sb(value, mem_operand);
|
| break;
|
| - case EXTERNAL_SHORT_ELEMENTS:
|
| - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
|
| + case EXTERNAL_INT16_ELEMENTS:
|
| + case EXTERNAL_UINT16_ELEMENTS:
|
| case INT16_ELEMENTS:
|
| case UINT16_ELEMENTS:
|
| __ sh(value, mem_operand);
|
| break;
|
| - case EXTERNAL_INT_ELEMENTS:
|
| - case EXTERNAL_UNSIGNED_INT_ELEMENTS:
|
| + case EXTERNAL_INT32_ELEMENTS:
|
| + case EXTERNAL_UINT32_ELEMENTS:
|
| case INT32_ELEMENTS:
|
| case UINT32_ELEMENTS:
|
| __ sw(value, mem_operand);
|
| break;
|
| case FLOAT32_ELEMENTS:
|
| case FLOAT64_ELEMENTS:
|
| - case EXTERNAL_FLOAT_ELEMENTS:
|
| - case EXTERNAL_DOUBLE_ELEMENTS:
|
| + case EXTERNAL_FLOAT32_ELEMENTS:
|
| + case EXTERNAL_FLOAT64_ELEMENTS:
|
| case FAST_DOUBLE_ELEMENTS:
|
| case FAST_ELEMENTS:
|
| case FAST_SMI_ELEMENTS:
|
| @@ -4308,8 +4241,8 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
|
|
|
| // Only load canonical NaN if the comparison above set the overflow.
|
| __ bind(&is_nan);
|
| - __ Move(double_scratch,
|
| - FixedDoubleArray::canonical_not_the_hole_nan_as_double());
|
| + __ LoadRoot(at, Heap::kNanValueRootIndex);
|
| + __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
|
| __ sdc1(double_scratch, MemOperand(scratch, instr->additional_index() <<
|
| element_size_shift));
|
| __ Branch(&done);
|
| @@ -4444,18 +4377,11 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
|
|
|
| void LCodeGen::DoStringAdd(LStringAdd* instr) {
|
| ASSERT(ToRegister(instr->context()).is(cp));
|
| - if (FLAG_new_string_add) {
|
| - ASSERT(ToRegister(instr->left()).is(a1));
|
| - ASSERT(ToRegister(instr->right()).is(a0));
|
| - NewStringAddStub stub(instr->hydrogen()->flags(),
|
| - isolate()->heap()->GetPretenureMode());
|
| - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| - } else {
|
| - __ push(ToRegister(instr->left()));
|
| - __ push(ToRegister(instr->right()));
|
| - StringAddStub stub(instr->hydrogen()->flags());
|
| - CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| - }
|
| + ASSERT(ToRegister(instr->left()).is(a1));
|
| + ASSERT(ToRegister(instr->right()).is(a0));
|
| + StringAddStub stub(instr->hydrogen()->flags(),
|
| + instr->hydrogen()->pretenure_flag());
|
| + CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
|
| }
|
|
|
|
|
| @@ -4886,8 +4812,9 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
|
| // Performs a truncating conversion of a floating point number as used by
|
| // the JS bitwise operations.
|
| Label no_heap_number, check_bools, check_false;
|
| - __ Branch(&no_heap_number, ne, scratch1, Operand(at)); // HeapNumber map?
|
| - __ mov(scratch2, input_reg);
|
| + // Check HeapNumber map.
|
| + __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
|
| + __ mov(scratch2, input_reg); // In delay slot.
|
| __ TruncateHeapNumberToI(input_reg, scratch2);
|
| __ Branch(&done);
|
|
|
| @@ -5288,7 +5215,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
|
| }
|
| if (instr->size()->IsConstantOperand()) {
|
| int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
|
| - __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
|
| + if (size <= Page::kMaxRegularHeapObjectSize) {
|
| + __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
|
| + } else {
|
| + __ jmp(deferred->entry());
|
| + }
|
| } else {
|
| Register size = ToRegister(instr->size());
|
| __ Allocate(size,
|
|
|