Index: src/arm/lithium-codegen-arm.cc
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 12fce439f2ac9faab6e8b0164a836febe09414b0..343b0322f771e2c1ef5af6e3ae91d07be0e042e9 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -162,6 +162,9 @@ bool LCodeGen::GeneratePrologue() {
 // for code aging to work properly.
 __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
 __ nop(ip.code());
+ if (masm()->is_thumb_mode()) {
+ __ nop(ip.code());
+ }
 // Adjust FP to point to saved FP.
 __ add(fp, sp, Operand(2 * kPointerSize));
 }

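The extra nop presumably keeps the patchable prologue sequence the same length in bytes when the code is assembled as Thumb-2: every ARM-state instruction is 4 bytes, while a Thumb-2 nop can be encoded in 2 bytes, so filling the same byte budget can take one more instruction. A minimal standalone sketch of that arithmetic (the constant and helper names are illustrative, not part of the patch):

// Hypothetical sketch, not V8 code: why Thumb mode may need one more nop.
// An ARM-state instruction is always 4 bytes; a Thumb-2 nop can be 2 bytes,
// so reserving the same number of bytes after the stm/nop marker can take a
// different number of nop instructions depending on the mode.
#include <cstdio>

int NopsNeeded(int bytes_to_reserve, int nop_size_in_bytes) {
  // Assumes the budget divides evenly, as it does for 4 / {4, 2}.
  return bytes_to_reserve / nop_size_in_bytes;
}

int main() {
  const int kReservedBytes = 4;  // assumed size of the patchable slot
  printf("ARM   (4-byte nop): %d nop(s)\n", NopsNeeded(kReservedBytes, 4));
  printf("Thumb (2-byte nop): %d nop(s)\n", NopsNeeded(kReservedBytes, 2));
  return 0;
}
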
@@ -891,6 +894,10 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {

 data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
 data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+ int mode = 0;
+ if (code->is_thumb_mode()) {
+ mode = 1;
+ }

 // Populate the deoptimization entries.
 for (int i = 0; i < length; i++) {

@@ -900,6 +907,7 @@ void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
 data->SetArgumentsStackHeight(i,
 Smi::FromInt(env->arguments_stack_height()));
 data->SetPc(i, Smi::FromInt(env->pc_offset()));
+ data->SetMode(i, Smi::FromInt(mode));
 }
 code->set_deoptimization_data(*data);
 }

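The new per-entry mode flag (0 for ARM, 1 for Thumb) records which instruction set the optimized code was assembled in, presumably so a consumer of the deoptimization data can interpret the recorded pc offsets correctly. A hypothetical sketch of one way such a flag could be used, assuming the usual ARM convention that bit 0 of an interworking branch target selects Thumb state (the enum and function below are illustrative, not V8 API):

// Hypothetical sketch, not V8 API: consuming a per-entry mode flag when
// rebuilding a continuation address from a recorded pc offset.
#include <cstdint>
#include <cstdio>

enum class CodeMode { kArm = 0, kThumb = 1 };  // mirrors the 0/1 stored above

uint32_t ContinuationAddress(uint32_t code_start, uint32_t pc_offset,
                             CodeMode mode) {
  uint32_t addr = code_start + pc_offset;
  return mode == CodeMode::kThumb ? (addr | 1u) : addr;  // set the Thumb bit
}

int main() {
  printf("ARM entry:   0x%x\n",
         static_cast<unsigned>(ContinuationAddress(0x10000, 0x40, CodeMode::kArm)));
  printf("Thumb entry: 0x%x\n",
         static_cast<unsigned>(ContinuationAddress(0x10000, 0x40, CodeMode::kThumb)));
  return 0;
}
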
@@ -2780,14 +2788,22 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
 // the cached map.
 PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
 Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
- __ mov(ip, Operand(Handle<Object>(cell)));
+ if (masm_->is_thumb_mode()) {
+ __ ldr_pc_thumb(ip, Operand(Handle<Object>(cell)));
+ } else {
+ __ mov(ip, Operand(Handle<Object>(cell)));
+ }
 __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
 __ cmp(map, Operand(ip));
 __ b(ne, &cache_miss);
 // We use Factory::the_hole_value() on purpose instead of loading from the
 // root array to force relocation to be able to later patch
 // with true or false.
- __ mov(result, Operand(factory()->the_hole_value()));
+ if (masm_->is_thumb_mode()) {
+ __ ldr_pc_thumb(result, Operand(factory()->the_hole_value()));
+ } else {
+ __ mov(result, Operand(factory()->the_hole_value()));
+ }
 }
 __ b(&done);

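Switching from mov to a pc-relative literal load in Thumb mode presumably keeps the embedded cell, and the hole value that is later patched to true or false, in a single 32-bit word at a predictable offset from the map-check site; ldr_pc_thumb itself is presumably introduced elsewhere in this patch series. A standalone sketch contrasting the two ways a 32-bit constant can be materialized (the helper name and sample address are illustrative):

// Hypothetical sketch, not V8 code: a movw/movt pair splits the value across
// two instruction immediates, while a pc-relative literal load keeps the full
// 32-bit word in the instruction stream where it can be patched in place.
#include <cstdint>
#include <cstdio>

void SplitForMovwMovt(uint32_t value, uint16_t* low, uint16_t* high) {
  *low  = static_cast<uint16_t>(value & 0xFFFF);  // movw immediate (low half)
  *high = static_cast<uint16_t>(value >> 16);     // movt immediate (high half)
}

int main() {
  uint32_t cell_address = 0x2A00BEEF;  // stand-in for an embedded pointer
  uint16_t low, high;
  SplitForMovwMovt(cell_address, &low, &high);
  printf("movw #0x%04x / movt #0x%04x\n", low, high);
  // A literal-pool word would instead hold 0x2A00BEEF verbatim next to the
  // code, reached by a single pc-relative ldr.
  return 0;
}
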
@@ -2839,19 +2855,23 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
 ASSERT(temp.is(r4));
 __ LoadHeapObject(InstanceofStub::right(), instr->function());
 static const int kAdditionalDelta = 5;
+ static const int kAdditionalSizeDelta = 20;
 // Make sure that code size is predicable, since we use specific constants
 // offsets in the code to find embedded values..
 PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
- int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
+ int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalSizeDelta;
 Label before_push_delta;
 __ bind(&before_push_delta);
 __ BlockConstPoolFor(kAdditionalDelta);
- __ mov(temp, Operand(delta * kPointerSize));
+ __ mov(temp, Operand(delta));
 // The mov above can generate one or two instructions. The delta was computed
 // for two instructions, so we need to pad here in case of one instruction.
- if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
+ if (masm_->SizeOfCodeGeneratedSince(&before_push_delta) != 8) {
 ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
 __ nop();
+ if (masm_->is_thumb_mode()) {
+ __ nop();
+ }
 }
 __ StoreToSafepointRegisterSlot(temp, temp);
 CallCodeGeneric(stub.GetCode(isolate()),

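The delta passed to the stub changes from an instruction count, later multiplied by kPointerSize, to a byte offset from SizeOfCodeGeneratedSince, and kAdditionalDelta's 5 instructions become kAdditionalSizeDelta's 20 bytes. On ARM the two are interchangeable because every instruction is 4 bytes, which is also why the padding check can compare against 8 bytes instead of 2 instructions; in Thumb-2, where encodings are 2 or 4 bytes, only the byte count stays meaningful. A small sketch of that arithmetic (the values mirror the constants in the hunk; nothing here is V8 code):

// Hypothetical sketch, not V8 code: instruction counts versus byte offsets.
#include <cstdio>

int main() {
  const int kPointerSize = 4;           // ARM: pointers and instructions are 4 bytes
  const int kAdditionalDelta = 5;       // old constant, in instructions
  const int kAdditionalSizeDelta = 20;  // new constant, in bytes
  printf("old: %d instructions * %d = %d bytes\n",
         kAdditionalDelta, kPointerSize, kAdditionalDelta * kPointerSize);
  printf("new: %d bytes\n", kAdditionalSizeDelta);
  // In Thumb-2 a mix of 2- and 4-byte encodings breaks the old assumption:
  // 5 instructions could span anywhere from 10 to 20 bytes.
  return 0;
}
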
@@ -5570,7 +5590,6 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
 // Block literal pool emission for duration of padding.
 Assembler::BlockConstPoolScope block_const_pool(masm());
 int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
 while (padding_size > 0) {
 __ nop();
 padding_size -= Assembler::kInstrSize;
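The ASSERT is dropped presumably because, once Thumb-2 code can be emitted, the remaining padding is no longer guaranteed to be a multiple of the 4-byte ARM instruction size. A minimal sketch of padding a byte budget with 2-byte nops under that assumption (the loop and kNopSize are illustrative, not the patch's code):

// Hypothetical sketch, not V8 code: byte-based padding that tolerates a
// budget which is not a multiple of 4.
#include <cstdio>

int main() {
  int padding_size = 10;   // example byte budget, not a multiple of 4
  const int kNopSize = 2;  // assumed Thumb-2 nop size in bytes
  int nops = 0;
  while (padding_size > 0) {
    ++nops;                // stands in for emitting one nop
    padding_size -= kNopSize;
  }
  printf("emitted %d nops\n", nops);
  return 0;
}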
|