Chromium Code Reviews| Index: src/ia32/lithium-codegen-ia32.cc |
| diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc |
| index ebeaaa8216a0db4747f6c9aa1820a06a92e7f2a0..7b2d0420a7ecabed0fa951af0d88751a8a2c900b 100644 |
| --- a/src/ia32/lithium-codegen-ia32.cc |
| +++ b/src/ia32/lithium-codegen-ia32.cc |
| @@ -2047,7 +2047,7 @@ void LCodeGen::DoDateField(LDateField* instr) { |
| __ j(not_equal, &runtime, Label::kNear); |
| __ mov(result, FieldOperand(object, JSDate::kValueOffset + |
| kPointerSize * index->value())); |
| - __ jmp(&done); |
| + __ jmp(&done, Label::kNear); |
| } |
| __ bind(&runtime); |
| __ PrepareCallCFunction(2, scratch); |
| @@ -2589,7 +2589,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) { |
| __ fld(0); |
| __ FCmp(); |
| Label ok; |
| - __ j(parity_even, &ok); |
| + __ j(parity_even, &ok, Label::kNear); |
| __ fstp(0); |
| EmitFalseBranch(instr, no_condition); |
| __ bind(&ok); |
| @@ -2913,7 +2913,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
| Register temp = ToRegister(instr->temp()); |
| // A Smi is not an instance of anything. |
| - __ JumpIfSmi(object, &false_result); |
| + __ JumpIfSmi(object, &false_result, Label::kNear); |
| // This is the inlined call site instanceof cache. The two occurences of the |
| // hole value will be patched to the last map/result pair generated by the |
| @@ -2926,18 +2926,18 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) { |
| __ cmp(map, Operand::ForCell(cache_cell)); // Patched to cached map. |
| __ j(not_equal, &cache_miss, Label::kNear); |
| __ mov(eax, factory()->the_hole_value()); // Patched to either true or false. |
| - __ jmp(&done); |
| + __ jmp(&done, Label::kNear); |
| // The inlined call site cache did not match. Check for null and string |
| // before calling the deferred code. |
| __ bind(&cache_miss); |
| // Null is not an instance of anything. |
| __ cmp(object, factory()->null_value()); |
| - __ j(equal, &false_result); |
| + __ j(equal, &false_result, Label::kNear); |
| // String values are not instances of anything. |
| Condition is_string = masm_->IsObjectStringType(object, temp, temp); |
| - __ j(is_string, &false_result); |
| + __ j(is_string, &false_result, Label::kNear); |
| // Go to the deferred code. |
| __ jmp(deferred->entry()); |
| @@ -3082,7 +3082,7 @@ void LCodeGen::DoReturn(LReturn* instr) { |
| if (dynamic_frame_alignment_) { |
| Label no_padding; |
| __ cmp(edx, Immediate(kNoAlignmentPadding)); |
| - __ j(equal, &no_padding); |
| + __ j(equal, &no_padding, Label::kNear); |
| EmitReturn(instr, true); |
| __ bind(&no_padding); |
| @@ -3607,12 +3607,12 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) { |
| FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
| __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset), |
| 1 << SharedFunctionInfo::kStrictModeBitWithinByte); |
| - __ j(not_equal, &receiver_ok); // A near jump is not sufficient here! |
|
Jakob Kummerow
2013/10/28 10:37:19
Why do you think this comment is not true anymore?
Weiliang
2013/10/28 14:31:49
Yes, thanks for pointing it out. A near jump is not sufficient here. [reply truncated in source]
|
| + __ j(not_equal, &receiver_ok, Label::kNear); |
| // Do not transform the receiver to object for builtins. |
| __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset), |
| 1 << SharedFunctionInfo::kNativeBitWithinByte); |
| - __ j(not_equal, &receiver_ok); |
| + __ j(not_equal, &receiver_ok, Label::kNear); |
| // Normal function. Replace undefined or null with global receiver. |
| __ cmp(receiver, factory()->null_value()); |
| @@ -3821,7 +3821,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { |
| // |result| are the same register and |input| will be restored |
| // unchanged by popping safepoint registers. |
| __ test(tmp, Immediate(HeapNumber::kSignMask)); |
| - __ j(zero, &done); |
| + __ j(zero, &done, Label::kNear); |
| __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow); |
| __ jmp(&allocated, Label::kNear); |
| @@ -3977,7 +3977,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) { |
| Label done, round_to_zero, below_one_half, do_not_compensate; |
| __ movsd(xmm_scratch, Operand::StaticVariable(one_half)); |
| __ ucomisd(xmm_scratch, input_reg); |
| - __ j(above, &below_one_half); |
| + __ j(above, &below_one_half, Label::kNear); |
| // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x). |
| __ addsd(xmm_scratch, input_reg); |
| @@ -3986,12 +3986,12 @@ void LCodeGen::DoMathRound(LMathRound* instr) { |
| __ cmp(output_reg, 0x80000000u); |
| __ RecordComment("D2I conversion overflow"); |
| DeoptimizeIf(equal, instr->environment()); |
| - __ jmp(&done); |
| + __ jmp(&done, Label::kNear); |
| __ bind(&below_one_half); |
| __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half)); |
| __ ucomisd(xmm_scratch, input_reg); |
| - __ j(below_equal, &round_to_zero); |
| + __ j(below_equal, &round_to_zero, Label::kNear); |
| // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then |
| // compare and compensate. |
| @@ -4005,10 +4005,10 @@ void LCodeGen::DoMathRound(LMathRound* instr) { |
| __ Cvtsi2sd(xmm_scratch, output_reg); |
| __ ucomisd(xmm_scratch, input_temp); |
| - __ j(equal, &done); |
| + __ j(equal, &done, Label::kNear); |
| __ sub(output_reg, Immediate(1)); |
| // No overflow because we already ruled out minint. |
| - __ jmp(&done); |
| + __ jmp(&done, Label::kNear); |
| __ bind(&round_to_zero); |
| // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if |
| @@ -4363,13 +4363,13 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) { |
| // look at the first argument |
| __ mov(ecx, Operand(esp, 0)); |
| __ test(ecx, ecx); |
| - __ j(zero, &packed_case); |
| + __ j(zero, &packed_case, Label::kNear); |
| ElementsKind holey_kind = GetHoleyElementsKind(kind); |
| ArraySingleArgumentConstructorStub stub(holey_kind, context_mode, |
| override_mode); |
| CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr); |
| - __ jmp(&done); |
| + __ jmp(&done, Label::kNear); |
| __ bind(&packed_case); |
| } |
| @@ -4666,7 +4666,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { |
| Label have_value; |
| __ ucomisd(value, value); |
| - __ j(parity_odd, &have_value); // NaN. |
| + __ j(parity_odd, &have_value, Label::kNear); // NaN. |
| __ movsd(value, Operand::StaticVariable(canonical_nan_reference)); |
| __ bind(&have_value); |
| @@ -4702,15 +4702,15 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) { |
| __ fld(0); |
| __ FCmp(); |
| - __ j(parity_odd, &no_special_nan_handling); |
| + __ j(parity_odd, &no_special_nan_handling, Label::kNear); |
| __ sub(esp, Immediate(kDoubleSize)); |
| __ fst_d(MemOperand(esp, 0)); |
| __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)), |
| Immediate(kHoleNanUpper32)); |
| __ add(esp, Immediate(kDoubleSize)); |
| Label canonicalize; |
| - __ j(not_equal, &canonicalize); |
| - __ jmp(&no_special_nan_handling); |
| + __ j(not_equal, &canonicalize, Label::kNear); |
| + __ jmp(&no_special_nan_handling, Label::kNear); |
| __ bind(&canonicalize); |
| __ fstp(0); |
| __ fld_d(Operand::StaticVariable(canonical_nan_reference)); |
| @@ -5679,12 +5679,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| Label success; |
| for (int i = 0; i < map_set.size() - 1; i++) { |
| Handle<Map> map = map_set.at(i).handle(); |
| - __ CompareMap(reg, map, &success); |
| - __ j(equal, &success); |
| + __ CompareMap(reg, map); |
| + __ j(equal, &success, Label::kNear); |
| } |
| Handle<Map> map = map_set.at(map_set.size() - 1).handle(); |
| - __ CompareMap(reg, map, &success); |
| + __ CompareMap(reg, map); |
| if (instr->hydrogen()->has_migration_target()) { |
| __ j(not_equal, deferred->entry()); |
| } else { |
| @@ -5762,13 +5762,13 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { |
| // Check for heap number |
| __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset), |
| factory()->heap_number_map()); |
| - __ j(equal, &heap_number, Label::kFar); |
| + __ j(equal, &heap_number, Label::kNear); |
| // Check for undefined. Undefined is converted to zero for clamping |
| // conversions. |
| __ cmp(input_reg, factory()->undefined_value()); |
| DeoptimizeIf(not_equal, instr->environment()); |
| - __ jmp(&zero_result); |
| + __ jmp(&zero_result, Label::kNear); |
| // Heap number |
| __ bind(&heap_number); |
| @@ -5783,15 +5783,15 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { |
| // Test for negative values --> clamp to zero |
| __ test(scratch, scratch); |
| - __ j(negative, &zero_result); |
| + __ j(negative, &zero_result, Label::kNear); |
| // Get exponent alone in scratch2. |
| __ mov(scratch2, scratch); |
| __ and_(scratch2, HeapNumber::kExponentMask); |
| __ shr(scratch2, HeapNumber::kExponentShift); |
| - __ j(zero, &zero_result); |
| + __ j(zero, &zero_result, Label::kNear); |
| __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1)); |
| - __ j(negative, &zero_result); |
| + __ j(negative, &zero_result, Label::kNear); |
| const uint32_t non_int8_exponent = 7; |
| __ cmp(scratch2, Immediate(non_int8_exponent + 1)); |
| @@ -5822,18 +5822,18 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { |
| __ and_(scratch2, Immediate((1 << one_bit_shift) - 1)); |
| __ cmp(scratch2, Immediate(1 << one_half_bit_shift)); |
| Label no_round; |
| - __ j(less, &no_round); |
| + __ j(less, &no_round, Label::kNear); |
| Label round_up; |
| __ mov(scratch2, Immediate(1 << one_half_bit_shift)); |
| - __ j(greater, &round_up); |
| + __ j(greater, &round_up, Label::kNear); |
| __ test(scratch3, scratch3); |
| - __ j(not_zero, &round_up); |
| + __ j(not_zero, &round_up, Label::kNear); |
| __ mov(scratch2, scratch); |
| __ and_(scratch2, Immediate(1 << one_bit_shift)); |
| __ shr(scratch2, 1); |
| __ bind(&round_up); |
| __ add(scratch, scratch2); |
| - __ j(overflow, &largest_value); |
| + __ j(overflow, &largest_value, Label::kNear); |
| __ bind(&no_round); |
| __ shr(scratch, 23); |
| __ mov(result_reg, scratch); |
| @@ -5848,7 +5848,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { |
| // bit is set. |
| __ and_(scratch, HeapNumber::kMantissaMask); |
| __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset)); |
| - __ j(not_zero, &zero_result); // M!=0 --> NaN |
| + __ j(not_zero, &zero_result, Label::kNear); // M!=0 --> NaN |
| // Infinity -> Fall through to map to 255. |
| __ bind(&largest_value); |
| @@ -5857,7 +5857,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) { |
| __ bind(&zero_result); |
| __ xor_(result_reg, result_reg); |
| - __ jmp(&done); |
| + __ jmp(&done, Label::kNear); |
| // smi |
| __ bind(&is_smi); |
| @@ -6005,7 +6005,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) { |
| int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; |
| Label allocated, runtime_allocate; |
| __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT); |
| - __ jmp(&allocated); |
| + __ jmp(&allocated, Label::kNear); |
| __ bind(&runtime_allocate); |
| __ push(ebx); |
| @@ -6331,9 +6331,9 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
| Label load_cache, done; |
| __ EnumLength(result, map); |
| __ cmp(result, Immediate(Smi::FromInt(0))); |
| - __ j(not_equal, &load_cache); |
| + __ j(not_equal, &load_cache, Label::kNear); |
| __ mov(result, isolate()->factory()->empty_fixed_array()); |
| - __ jmp(&done); |
| + __ jmp(&done, Label::kNear); |
| __ bind(&load_cache); |
| __ LoadInstanceDescriptors(map, result); |
| @@ -6361,7 +6361,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) { |
| Label out_of_object, done; |
| __ cmp(index, Immediate(0)); |
| - __ j(less, &out_of_object); |
| + __ j(less, &out_of_object, Label::kNear); |
| __ mov(object, FieldOperand(object, |
| index, |
| times_half_pointer_size, |