Index: src/ia32/lithium-codegen-ia32.cc
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 44b952cfa702bbbb4030036695c32c25bf1e1b99..f457da04fa66076c24f7b7980f7f5364c4f16503 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1043,7 +1043,7 @@ void LCodeGen::DeoptimizeIf(Condition cc,
     return;
   }
 
-  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+  if (DeoptEveryNTimes()) {
     ExternalReference count = ExternalReference::stress_deopt_count(isolate());
     Label no_deopt;
     __ pushfd();
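The DeoptEveryNTimes() helper itself is not shown in this patch. Judging from the condition it replaces, it is presumably a small LCodeGen predicate along these lines (a sketch inferred from the removed line, not the actual declaration):

    // Sketch only: assumed shape of the new helper, inferred from the
    // inline condition that the hunk above removes.
    bool LCodeGen::DeoptEveryNTimes() {
      return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
    }

Factoring the check out lets the later hunks reuse it when choosing between near and far jumps.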
@@ -2047,7 +2047,7 @@ void LCodeGen::DoDateField(LDateField* instr) {
       __ j(not_equal, &runtime, Label::kNear);
       __ mov(result, FieldOperand(object, JSDate::kValueOffset +
                                           kPointerSize * index->value()));
-      __ jmp(&done);
+      __ jmp(&done, Label::kNear);
     }
     __ bind(&runtime);
     __ PrepareCallCFunction(2, scratch);
@@ -2647,7 +2647,7 @@ void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
     __ fld(0);
     __ FCmp();
     Label ok;
-    __ j(parity_even, &ok);
+    __ j(parity_even, &ok, Label::kNear);
     __ fstp(0);
     EmitFalseBranch(instr, no_condition);
     __ bind(&ok);
@@ -2971,7 +2971,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   Register temp = ToRegister(instr->temp());
 
   // A Smi is not an instance of anything.
-  __ JumpIfSmi(object, &false_result);
+  __ JumpIfSmi(object, &false_result, Label::kNear);
 
   // This is the inlined call site instanceof cache. The two occurences of the
   // hole value will be patched to the last map/result pair generated by the
@@ -2984,18 +2984,18 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   __ cmp(map, Operand::ForCell(cache_cell));  // Patched to cached map.
   __ j(not_equal, &cache_miss, Label::kNear);
   __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // The inlined call site cache did not match. Check for null and string
   // before calling the deferred code.
   __ bind(&cache_miss);
   // Null is not an instance of anything.
   __ cmp(object, factory()->null_value());
-  __ j(equal, &false_result);
+  __ j(equal, &false_result, Label::kNear);
 
   // String values are not instances of anything.
   Condition is_string = masm_->IsObjectStringType(object, temp, temp);
-  __ j(is_string, &false_result);
+  __ j(is_string, &false_result, Label::kNear);
 
   // Go to the deferred code.
   __ jmp(deferred->entry());
@@ -3140,7 +3140,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
   if (dynamic_frame_alignment_) {
     Label no_padding;
     __ cmp(edx, Immediate(kNoAlignmentPadding));
-    __ j(equal, &no_padding);
+    __ j(equal, &no_padding, Label::kNear);
 
     EmitReturn(instr, true);
     __ bind(&no_padding);
@@ -3658,6 +3658,7 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   // object as a receiver to normal functions. Values have to be
   // passed unchanged to builtins and strict-mode functions.
   Label global_object, receiver_ok;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
 
   // Do not transform the receiver to object for strict mode
   // functions.
@@ -3665,12 +3666,12 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
          FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
             1 << SharedFunctionInfo::kStrictModeBitWithinByte);
-  __ j(not_equal, &receiver_ok);  // A near jump is not sufficient here!
+  __ j(not_equal, &receiver_ok, dist);
 
   // Do not transform the receiver to object for builtins.
   __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
             1 << SharedFunctionInfo::kNativeBitWithinByte);
-  __ j(not_equal, &receiver_ok);
+  __ j(not_equal, &receiver_ok, dist);
 
   // Normal function. Replace undefined or null with global receiver.
   __ cmp(receiver, factory()->null_value());
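The conditional distance is the point of this function's change: on ia32, Label::kNear selects the short jump encoding with an 8-bit displacement, so the bound label has to stay within roughly 127 bytes of the jump. When --deopt-every-n-times is active, every DeoptimizeIf() site grows by the stress-deopt counter sequence from the first hunk, so the code between a jump and its label can easily exceed that range; that is presumably why the old code always used a far jump here ("A near jump is not sufficient here!") and the new code only falls back to far jumps in that configuration. A sketch of the resulting pattern, not additional patch content:

    // Sketch of the pattern introduced above: pick the jump distance once,
    // so that stress-deoptimization builds, which emit extra code at every
    // DeoptimizeIf() site before the label is bound, still stay in range.
    Label receiver_ok;
    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
    __ j(not_equal, &receiver_ok, dist);
    // ... checks that may call DeoptimizeIf() are emitted here ...
    __ bind(&receiver_ok);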
@@ -3879,7 +3880,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   // |result| are the same register and |input| will be restored
   // unchanged by popping safepoint registers.
   __ test(tmp, Immediate(HeapNumber::kSignMask));
-  __ j(zero, &done);
+  __ j(zero, &done, Label::kNear);
 
   __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
   __ jmp(&allocated, Label::kNear);
@@ -4033,9 +4034,11 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
       ExternalReference::address_of_minus_one_half();
 
   Label done, round_to_zero, below_one_half, do_not_compensate;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+
   __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
   __ ucomisd(xmm_scratch, input_reg);
-  __ j(above, &below_one_half);
+  __ j(above, &below_one_half, Label::kNear);
 
   // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
   __ addsd(xmm_scratch, input_reg);
@@ -4044,12 +4047,12 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   __ cmp(output_reg, 0x80000000u);
   __ RecordComment("D2I conversion overflow");
   DeoptimizeIf(equal, instr->environment());
-  __ jmp(&done);
+  __ jmp(&done, dist);
 
   __ bind(&below_one_half);
   __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
   __ ucomisd(xmm_scratch, input_reg);
-  __ j(below_equal, &round_to_zero);
+  __ j(below_equal, &round_to_zero, Label::kNear);
 
   // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
   // compare and compensate.
@@ -4063,10 +4066,10 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
 
   __ Cvtsi2sd(xmm_scratch, output_reg);
   __ ucomisd(xmm_scratch, input_temp);
-  __ j(equal, &done);
+  __ j(equal, &done, dist);
   __ sub(output_reg, Immediate(1));
   // No overflow because we already ruled out minint.
-  __ jmp(&done);
+  __ jmp(&done, dist);
 
   __ bind(&round_to_zero);
   // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
@@ -4421,13 +4424,13 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
       // look at the first argument
       __ mov(ecx, Operand(esp, 0));
       __ test(ecx, ecx);
-      __ j(zero, &packed_case);
+      __ j(zero, &packed_case, Label::kNear);
 
       ElementsKind holey_kind = GetHoleyElementsKind(kind);
       ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
                                               override_mode);
       CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
-      __ jmp(&done);
+      __ jmp(&done, Label::kNear);
       __ bind(&packed_case);
     }
 
@@ -4724,7 +4727,7 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
     Label have_value;
 
     __ ucomisd(value, value);
-    __ j(parity_odd, &have_value);  // NaN.
+    __ j(parity_odd, &have_value, Label::kNear);  // NaN.
 
     __ movsd(value, Operand::StaticVariable(canonical_nan_reference));
     __ bind(&have_value);
@@ -4760,15 +4763,15 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
       __ fld(0);
       __ FCmp();
 
-      __ j(parity_odd, &no_special_nan_handling);
+      __ j(parity_odd, &no_special_nan_handling, Label::kNear);
       __ sub(esp, Immediate(kDoubleSize));
       __ fst_d(MemOperand(esp, 0));
       __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
              Immediate(kHoleNanUpper32));
       __ add(esp, Immediate(kDoubleSize));
       Label canonicalize;
-      __ j(not_equal, &canonicalize);
-      __ jmp(&no_special_nan_handling);
+      __ j(not_equal, &canonicalize, Label::kNear);
+      __ jmp(&no_special_nan_handling, Label::kNear);
       __ bind(&canonicalize);
       __ fstp(0);
       __ fld_d(Operand::StaticVariable(canonical_nan_reference));
@@ -5748,12 +5751,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   Label success;
   for (int i = 0; i < map_set.size() - 1; i++) {
     Handle<Map> map = map_set.at(i).handle();
-    __ CompareMap(reg, map, &success);
-    __ j(equal, &success);
+    __ CompareMap(reg, map);
+    __ j(equal, &success, Label::kNear);
   }
 
   Handle<Map> map = map_set.at(map_set.size() - 1).handle();
-  __ CompareMap(reg, map, &success);
+  __ CompareMap(reg, map);
   if (instr->hydrogen()->has_migration_target()) {
     __ j(not_equal, deferred->entry());
   } else {
@@ -5831,13 +5834,13 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
   // Check for heap number
   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
          factory()->heap_number_map());
-  __ j(equal, &heap_number, Label::kFar);
+  __ j(equal, &heap_number, Label::kNear);
 
   // Check for undefined. Undefined is converted to zero for clamping
   // conversions.
   __ cmp(input_reg, factory()->undefined_value());
   DeoptimizeIf(not_equal, instr->environment());
-  __ jmp(&zero_result);
+  __ jmp(&zero_result, Label::kNear);
 
   // Heap number
   __ bind(&heap_number);
@@ -5852,15 +5855,15 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
 
   // Test for negative values --> clamp to zero
   __ test(scratch, scratch);
-  __ j(negative, &zero_result);
+  __ j(negative, &zero_result, Label::kNear);
 
   // Get exponent alone in scratch2.
   __ mov(scratch2, scratch);
   __ and_(scratch2, HeapNumber::kExponentMask);
   __ shr(scratch2, HeapNumber::kExponentShift);
-  __ j(zero, &zero_result);
+  __ j(zero, &zero_result, Label::kNear);
   __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
-  __ j(negative, &zero_result);
+  __ j(negative, &zero_result, Label::kNear);
 
   const uint32_t non_int8_exponent = 7;
   __ cmp(scratch2, Immediate(non_int8_exponent + 1));
@@ -5891,18 +5894,18 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
   __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
   __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
   Label no_round;
-  __ j(less, &no_round);
+  __ j(less, &no_round, Label::kNear);
   Label round_up;
   __ mov(scratch2, Immediate(1 << one_half_bit_shift));
-  __ j(greater, &round_up);
+  __ j(greater, &round_up, Label::kNear);
   __ test(scratch3, scratch3);
-  __ j(not_zero, &round_up);
+  __ j(not_zero, &round_up, Label::kNear);
   __ mov(scratch2, scratch);
   __ and_(scratch2, Immediate(1 << one_bit_shift));
   __ shr(scratch2, 1);
   __ bind(&round_up);
   __ add(scratch, scratch2);
-  __ j(overflow, &largest_value);
+  __ j(overflow, &largest_value, Label::kNear);
   __ bind(&no_round);
   __ shr(scratch, 23);
   __ mov(result_reg, scratch);
@@ -5917,7 +5920,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
   // bit is set.
   __ and_(scratch, HeapNumber::kMantissaMask);
   __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
-  __ j(not_zero, &zero_result);  // M!=0 --> NaN
+  __ j(not_zero, &zero_result, Label::kNear);  // M!=0 --> NaN
   // Infinity -> Fall through to map to 255.
 
   __ bind(&largest_value);
@@ -5926,7 +5929,7 @@ void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
 
   __ bind(&zero_result);
   __ xor_(result_reg, result_reg);
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   // smi
   __ bind(&is_smi);
@@ -6074,7 +6077,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
   int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
   Label allocated, runtime_allocate;
   __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
-  __ jmp(&allocated);
+  __ jmp(&allocated, Label::kNear);
 
   __ bind(&runtime_allocate);
   __ push(ebx);
@@ -6400,9 +6403,9 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
   Label load_cache, done;
   __ EnumLength(result, map);
   __ cmp(result, Immediate(Smi::FromInt(0)));
-  __ j(not_equal, &load_cache);
+  __ j(not_equal, &load_cache, Label::kNear);
   __ mov(result, isolate()->factory()->empty_fixed_array());
-  __ jmp(&done);
+  __ jmp(&done, Label::kNear);
 
   __ bind(&load_cache);
   __ LoadInstanceDescriptors(map, result);
@@ -6430,7 +6433,7 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
 
   Label out_of_object, done;
   __ cmp(index, Immediate(0));
-  __ j(less, &out_of_object);
+  __ j(less, &out_of_object, Label::kNear);
   __ mov(object, FieldOperand(object,
                               index,
                               times_half_pointer_size,