Chromium Code Reviews

Unified Diff: src/arm/code-stubs-arm.cc

Issue 11028027: Revert trunk to bleeding_edge at r12484 (Closed) Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Created 8 years, 2 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 3618 matching lines...)
     __ VFPCompareAndSetFlags(double_scratch, double_exponent);
     __ b(eq, &int_exponent_convert);

     if (exponent_type_ == ON_STACK) {
       // Detect square root case. Crankshaft detects constant +/-0.5 at
       // compile time and uses DoMathPowHalf instead. We then skip this check
       // for non-constant cases of +/-0.5 as these hardly occur.
       Label not_plus_half;

       // Test for 0.5.
-      __ vmov(double_scratch, 0.5, scratch);
+      __ vmov(double_scratch, 0.5);
       __ VFPCompareAndSetFlags(double_exponent, double_scratch);
       __ b(ne, &not_plus_half);

       // Calculates square root of base. Check for the special case of
       // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
-      __ vmov(double_scratch, -V8_INFINITY, scratch);
+      __ vmov(double_scratch, -V8_INFINITY);
       __ VFPCompareAndSetFlags(double_base, double_scratch);
       __ vneg(double_result, double_scratch, eq);
       __ b(eq, &done);

       // Add +0 to convert -0 to +0.
       __ vadd(double_scratch, double_base, kDoubleRegZero);
       __ vsqrt(double_result, double_scratch);
       __ jmp(&done);

       __ bind(&not_plus_half);
-      __ vmov(double_scratch, -0.5, scratch);
+      __ vmov(double_scratch, -0.5);
       __ VFPCompareAndSetFlags(double_exponent, double_scratch);
       __ b(ne, &call_runtime);

       // Calculates square root of base. Check for the special case of
       // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
-      __ vmov(double_scratch, -V8_INFINITY, scratch);
+      __ vmov(double_scratch, -V8_INFINITY);
       __ VFPCompareAndSetFlags(double_base, double_scratch);
       __ vmov(double_result, kDoubleRegZero, eq);
       __ b(eq, &done);

       // Add +0 to convert -0 to +0.
       __ vadd(double_scratch, double_base, kDoubleRegZero);
-      __ vmov(double_result, 1.0, scratch);
+      __ vmov(double_result, 1.0);
       __ vsqrt(double_scratch, double_scratch);
       __ vdiv(double_result, double_result, double_scratch);
       __ jmp(&done);
     }

     __ push(lr);
     {
       AllowExternalCallThatCantCauseGC scope(masm);
       __ PrepareCallCFunction(0, 2, scratch);
       __ SetCallCDoubleArguments(double_base, double_exponent);
(...skipping 14 matching lines...)
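The ON_STACK fast paths in the hunk above special-case exponents of exactly +0.5 and -0.5; everything else in this branch falls through to the C runtime call below. A rough scalar C++ equivalent of what those paths compute, per ECMA-262 15.8.2.13; this is an illustrative sketch with a hypothetical helper name, not code from this patch. Adding +0.0 first turns a -0.0 base into +0.0, so the square root cannot come out as -0.0.

#include <cmath>
#include <limits>

double PowHalfSketch(double base, double exponent) {
  const double kInf = std::numeric_limits<double>::infinity();
  if (exponent == 0.5) {
    if (base == -kInf) return kInf;      // vneg of the -Infinity scratch value.
    return std::sqrt(base + 0.0);        // vadd with +0, then vsqrt.
  }
  if (exponent == -0.5) {
    if (base == -kInf) return 0.0;       // result forced to +0.
    return 1.0 / std::sqrt(base + 0.0);  // vsqrt, then vdiv into 1.0.
  }
  return std::pow(base, exponent);       // all other exponents: runtime call.
}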
   __ bind(&int_exponent);

   // Get two copies of exponent in the registers scratch and exponent.
   if (exponent_type_ == INTEGER) {
     __ mov(scratch, exponent);
   } else {
     // Exponent has previously been stored into scratch as untagged integer.
     __ mov(exponent, scratch);
   }
   __ vmov(double_scratch, double_base);  // Back up base.
-  __ vmov(double_result, 1.0, scratch2);
+  __ vmov(double_result, 1.0);

   // Get absolute value of exponent.
   __ cmp(scratch, Operand(0));
   __ mov(scratch2, Operand(0), LeaveCC, mi);
   __ sub(scratch, scratch2, scratch, LeaveCC, mi);

   Label while_true;
   __ bind(&while_true);
   __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
   __ vmul(double_result, double_result, double_scratch, cs);
   __ vmul(double_scratch, double_scratch, double_scratch, ne);
   __ b(ne, &while_true);

   __ cmp(exponent, Operand(0));
   __ b(ge, &done);
-  __ vmov(double_scratch, 1.0, scratch);
+  __ vmov(double_scratch, 1.0);
   __ vdiv(double_result, double_scratch, double_result);
   // Test whether result is zero. Bail out to check for subnormal result.
   // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
   __ VFPCompareAndSetFlags(double_result, 0.0);
   __ b(ne, &done);
   // double_exponent may not containe the exponent value if the input was a
   // smi. We set it with exponent value before bailing out.
   __ vmov(single_scratch, exponent);
   __ vcvt_f64_s32(double_exponent, single_scratch);

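The while_true loop in the integer-exponent path above is square-and-multiply on the absolute value of the exponent, with a single reciprocal at the end for negative exponents; the final zero check exists because, with subnormals, (1/x)^y can underflow to zero where x^-y would not, so the stub bails out instead of returning a wrong result. A hedged scalar sketch of the same computation (IntPowSketch is an illustrative name, not part of V8):

#include <cmath>

double IntPowSketch(double base, int exponent) {
  double result = 1.0;                   // double_result.
  double running = base;                 // double_scratch, squared each step.
  long long bits = exponent;             // scratch: work on |exponent|.
  if (bits < 0) bits = -bits;
  while (bits != 0) {
    if (bits & 1) result *= running;     // vmul ... cs: shifted-out bit was 1.
    running *= running;                  // vmul ... ne: more bits remain.
    bits >>= 1;                          // mov scratch, ASR #1, SetCC.
  }
  if (exponent < 0) {
    result = 1.0 / result;               // vmov 1.0; vdiv.
    if (result == 0.0) {
      // Possible subnormal: the stub bails out to the double/runtime path.
      return std::pow(base, static_cast<double>(exponent));
    }
  }
  return result;
}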
(...skipping 1494 matching lines...)
   // r0: JSArray, tagged.
   // r3: FixedArray, tagged.
   // r5: Number of elements in array, untagged.

   // Set map.
   __ mov(r2, Operand(factory->fixed_array_map()));
   __ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
   // Set FixedArray length.
   __ mov(r6, Operand(r5, LSL, kSmiTagSize));
   __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
-  // Fill contents of fixed-array with undefined.
-  __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+  // Fill contents of fixed-array with the-hole.
+  __ mov(r2, Operand(factory->the_hole_value()));
   __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  // Fill fixed array elements with undefined.
+  // Fill fixed array elements with hole.
   // r0: JSArray, tagged.
-  // r2: undefined.
+  // r2: the hole.
   // r3: Start of elements in FixedArray.
   // r5: Number of elements to fill.
   Label loop;
   __ cmp(r5, Operand(0));
   __ bind(&loop);
   __ b(le, &done);  // Jump if r5 is negative or zero.
   __ sub(r5, r5, Operand(1), SetCC);
   __ str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2));
   __ jmp(&loop);

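The only change in the hunk above is the fill value: the new FixedArray's elements are now initialized with the-hole instead of undefined. The loop itself is a plain count-down store, roughly the following in C++ (illustrative names and types, not V8's real ones):

#include <cstddef>
#include <cstdint>

void FillElementsSketch(std::intptr_t* elements,   // r3 after the add.
                        std::size_t count,         // r5, untagged length.
                        std::intptr_t filler) {    // r2: the-hole word.
  std::size_t i = count;
  while (i > 0) {          // b(le, &done) once the counter reaches zero.
    --i;                   // sub(r5, r5, Operand(1), SetCC).
    elements[i] = filler;  // str(r2, MemOperand(r3, r5, LSL, kPointerSizeLog2)).
  }
}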
(...skipping 2004 matching lines...)
   // StoreArrayLiteralElementStub::Generate
   { REG(r5), REG(r0), REG(r6), EMIT_REMEMBERED_SET },
   // FastNewClosureStub::Generate
   { REG(r2), REG(r4), REG(r1), EMIT_REMEMBERED_SET },
   // Null termination.
   { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
 };

 #undef REG

-
 bool RecordWriteStub::IsPregenerated() {
   for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
        !entry->object.is(no_reg);
        entry++) {
     if (object_.is(entry->object) &&
         value_.is(entry->value) &&
         address_.is(entry->address) &&
         remembered_set_action_ == entry->action &&
         save_fp_regs_mode_ == kDontSaveFPRegs) {
       return true;
(...skipping 21 matching lines...)
     RecordWriteStub stub(entry->object,
                          entry->value,
                          entry->address,
                          entry->action,
                          kDontSaveFPRegs);
     stub.GetCode()->set_is_pregenerated(true);
   }
 }


-bool CodeStub::CanUseFPRegisters() {
-  return CpuFeatures::IsSupported(VFP2);
-}
-
-
 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
 // the value has just been written into the object, now this stub makes sure
 // we keep the GC informed. The word in the object where the value has been
 // written is in the address register.
 void RecordWriteStub::Generate(MacroAssembler* masm) {
   Label skip_to_incremental_noncompacting;
   Label skip_to_incremental_compacting;

   // The first two instructions are generated with labels so as to get the
   // offset fixed up correctly by the bind(Label*) call. We patch it back and
(...skipping 108 matching lines...)
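As the comment above RecordWriteStub::Generate explains, the stub runs right after a pointer has been stored into an object and keeps the GC informed. Conceptually it chooses between two follow-up actions; the sketch below is a loose approximation with made-up types and names, not V8's actual barrier logic:

struct ObjSketch { bool in_new_space; bool is_black; };

struct BarrierActions {
  bool add_to_remembered_set;       // RememberedSetHelper path.
  bool inform_incremental_marker;   // incremental-marker notification path.
};

BarrierActions RecordWriteSketch(const ObjSketch& object,
                                 const ObjSketch& value,
                                 bool incremental_marking_on) {
  BarrierActions actions{};
  // An old-space object now points at a new-space one: the scavenger must be
  // able to find this slot later.
  actions.add_to_remembered_set = !object.in_new_space && value.in_new_space;
  // A black (already scanned) object gained a new outgoing pointer while
  // incremental marking is running, so the marker has to be told about it.
  actions.inform_incremental_marker = incremental_marking_on && object.is_black;
  return actions;
}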


 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
     MacroAssembler* masm,
     OnNoNeedToInformIncrementalMarker on_no_need,
     Mode mode) {
   Label on_black;
   Label need_incremental;
   Label need_incremental_pop_scratch;

-  __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
-  __ ldr(regs_.scratch1(),
-         MemOperand(regs_.scratch0(),
-                    MemoryChunk::kWriteBarrierCounterOffset));
-  __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
-  __ str(regs_.scratch1(),
-         MemOperand(regs_.scratch0(),
-                    MemoryChunk::kWriteBarrierCounterOffset));
-  __ b(mi, &need_incremental);
-
   // Let's look at the color of the object: If it is not black we don't have
   // to inform the incremental marker.
   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

   regs_.Restore(masm);
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
     __ RememberedSetHelper(object_,
                            address_,
                            value_,
                            save_fp_regs_mode_,
(...skipping 100 matching lines...)
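The block removed at the top of CheckNeedsToInformIncrementalMarker (the and_/ldr/sub/str/b(mi) sequence) was a per-page budget: mask the object's address down to its page, decrement the page's write barrier counter, and take the need_incremental path once the budget runs out. Only the JumpIfBlack color check survives the revert. A hedged sketch of the removed heuristic, where the struct layout and the assumed 1 MB page size are illustrative rather than V8's real constants:

#include <cstdint>

struct PageHeaderSketch {
  std::intptr_t write_barrier_counter;  // stands in for the field behind
                                        // MemoryChunk::kWriteBarrierCounterOffset.
};

// Stands in for Page::kPageAlignmentMask; a 1 MB page is assumed here.
constexpr std::uintptr_t kPageAlignmentMaskSketch = (1u << 20) - 1;

bool NeedsIncrementalSketch(std::uintptr_t object_address) {
  auto* page = reinterpret_cast<PageHeaderSketch*>(
      object_address & ~kPageAlignmentMaskSketch);  // and_ with ~mask.
  page->write_barrier_counter -= 1;                 // sub ..., SetCC.
  return page->write_barrier_counter < 0;           // b(mi, &need_incremental).
}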
   // and value is Smi.
   __ bind(&smi_element);
   __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
   __ add(r6, r5, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize));
   __ Ret();

   // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
   __ bind(&double_elements);
   __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
-  __ StoreNumberToDoubleElements(r0, r3, r1,
-                                 // Overwrites all regs after this.
-                                 r5, r6, r7, r9, r2,
+  __ StoreNumberToDoubleElements(r0, r3, r1, r5, r6, r7, r9, r2,
                                  &slow_elements);
   __ Ret();
 }


 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (entry_hook_ != NULL) {
     ProfileEntryHookStub stub;
     __ push(lr);
     __ CallStub(&stub);
(...skipping 48 matching lines...)

   __ Pop(lr, r5, r1);
   __ Ret();
 }

 #undef __

 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM
